Promtail captured all logs, but after pushing to Loki, not all logs show up in Loki
- Grafana version: 9.4.7 on Ubuntu 23.04
- Data source: Loki
- Querying keywords common to each log file via Explore
- Purpose: obtain device status changes by monitoring log files
- Implementation: Promtail scrapes the logs and pushes them to Loki
- Problem: Promtail tailed all the logs, but after pushing, the number of log lines in Loki is too low; many logs are missing (a count-comparison sketch is at the end of this post)
- Configuration files:
Loki 2.8.0:

```yaml
auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096
  # log_level: debug

limits_config:
  reject_old_samples: true
  reject_old_samples_max_age: 24h
  retention_period: 24h
  # max_query_series: 10000
  max_query_parallelism: 2  # Maximum number of queries that will be scheduled in parallel by the frontend.
  max_query_lookback: 24h
  query_timeout: 5m
  enforce_metric_name: false
  ingestion_rate_mb: 40
  ingestion_burst_size_mb: 20
  # max_global_streams_per_user: 0

# frontend:
#   max_outstanding_per_tenant: 1024

# query_scheduler:
#   max_outstanding_requests_per_tenant: 50

common:
  instance_addr: 127.0.0.1
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    kvstore:
      store: inmemory

query_range:
  results_cache:
    cache:
      embedded_cache:
        enabled: true
        max_size_mb: 10000

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

table_manager:
  retention_deletes_enabled: true
  retention_period: 24h

compactor:
  working_directory: /tmp/loki/retention
  shared_store: filesystem
  # compaction_interval: 10m
  retention_enabled: true
  # retention_delete_delay: 10s
  retention_delete_worker_count: 150
  # todo
  compaction_interval: 24h
  retention_delete_delay: 48h

ruler:
  storage:
    type: local
    local:
      directory: /loki/rules
  rule_path: /loki/rules-temp
  alertmanager_url: http://192.168.152.160:9093
  ring:
    kvstore:
      store: inmemory
  enable_api: true
  enable_alertmanager_v2: true
```
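
For reference, this `limits_config` rejects samples older than 24 h (`reject_old_samples` / `reject_old_samples_max_age`), and the retention and lookback settings also cap queryable data at 24 h, which is worth keeping in mind when comparing counts. A quick way to check whether Loki itself is discarding entries is to look at its Prometheus metrics. The sketch below is only an illustration, not part of the original setup: it assumes Loki is reachable on the `http_listen_port` above (3100) and simply filters the metrics output for discard/reject counters (e.g. `loki_discarded_samples_total`), whose exact names may vary by version.

```python
# Minimal sketch (not from the original post): read Loki's Prometheus metrics
# and print any counters that look like discard/reject counters.
# Assumes Loki's HTTP endpoint from the config above (localhost:3100).
from urllib.request import urlopen

LOKI_METRICS_URL = "http://localhost:3100/metrics"

def main() -> None:
    body = urlopen(LOKI_METRICS_URL, timeout=10).read().decode("utf-8")
    for line in body.splitlines():
        # Skip HELP/TYPE comments; keep only lines mentioning discards or rejections.
        if line.startswith("#"):
            continue
        if "discarded" in line or "rejected" in line:
            print(line)

if __name__ == "__main__":
    main()
```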
Promtail 2.43.0:

```yaml
server:
  http_listen_port: 9080
  grpc_listen_port: 0
  log_level: debug

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://localhost:3100/loki/api/v1/push
    # batchsize: 102400
    timeout: 30s

limits_config:
  readline_rate_enabled: true
  max_streams: 1000
  readline_rate_drop: false
  # max_line_size: 2M
  # readline_burst: 1000

scrape_configs:
  - job_name: deviceLog
    static_configs:
      - targets:
          - localhost
        labels:
          job: localserver
          __path__: /home/tjc/log/localserver/002*/*.log
      - targets:
          - localhost
        labels:
          job: bob
          __path__: /home/tjc/log/bob/002*/*.log
      - targets:
          - localhost
        labels:
          job: media
          __path__: /home/tjc/log/media/002*/*.log
```
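
With `readline_rate_enabled: true` and `readline_rate_drop: false`, Promtail should throttle reading rather than drop lines when the rate limit is hit, but it is still worth comparing Promtail's own counters for lines read, entries sent, and entries dropped. The sketch below is a hedged example, not part of the original setup: it assumes the `http_listen_port: 9080` above and the usual 2.x counter names (`promtail_read_lines_total`, `promtail_sent_entries_total`, `promtail_dropped_entries_total`), which may differ between releases.

```python
# Minimal sketch (not from the original post): compare Promtail's counters for
# lines read from disk vs. entries sent to Loki vs. entries dropped.
# Assumes Promtail's HTTP port 9080 from the config above; the metric names
# may differ between Promtail releases.
from urllib.request import urlopen

PROMTAIL_METRICS_URL = "http://localhost:9080/metrics"
INTERESTING = ("promtail_read_lines_total",
               "promtail_sent_entries_total",
               "promtail_dropped_entries_total")

def main() -> None:
    body = urlopen(PROMTAIL_METRICS_URL, timeout=10).read().decode("utf-8")
    for line in body.splitlines():
        if line.startswith("#"):
            continue
        if line.startswith(INTERESTING):
            print(line)

if __name__ == "__main__":
    main()
```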
Promtail log:

```
level=info ts=2023-06-08T08:24:08.553160574Z caller=filetargetmanager.go:178 msg="received file watcher event" name=/home/tjc/log/bob/002271/997rx.log op=CREATE
level=debug ts=2023-06-08T08:24:08.553215833Z caller=filetargetmanager.go:432 msg="new file does not match glob" filename=/home/tjc/log/bob/002271/997rx.log
level=debug ts=2023-06-08T08:24:08.553232104Z caller=filetargetmanager.go:432 msg="new file does not match glob" filename=/home/tjc/log/bob/002271/997rx.log
level=debug ts=2023-06-08T08:24:08.553264148Z caller=filetarget.go:330 msg="tailing new file" filename=/home/tjc/log/bob/002271/997rx.log
ts=2023-06-08T08:24:08.553401481Z caller=log.go:168 level=info msg="Seeked /home/tjc/log/bob/002271/997rx.log - &{Offset:0 Whence:0}"
level=info ts=2023-06-08T08:24:08.553434201Z caller=tailer.go:143 component=tailer msg="tail routine: started" path=/home/tjc/log/bob/002271/997rx.log
```
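
The debug lines above show two targets reporting "new file does not match glob" and one target tailing the file, which is consistent with the three `__path__` patterns in the scrape config (only the `bob` glob covers this path). To cross-check which files on disk the patterns actually cover, and how many lines they contain, a hypothetical helper like the following (not from the post) can be used:

```python
# Minimal sketch (hypothetical, not from the post): expand the same __path__
# globs that Promtail is given and count lines per matched file, so the totals
# can be compared against what shows up in Loki. Patterns come from the config above.
import glob

PATTERNS = [
    "/home/tjc/log/localserver/002*/*.log",
    "/home/tjc/log/bob/002*/*.log",
    "/home/tjc/log/media/002*/*.log",
]

def main() -> None:
    total = 0
    for pattern in PATTERNS:
        for path in sorted(glob.glob(pattern)):
            with open(path, "rb") as f:
                lines = sum(1 for _ in f)
            total += lines
            print(f"{path}: {lines} lines")
    print(f"total lines on disk: {total}")

if __name__ == "__main__":
    main()
```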
- There is no error message in the log.
- The configuration follows the official documentation.
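
Since Promtail reports no errors, one way to narrow down where lines go missing is to compare the on-disk line counts with what Loki actually returns for the same period. The sketch below is an assumption-based example rather than a verified fix: it queries Loki's instant-query endpoint (`/loki/api/v1/query`, on port 3100 from the config above) with a `count_over_time` expression over the last 24 h for each `job` label.

```python
# Minimal sketch (assumption-based, not from the post): ask Loki how many log
# lines it has per job over the last 24 h, for comparison with the per-file
# line counts measured on disk. Assumes Loki at localhost:3100.
import json
from urllib.parse import urlencode
from urllib.request import urlopen

LOKI_URL = "http://localhost:3100/loki/api/v1/query"
JOBS = ["localserver", "bob", "media"]

def count_lines(job: str) -> float:
    query = f'sum(count_over_time({{job="{job}"}}[24h]))'
    with urlopen(f"{LOKI_URL}?{urlencode({'query': query})}", timeout=30) as resp:
        data = json.load(resp)
    results = data["data"]["result"]
    # An empty vector means Loki has no matching streams for this job.
    return float(results[0]["value"][1]) if results else 0.0

if __name__ == "__main__":
    for job in JOBS:
        print(f'job="{job}": {count_lines(job):.0f} lines in the last 24h')
```

If Promtail's sent counter matches the on-disk counts but Loki's numbers are lower, the gap would point at ingestion-side rejection, for example the 24 h `reject_old_samples_max_age` when tailing files whose entries carry older timestamps.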