My Loki logs contain a large number of the errors shown below: repeated querier/scheduler "queue is stopped" messages, "context canceled" errors from in-flight queries, and "invalid chunk checksum" errors.
level=error ts=2025-08-22T03:00:55.634544899Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.634518378Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.634581171Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.634620185Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.634914375Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.634922272Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T03:00:55.63494059Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Unknown desc = queue is stopped" addr=172.26.107.124:3110
level=error ts=2025-08-22T17:24:34.276695534Z caller=errors.go:26 org_id=fake traceID=669e1573509a9c13 message="closing iterator" error="context canceled"
level=error ts=2025-08-22T17:26:25.093933892Z caller=errors.go:26 org_id=fake traceID=67a05d0f1b488877 message="closing iterator" error="context canceled"
level=error ts=2025-08-22T17:26:25.71324661Z caller=errors.go:26 org_id=fake traceID=6c2e9c821899ab2e message="closing iterator" error="context canceled"
level=error ts=2025-08-22T17:26:25.77658427Z caller=errors.go:26 org_id=fake traceID=4a3a1fe21030526a message="closing iterator" error="context canceled"
level=error ts=2025-08-22T17:29:04.441102967Z caller=errors.go:26 org_id=fake traceID=3f22f4c5563da5cf message="closing iterator" error="context canceled"
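Each of these lines carries a traceID, so I can pull every line belonging to one canceled query with a plain LogQL line filter. This is only a sketch: it assumes Loki's own logs are ingested back into Loki, and {job="loki"} is a hypothetical selector standing in for whatever labels that stream actually has:

{job="loki"} |= "669e1573509a9c13"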
level=error ts=2025-08-23T16:56:16.594962213Z caller=memchunk.go:489 msg="invalid block found" err="invalid chunk checksum"
level=error ts=2025-08-23T16:56:16.594969729Z caller=memchunk.go:492 msg="block offset does not match expected one, will try reading with expected offset" actual=599173 expected=599193
level=error ts=2025-08-23T16:56:16.5949911Z caller=memchunk.go:489 msg="invalid block found" err="invalid chunk checksum"
level=error ts=2025-08-23T16:56:16.59500209Z caller=memchunk.go:492 msg="block offset does not match expected one, will try reading with expected offset" actual=619220 expected=619240
level=error ts=2025-08-23T16:56:16.595020824Z caller=memchunk.go:489 msg="invalid block found" err="invalid chunk checksum"
level=error ts=2025-08-23T16:56:16.595028085Z caller=memchunk.go:492 msg="block offset does not match expected one, will try reading with expected offset" actual=639354 expected=639374
level=error ts=2025-08-23T16:56:16.595046209Z caller=memchunk.go:489 msg="invalid block found" err="invalid chunk checksum"
level=error ts=2025-08-23T16:56:16.595057837Z caller=memchunk.go:492 msg="block offset does not match expected one, will try reading with expected offset" actual=659282 expected=659302
Note that in every one of these offset mismatches the actual offset is exactly 20 bytes behind the expected one.
level=error ts=2025-08-22T08:23:48.170975831Z caller=errors.go:26 org_id=fake traceID=6ed841603b9473f2 message="closing iterator" error="5 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; context canceled"
level=error ts=2025-08-22T08:23:48.171132221Z caller=scheduler_processor.go:175 component=querier org_id=fake traceID=6ed841603b9473f2 msg="error notifying scheduler about finished query" err=EOF addr=172.26.107.125:3110
level=error ts=2025-08-22T08:23:48.171186187Z caller=scheduler_processor.go:175 component=querier org_id=fake traceID=6ed841603b9473f2 msg="error notifying scheduler about finished query" err=EOF addr=172.26.107.124:3110
level=error ts=2025-08-22T08:23:48.171604449Z caller=parallel_chunk_fetch.go:71 msg="error fetching chunks" err="context canceled"
ts=2025-08-22T08:23:48.171755048Z caller=spanlogger.go:111 user=fake caller=log.go:168 level=error msg="failed downloading chunks" err="context canceled"
level=error ts=2025-08-22T08:23:48.171839055Z caller=errors.go:26 org_id=fake traceID=6ed841603b9473f2 message="closing iterator" error="context canceled"
level=error ts=2025-08-22T08:23:48.176005344Z caller=parallel_chunk_fetch.go:71 msg="error fetching chunks" err="context canceled"
ts=2025-08-22T08:23:48.176088511Z caller=spanlogger.go:111 user=fake caller=log.go:168 level=error msg="failed downloading chunks" err="context canceled"
level=error ts=2025-08-22T08:23:48.176142302Z caller=errors.go:26 org_id=fake traceID=6ed841603b9473f2 message="closing iterator" error="context canceled"
level=error ts=2025-08-22T08:51:23.375433862Z caller=scheduler_processor.go:111 component=querier msg="error processing requests from scheduler" err="rpc error: code = Canceled desc = context canceled" addr=172.26.107.125:3110
level=error ts=2025-08-22T08:51:23.375939196Z caller=errors.go:26 org_id=fake traceID=17f22ba7cfb5af57 message="closing iterator" error="5 errors: rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; rpc error: code = Canceled desc = context canceled; context canceled"
level=error ts=2025-08-22T08:51:23.376182893Z caller=scheduler_processor.go:175 component=querier org_id=fake traceID=17f22ba7cfb5af57 msg="error notifying scheduler about finished query" err=EOF addr=172.26.107.125:3110
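To show how frequent these are, I count error lines per caller with a LogQL sketch like the following (same hypothetical {job="loki"} selector as above):

sum by (caller) (
  count_over_time({job="loki"} | logfmt | level = "error" [1h])
)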
My Loki version is:
loki, version 3.2.0 (branch: k218, revision: 659f5421)
build user: root@003ce357cdf4
build date: 2024-09-18T16:21:52Z
go version: go1.22.6
platform: linux/amd64
tags: netgo
My complete Loki configuration is:
auth_enabled: false
memberlist:
  bind_port: 7946
  join_members:
    - <IP1>:7946
    - <IP2>:7946
    - <IP3>:7946
    - <IP4>:7946
server:
  http_listen_port: 3100
  http_tls_config:
    cert_file: /export/server/loki/ssl/loki.crt
    key_file: /export/server/loki/ssl/loki.key
    client_ca_file: /export/server/loki/ssl/ca.crt
    client_auth_type: RequireAndVerifyClientCert
    tls_min_version: VersionTLS12
    tls_cipher_suites: TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
  grpc_listen_port: 3110
  grpc_server_max_recv_msg_size: 109951162777600 # 100T (100 * 1024^4 bytes)
  grpc_server_max_send_msg_size: 109951162777600 # 100T (100 * 1024^4 bytes)
  log_level: error # info debug error
common:
  replication_factor: 1
  ring:
    replication_factor: 1
    heartbeat_period: 2s
    heartbeat_timeout: 5s
    kvstore:
      store: memberlist
  path_prefix: /export/loki-data
ingester:
  chunk_retain_period: 30m
  flush_check_period: 1m
  max_chunk_age: 2h
  chunk_idle_period: 1h
  chunk_target_size: 10485760
  sync_min_utilization: 1
  autoforget_unhealthy: true
  wal:
    enabled: true
  lifecycler:
    final_sleep: 10s
    min_ready_duration: 3s
    ring:
      replication_factor: 1
schema_config:
  configs:
    - from: 2020-05-15
      store: tsdb
      object_store: s3
      schema: v13
      index:
        prefix: loki_
        period: 24h
storage_config:
  tsdb_shipper:
    resync_interval: 1m
  aws:
    s3: s3://*****:*****@*****/loki
querier:
  max_concurrent: 30
query_range:
  max_retries: 2
frontend_worker:
  query_frontend_grpc_client:
    max_recv_msg_size: 107374182400
    max_send_msg_size: 107374182400
  query_scheduler_grpc_client:
    max_recv_msg_size: 107374182400
    max_send_msg_size: 107374182400
compactor:
  compaction_interval: 60m
  retention_delete_delay: 10m
  retention_delete_worker_count: 150
  retention_enabled: true
  delete_request_store: aws
analytics:
  reporting_enabled: false
limits_config:
  max_global_streams_per_user: 200000
  max_streams_per_user: 200000
  ingestion_rate_strategy: global
  ingestion_rate_mb: 1024
  ingestion_burst_size_mb: 1024
  reject_old_samples: false
  max_query_length: 100d
  max_query_parallelism: 500
  max_entries_limit_per_query: 0
  max_concurrent_tail_requests: 200
  increment_duplicate_timestamp: true
  max_line_size: 10240KB
  tsdb_max_query_parallelism: 1280
  split_queries_by_interval: 24h
  query_timeout: 60m
  split_metadata_queries_by_interval: 10m
  per_stream_rate_limit: 1G
  per_stream_rate_limit_burst: 1G
  max_query_series: 5000
  shard_streams:
    enabled: false
  retention_period: 30d
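For now I can hide the noise at query time with LogQL line filters, for example (hypothetical {job="loki"} selector again):

{job="loki"} |= "level=error" != "context canceled" != "queue is stopped"

but that only masks the lines instead of stopping them from being produced.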
Although these errors do not seem to affect normal usage, they are logged far too frequently. How can I resolve this?