Non-responsive queries on fresh install

I have a clean install of Loki and can’t get labels or query any logs. Do I need to scale differently? Do I need something different in my config? My goal is to consolidate the logs from all the containers in my small swarm so I can inspect them in a central location (and preferably set up alerting on top of that).
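(For context on the alerting goal: once queries work, the plan is to feed the ruler something along these lines — a hypothetical rule I haven't deployed yet, names made up, which I understand would go under /loki/rules/fake/ since auth is disabled:)

groups:
  - name: swarm-alerts
    rules:
      - alert: HighErrorRate
        expr: 'sum by (swarm_service) (count_over_time({swarm_service=~".+"} | json | status = `500` [5m])) > 10'
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Elevated rate of 500s from {{ $labels.swarm_service }}"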

I tried:

docker run -it --rm -e LOKI_ADDR=http://ip.here:3100 grafana/logcli:2.9.1-amd64 labels jobs

and it has been 40 minutes so far with no response. Loki had been ingesting for a day or so and the chunks folder had grown to ~2G, but when I tried querying it today in Grafana the query timed out, so I wiped the data directories and started over. I basically did:

rm -R /nvme/path/to/grafana/loki/*/*
chown -R 10001:10001 /nvme/path/to/grafana/loki/*
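(Around that I also stopped and redeployed the stack — roughly the commands below, where the stack and file names are just placeholders; the 10001:10001 chown is there because, as far as I know, that's the uid/gid the official grafana/loki image runs as:)

docker stack rm logstack                        # stop loki + promtail first (stack name is a placeholder)
# ... rm / chown as above ...
docker stack deploy -c loki-stack.yml logstack  # redeploy (file name is a placeholder)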

The container has now been running for 31 minutes and the data directories have these sizes:

5.7M    boltdb-shipper-active
1.1M    boltdb-shipper-cache
16K     boltdb-shipper-compactor
8.0M    chunks
0       compactor
0       rules
8.1M    wal
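(If it helps, I can also hit Loki's built-in HTTP endpoints to confirm it thinks it's ready and to look at the rings — I believe these all exist on port 3100:)

curl http://ip.here:3100/ready                 # should return "ready" once the ingester has joined the ring
curl http://ip.here:3100/ring                  # status page for the ingester ring
curl -s http://ip.here:3100/config | head -40  # the config Loki is actually running with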

config:

auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  path_prefix: /loki
  storage:
    filesystem:
      chunks_directory: /loki/chunks
      rules_directory: /loki/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory

ingester:
  chunk_idle_period: 3m       # Any chunk not receiving new logs in this time will be flushed
  max_chunk_age: 3m           # All chunks will be flushed when they hit this age, default is 1h
  chunk_target_size: 1048576  # Loki will attempt to build chunks up to ~1MB, flushing sooner if chunk_idle_period or max_chunk_age is reached first
  chunk_retain_period: 30s    # Must be greater than the index read cache TTL if using an index cache
  max_transfer_retries: 0     # Chunk transfers disabled

compactor:
  working_directory: /loki/boltdb-shipper-compactor
  shared_store: filesystem
  retention_enabled: true
  delete_request_cancel_period: 1m #(default = 24h)
  deletion_mode: filter-and-delete

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

limits_config:
  retention_period: 744h
  enforce_metric_name: false
  reject_old_samples: false
  ingestion_rate_mb: 16         # default = 4
  ingestion_burst_size_mb: 32   # default = 6
  per_stream_rate_limit: 16MB   # default = 3MB
  max_global_streams_per_user: 0
  max_query_length: 0h # Default: 721h
  max_query_parallelism: 32 # Old Default: 14
  max_streams_per_user: 0 # old default = 10000
  max_entries_limit_per_query: 7000 # default = 5000
  allow_deletes: true

# https://community.grafana.com/t/grafana-loki-data-lookup-performance-issue-something-is-slow/67553
query_range:
  split_queries_by_interval: 0
  parallelise_shardable_queries: false

querier:
  max_concurrent: 2048

frontend:
  max_outstanding_per_tenant: 4096
  compress_responses: true


ruler:
  alertmanager_url: http://localhost:9093

deployment:

version: '3.4'
networks:
  backend:
    driver: overlay
    external: true
services:
  loki:
    image: grafana/loki
    networks:
      - backend
    logging:
      driver: json-file
    command: -config.file=/etc/loki/local-config.yaml
    ports:
      - target: 3100
        published: 3100
        mode: ingress
    deploy:
      replicas: 1
      placement:
        constraints:
          - "node.role==worker"
      labels:
        - "traefik.enable=true"
        - "traefik.docker.network=backend"
        - "traefik.http.services.loki.loadbalancer.server.port=3100"
        - "traefik.http.routers.loki.rule=Host(`host.domain`)"
        - "traefik.http.routers.loki.tls=true"
        - "traefik.http.routers.loki.entrypoints=websecure"
    volumes:
      - "/hdd/path/to/grafana/loki-config.yaml:/etc/loki/local-config.yaml"
      - "/nvme/path/to/grafana/loki:/loki"

  promtail:
    image: grafana/promtail
    networks:
      - backend
    deploy:
      mode: global
      placement:
        constraints:
          - "node.role==worker"
    volumes:
      - "/hdd/path/to/grafana/promtail-config.yaml:/etc/promtail/config.yaml"
      - "/var/log:/var/log:ro"
      - "/var/lib/docker/containers:/var/lib/docker/containers:ro"

log:

level=warn ts=2023-10-10T19:46:55.131483381Z caller=limits.go:300 msg="The compactor.allow-deletes configuration option has been deprecated and will be ignored. Instead, use deletion_mode in the limits_configs to adjust deletion functionality"
level=warn ts=2023-10-10T19:46:55.131599183Z caller=compactor.go:137 msg="boltdb.shipper.compactor.deletion-mode has been deprecated and will be ignored. This has been moved to the deletion_mode per tenant configuration."
level=warn ts=2023-10-10T19:46:55.131754486Z caller=loki.go:286 msg="per-tenant timeout not configured, using default engine timeout (\"5m0s\"). This behavior will change in the next major to always use the default per-tenant timeout (\"5m\")."
level=info ts=2023-10-10T19:46:55.134761227Z caller=main.go:108 msg="Starting Loki" version="(version=2.8.5, branch=HEAD, revision=03cd6c82b)"
level=info ts=2023-10-10T19:46:55.135749538Z caller=server.go:323 http=[::]:3100 grpc=[::]:9096 msg="server listening on addresses"
level=warn ts=2023-10-10T19:46:56.965742367Z caller=cache.go:114 msg="fifocache config is deprecated. use embedded-cache instead"
level=warn ts=2023-10-10T19:46:56.965848529Z caller=experimental.go:20 msg="experimental feature in use" feature="In-memory (FIFO) cache - chunksembedded-cache"
level=info ts=2023-10-10T19:46:56.966862779Z caller=table_manager.go:134 msg="uploading tables"
level=info ts=2023-10-10T19:46:56.968487102Z caller=table_manager.go:262 msg="query readiness setup completed" duration=4.35µs distinct_users_len=0
level=info ts=2023-10-10T19:46:56.968548433Z caller=shipper.go:131 msg="starting index shipper in RW mode"
level=info ts=2023-10-10T19:46:57.551426498Z caller=shipper_index_client.go:78 msg="starting boltdb shipper in RW mode"
level=info ts=2023-10-10T19:46:57.55151716Z caller=table_manager.go:166 msg="handing over indexes to shipper"
level=info ts=2023-10-10T19:46:57.556896608Z caller=worker.go:112 msg="Starting querier worker using query-scheduler and scheduler ring for addresses"
level=info ts=2023-10-10T19:46:57.557577822Z caller=mapper.go:47 msg="cleaning up mapped rules directory" path=/loki/rules-temp
level=info ts=2023-10-10T19:46:58.021722723Z caller=module_service.go:82 msg=initialising module=cache-generation-loader
level=info ts=2023-10-10T19:46:58.021813215Z caller=module_service.go:82 msg=initialising module=server
level=info ts=2023-10-10T19:46:58.021928178Z caller=module_service.go:82 msg=initialising module=memberlist-kv
level=info ts=2023-10-10T19:46:58.021964198Z caller=module_service.go:82 msg=initialising module=query-frontend-tripperware
level=info ts=2023-10-10T19:46:58.022002199Z caller=module_service.go:82 msg=initialising module=ring
level=info ts=2023-10-10T19:46:58.02206284Z caller=module_service.go:82 msg=initialising module=store
level=info ts=2023-10-10T19:46:58.022182153Z caller=ring.go:263 msg="ring doesn't exist in KV store yet"
level=info ts=2023-10-10T19:46:58.022290025Z caller=client.go:255 msg="value is nil" key=collectors/ring index=1
level=info ts=2023-10-10T19:46:58.022357446Z caller=module_service.go:82 msg=initialising module=query-scheduler-ring
level=info ts=2023-10-10T19:46:58.022474689Z caller=ring.go:263 msg="ring doesn't exist in KV store yet"
level=info ts=2023-10-10T19:46:58.02252073Z caller=client.go:255 msg="value is nil" key=collectors/scheduler index=1
level=info ts=2023-10-10T19:46:58.02254685Z caller=module_service.go:82 msg=initialising module=ingester-querier
level=info ts=2023-10-10T19:46:58.022634932Z caller=module_service.go:82 msg=initialising module=usage-report
level=info ts=2023-10-10T19:46:58.022829936Z caller=basic_lifecycler.go:261 msg="instance not found in the ring" instance=27c7ff40b595 ring=scheduler
level=info ts=2023-10-10T19:46:58.022890497Z caller=basic_lifecycler_delegates.go:63 msg="not loading tokens from file, tokens file path is empty"
level=info ts=2023-10-10T19:46:58.023180703Z caller=module_service.go:82 msg=initialising module=compactor
level=info ts=2023-10-10T19:46:58.023203043Z caller=module_service.go:82 msg=initialising module=ruler
level=info ts=2023-10-10T19:46:58.023246094Z caller=ring.go:263 msg="ring doesn't exist in KV store yet"
level=info ts=2023-10-10T19:46:58.023249584Z caller=ruler.go:499 msg="ruler up and running"
level=info ts=2023-10-10T19:46:58.023308005Z caller=client.go:255 msg="value is nil" key=collectors/compactor index=3
level=info ts=2023-10-10T19:46:58.023361196Z caller=basic_lifecycler.go:261 msg="instance not found in the ring" instance=27c7ff40b595 ring=compactor
level=info ts=2023-10-10T19:46:58.023361296Z caller=module_service.go:82 msg=initialising module=distributor
level=info ts=2023-10-10T19:46:58.023369376Z caller=basic_lifecycler_delegates.go:63 msg="not loading tokens from file, tokens file path is empty"
level=info ts=2023-10-10T19:46:58.023437418Z caller=module_service.go:82 msg=initialising module=ingester
level=info ts=2023-10-10T19:46:58.023452678Z caller=ringmanager.go:201 msg="waiting until scheduler is JOINING in the ring"
level=info ts=2023-10-10T19:46:58.023470948Z caller=ringmanager.go:205 msg="scheduler is JOINING in the ring"
level=info ts=2023-10-10T19:46:58.023491899Z caller=ingester.go:416 msg="recovering from checkpoint"
level=info ts=2023-10-10T19:46:58.023513589Z caller=ringmanager.go:214 msg="waiting until scheduler is ACTIVE in the ring"
level=info ts=2023-10-10T19:46:58.023582391Z caller=lifecycler.go:547 msg="not loading tokens from file, tokens file path is empty"
level=info ts=2023-10-10T19:46:58.023594911Z caller=compactor.go:332 msg="waiting until compactor is JOINING in the ring"
level=info ts=2023-10-10T19:46:58.023604911Z caller=compactor.go:336 msg="compactor is JOINING in the ring"
level=info ts=2023-10-10T19:46:58.023643672Z caller=lifecycler.go:576 msg="instance not found in ring, adding with no tokens" ring=distributor
level=info ts=2023-10-10T19:46:58.023685483Z caller=compactor.go:346 msg="waiting until compactor is ACTIVE in the ring"
level=warn ts=2023-10-10T19:46:58.023670942Z caller=logging.go:86 traceID=36561c5e213c3874 orgID=fake msg="POST /loki/api/v1/push (500) 647.633µs Response: \"empty ring\\n\" ws: false; Content-Length: 487; Content-Type: application/x-protobuf; User-Agent: promtail/main-a8d5815; "
level=info ts=2023-10-10T19:46:58.023766554Z caller=lifecycler.go:416 msg="auto-joining cluster after timeout" ring=distributor
level=info ts=2023-10-10T19:46:58.024009059Z caller=recovery.go:40 msg="no checkpoint found, treating as no-op"
level=info ts=2023-10-10T19:46:58.024078951Z caller=ingester.go:432 msg="recovered WAL checkpoint recovery finished" elapsed=618.532µs errors=false
level=info ts=2023-10-10T19:46:58.024094621Z caller=ingester.go:438 msg="recovering from WAL"
level=warn ts=2023-10-10T19:46:58.024155483Z caller=logging.go:86 traceID=361ee3849c35ff1a orgID=fake msg="POST /loki/api/v1/push (500) 271.266µs Response: \"empty ring\\n\" ws: false; Content-Length: 499; Content-Type: application/x-protobuf; User-Agent: promtail/main-a8d5815; "
level=warn ts=2023-10-10T19:46:58.024188934Z caller=logging.go:86 traceID=3ef9ca1e198ddd9b orgID=fake msg="POST /loki/api/v1/push (500) 242.406µs Response: \"empty ring\\n\" ws: false; Content-Length: 592; Content-Type: application/x-protobuf; User-Agent: promtail/main-a8d5815; "
level=info ts=2023-10-10T19:46:58.024272375Z caller=ingester.go:454 msg="WAL segment recovery finished" elapsed=811.697µs errors=false
level=info ts=2023-10-10T19:46:58.024292186Z caller=ingester.go:402 msg="closing recoverer"
level=info ts=2023-10-10T19:46:58.024302076Z caller=ingester.go:410 msg="WAL recovery finished" time=841.648µs
level=info ts=2023-10-10T19:46:58.024349567Z caller=wal.go:156 msg=started component=wal
level=info ts=2023-10-10T19:46:58.024378577Z caller=lifecycler.go:547 msg="not loading tokens from file, tokens file path is empty"
level=info ts=2023-10-10T19:46:58.024426048Z caller=lifecycler.go:576 msg="instance not found in ring, adding with no tokens" ring=ingester
level=info ts=2023-10-10T19:46:58.02450742Z caller=lifecycler.go:416 msg="auto-joining cluster after timeout" ring=ingester
level=info ts=2023-10-10T19:46:58.125045784Z caller=compactor.go:350 msg="compactor is ACTIVE in the ring"
level=info ts=2023-10-10T19:46:58.168656967Z caller=ringmanager.go:218 msg="scheduler is ACTIVE in the ring"
level=info ts=2023-10-10T19:46:58.168772119Z caller=module_service.go:82 msg=initialising module=query-scheduler
level=info ts=2023-10-10T19:46:58.16884477Z caller=module_service.go:82 msg=initialising module=querier
level=info ts=2023-10-10T19:46:58.168970343Z caller=module_service.go:82 msg=initialising module=query-frontend
level=info ts=2023-10-10T19:46:58.169485523Z caller=loki.go:500 msg="Loki started"
level=info ts=2023-10-10T19:47:01.169816982Z caller=scheduler.go:599 msg="this scheduler is in the ReplicationSet, will now accept requests."
level=info ts=2023-10-10T19:47:01.169840393Z caller=worker.go:209 msg="adding connection" addr=127.0.0.1:9096
level=info ts=2023-10-10T19:47:03.126135666Z caller=compactor.go:411 msg="this instance has been chosen to run the compactor, starting compactor"
level=info ts=2023-10-10T19:47:03.126236048Z caller=compactor.go:440 msg="waiting 10m0s for ring to stay stable and previous compactions to finish before starting compactor"
level=info ts=2023-10-10T19:47:08.170229497Z caller=frontend_scheduler_worker.go:107 msg="adding connection to scheduler" addr=127.0.0.1:9096
level=info ts=2023-10-10T19:47:28.024655068Z caller=flush.go:168 msg="flushing stream" user=fake fp=a32e69a85e735212 immediate=false num_chunks=1 labels="{container_name=\"name_here.1.xiy7xlf8fp5tx7aqkomxtqxwq\", filename=\"/var/log/docker/id/json.log\", host=\"swarm1\", source=\"stdout\", swarm_service=\"name_here\", swarm_stack=\"name\"}"
===== more lines like that for other services =====
level=info ts=2023-10-10T19:47:56.967511893Z caller=table_manager.go:134 msg="uploading tables"
level=info ts=2023-10-10T19:47:57.552135392Z caller=table_manager.go:166 msg="handing over indexes to shipper"
level=info ts=2023-10-10T19:47:57.552267244Z caller=table.go:318 msg="handing over indexes to shipper index_19640"
level=info ts=2023-10-10T19:47:57.552282085Z caller=table.go:334 msg="finished handing over table index_19640"
level=info ts=2023-10-10T19:48:39.845217153Z caller=roundtrip.go:157 org_id=fake msg="executing query" type=range query="{swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg != `access`" length=6h0m0s step=6h0m0s query_hash=2987847713
level=info ts=2023-10-10T19:48:39.847467769Z caller=engine.go:220 component=querier org_id=fake msg="executing query" type=range query="{swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg != `access`" length=30m0s step=6h0m0s query_hash=2987847713
level=info ts=2023-10-10T19:48:39.850274436Z caller=table_manager.go:190 msg="downloading all files for table index_19640"
ts=2023-10-10T19:48:39.85295709Z caller=spanlogger.go:85 table-name=index_19640 user-id=fake org_id=fake level=info msg="downloaded index set at query time" duration=705.034µs
level=info ts=2023-10-10T19:48:39.868278779Z caller=roundtrip.go:157 org_id=fake msg="executing query" type=range query="{swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg!=`access`" length=6h0m0s step=10s query_hash=693146181
ts=2023-10-10T19:48:40.8236893Z caller=spanlogger.go:85 table-name=index_19640 org_id=fake level=info msg="downloaded index set at query time" duration=970.647759ms
level=info ts=2023-10-10T19:48:40.824242072Z caller=metrics.go:152 component=querier org_id=fake latency=fast query="{swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg != `access`" query_hash=2987847713 query_type=filter range_type=range length=11m20.702s start_delta=6h0m1.526235972s end_delta=5h48m40.824236132s step=6h0m0s duration=974.370704ms status=200 limit=10 returned_lines=0 throughput=0B total_bytes=0B lines_per_second=0 total_lines=0 total_entries=0 store_chunks_download_time=0s queue_time=65.441µs splits=0 shards=0 cache_chunk_req=0 cache_chunk_hit=0 cache_chunk_bytes_stored=0 cache_chunk_bytes_fetched=0 cache_chunk_download_time=0s cache_index_req=0 cache_index_hit=0 cache_index_download_time=0s cache_result_req=0 cache_result_hit=0 cache_result_download_time=0s
===== this line repeated *several* times =====
level=info ts=2023-10-10T19:48:41.425582629Z caller=roundtrip.go:157 org_id=fake msg="executing query" type=range query="sum by (level) (count_over_time({swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg!=`access`[1m]))" length=6h0m0s step=1m0s query_hash=160744884
level=info ts=2023-10-10T19:48:41.426767913Z caller=engine.go:220 component=querier org_id=fake msg="executing query" type=range query="sum by (level) (count_over_time({swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg!=`access`[1m]))" length=19m0s step=1m0s query_hash=160744884
==== line repeated *several* times =====
level=warn ts=2023-10-10T19:48:44.170413752Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:48:44.170582796Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:48:44.170712258Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
===== these 2 lines repeated *several* times =====
level=info ts=2023-10-10T19:48:56.967093847Z caller=table_manager.go:134 msg="uploading tables"
level=info ts=2023-10-10T19:48:57.551858168Z caller=table_manager.go:166 msg="handing over indexes to shipper"
level=info ts=2023-10-10T19:48:57.551935069Z caller=table.go:318 msg="handing over indexes to shipper index_19640"
level=info ts=2023-10-10T19:48:57.551945489Z caller=table.go:334 msg="finished handing over table index_19640"
level=warn ts=2023-10-10T19:48:59.16972816Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:48:59.169853703Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:48:59.169914114Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
===== these 2 lines repeated *several* times =====
level=info ts=2023-10-10T19:49:06.673463878Z caller=roundtrip.go:195 org_id=fake msg="executing query" type=labels label=jobs length=1h0m0s query=
ts=2023-10-10T19:49:06.67551973Z caller=spanlogger.go:85 user=fake level=info org_id=fake latency=fast query_type=labels length=1h0m0s duration=1.161453ms status=200 label=jobs query= splits=0 throughput=0B total_bytes=0B total_entries=0
level=warn ts=2023-10-10T19:49:09.170437767Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:49:09.170606651Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:09.170708733Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:09.844054135Z caller=retry.go:73 org_id=fake msg="error processing request" try=0 query="{swarm_service=\"name_here\"} | json | __error__=`` | status = `500` | msg != `access`" err="context canceled"
level=warn ts=2023-10-10T19:49:14.16952275Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:49:14.169721714Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:14.169814706Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=warn ts=2023-10-10T19:49:19.169538706Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:49:19.169703259Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:19.169808501Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=warn ts=2023-10-10T19:49:24.169843809Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:49:24.170033523Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:24.170131405Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=info ts=2023-10-10T19:49:28.024899982Z caller=flush.go:168 msg="flushing stream" user=fake fp=9a4e037c3b69a04e immediate=false num_chunks=2 labels="{filename=\"/var/log/syslog\", job=\"varlogs\"}"
level=info ts=2023-10-10T19:49:28.024921372Z caller=flush.go:168 msg="flushing stream" user=fake fp=19b82441ff5a35f8 immediate=false num_chunks=1 labels="{filename=\"/var/log/kern.log\", job=\"varlogs\"}"
level=warn ts=2023-10-10T19:49:29.16910042Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T19:49:29.169227283Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T19:49:29.169292124Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=info ts=2023-10-10T19:49:56.96724007Z caller=table_manager.go:134 msg="uploading tables"
level=info ts=2023-10-10T19:49:57.552537081Z caller=table_manager.go:166 msg="handing over indexes to shipper"
level=info ts=2023-10-10T19:49:57.552645493Z caller=table.go:318 msg="handing over indexes to shipper index_19640"
level=info ts=2023-10-10T19:49:57.552658493Z caller=table.go:334 msg="finished handing over table index_19640"
level=info ts=2023-10-10T20:07:03.12775215Z caller=marker.go:202 msg="no marks file found"
level=info ts=2023-10-10T20:07:03.127823971Z caller=compactor.go:454 msg="applying retention with compaction"
level=info ts=2023-10-10T20:07:03.127893242Z caller=expiration.go:78 msg="overall smallest retention period 1694290023.127, default smallest retention period 1694290023.127"
ts=2023-10-10T20:07:03.127956304Z caller=spanlogger.go:85 level=info msg="building index list cache"
ts=2023-10-10T20:07:03.128089356Z caller=spanlogger.go:85 level=info msg="index list cache built" duration=120.132µs
level=info ts=2023-10-10T20:07:03.128126357Z caller=compactor.go:609 msg="compacting table" table-name=index_19640
level=info ts=2023-10-10T20:07:03.129465905Z caller=table.go:131 table-name=index_19640 msg="listed files" count=1
level=info ts=2023-10-10T20:07:03.139302003Z caller=util.go:85 table-name=index_19640 file-name=27c7ff40b595-1696967216968559833-1696967248.gz msg="downloaded file" total_time=6.339528ms
level=info ts=2023-10-10T20:07:05.368501844Z caller=index_set.go:269 table-name=index_19640 msg="removing source db files from storage" count=1
level=info ts=2023-10-10T20:07:05.884927676Z caller=compactor.go:614 msg="finished compacting table" table-name=index_19640

my /etc/docker/daemon.json:

{
    "debug" : true,
    "log-driver": "loki",
    "log-opts": {
        "loki-url": "http://ip.here:3100/loki/api/v1/push",
        "loki-batch-size": "400"
    }
}
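(Note that the loki service itself is opted out of this daemon-wide driver via the per-service logging block in the stack file above, i.e.:

    logging:
      driver: json-file

so the Loki container doesn't try to push its own logs to itself.)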

If I try curl -vvv http://ip.here:3100/loki/api/v1/label/jobs/values, I see this in the logs until I manually cancel the curl:

level=info ts=2023-10-10T20:27:14.919376814Z caller=roundtrip.go:195 org_id=fake msg="executing query" type=labels label=jobs length=1h0m0s query=
ts=2023-10-10T20:27:14.9212248Z caller=spanlogger.go:85 user=fake level=info org_id=fake latency=fast query_type=labels length=1h0m0s duration=928.718µs status=200 label=jobs query= splits=0 throughput=0B total_bytes=0B total_entries=0
level=warn ts=2023-10-10T20:27:19.169676275Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T20:27:19.169839299Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T20:27:19.16991181Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=warn ts=2023-10-10T20:27:24.169390517Z caller=pool.go:193 msg="removing frontend failing healthcheck" addr=10.0.0.71:9096 reason="rpc error: code = DeadlineExceeded desc = context deadline exceeded"
level=error ts=2023-10-10T20:27:24.16953668Z caller=scheduler_processor.go:208 org_id=fake frontend=10.0.0.71:9096 msg="error notifying frontend about finished query" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
level=error ts=2023-10-10T20:27:24.169605521Z caller=scheduler_processor.go:252 org_id=fake frontend=10.0.0.71:9096 msg="error health checking" err="rpc error: code = Canceled desc = grpc: the client connection is closing"
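(For completeness, I could also query the range API directly with an explicit window — something like the following, where the selector and time range are just examples:)

curl -G -s "http://ip.here:3100/loki/api/v1/query_range" \
  --data-urlencode 'query={job="varlogs"}' \
  --data-urlencode 'start=2023-10-10T19:00:00Z' \
  --data-urlencode 'end=2023-10-10T20:00:00Z' \
  --data-urlencode 'limit=10'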

Luckily, logs do seem to be coming in to some degree. For some reason, though, if Loki goes down I lose access to all my containers until I either change the default log driver or get Loki running again.
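(To keep container management from hanging whenever Loki is unreachable, I'm considering switching the driver to non-blocking delivery — untested on my side, but I believe these are standard Docker log-opts:)

{
    "debug" : true,
    "log-driver": "loki",
    "log-opts": {
        "loki-url": "http://ip.here:3100/loki/api/v1/push",
        "loki-batch-size": "400",
        "mode": "non-blocking",
        "max-buffer-size": "4m"
    }
}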