Cannot get some Pod logs using Grafana Loki

Alloy errors:

{"ts":"2025-07-09T16:48:19.379147889Z","level":"error","msg":"error handling event","component_path":"/","component_id":"loki.source.kubernetes_events.cluster_events","err":"no involved object for event"}
{"ts":"2025-07-09T19:26:04.551193739Z","level":"warn","msg":"could not determine if container terminated; will retry tailing","target":"logging/alloy-9gpp9:alloy","component_path":"/","component_id":"loki.source.kubernetes.pod_logs","err":"client rate limiter Wait returned an error: context canceled"}`
{"ts":"2025-07-09T19:26:04.551833307Z","level":"warn","msg":"tailer stopped; will retry","target":"logging/alloy-9gpp9:config-reloader","component_path":"/","component_id":"loki.source.kubernetes.pod_logs","err":"client rate limiter Wait returned an error: context canceled"}`
{"ts":"2025-07-09T19:26:04.565120335Z","level":"error","msg":"final error sending batch","component_path":"/","component_id":"loki.write.endpoint","component":"client","host":"loki-distributor.logging.svc.cluster.local:3100","status":400,"tenant":"fake","error":"server returned HTTP status 400 Bad Request (400): entry with timestamp 2025-07-06 00:24:09.185385151 +0000 UTC ignored, reason: 'entry too far behind, entry timestamp is: 2025-07-06T00:24:09Z, oldest acceptable timestamp is: 2025-07-09T18:26:02Z',"}`

Distributor config:

kubectl exec -it loki-distributor-df585bf65-cr7bv -n logging -- cat /etc/loki/config/config.yaml

auth_enabled: true
bloom_build:
  builder:
    planner_address: loki-bloom-planner-headless.logging.svc.cluster.local:9095
  enabled: false
bloom_gateway:
  client:
    addresses: dnssrvnoa+_grpc._tcp.loki-bloom-gateway-headless.logging.svc.cluster.local
  enabled: false
chunk_store_config:
  chunk_cache_config:
    background:
      writeback_buffer: 10000
      writeback_goroutines: 1
      writeback_size_limit: 100MB
    default_validity: 0s
    memcached:
      batch_size: 4
      parallelism: 5
    memcached_client:
      addresses: dnssrvnoa+_memcached-client._tcp.loki-chunks-cache.logging.svc
      consistent_hash: true
      max_idle_conns: 72
      timeout: 2000ms
common:
  compactor_address: 'http://loki-compactor:3100'
  path_prefix: /var/loki
  replication_factor: 3
  storage:
    s3:
      bucketnames: eks-loki-chunk-prod
      insecure: false
      region: us-east-2
      s3forcepathstyle: false
compactor:
  delete_request_store: s3
  retention_enabled: true
frontend:
  scheduler_address: loki-query-scheduler.logging.svc.cluster.local:9095
  tail_proxy_url: http://loki-querier.logging.svc.cluster.local:3100
frontend_worker:
  scheduler_address: loki-query-scheduler.logging.svc.cluster.local:9095
index_gateway:
  mode: simple
ingester:
  chunk_encoding: snappy
limits_config:
  allow_structured_metadata: true
  ingestion_burst_size_mb: 200
  ingestion_rate_mb: 100
  max_cache_freshness_per_query: 10m
  query_timeout: 300s
  reject_old_samples: false
  reject_old_samples_max_age: 168h
  retention_period: 672h
  split_queries_by_interval: 15m
  volume_enabled: true
memberlist:
  join_members:
  - loki-memberlist
pattern_ingester:
  enabled: true
querier:
  max_concurrent: 4
query_range:
  align_queries_with_step: true
  cache_results: true
  results_cache:
    cache:
      background:
        writeback_buffer: 500000
        writeback_goroutines: 1
        writeback_size_limit: 500MB
      default_validity: 12h
      memcached_client:
        addresses: dnssrvnoa+_memcached-client._tcp.loki-results-cache.logging.svc
        consistent_hash: true
        timeout: 500ms
        update_interval: 1m
ruler:
  storage:
    s3:
      bucketnames: eks-loki-ruler-prod
      insecure: false
      region: us-east-2
      s3forcepathstyle: false
    type: s3
  wal:
    dir: /var/loki/ruler-wal
runtime_config:
  file: /etc/loki/runtime-config/runtime-config.yaml
schema_config:
  configs:
  - from: "2024-04-01"
    index:
      period: 24h
      prefix: loki_index_
    object_store: s3
    schema: v13
    store: tsdb
server:
  grpc_listen_port: 9095
  http_listen_port: 3100
  http_server_read_timeout: 600s
  http_server_write_timeout: 600s
storage_config:
  aws:
    bucketnames: eks-loki-chunk-prod
    region: us-east-2
    s3forcepathstyle: false
  bloom_shipper:
    working_directory: /var/loki/data/bloomshipper
  boltdb_shipper:
    index_gateway_client:
      server_address: dns+loki-index-gateway-headless.logging.svc.cluster.local:9095
  hedging:
    at: 250ms
    max_per_second: 20
    up_to: 3
  tsdb_shipper:
    index_gateway_client:
      server_address: dns+loki-index-gateway-headless.logging.svc.cluster.local:9095
tracing:
  enabled: true
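
For completeness: the distributor also loads per-tenant overrides from /etc/loki/runtime-config/runtime-config.yaml (see runtime_config above), and an overrides: entry for the fake tenant there would take precedence over the limits_config values shown here. It can be dumped the same way as the main config:

kubectl exec -it loki-distributor-df585bf65-cr7bv -n logging -- cat /etc/loki/runtime-config/runtime-config.yaml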

Alloy values file:

alloy:
  mounts:
    varlog: true
  configMap:
    content: |
      logging {
        level  = "info"
        format = "json"
      }

      discovery.kubernetes "pods" {
        role = "pod"
      }

      local.file_match "node_logs" {
        path_targets = [{
            // Monitor syslog to scrape node-logs
            __path__  = "/var/log/syslog",
            job       = "node/syslog",
            node_name = sys.env("HOSTNAME"),
            cluster   = "prod-eks",
        }]
      }

      loki.source.file "node_logs" {
        targets    = local.file_match.node_logs.targets
        forward_to = [loki.write.endpoint.receiver]
      }

      // discovery.relabel rewrites the label set of the input targets by applying one or more relabeling rules.
      // If no rules are defined, then the input targets are exported as-is.
      discovery.relabel "pod_logs" {
        targets = discovery.kubernetes.pods.targets

        // Label creation - "namespace" field from "__meta_kubernetes_namespace"
        rule {
          source_labels = ["__meta_kubernetes_namespace"]
          action = "replace"
          target_label = "namespace"
        }

        // Label creation - "pod" field from "__meta_kubernetes_pod_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_name"]
          action = "replace"
          target_label = "pod"
        }

        // Label creation - "container" field from "__meta_kubernetes_pod_container_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "container"
        }

        // Label creation -  "app" field from "__meta_kubernetes_pod_label_app_kubernetes_io_name"
        rule {
          source_labels = ["__meta_kubernetes_pod_label_app_kubernetes_io_name"]
          action = "replace"
          target_label = "app"
        }

        // Label creation -  "job" field from "__meta_kubernetes_namespace" and "__meta_kubernetes_pod_container_name"
        // Concatenate values __meta_kubernetes_namespace/__meta_kubernetes_pod_container_name
        rule {
          source_labels = ["__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name"]
          action = "replace"
          target_label = "job"
          separator = "/"
          replacement = "$1"
        }

        // --- CORRECTED RULE FOR BOTTLEROCKET LOG PATH ---
        // This rule now precisely constructs the __path__ for logs located in /var/log/containers/
        // using the pod name, namespace, container name, and the full container ID.
        rule {
          source_labels = ["__meta_kubernetes_pod_name", "__meta_kubernetes_namespace", "__meta_kubernetes_pod_container_name", "__meta_kubernetes_pod_container_id"]
          action = "replace"
          target_label = "__path__"
          separator = ";" // Using a separator unlikely to be in labels
          // Regex captures the four parts separated by ';'
          regex = "^(.+);(.+);(.+);(.+)$"
          // Replacement constructs the /var/log/containers/ path
          replacement = "/var/log/containers/$1_$2_$3-$4.log"
        }
        // --- END CORRECTED RULE ---

        // Label creation -  "container_runtime" field from "__meta_kubernetes_pod_container_id"
        rule {
          source_labels = ["__meta_kubernetes_pod_container_id"]
          action = "replace"
          target_label = "container_runtime"
          regex = "^(\\S+):\\/\\/.+$"
          replacement = "$1"
        }
      }

      loki.source.kubernetes "pod_logs" {
        targets    = discovery.relabel.pod_logs.output
        forward_to = [loki.process.pod_logs.receiver]
      }

      loki.process "pod_logs" {
        stage.static_labels {
            values = {
              cluster = "prod-eks",
            }
        }
        forward_to = [loki.write.endpoint.receiver]
      }

      loki.source.kubernetes_events "cluster_events" {
        job_name   = "integrations/kubernetes/eventhandler"
        log_format = "logfmt"
        forward_to = [loki.process.cluster_events.receiver]
      }

      loki.process "cluster_events" {
        forward_to = [loki.write.endpoint.receiver]

        stage.static_labels {
          values = {
            cluster = "prod-eks",
          }
        }

        stage.labels {
          values = {
            kubernetes_cluster_events = "job",
          }
        }
      }

      loki.write "endpoint" {
        endpoint {
            url = "http://loki-distributor.logging.svc.cluster.local:3100/loki/api/v1/push"
            tenant_id = "fake"
        }
      }

It seems that reject_old_samples: false does not prevent errors like 'entry too far behind, entry timestamp is: 2025-07-06T00:24:09Z, oldest acceptable timestamp is: 2025-07-09T18:26:02Z'.
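
From what I can tell, reject_old_samples only controls how old an entry may be relative to now, while this rejection comes from the ingester's per-stream ordering check: with unordered_writes (the default), an entry is only accepted if it is within roughly max_chunk_age / 2 of the newest entry already in that stream, which matches the one-hour gap in the error (max_chunk_age defaults to 2h). If that is right, the knob that widens the window would be something like the sketch below (values are illustrative, not a recommendation):

ingester:
  chunk_encoding: snappy
  max_chunk_age: 8h          # accepts entries up to ~4h behind the stream head instead of ~1h
limits_config:
  unordered_writes: true     # default; out-of-order entries are only allowed inside that window

Is max_chunk_age the right lever here, or is there a better way to handle entries this far behind?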