Can query data in the WAL, but not in the S3 backend

I have deployed distributed Tempo with S3 backend storage. At first I can query the data via Grafana because it is still held in the WAL, but once complete_block_timeout is reached I should, in theory, still be able to query it, since the queriers retrieve data from the S3 backend. In practice, the data is no longer returned after that point. Here is my Tempo configuration (the timing-related settings that I think are relevant are summarized again after the full config):

cache:
  caches:
  - memcached:
      consistent_hash: true
      host: 'distributed-tempo-memcached'
      service: memcached-client
      timeout: 500ms
    roles:
    - parquet-footer
    - bloom
    - frontend-search
compactor:
  compaction:
    block_retention: 15m
    compacted_block_retention: 5m
    compaction_cycle: 30s
    compaction_window: 5m
    max_block_bytes: 107374182400
    max_compaction_objects: 6000000
    max_time_per_tenant: 5m
    retention_concurrency: 10
    v2_in_buffer_bytes: 5242880
    v2_out_buffer_bytes: 20971520
    v2_prefetch_traces_count: 1000
  ring:
    kvstore:
      store: memberlist
distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
  ring:
    kvstore:
      store: memberlist
ingester:
  complete_block_timeout: 10m
  flush_check_period: 10s
  lifecycler:
    ring:
      kvstore:
        store: memberlist
      replication_factor: 3
    tokens_file_path: /var/tempo/tokens.json
  max_block_bytes: 524288000
  max_block_duration: 5m
  trace_idle_period: 10s
memberlist:
  abort_if_cluster_join_fails: false
  bind_addr: []
  bind_port: 7946
  cluster_label: 'distributed-tempo.tracing'
  gossip_interval: 1s
  gossip_nodes: 2
  gossip_to_dead_nodes_time: 30s
  join_members:
  - dns+distributed-tempo-gossip-ring:7946
  leave_timeout: 5s
  left_ingesters_timeout: 5m
  max_join_backoff: 1m
  max_join_retries: 10
  min_join_backoff: 1s
  node_name: ""
  packet_dial_timeout: 5s
  packet_write_timeout: 5s
  pull_push_interval: 30s
  randomize_node_name: true
  rejoin_interval: 0s
  retransmit_factor: 2
  stream_timeout: 10s
multitenancy_enabled: false
overrides:
  defaults: {}
  per_tenant_override_config: /runtime-config/overrides.yaml
querier:
  frontend_worker:
    frontend_address: distributed-tempo-query-frontend-discovery:9095
  max_concurrent_queries: 20
  search:
    query_timeout: 30s
  trace_by_id:
    query_timeout: 10s
query_frontend:
  max_outstanding_per_tenant: 2000
  max_retries: 2
  metrics:
    concurrent_jobs: 1000
    duration_slo: 0s
    interval: 5m
    max_duration: 3h
    query_backend_after: 5s
    target_bytes_per_job: 104857600
    throughput_bytes_slo: 0
  search:
    concurrent_jobs: 1000
    target_bytes_per_job: 104857600
  trace_by_id:
    query_shards: 50
server:
  grpc_server_max_recv_msg_size: 4194304
  grpc_server_max_send_msg_size: 4194304
  http_listen_port: 3100
  http_server_read_timeout: 30s
  http_server_write_timeout: 30s
  log_format: logfmt
  log_level: info
storage:
  trace:
    backend: s3
    blocklist_poll: 5m
    local:
      path: /var/tempo/traces
    pool:
      max_workers: 400
      queue_depth: 20000
    s3:
      access_key: xxxx
      bucket: bucket-test-tempo
      endpoint: s3.amazonaws.com
      insecure: false
      part_size: 67108864
      region: ap-southeast-1
      secret_key: xxxxx
    search:
      prefetch_trace_count: 1000
    wal:
      path: /var/tempo/wal
usage_report:
  reporting_enabled: true