Data retention for 30 days in Grafana Tempo does not work

Hello,
I have Tempo 2.5.0 deployed with the tempo-distributed Helm chart (tempo-distributed-1.15.2) and I want 30 days of retention. I have been looking through the documentation and related threads for a while, but I can only get at most 1 h 30 min of trace history.
I'm sharing my configuration below, hoping you can help me.
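For quick reference, these are the retention-related values I have tried so far, as far as I understand them from the docs; they are all already set to 720h (30 days) in the full tempo.yaml further down:

    # excerpt of the retention-related settings from my tempo.yaml below
    compactor:
      compaction:
        block_retention: 720h        # how long blocks are kept in the backend
    ingester:
      complete_block_timeout: 720h   # how long the ingester keeps completed blocks
    query_frontend:
      search:
        max_duration: 720h           # maximum time range allowed for search queries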

tempo.yaml:
    cache:
      caches:
      - memcached:
          consistent_hash: true
          host: 'tempo-memcached'
          service: memcached-client
          timeout: 500ms
        roles:
        - parquet-footer
        - bloom
        - frontend-search
    compactor:
      compaction:
        block_retention: 720h
        compacted_block_retention: 1h
        compaction_cycle: 30s
        compaction_window: 1h
        max_block_bytes: 107374182400
        max_compaction_objects: 6000000
        max_time_per_tenant: 5m
        retention_concurrency: 10
        v2_in_buffer_bytes: 5242880
        v2_out_buffer_bytes: 20971520
        v2_prefetch_traces_count: 1000
      ring:
        kvstore:
          store: memberlist
    distributor:
      log_received_spans:
        enabled: true
        filter_by_status_error: true
        include_all_attributes: true
      receivers:
        otlp:
          protocols:
            grpc:
              endpoint: 0.0.0.0:4317
            http:
              endpoint: 0.0.0.0:4318
      ring:
        kvstore:
          store: memberlist
    ingester:
      complete_block_timeout: 720h
      flush_all_on_shutdown: true
      lifecycler:
        ring:
          kvstore:
            store: memberlist
          replication_factor: 3
        tokens_file_path: /var/tempo/tokens.json
    memberlist:
      abort_if_cluster_join_fails: false
      bind_addr: []
      bind_port: 7946
      gossip_interval: 1s
      gossip_nodes: 2
      gossip_to_dead_nodes_time: 30s
      join_members:
      - dns+tempo-gossip-ring:7946
      leave_timeout: 5s
      left_ingesters_timeout: 5m
      max_join_backoff: 1m
      max_join_retries: 10
      min_join_backoff: 1s
      node_name: ""
      packet_dial_timeout: 5s
      packet_write_timeout: 5s
      pull_push_interval: 30s
      randomize_node_name: true
      rejoin_interval: 0s
      retransmit_factor: 2
      stream_timeout: 10s
    metrics_generator:
      metrics_ingestion_time_range_slack: 30s
      processor:
        service_graphs:
          dimensions: []
          histogram_buckets:
          - 0.1
          - 0.2
          - 0.4
          - 0.8
          - 1.6
          - 3.2
          - 6.4
          - 12.8
          max_items: 10000
          wait: 10s
          workers: 10
        span_metrics:
          dimensions: []
          histogram_buckets:
          - 0.002
          - 0.004
          - 0.008
          - 0.016
          - 0.032
          - 0.064
          - 0.128
          - 0.256
          - 0.512
          - 1.02
          - 2.05
          - 4.1
      registry:
        collection_interval: 15s
        external_labels: {}
        stale_duration: 15m
      ring:
        kvstore:
          store: memberlist
      storage:
        path: /var/tempo/wal
        remote_write: []
        remote_write_add_org_id_header: true
        remote_write_flush_deadline: 1m
      traces_storage:
        path: /var/tempo/traces
    multitenancy_enabled: false
    overrides:
      max_traces_per_user: 50000
      per_tenant_override_config: /runtime-config/overrides.yaml
    querier:
      frontend_worker:
        frontend_address: tempo-query-frontend-discovery:9095
        grpc_client_config:
          max_recv_msg_size: 4194304000000
          max_send_msg_size: 4194304000000
      max_concurrent_queries: 20
      search:
        external_backend: null
        external_endpoints: []
        external_hedge_requests_at: 8s
        external_hedge_requests_up_to: 2
        prefer_self: 10
        query_timeout: 45s
      trace_by_id:
        query_timeout: 10s
    query_frontend:
      max_outstanding_per_tenant: 2000
      max_retries: 2
      metrics:
        max_duration: 720h
      search:
        concurrent_jobs: 1000
        max_duration: 720h
        target_bytes_per_job: 104857600000
      trace_by_id:
        query_shards: 50
    server:
      grpc_server_max_recv_msg_size: 4194304000000
      grpc_server_max_send_msg_size: 4194304000000
      http_listen_port: 3100
      http_server_read_timeout: 30s
      http_server_write_timeout: 30s
      log_format: logfmt
      log_level: info
    storage:
      trace:
        backend: s3
        blocklist_poll: 5m
        pool:
          max_workers: 400
          queue_depth: 20000
        s3:
          access_key: XXXXXXXX
          bucket: bucket-name
          endpoint: s3-endpoint.url
          insecure: true
          secret_key: XXXXXXXXXXX
        wal:
          path: /var/tempo/wal
    usage_report:
      reporting_enabled: true