Does the tempo-distributed metrics_generator need access to the ingester WAL or to blob storage?

I'm not seeing the `traces_spanmetrics_latency` metric among the metrics it should be sending. My current config:
{{ if .Values.tempo.enabled }}
# ServiceAccount bound to a GCP service account via Workload Identity so Tempo
# components can read/write the GCS trace bucket.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tempo-gcs-ksa
  namespace: monitoring
  annotations:
    iam.gke.io/gcp-service-account: tempo-gcs-serviceaccount@samaya-prod-403612.iam.gserviceaccount.com

---
# Argo CD Application deploying the grafana/tempo-distributed Helm chart.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: tempo
  namespace: argocd
spec:
  project: default
  source:
    # Was a pasted link title; must be the actual Helm repo URL.
    repoURL: https://grafana.github.io/helm-charts
    chart: tempo-distributed
    # Quoted so a numeric-looking chart version (e.g. 1.10) is not parsed as a float.
    targetRevision: "{{ .Values.tempo.targetRevision }}"
    helm:
      releaseName: tempo-distributed
      values: |
        service:
          type: ClusterIP
        serviceAccount:
          create: false
          name: tempo-gcs-ksa
        config: |
          storage:
            trace:
              backend: gcs
              gcs:
                bucket_name: {{ .Values.tempo.bucketName }}
          querier:
            frontend_worker:
              frontend_address: tempo-distributed-query-frontend.monitoring.svc.cluster.local:9095
          server:
            http_listen_port: 3100
          distributor:
            ring:
              kvstore:
                store: memberlist
            receivers:
              otlp:
                protocols:
                  grpc:
                  http:
          ingester:
            lifecycler:
              ring:
                replication_factor: 1
                kvstore:
                  store: memberlist
          metrics_generator:
            storage:
              # Local scratch WAL for generated metrics before they are shipped out.
              path: /var/tempo/wal
              # NOTE(review): no remote_write target is configured, so span-metrics
              # (including traces_spanmetrics_latency) are generated but never pushed
              # to a Prometheus-compatible endpoint — this is the most likely reason
              # the metric is missing. Example:
              # remote_write:
              #   - url: http://prometheus-server.monitoring.svc.cluster.local/api/v1/write
              #     send_exemplars: true
            processor:
              span_metrics:
                dimensions: ["service", "span_name", "span_kind", "status_code", "status_message"]
                histogram_buckets: [0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5]
              local_blocks:
                # local-blocks flushes completed blocks to the trace backend (GCS),
                # so the generator needs the same bucket credentials as the ingester.
                flush_to_storage: true
          memberlist:
            join_members:
              - dns+tempo-distributed-gossip-ring:7946
          overrides:
            defaults:
              metrics_generator:
                # Processor names use hyphens here (config keys use underscores).
                processors:
                  - span-metrics
                  - service-graphs
                  - local-blocks
        traces:
          otlp:
            grpc:
              enabled: true
            http:
              enabled: true
        ingester:
          replicas: 8
          config:
            replication_factor: 1
          resources:
            requests:
              cpu: "0.1"
              memory: "1.5Gi"
            limits:
              cpu: "0.1"
              memory: "1.5Gi"
          # NOTE(review): mounting ONE shared PVC as the WAL for 8 ingester replicas
          # risks WAL corruption — each replica normally gets its own PVC via the
          # chart's persistence/volumeClaimTemplates. Confirm this is intentional.
          extraVolumeMounts:
            - name: tempo-wal
              mountPath: /var/tempo/wal
          extraVolumes:
            - name: tempo-wal
              persistentVolumeClaim:
                claimName: tempo-wal
        distributor:
          resources:
            requests:
              cpu: "0.1"
              memory: "0.3Gi"
            limits:
              cpu: "0.2"
              memory: "0.4Gi"
        compactor:
          resources:
            requests:
              cpu: "0.3"
              memory: "1Gi"
            limits:
              cpu: "0.4"
              memory: "1Gi"
          config:
            compaction:
              compaction_window: 30m
              max_block_bytes: 573741824
        querier:
          resources:
            requests:
              cpu: "0.2"
              memory: "0.4Gi"
            limits:
              cpu: "0.3"
              memory: "0.5Gi"
        metricsGenerator:
          enabled: true
          podLabels:
            scrape: "true"
          # NOTE(review): sharing the same PVC with the ingesters is not required —
          # the generator only needs its own local WAL path; verify isolation.
          extraVolumeMounts:
            - name: tempo-wal
              mountPath: /var/tempo/wal
          extraVolumes:
            - name: tempo-wal
              persistentVolumeClaim:
                claimName: tempo-wal
  destination:
    server: https://kubernetes.default.svc
    namespace: monitoring
  syncPolicy:
    automated:
      prune: true
      selfHeal: true

---
# Shared WAL volume claimed by the ingester and metrics-generator pods above.
# NOTE(review): ReadWriteMany requires an RWX-capable storage class (e.g.
# Filestore on GKE) — the default PD storage class is ReadWriteOnce; confirm.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tempo-wal
  namespace: monitoring
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi
{{ end }}