I have a containerized observability stack on EKS with Grafana, Loki, Tempo, and Prometheus. When upgrading Tempo from 2.3 to 2.6.1, all of the Tempo pods come up without issues except tempo-query-frontend: only its query-frontend container starts, while the tempo-query container keeps failing, so the pod sits in CrashLoopBackOff (1/2).
At first I thought the error was related to the storage block version, because Tempo 2.2.3 used vParquet and, according to the documentation, Tempo 2.6 and later defaults to vParquet4. Changing the block version, however, did not resolve the following error:
flag provided but not defined: -http.base-path
Usage of /tempo-query:
-config string
A path to the plugin's configuration file
NOTE: I also renamed the flag from <query.base-path> to <http.base-path>, and after that change we still get the message that the flag is not defined.
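Going by the usage output above, the 2.6.1 tempo-query binary only advertises a -config flag, so my working assumption (based only on that output, not on the docs) is that the sidecar args would have to be reduced to something like the sketch below, with the base-path and storage-plugin flags dropped or their settings moved into tempo-query.yaml:

  # sketch only: assumes -config is the sole supported flag, as the usage output above suggests
  - args:
      - -config=/conf/tempo-query.yaml
    image: grafana-tempo-query-2.6.1
    imagePullPolicy: IfNotPresent
    name: tempo-query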
Example Tempo manifests:
apiVersion: v1
kind: ConfigMap
metadata:
name: tempo-config
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
data:
tempo-query.yaml: |
backend: 127.0.0.1:3100
overrides.yaml: |
overrides: {}
tempo.yaml: |
compactor:
compaction:
block_retention: {{ .Values.global.tracing.retention }}
compacted_block_retention: 30m
compaction_cycle: 30s
compaction_window: 30m
max_block_bytes: 107374182400
max_compaction_objects: 6000000
max_time_per_tenant: 5m
retention_concurrency: 10
# v2_in_buffer_bytes: 5242880
# v2_out_buffer_bytes: 20971520
# v2_prefetch_traces_count: 1000
ring:
kvstore:
store: memberlist
distributor:
receivers:
otlp:
protocols:
http:
endpoint: 0.0.0.0:55681
grpc:
endpoint: 0.0.0.0:4317
ring:
kvstore:
store: memberlist
ingester:
lifecycler:
ring:
kvstore:
store: memberlist
replication_factor: 3
tokens_file_path: /var/tempo/tokens.json
complete_block_timeout: 30m
memberlist:
abort_if_cluster_join_fails: false
join_members:
- tempo-gossip-ring
multitenancy_enabled: false
overrides:
metrics_generator_processors:
- service-graphs
- span-metrics
per_tenant_override_config: /conf/overrides.yaml
max_bytes_per_trace: 10000000
max_bytes_per_tag_values_query: 10000000
querier:
frontend_worker:
frontend_address: tempo-query-frontend-discovery.{{ .Values.global.namespace }}.svc.cluster.local:9095
max_concurrent_queries: 50
search:
external_endpoints: []
external_hedge_requests_at: 8s
external_hedge_requests_up_to: 2
prefer_self: 10
query_timeout: 30s
trace_by_id:
query_timeout: 10s
query_frontend:
max_retries: 2
search:
concurrent_jobs: 1000
target_bytes_per_job: 104857600
trace_by_id:
query_shards: 50
server:
grpc_server_max_recv_msg_size: 8e+06
grpc_server_max_send_msg_size: 8e+06
http_listen_port: 3100
http_server_read_timeout: 30s
http_server_write_timeout: 30s
log_format: logfmt
log_level: info
storage:
trace:
backend: s3
s3:
bucket: {{ .Values.global.tracing.bucket }}-{{ .Values.global.monitoring.clusterName }}
endpoint: s3.us-east-1.amazonaws.com
insecure: true
block:
version: vParquet4
blocklist_poll: 10m
blocklist_poll_tenant_index_builders: 3
cache: memcached
memcached:
consistent_hash: true
host: tempo-memcached
service: memcached-client
timeout: 500ms
wal:
path: /var/tempo/wal
usage_report:
reporting_enabled: true
---
# Services
apiVersion: v1
kind: Service
metadata:
name: tempo-compactor
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: compactor
spec:
type: ClusterIP
ports:
- name: http-metrics
port: 3100
targetPort: http
protocol: TCP
selector:
app.kubernetes.io/component: compactor
app.kubernetes.io/instance: tempo
app.kubernetes.io/name: tempo
---
apiVersion: v1
kind: Service
metadata:
name: tempo-distributor
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: distributor
spec:
type: ClusterIP
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: http
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
- name: distributor-otlp-http
port: 55681
protocol: TCP
targetPort: otlp-http
- name: distributor-otlp-grpc
port: 4317
appProtocol: tcp
targetPort: otlp-grpc
- name: distributor-otlp-legacy
port: 55680
appProtocol: tcp
targetPort: otlp-grpc
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: distributor
---
apiVersion: v1
kind: Service
metadata:
name: tempo-distributor-discovery
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: distributor
spec:
type: ClusterIP
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: http-metrics
- name: distributor-otlp-http
port: 55681
protocol: TCP
targetPort: otlp-http
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: distributor
---
apiVersion: v1
kind: Service
metadata:
name: tempo-ingester
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: ingester
spec:
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: 3100
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: ingester
---
apiVersion: v1
kind: Service
metadata:
name: tempo-ingester-discovery
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: ingester
spec:
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: 3100
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: ingester
---
apiVersion: v1
kind: Service
metadata:
name: tempo-memcached
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: memcached
spec:
ports:
- name: memcached-client
port: 11211
protocol: TCP
targetPort: 11211
- name: http-metrics
port: 9150
protocol: TCP
targetPort: http-metrics
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: memcached
---
apiVersion: v1
kind: Service
metadata:
name: tempo-querier
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: querier
spec:
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: 3100
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: querier
---
apiVersion: v1
kind: Service
metadata:
name: tempo-query-frontend
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: query-frontend
spec:
type: ClusterIP
ports:
- name: http-metrics
port: 3100
protocol: TCP
targetPort: 3100
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
- name: tempo-query-jaeger-ui
port: 16686
targetPort: 16686
- name: tempo-query-jaeger-grpc
port: 16685
targetPort: 16685
appProtocol: tcp
- name: tempo-query-metrics
port: 16687
targetPort: jaeger-metrics
publishNotReadyAddresses: true
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: query-frontend
---
apiVersion: v1
kind: Service
metadata:
name: tempo-query-frontend-discovery
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: query-frontend
spec:
type: ClusterIP
clusterIP: None
ports:
- name: http
port: 3100
protocol: TCP
targetPort: 3100
- name: grpc
port: 9095
appProtocol: tcp
targetPort: 9095
- name: grpclb
port: 9096
appProtocol: tcp
targetPort: grpc
- name: tempo-query-jaeger-ui
port: 16686
targetPort: 16686
- name: tempo-query-jaeger-grpc
port: 16685
targetPort: 16685
appProtocol: tcp
- name: tempo-query-metrics
port: 16687
targetPort: jaeger-metrics
selector:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: query-frontend
---
apiVersion: v1
kind: Service
metadata:
name: tempo-gossip-ring
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/component: gossip-ring
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
spec:
type: ClusterIP
clusterIP: None
ports:
- name: gossip-ring
port: 7946
protocol: TCP
targetPort: http-memberlist
selector:
app.kubernetes.io/instance: tempo
app.kubernetes.io/name: tempo
app.kubernetes.io/part-of: memberlist
---
# Deployments / StatefulSets
apiVersion: apps/v1
kind: Deployment
metadata:
name: tempo-compactor
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: compactor
app.kubernetes.io/part-of: memberlist
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
minReadySeconds: 10
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: compactor
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: compactor
app.kubernetes.io/part-of: memberlist
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- args:
- -target=compactor
- -config.file=/conf/tempo.yaml
- -mem-ballast-size-mbs=1024
image: grafana-tempo-2.6.1
imagePullPolicy: IfNotPresent
name: compactor
ports:
- containerPort: 3100
name: http-metrics
protocol: TCP
- containerPort: 7946
name: http-memberlist
protocol: TCP
resources:
limits:
cpu: "5000m"
memory: "10000Mi"
requests:
cpu: "10m"
memory: "100Mi"
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: tempo-compactor-store
terminationGracePeriodSeconds: 30
volumes:
- configMap:
defaultMode: 420
items:
- key: tempo.yaml
path: tempo.yaml
- key: overrides.yaml
path: overrides.yaml
name: tempo-config
name: config
- emptyDir: {}
name: tempo-compactor-store
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tempo-distributor
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: distributor
app.kubernetes.io/part-of: memberlist
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
minReadySeconds: 10
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: distributor
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: distributor
app.kubernetes.io/part-of: memberlist
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- args:
- -target=distributor
- -config.file=/conf/tempo.yaml
- -mem-ballast-size-mbs=1024
image: grafana-tempo-2.6.1
imagePullPolicy: IfNotPresent
name: distributor
ports:
- containerPort: 7946
name: http-memberlist
protocol: TCP
- containerPort: 3100
name: http-metrics
protocol: TCP
- containerPort: 55681
name: otlp-http
protocol: TCP
- containerPort: 4317
name: otlp-grpc
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: http-metrics
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: 4000m
memory: 6000Mi
requests:
cpu: 10m
memory: 500Mi
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: tempo-distributor-store
terminationGracePeriodSeconds: 30
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: distributor
topologyKey: kubernetes.io/hostname
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: distributor
topologyKey: failure-domain.beta.kubernetes.io/zone
volumes:
- configMap:
defaultMode: 420
items:
- key: tempo.yaml
path: tempo.yaml
- key: overrides.yaml
path: overrides.yaml
name: tempo-config
name: config
- emptyDir: {}
name: tempo-distributor-store
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tempo-querier
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: querier
app.kubernetes.io/part-of: memberlist
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
minReadySeconds: 10
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: querier
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: querier
app.kubernetes.io/part-of: memberlist
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- args:
- -target=querier
- -config.file=/conf/tempo.yaml
- -mem-ballast-size-mbs=1024
image: grafana-tempo-2.6.1
imagePullPolicy: IfNotPresent
name: querier
ports:
- containerPort: 7946
name: http-memberlist
protocol: TCP
- containerPort: 3100
name: http-metrics
protocol: TCP
resources:
limits:
cpu: 3000m
memory: 3000Mi
requests:
cpu: 10m
memory: 100Mi
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: tempo-querier-store
terminationGracePeriodSeconds: 30
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: querier
topologyKey: kubernetes.io/hostname
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: querier
topologyKey: failure-domain.beta.kubernetes.io/zone
volumes:
- configMap:
defaultMode: 420
items:
- key: tempo.yaml
path: tempo.yaml
- key: overrides.yaml
path: overrides.yaml
name: tempo-config
name: config
- emptyDir: {}
name: tempo-querier-store
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: tempo-query-frontend
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/component: query-frontend
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
minReadySeconds: 10
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: query-frontend
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.6.1"
app.kubernetes.io/component: query-frontend
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- args:
- -target=query-frontend
- -config.file=/conf/tempo.yaml
- -mem-ballast-size-mbs=1024
image: grafana-tempo-2.6.1
imagePullPolicy: IfNotPresent
name: query-frontend
ports:
- containerPort: 3100
name: http-metrics
protocol: TCP
- containerPort: 9095
name: grpc
resources:
limits:
cpu: 3000m
memory: 3000Mi
requests:
cpu: 10m
memory: 100Mi
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: tempo-queryfrontend-store
- args:
- --http.base-path={{ .Values.global.tracing.jaeger.path | default "/" }}
- --grpc-storage-plugin.configuration-file=/conf/tempo-query.yaml
- --query.bearer-token-propagation=true
image: grafana-tempo-query-2.6.1
imagePullPolicy: IfNotPresent
name: tempo-query
ports:
- containerPort: 16686
name: jaeger-ui
- containerPort: 16685
name: jaeger-ui-grpc
- containerPort: 16687
name: jaeger-metrics
resources:
limits:
cpu: 3000m
memory: 3000Mi
requests:
cpu: 10m
memory: 50Mi
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: tempo-queryfrontend-store
terminationGracePeriodSeconds: 30
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: query-frontend
topologyKey: kubernetes.io/hostname
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: query-frontend
topologyKey: failure-domain.beta.kubernetes.io/zone
volumes:
- configMap:
defaultMode: 420
items:
- key: tempo.yaml
path: tempo.yaml
- key: tempo-query.yaml
path: tempo-query.yaml
- key: overrides.yaml
path: overrides.yaml
name: tempo-config
name: config
- emptyDir: {}
name: tempo-queryfrontend-store
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: tempo-ingester
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: ingester
app.kubernetes.io/part-of: memberlist
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
podManagementPolicy: Parallel
replicas: 3
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: ingester
serviceName: ingester
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: ingester
app.kubernetes.io/part-of: memberlist
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- args:
- -target=ingester
- -config.file=/conf/tempo.yaml
- -mem-ballast-size-mbs=1024
image: grafana-tempo-2.6.1
imagePullPolicy: IfNotPresent
name: ingester
ports:
- name: grpc
containerPort: 9095
- name: http-memberlist
containerPort: 7946
protocol: TCP
- name: http-metrics
containerPort: 3100
protocol: TCP
readinessProbe:
failureThreshold: 3
httpGet:
path: /ready
port: http-metrics
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: 5000m
memory: 15000Mi
requests:
cpu: 10m
memory: 500Mi
volumeMounts:
- mountPath: /conf
name: config
- mountPath: /var/tempo
name: data
terminationGracePeriodSeconds: 300
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: ingester
topologyKey: kubernetes.io/hostname
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: ingester
topologyKey: failure-domain.beta.kubernetes.io/zone
volumes:
- configMap:
defaultMode: 420
items:
- key: tempo.yaml
path: tempo.yaml
- key: overrides.yaml
path: overrides.yaml
name: tempo-config
name: config
- emptyDir: {}
name: data
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: tempo-memcached
namespace: {{ .Values.global.namespace }}
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: memcached
annotations:
configmap.reloader.stakater.com/reload: "tempo-config"
spec:
podManagementPolicy: OrderedReady
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: memcached
serviceName: memcached
template:
metadata:
labels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/version: "2.0.1"
app.kubernetes.io/component: memcached
annotations:
fluentbit.io/exclude: "true"
spec:
serviceAccountName: {{ .Values.global.monitoring.servicesAccount }}
containers:
- image: bitnami-memcached-1.6.30
imagePullPolicy: IfNotPresent
name: memcached
ports:
- containerPort: 11211
name: client
protocol: TCP
resources:
limits:
cpu: 3000m
memory: 3000Mi
requests:
cpu: 10m
memory: 30Mi
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: memcached
topologyKey: kubernetes.io/hostname
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: tempo
app.kubernetes.io/instance: tempo
app.kubernetes.io/component: memcached
topologyKey: failure-domain.beta.kubernetes.io/zone
updateStrategy:
type: RollingUpdate
---
# VirtualService
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: tracing-jaeger-vs
namespace: {{ .Values.global.namespace }}
spec:
hosts:
- '{{ .Values.global.dns }}.{{ .Values.global.domain }}'
gateways:
- {{ .Values.global.namespace }}/namespace-gw
http:
- match:
- uri:
prefix: {{ .Values.global.tracing.jaeger.path }}/
rewrite:
uri: {{ .Values.global.tracing.jaeger.path }}/
route:
- destination:
host: tempo-query-frontend.{{ .Values.global.namespace }}.svc.cluster.local
port:
number: 16686
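To confirm which flags the 2.6.1 tempo-query image actually accepts before redeploying, a throwaway pod along these lines should dump the full usage text to its logs and exit (the pod name tempo-query-flags is hypothetical; the image reference is the same one used in the query-frontend Deployment above, and this assumes the image entrypoint is the /tempo-query binary, as the usage output suggests):

  # one-off pod, only used to print the tempo-query usage/flag list
  apiVersion: v1
  kind: Pod
  metadata:
    name: tempo-query-flags
  spec:
    restartPolicy: Never
    containers:
      - name: tempo-query
        image: grafana-tempo-query-2.6.1
        args: ["-h"]   # a Go flag-based binary prints its defined flags on -h and exits

Apply it, read the logs with kubectl logs tempo-query-flags, and delete the pod afterwards.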