Team, I am trying to migrate from loki-distributed to the loki chart. My new Loki has joined the loki-distributed memberlist cluster, and Promtail is configured to send logs to both the old and the new endpoint.
But somehow I am not able to see logs from the old Loki when querying through the new data source. The old index store was boltdb-shipper and I am switching to tsdb in the new deployment. Below is my configuration:
# Loki server configuration. In the grafana/loki chart everything in this
# stanza must sit under the top-level `loki:` key — flush-left these keys are
# ignored by the chart (and `ingester`/`querier`/`compactor` would collide
# with the chart-component keys further down).
loki:
  auth_enabled: false
  podAnnotations:
    sidecar.istio.io/inject: "false"
  commonConfig:
    replication_factor: 1
    # path_prefix: /var/loki
  limits_config:
    # max_query_parallelism by the formula:
    # [queriers count] * [max_concurrent] >= [max_query_parallelism]
    # tsdb_max_query_parallelism: 64 # Default 128
    ingestion_rate_mb: 20
    ingestion_burst_size_mb: 30
    # NOTE(review): retention_period only deletes data when the compactor runs
    # with retention_enabled: true — confirm that is set for this deployment.
    retention_period: 720h
  schemaConfig:
    configs:
      # Historical loki-distributed period. For the new readers to see the old
      # logs, this entry must match the OLD cluster's schema exactly (store,
      # schema version, index prefix, period), and the old index + chunks must
      # be in the same GCS bucket configured below — verify both against the
      # old loki-distributed values.
      - from: "2020-09-07"
        store: boltdb-shipper
        object_store: gcs
        schema: v11
        index:
          prefix: loki_index_
          period: 24h
      # Dates are quoted so YAML keeps them as strings, not date objects.
      - from: "2025-08-14"
        store: tsdb
        object_store: gcs
        schema: v13
        index:
          prefix: index_
          period: 24h
  storage:
    type: gcs
    gcs: {}
    bucketNames:
      chunks: sandbox-sandbox-london-b-loki
      ruler: sandbox-sandbox-london-b-loki
      admin: sandbox-sandbox-london-b-loki
  storage_config:
    gcs:
      bucket_name: sandbox-sandbox-london-b-loki
    boltdb_shipper:
      active_index_directory: /var/loki/boltdb-shipper-active
      cache_location: /var/loki/boltdb-shipper-cache
      # NOTE(review): while the old cluster is still up you can serve the
      # boltdb-shipper index from its index gateway instead of downloading it:
      # index_gateway_client:
      #   server_address: dns:///loki-loki-distributed-index-gateway:9095
    tsdb_shipper:
      active_index_directory: /var/loki/tsdb-index
      cache_location: /var/loki/tsdb-cache
  ingester:
    chunk_encoding: snappy
  server:
    grpc_server_max_recv_msg_size: 104857600 # 100 MB
    grpc_server_max_send_msg_size: 104857600 # 100 MB
  querier:
    max_concurrent: 8
deploymentMode: SimpleScalable

serviceAccount:
  annotations:
    # NOTE(review): the project id after '@' begins with '-', which is not a
    # valid GCP project id — this looks like a redaction; confirm the real
    # value before deploying.
    iam.gke.io/gcp-service-account: sandbox-london-b-loki@-sandbox.iam.gserviceaccount.com
gateway:
  replicas: 2
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 256Mi
  service:
    labels:
      prometheus.io/service-monitor: "false"
    annotations:
      tailscale.com/expose: "true"
      tailscale.com/hostname: "loki-sandbox-london-b"
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  # affinity:
  #   nodeAffinity:
  #     requiredDuringSchedulingIgnoredDuringExecution:
  #       nodeSelectorTerms:
  #         - matchExpressions:
  #             - key: app
  #               operator: In
  #               values:
  #                 - observability
  # Spread gateway pods across nodes but still schedule if a node is full.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: "kubernetes.io/hostname"
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: loki
          app.kubernetes.io/component: gateway
backend:
  # podAnnotations:
  #   sidecar.istio.io/inject: "false"
  # podLabels:
  #   sidecar.istio.io/inject: "false"
  replicas: 2
  resources:
    requests:
      cpu: 200m
      memory: 250Mi
    limits:
      cpu: 400m
      memory: 500Mi
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  # affinity:
  #   nodeAffinity:
  #     requiredDuringSchedulingIgnoredDuringExecution:
  #       nodeSelectorTerms:
  #         - matchExpressions:
  #             - key: app
  #               operator: In
  #               values:
  #                 - observability
  # Spread backend pods across nodes but still schedule if a node is full.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: "kubernetes.io/hostname"
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: loki
          app.kubernetes.io/component: backend
read:
  autoscaling:
    enabled: true
    minReplicas: 5
    maxReplicas: 20
    targetCPUUtilizationPercentage: 60
  resources:
    requests:
      cpu: 300m
      memory: 1000Mi
    limits:
      cpu: 600m
      memory: 3000Mi
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  affinity:
    # Empty list overrides the chart's default hard anti-affinity so the
    # autoscaler can place more read pods than there are nodes.
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution: []
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #       - matchExpressions:
    #           - key: app
    #             operator: In
    #             values:
    #               - observability
  # Soft spreading replaces the hard anti-affinity disabled above.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: "kubernetes.io/hostname"
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: loki
          app.kubernetes.io/component: read
write:
  autoscaling:
    enabled: true
    minReplicas: 3
    maxReplicas: 10
    targetCPUUtilizationPercentage: 60
  resources:
    requests:
      cpu: 200m
      memory: 500Mi
    limits:
      cpu: 300m
      memory: 1000Mi
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  affinity:
    # Empty list overrides the chart's default hard anti-affinity so the
    # autoscaler can place more write pods than there are nodes.
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution: []
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #       - matchExpressions:
    #           - key: app
    #             operator: In
    #             values:
    #               - observability
  # Soft spreading replaces the hard anti-affinity disabled above.
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: "kubernetes.io/hostname"
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          app.kubernetes.io/name: loki
          app.kubernetes.io/component: write
chunksCache:
  enabled: true
  # resources:
  #   requests:
  #     cpu: 100m
  #     memory: 500Mi
  #   limits:
  #     cpu: 200m
  #     memory: 1000Mi
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  # affinity:
  #   nodeAffinity:
  #     requiredDuringSchedulingIgnoredDuringExecution:
  #       nodeSelectorTerms:
  #         - matchExpressions:
  #             - key: app
  #               operator: In
  #               values:
  #                 - observability
resultsCache:
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 200m
      memory: 500Mi
  # tolerations:
  #   - key: "dedicated"
  #     operator: "Equal"
  #     value: "observability"
  #     effect: "NoExecute"
  # affinity:
  #   nodeAffinity:
  #     requiredDuringSchedulingIgnoredDuringExecution:
  #       nodeSelectorTerms:
  #         - matchExpressions:
  #           - key: app
  #             operator: In
  #             values:
  #               - observability
# TODO: this monitoring method is deprecated and will be replaced with
# another solution (the chart's `monitoring` section is being phased out in
# favor of the meta-monitoring chart).
monitoring:
  dashboards:
    enabled: false
  rules:
    enabled: true
    labels:
      release: monitoring
  serviceMonitor:
    enabled: true
    labels:
      release: monitoring
  selfMonitoring:
    enabled: false
    grafanaAgent:
      installOperator: false
# In-cluster MinIO is disabled; GCS is the object store.
minio:
  enabled: false

# Zero out replica counts of the other deployment modes so only the
# SimpleScalable read/write/backend targets run.
singleBinary:
  replicas: 0
ingester:
  replicas: 0
querier:
  replicas: 0
queryFrontend:
  replicas: 0
queryScheduler:
  replicas: 0
distributor:
  replicas: 0
compactor:
  replicas: 0
indexGateway:
  replicas: 0
bloomCompactor:
  replicas: 0
bloomGateway:
  replicas: 0

test:
  enabled: false
lokiCanary:
  enabled: false
# Join the old loki-distributed ring during cutover so both deployments share
# one hash ring while Promtail double-writes.
migrate:
  fromDistributed:
    enabled: true
    # Headless memberlist service of the OLD loki-distributed release.
    # (Stray markdown ``` fence that was fused onto this value removed.)
    memberlistService: loki-loki-distributed-memberlist