"empty ring" error with memberlist

I have deployed Grafana Loki with Istio on OpenShift.

When I test pushing logs with Promtail, I get the error below:

level=warn ts=2021-08-13T10:42:42.205933Z caller=client.go:344 component=client host=api.*.appdomain.cloud msg="error sending batch, will retry" status=500 error="server returned HTTP status 500 Internal Server Error (500): empty ring"
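As I understand it, the distributor returns "empty ring" when no ingesters are registered in its ring. The ring state can be inspected on a running Loki pod via the /ring page; a rough check, assuming the default http_listen_port 3100 from the config below (the pod name is a placeholder):

kubectl -n observatorium port-forward <loki-distributor-pod> 3100:3100
curl http://localhost:3100/ring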

Here is the ConfigMap (configmap.yml):

apiVersion: v1
data:
  config.yaml: |-
    "auth_enabled": true
    "chunk_store_config":
      "max_look_back_period": "0s"
    "compactor":
      "compaction_interval": "2h"
      "shared_store": "s3"
      "working_directory": "/data/loki/compactor"
    "distributor":
      "ring":
        "kvstore":
          "store": "memberlist"
    "frontend":
      "compress_responses": true
      "max_outstanding_per_tenant": 200
    "frontend_worker":
      "frontend_address": "observatorium-xyz-loki-query-frontend-grpc.observatorium.svc.cluster.local:9095"
      "grpc_client_config":
        "max_send_msg_size": 104857600
      "parallelism": 32
    "ingester":
      "chunk_block_size": 262144
      "chunk_encoding": "snappy"
      "chunk_idle_period": "2h"
      "chunk_retain_period": "1m"
      "chunk_target_size": 1572864
      "lifecycler":
        "heartbeat_period": "5s"
        "interface_names":
        - "eth0"
        "join_after": "60s"
        "num_tokens": 512
        "ring":
          "heartbeat_timeout": "1m"
          "kvstore":
            "store": "memberlist"
          "replication_factor": 3
      "max_transfer_retries": 0
    "ingester_client":
      "grpc_client_config":
        "max_recv_msg_size": 67108864
      "remote_timeout": "1s"
    "limits_config":
      "enforce_metric_name": false
      "ingestion_burst_size_mb": 20
      "ingestion_rate_mb": 10
      "ingestion_rate_strategy": "global"
      "max_cache_freshness_per_query": "10m"
      "max_global_streams_per_user": 10000
      "max_query_length": "12000h"
      "max_query_parallelism": 32
      "max_streams_per_user": 0
      "reject_old_samples": true
      "reject_old_samples_max_age": "24h"
    "memberlist":
      "abort_if_cluster_join_fails": false
      "bind_port": 7946
      "join_members":
      - "observatorium-xyz-loki-gossip-ring.observatorium.svc.cluster.local:7946"
      "max_join_backoff": "1m"
      "max_join_retries": 10
      "min_join_backoff": "1s"
    "querier":
      "engine":
        "max_look_back_period": "5m"
        "timeout": "3m"
      "extra_query_delay": "0s"
      "query_ingesters_within": "2h"
      "query_timeout": "1h"
      "tail_max_duration": "1h"
    "query_range":
      "align_queries_with_step": true
      "cache_results": true
      "max_retries": 5
      "split_queries_by_interval": "30m"
    "schema_config":
      "configs":
      - "from": "2020-10-01"
        "index":
          "period": "24h"
          "prefix": "loki_index_"
        "object_store": "s3"
        "schema": "v11"
        "store": "boltdb-shipper"
    "server":
      "graceful_shutdown_timeout": "5s"
      "grpc_server_max_concurrent_streams": 1000
      "grpc_server_max_recv_msg_size": 104857600
      "grpc_server_max_send_msg_size": 104857600
      "http_listen_port": 3100
      "http_server_idle_timeout": "120s"
      "http_server_write_timeout": "1m"
    "storage_config":
      "boltdb_shipper":
        "active_index_directory": "/data/loki/index"
        "cache_location": "/data/loki/index_cache"
        "cache_ttl": "24h"
        "resync_interval": "5m"
        "shared_store": "s3"
  overrides.yaml: '{}'
kind: ConfigMap
metadata:
  creationTimestamp: "2021-08-12T11:34:10Z"
  labels:
    app.kubernetes.io/instance: observatorium-xyz
    app.kubernetes.io/name: loki
    app.kubernetes.io/part-of: observatorium
    app.kubernetes.io/version: 2.2.0
  name: observatorium-xyz-loki
  namespace: observatorium
  resourceVersion: "8113011"
  selfLink: /api/v1/namespaces/observatorium/configmaps/observatorium-xyz-loki
  uid: bd9e8b22-f607-4065-b687-eea7448671f2

Could you point me to any reference that would help fix this error?

Hi,
Were you able to solve the issue? I'm having the same problem.

We found out this is related to the Service port names. Istio (some older versions, I believe) infers the traffic protocol from the port name in the Service definition, not from the port's protocol field.

In our case, we adjusted the Service port names from grpc, gossip, and metrics to just tcp, and that change resolved the "empty ring" errors.
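For anyone who lands here, a minimal sketch of the kind of change we mean, using the gossip-ring Service from the config above as an example (the exact manifests generated for Observatorium may differ; the selector and labels here are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: observatorium-xyz-loki-gossip-ring
  namespace: observatorium
spec:
  clusterIP: None            # headless service used by memberlist for gossip
  selector:                  # illustrative; match whatever labels your Loki pods carry
    app.kubernetes.io/name: loki
    app.kubernetes.io/instance: observatorium-xyz
  ports:
  - name: tcp                # was "gossip"; Istio infers the protocol from this name prefix
    protocol: TCP
    port: 7946
    targetPort: 7946

Where a Service exposes several ports, the names must stay unique, so grpc and metrics become something like tcp-grpc and tcp-metrics. On newer Istio and Kubernetes versions the appProtocol field on each port can declare the protocol explicitly instead of relying on the name prefix, but renaming the ports was the simplest fix for us.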
