Promtail Config And Setup For Docker Swarm Containers

I have a Promtail and Docker Compose config and setup that works fine, but when I try to follow the same approach for a Docker Swarm cluster, the logs do not show up.

I have searched online for documentation on a working Promtail config and setup for Docker Swarm, but unfortunately I could not find anything helpful.

So here I am, hoping someone will be willing to point me in the right direction.

Here is the error from the Promtail container in Docker Swarm:

level=error ts=2024-11-21T00:52:59.569451224Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

level=error ts=2024-11-21T00:53:10.069885261Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

Here is the Promtail config I am using:

server:
  http_listen_port: 9080
  grpc_listen_port: 0

positions:
  filename: /tmp/positions.yaml

clients:
  - url: http://loki:3100/loki/api/v1/push

scrape_configs:
  - job_name: system
    static_configs:
    - targets:
        - localhost
      labels:
        job: varlogs
        # __path__: /var/log/*log
        __path__: /var/log/!(auth.log)*log

  - job_name: flog_scrape 
    docker_sd_configs:
      - host: unix:///var/run/docker.sock
        refresh_interval: 5s
        filters:
          - name: label
            values: ["logging=promtail"] 
    relabel_configs:
      - source_labels: ['__meta_docker_container_name']
        regex: '/(.*)'
        target_label: 'container'
      - source_labels: ['__meta_docker_container_log_stream']
        target_label: 'logstream'
      - source_labels: ['__meta_docker_container_label_logging_jobname']
        target_label: 'job'
    pipeline_stages:
      - cri: {}
      - multiline:
          firstline: ^\d{4}-\d{2}-\d{2} \d{1,2}:\d{2}:\d{2},\d{3}
          max_wait_time: 3s
      # https://grafana.com/docs/loki/latest/clients/promtail/stages/json/
      - json:
          expressions:
            #message: message
            level: level
            #output: 'message'

And here is the Docker Swarm service that has the logging labels attached:

x-logging:
  &default-logging
  driver: "json-file"
  options:
    max-size: "1m"
    max-file: "1"
    tag: "{{.Name}}"

services:

  app-1:
    image: "app-1:v0.1"
    networks:
      - apps
    env_file:
      - ./env/.env.app-1
    deploy:
      mode: replicated
      replicas: 2
      placement:
        constraints:
          - node.labels.node != node-1
    labels:
      logging: "promtail"
      logging_jobname: "containerlogs"
    logging: *default-logging

networks:
  apps:
    external: true

But when I check Loki, I do not see the container and logstream labels in the Loki dashboard the way I do for the Docker Compose setup that works with this same config.

What am I doing wrong, and what do I need to fix so I can see the container and logstream labels and filter down to the logs of the container I want to view?

Docker Swarm labels are not showing up in Loki.

Docker Compose works fine and the labels show up in Loki.

Is /var/run/docker.sock mounted inside your Promtail container?

Yes, it is:

  promtail:
    image: grafana/promtail:2.9.1
    command: "-config.file=/mnt/config/promtail-config.yaml"
    volumes:
      - /mnt/promtail/promtail-config.yaml:/mnt/config/promtail-config.yaml
      - /var/log:/var/log
      - /var/run/docker.sock:/var/run/docker.sock
      - /mnt/docker/containers:/var/lib/docker/containers:ro
    deploy:
      placement:
        constraints:
          - node.labels.node == node-1
    networks:
      - apps
    env_file:
      - ./env/.env.promtail

Make sure Promtail is privileged and see if that helps.

That is the exact same config that works for Docker Compose.

Also, did you see the error messages from Promtail? I am not sure this is a privilege issue.

level=error ts=2024-11-21T00:52:59.569451224Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

level=error ts=2024-11-21T00:53:10.069885261Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

The Docker Swarm stack is deployed by the root user as well.

I restarted Promtail so you can see the initial logs before the errors start showing up:

level=info ts=2024-11-21T21:41:24.665684332Z caller=promtail.go:133 msg="Reloading configuration file" md5sum=f92619b1dd73a5c36bcebbd9ea051546

level=info ts=2024-11-21T21:41:24.67314879Z caller=server.go:322 http=[::]:9080 grpc=[::]:36929 msg="server listening on addresses"

level=info ts=2024-11-21T21:41:24.674030726Z caller=main.go:174 msg="Starting Promtail" version="(version=2.9.1, branch=HEAD, revision=d9d5ed4a1)"

level=warn ts=2024-11-21T21:41:24.674372646Z caller=promtail.go:263 msg="enable watchConfig"

level=info ts=2024-11-21T21:41:29.66909887Z caller=filetargetmanager.go:361 msg="Adding target" key="/var/log/!(auth.log)*log:{job=\"varlogs\"}"

level=info ts=2024-11-21T21:41:29.669412943Z caller=filetargetmanager.go:361 msg="Adding target" key="/var/log/auth.log:{job=\"ssh\"}"

level=info ts=2024-11-21T21:41:29.669713599Z caller=filetarget.go:313 msg="watching new directory" directory=/var/log

level=info ts=2024-11-21T21:41:29.670261451Z caller=target_group.go:128 msg="added Docker target" containerID=cdc417c17ae455845af5e93ec5655bbea738c6b965fbd15a47f97008b05e4fc3

level=info ts=2024-11-21T21:41:29.670428906Z caller=tailer.go:145 component=tailer msg="tail routine: started" path=/var/log/auth.log

level=info ts=2024-11-21T21:41:29.670625422Z caller=target_group.go:128 msg="added Docker target" containerID=91386280fc2c2c10e77d4bea6ad4ecbcb1728b2544068b24dbc583df05357d5c

level=info ts=2024-11-21T21:41:29.670798898Z caller=target_group.go:128 msg="added Docker target" containerID=1ac8da3bfd001029f80906842f5c16dcaa967bc5956975a24e87435898847def

ts=2024-11-21T21:41:29.670631103Z caller=log.go:168 level=info msg="Seeked /var/log/auth.log - &{Offset:0 Whence:0}"

level=info ts=2024-11-21T21:41:29.670919008Z caller=target_group.go:128 msg="added Docker target" containerID=b1950b45095564b78042c51c68bbac6ffa854a12dec18e1562a208df1b24df4f

level=info ts=2024-11-21T21:41:29.671015951Z caller=target_group.go:128 msg="added Docker target" containerID=aeef6798aecae5e4469b6d2e06c9dcf94ed07519f68dbddd3fa43cd352293ab3

level=info ts=2024-11-21T21:41:29.671722225Z caller=target_group.go:128 msg="added Docker target" containerID=a36db6b8c6230b750b763f13cf6f87b7c421b1bde552a4e9ca3a031945321f54

level=info ts=2024-11-21T21:41:29.671826561Z caller=target_group.go:128 msg="added Docker target" containerID=f7586a5ef226f73c4b72e25ef93c20aaf5b156c2f707a120d1a2e3aa59f6ef41

level=info ts=2024-11-21T21:41:29.67682167Z caller=target_group.go:128 msg="added Docker target" containerID=97f868ebb42580e421696818bdecdd70542b51c39e6a282e90a50a82316763b3

level=error ts=2024-11-21T21:41:30.159648719Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

level=error ts=2024-11-21T21:41:30.622808254Z caller=client.go:430 component=client host=loki:3100 msg="final error sending batch" status=400 tenant= error="server returned HTTP status 400 Bad Request (400): error at least one label pair is required per stream"

The error says at least one label is required. Since your configuration has relabel rules that should be producing labels, that is a signal that something else is wrong, not the configuration itself.

However, it may be worth trying again with a static label added temporarily, just to get rid of that error and see whether anything else obvious shows up.

How do I add a static label? Would you mind showing me what to add to the existing config to achieve that?

Also, I don't really understand the error message. What label is required? Where? And why does it say at least one label pair is required per stream?

See static_labels | Grafana Loki documentation

When sending logs to Loki, every stream has to carry at least one label pair. Loki identifies each stream by its set of labels, so a push with an empty label set is rejected with exactly this 400 error.
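
To make that concrete, here is a sketch of one stream in the payload Promtail sends to the push API (the label, timestamp, and log line are made up for illustration). The "stream" object holding the labels is what must not be empty:

{
  "streams": [
    {
      "stream": { "job": "varlogs" },
      "values": [ [ "1732230000000000000", "example log line" ] ]
    }
  ]
}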

Based on my current config, what do I need to add and where?
That is easier than reading up on the doc to figure out what it is and comparing it with what I have.

Here are my Promtail config, the swarm yaml for the app I need logs for, and the Promtail swarm yaml again (all unchanged from what I posted above).
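For reference, the temporary static label suggested above can be added with a static_labels stage at the top of the existing flog_scrape job's pipeline_stages (the label name and value here are arbitrary placeholders):

    pipeline_stages:
      - static_labels:
          scraped_by: promtail_swarm
      # the existing cri / multiline / json stages follow unchanged

With that in place, every stream from the job carries at least one label, so the 400 error goes away and any remaining problems become visible.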

For anyone having the same issue: Promtail needs to be deployed as a global service so it runs on every node. I only had it on a single node (node-1), while my app replicas were constrained to run anywhere but node-1, so Promtail's Docker service discovery, which only sees containers through the local /var/run/docker.sock, could never find them.

So for a Docker Swarm cluster with multiple nodes, Promtail needs to be deployed in global mode.
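
Concretely, that means replacing the node-1 placement constraint in the Promtail service with global mode; a sketch, with the rest of the service unchanged from above:

  promtail:
    image: grafana/promtail:2.9.1
    command: "-config.file=/mnt/config/promtail-config.yaml"
    volumes:
      - /mnt/promtail/promtail-config.yaml:/mnt/config/promtail-config.yaml
      - /var/log:/var/log
      - /var/run/docker.sock:/var/run/docker.sock
      - /mnt/docker/containers:/var/lib/docker/containers:ro
    deploy:
      mode: global
    networks:
      - apps
    env_file:
      - ./env/.env.promtail

Note that the bind-mounted paths (the config file in particular) then have to exist on every node. With one Promtail task per node, each instance discovers the containers on its own node through the local Docker socket, and the container and logstream labels show up in Loki.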

You are welcome.