How to configure traces for Tempo using only logs from Loki

Hi Grafana community,
I want to load my existing application logs into Loki and then follow traces from them in Tempo. How can I accomplish this without instrumenting the application itself (e.g. with OpenTelemetry)?

Here is my current setup:
:white_check_mark: Configured Grafana and Loki without problems.
:white_check_mark: Successfully sent logs from Elasticsearch to Loki using Logstash.
:white_check_mark: Logs are visible in Grafana.
:white_check_mark: A derived field with an internal link to Tempo has been added to the Loki data source correctly.
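
The derived field was added through the data source settings in Grafana, but it corresponds roughly to this provisioning snippet; the matcher regex and the Tempo data source UID below are placeholders for my actual values:

apiVersion: 1
datasources:
  - name: Loki
    type: loki
    access: proxy
    url: http://loki:3100
    jsonData:
      derivedFields:
        # regex and UID are placeholders, not my exact values
        - name: TraceID
          matcherRegex: 'traceid=(\w+)'
          url: '$${__value.raw}'     # $$ escapes the $ in provisioning files
          datasourceUid: tempo       # UID of the Tempo data source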

:x: But when I search for a trace ID in Tempo, I get an error such as:

failed to get trace with id: 2f506bd0428518c549c87421 Status: 404 Not Found Body: trace not found

:x: A TraceQL query in Tempo doesn't return anything for {}

:x: Using the Loki Search feature inside Tempo returns null for the trace ID

There are no errors in the application logs.
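
For reference, the same lookup can be made directly against Tempo's HTTP API, bypassing Grafana entirely (port 3200 as mapped in the docker-compose file below; the trace ID is the one from the error above):

# trace-by-id lookup straight against Tempo
curl -s http://localhost:3200/api/traces/2f506bd0428518c549c87421

# list whatever traces Tempo has ingested recently, if any
curl -s "http://localhost:3200/api/search?limit=20"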

tempo.yaml

stream_over_http_enabled: true


server:
  http_listen_port: 3200
  log_level: info

query_frontend:
  search:
    duration_slo: 5s
    throughput_bytes_slo: 1.073741824e+09
  trace_by_id:
    duration_slo: 5s

distributor:
  # Optional: log and metric every received span to help debug ingestion.
  # This is not recommended for production environments.
  log_received_spans:
    enabled: true
  metric_received_spans:
    enabled: true
  receivers:                           # this configuration will listen on all ports and protocols that tempo is capable of.
    jaeger:                            # the receivers all come from the OpenTelemetry collector.  more configuration information can
      protocols:                       # be found there: https://github.com/open-telemetry/opentelemetry-collector/tree/main/receiver
        thrift_http:                   #
        grpc:                          # for a production deployment you should only enable the receivers you need!
        thrift_binary:
        thrift_compact:
    zipkin:
    otlp:
      protocols:
        http:
        grpc:
    opencensus:

ingester:
  max_block_duration: 5m               # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

compactor:
  compaction:
    block_retention: 24h                # overall Tempo trace retention. set for demo purposes

metrics_generator:
  registry:
    external_labels:
      source: tempo
      cluster: docker-compose
  storage:
    path: /tmp/tempo/generator/wal
    remote_write:
      - url: http://prometheus:9090/api/v1/write
        send_exemplars: true

storage:
  trace:
    backend: local                     # backend configuration to use
    wal:
      path: /tmp/tempo/wal             # where to store the wal locally
    local:
      path: /tmp/tempo/blocks

overrides:
  defaults:
    metrics_generator:
      processors: [service-graphs, span-metrics] # enables metrics generator
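
To rule out the Tempo receivers themselves, a test span can be pushed by hand over OTLP/HTTP on the exposed port 4318 and then looked up again; a rough sketch (trace/span IDs and timestamps are made up):

curl -s -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{
    "resourceSpans": [{
      "resource": {
        "attributes": [
          { "key": "service.name", "value": { "stringValue": "manual-test" } }
        ]
      },
      "scopeSpans": [{
        "spans": [{
          "traceId": "5b8efff798038103d269b633813fc60c",
          "spanId": "eee19b7ec3c1b174",
          "name": "test-span",
          "kind": 1,
          "startTimeUnixNano": "1700000000000000000",
          "endTimeUnixNano": "1700000001000000000"
        }]
      }]
    }]
  }'

# the hand-pushed trace should then be retrievable:
curl -s http://localhost:3200/api/traces/5b8efff798038103d269b633813fc60c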

loki-config.yaml

auth_enabled: false

server:
  http_listen_port: 3100
  grpc_listen_port: 9096

common:
  path_prefix: /tmp/loki
  storage:
    filesystem:
      chunks_directory: /tmp/loki/chunks
      rules_directory: /tmp/loki/rules
  replication_factor: 1
  ring:
    instance_addr: 127.0.0.1
    kvstore:
      store: inmemory

schema_config:
  configs:
    - from: 2020-10-24
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: index_
        period: 24h

ruler:
  alertmanager_url: http://localhost:9093
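
For the record, the log lines carrying these trace IDs can be found in Loki itself with a query along these lines (the stream selector is a placeholder for my actual labels):

{job="logstash"} |= "2f506bd0428518c549c87421"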

docker-compose.yml

version: "3"

services:
  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    volumes:
      - /root/dockerfiles/cloki/grafana:/var/lib/grafana
    environment:
      - GF_SECURITY_ADMIN_USER=${ADMIN_USER:-admin}
      - GF_SECURITY_ADMIN_PASSWORD=${ADMIN_PASSWORD:-admin}
    restart: unless-stopped 
    user: "0" # IMPORTANT, change this id (unique), get by running 'id -u', 
    ports:
      - "3001:3000" # change(left) if used by other services/containers
    networks:
      - elk_to_loki
  qryn:
    image: qxip/qryn:latest
    container_name: loki
    volumes:
      - /root/dockerfiles/cloki/loki:/etc/loki
    restart: unless-stopped
    expose:
      - 3100
    ports:
      - "3100:3100"
    environment:
      - CLICKHOUSE_SERVER=clickhouse-seed
      - DEBUG=true
    depends_on:
      - clickhouse-seed
    networks:
      - elk_to_loki
  promtail:
    image: grafana/promtail:latest
    volumes:
      - /var/log:/var/log
      - /root/dockerfiles/cloki/promtail:/etc/promtail
      - /var/run/docker.sock:/var/run/docker.sock # necessary for docker logging (do not forget the plugin!)
    command: -config.file=/etc/promtail/promtail-config.yml
    restart: unless-stopped
    networks:
      - elk_to_loki
    # ports:
    #   - "1514:1514" #  only needed for syslogs
  clickhouse-seed:
    image: yandex/clickhouse-server
    container_name: clickhouse-seed
    ports:
      - 8123:8123
    networks:
      - elk_to_loki

  clickhouse-client:
    container_name: clickhouse-client
    image: yandex/clickhouse-client
    entrypoint:
      - /bin/sleep
    command:
      - infinity
    networks:
      - elk_to_loki

  # logstash:
    # image: grafana/logstash-output-loki # for using loki as output # docker.elastic.co/logstash/logstash:
    # container_name: logstash
    # volumes:
      # - /root/dockerfiles/cloki/logstash/logstash.conf:/etc/logstash/conf.d/logstash.conf
      # - /root/dockerfiles/cloki/logstash/certs:/etc/logstash/certs/
      # - /root/dockerfiles/cloki/logstash/logstash.yml:/usr/share/logstash/config/logstash.yml
    # networks:
      # - elk_to_loki
    # command: >
        # bash -c "bundle update
        # && logstash -f /etc/logstash/conf.d/logstash.conf"
    # # above for using opensearch as input
    
  tempo:
    image: grafana/tempo:latest
    command: [ "-config.file=/etc/tempo.yaml" ]
    volumes:
      - /root/dockerfiles/cloki/tempo/tempo.yaml:/etc/tempo.yaml
    ports:
      - "3200:3200"   # tempo
      - "4317:4317"   # otlp grpc
      - "4318:4318"   # otlp http
    networks:
      - elk_to_loki
    
  # otel-collector:
    # image: otel/opentelemetry-collector
    # command: ["--config=/etc/otel-collector-config.yaml"]
    # volumes:
      # - /root/dockerfiles/cloki/otelc/otel-collector-config.yaml:/etc/otel-collector-config.yaml
    # ports:
      # - "4317:4317"   # OTLP gRPC receiver
    
networks:
  elk_to_loki:
    external: true
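
For completeness, the Logstash pipeline that ships the existing logs from Elasticsearch to Loki (currently running outside this compose file) looks roughly like this; the hosts and index pattern are placeholders:

input {
  elasticsearch {
    hosts => ["http://elasticsearch:9200"]   # placeholder
    index => "app-logs-*"                    # placeholder index pattern
    docinfo => true
  }
}

output {
  loki {
    # Grafana's logstash-output-loki plugin
    url => "http://loki:3100/loki/api/v1/push"
  }
}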