Home | Markdown | Gemini | Microblog
---
# Helm values for the Tempo chart (namespace: monitoring).
tempo:
  # Keep traces for 7 days.
  retention: 168h
  storage:
    trace:
      # Single-binary mode: store blocks and WAL on the local PV.
      backend: local
      local:
        path: /var/tempo/traces
      wal:
        path: /var/tempo/wal
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: "0.0.0.0:4317"
        http:
          endpoint: "0.0.0.0:4318"

persistence:
  enabled: true
  size: 10Gi
  # Empty string selects static binding to a pre-created PV/PVC.
  storageClassName: ""

resources:
  limits:
    cpu: 1000m
    memory: 2Gi
  requests:
    cpu: 500m
    memory: 1Gi
---
# Static PersistentVolume backing Tempo's trace storage.
# PVs are cluster-scoped, so no namespace is set.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: tempo-data-pv
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  # Retain data if the claim is deleted.
  persistentVolumeReclaimPolicy: Retain
  # Empty class so the PVC declaring storageClassName: "" binds statically.
  storageClassName: ""
  hostPath:
    path: /data/nfs/k3svolumes/tempo/data
---
# Claim bound statically to tempo-data-pv (no dynamic provisioning).
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: tempo-data-pvc
  namespace: monitoring
spec:
  # Empty string disables dynamic provisioning; binds to a pre-created PV.
  storageClassName: ""
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
---
# Grafana provisioning: all datasources, including Tempo with
# trace-to-logs / trace-to-metrics correlation.
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-datasources-all
  namespace: monitoring
data:
  datasources.yaml: |
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        uid: prometheus
        url: http://prometheus-kube-prometheus-prometheus.monitoring:9090/
        access: proxy
        isDefault: true
      - name: Alertmanager
        type: alertmanager
        uid: alertmanager
        url: http://prometheus-kube-prometheus-alertmanager.monitoring:9093/
      - name: Loki
        type: loki
        uid: loki
        url: http://loki.monitoring.svc.cluster.local:3100
      - name: Tempo
        type: tempo
        uid: tempo
        url: http://tempo.monitoring.svc.cluster.local:3200
        jsonData:
          # Jump from a span to the matching Loki logs, with a time
          # window around the span (quoted so "-1h" stays a string).
          tracesToLogsV2:
            datasourceUid: loki
            spanStartTimeShift: "-1h"
            spanEndTimeShift: "1h"
          tracesToMetrics:
            datasourceUid: prometheus
          serviceMap:
            datasourceUid: prometheus
          nodeGraph:
            enabled: true
cd /home/paul/git/conf/f3s/tempo
just install
kubectl get pods -n monitoring -l app.kubernetes.io/name=tempo
kubectl exec -n monitoring <tempo-pod> -- wget -qO- http://localhost:3200/ready
// Receive traces over OTLP on both gRPC (4317) and HTTP (4318),
// then hand them to the batch processor.
otelcol.receiver.otlp "default" {
	grpc {
		endpoint = "0.0.0.0:4317"
	}

	http {
		endpoint = "0.0.0.0:4318"
	}

	output {
		traces = [otelcol.processor.batch.default.input]
	}
}
// Batch spans before export: flush every 5s, target batches of 100
// spans, never exceed 200 per batch.
otelcol.processor.batch "default" {
	timeout             = "5s"
	send_batch_size     = 100
	send_batch_max_size = 200

	output {
		traces = [otelcol.exporter.otlp.tempo.input]
	}
}
// Forward batched spans to Tempo's OTLP gRPC endpoint. TLS is
// disabled (in-cluster plaintext) and payloads are gzip-compressed.
otelcol.exporter.otlp "tempo" {
	client {
		endpoint = "tempo.monitoring.svc.cluster.local:4317"

		tls {
			insecure = true
		}

		compression = "gzip"
	}
}
cd /home/paul/git/conf/f3s/loki
just upgrade
User -> Frontend (Flask:5000) -> Middleware (Flask:5001) -> Backend (Flask:5002)
| | |
Alloy (OTLP:4317) -> Tempo -> Grafana
flask==3.0.0
requests==2.31.0
opentelemetry-distro==0.49b0
opentelemetry-exporter-otlp==1.28.0
opentelemetry-instrumentation-flask==0.49b0
opentelemetry-instrumentation-requests==0.49b0
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
# Fix: BatchSpanProcessor was used below but never imported in the original.
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
from opentelemetry.instrumentation.flask import FlaskInstrumentor
from opentelemetry.instrumentation.requests import RequestsInstrumentor
from opentelemetry.sdk.resources import Resource

# Resource attributes identify this service in Tempo/Grafana
# (searchable via TraceQL, e.g. resource.service.namespace).
resource = Resource(attributes={
    "service.name": "frontend",
    "service.namespace": "tracing-demo",
    "service.version": "1.0.0",
})

provider = TracerProvider(resource=resource)

# Export spans over OTLP/gRPC to the in-cluster Alloy collector
# (plaintext; Alloy forwards to Tempo).
otlp_exporter = OTLPSpanExporter(
    endpoint="http://alloy.monitoring.svc.cluster.local:4317",
    insecure=True,
)
# Batch spans in the background instead of exporting one-by-one.
processor = BatchSpanProcessor(otlp_exporter)
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)

# Auto-instrument inbound Flask requests and outbound `requests` calls.
# NOTE(review): assumes the Flask `app` object is defined before this point.
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
cd /home/paul/git/conf/f3s/tracing-demo
just build
just import
just install
kubectl get pods -n services | grep tracing-demo
kubectl get ingress -n services tracing-demo-ingress
{ resource.service.namespace = "tracing-demo" }
{ duration > 200ms }
{ resource.service.name = "frontend" }
{ status = error }
{ resource.service.namespace = "tracing-demo" } && { span.http.status_code >= 500 }
curl -H "Host: tracing-demo.f3s.foo.zone" http://r0/api/process
{
"middleware_response": {
"backend_data": {
"data": {
"id": 12345,
"query_time_ms": 100.0,
"timestamp": "2025-12-28T18:35:01.064538",
"value": "Sample data from backend service"
},
"service": "backend"
},
"middleware_processed": true,
"original_data": {
"source": "GET request"
},
"transformation_time_ms": 50
},
"request_data": {
"source": "GET request"
},
"service": "frontend",
"status": "success"
}
kubectl exec -n monitoring tempo-0 -- wget -qO- \
  'http://localhost:3200/api/search?tags=service.namespace%3Dtracing-demo&limit=5' 2>/dev/null | \
  python3 -m json.tool
{
"traceID": "4be1151c0bdcd5625ac7e02b98d95bd5",
"rootServiceName": "frontend",
"rootTraceName": "GET /api/process",
"durationMs": 221
}
Trace ID: 4be1151c0bdcd5625ac7e02b98d95bd5

Service: frontend
  GET /api/process       221.10ms  (HTTP server span)
  frontend-process       216.23ms  (business logic)
  POST                   209.97ms  (HTTP client -> middleware)
Service: middleware
  POST /api/transform    186.02ms  (HTTP server span)
  middleware-transform   180.96ms  (business logic)
  GET                    127.52ms  (HTTP client -> backend)
Service: backend
  GET /api/data          103.93ms  (HTTP server span)
  backend-get-data       102.11ms  (business logic, 100ms sleep)
{ resource.service.namespace = "tracing-demo" }

kubectl exec -n monitoring <tempo-pod> -- df -h /var/tempo