👷 chore: added docker-compose configuration files for the Grafana, Prometheus, Tempo, and OTel stacks (#9071)

* feat: added docker-compose configuration files for the Grafana, Prometheus, Tempo, and OTel stacks

* fix: datasource uid
Neko authored 2025-09-11 11:27:24 +08:00; committed by GitHub
parent 9089e2683e · commit bed591c559
GPG Key ID: B5690EEEBB952194 (no known key found for this signature in database)
20 changed files with 1091 additions and 1 deletion

@@ -8,6 +8,9 @@ services:
- '${MINIO_PORT}:${MINIO_PORT}' # MinIO API
- '9001:9001' # MinIO Console
- '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
- '3000:3000' # Grafana
- '4318:4318' # otel-collector HTTP
- '4317:4317' # otel-collector gRPC
command: tail -f /dev/null
networks:
- lobe-network
@@ -29,11 +32,52 @@ services:
file: docker-compose/local/docker-compose.yml
service: searxng
grafana:
profiles:
- otel
extends:
file: docker-compose/local/grafana/docker-compose.yml
service: grafana
tempo:
profiles:
- otel
extends:
file: docker-compose/local/grafana/docker-compose.yml
service: tempo
prometheus:
profiles:
- otel
extends:
file: docker-compose/local/grafana/docker-compose.yml
service: prometheus
otel-collector:
profiles:
- otel
extends:
file: docker-compose/local/grafana/docker-compose.yml
service: otel-collector
otel-tracing-test:
profiles:
- otel-test
extends:
file: docker-compose/local/grafana/docker-compose.yml
service: otel-tracing-test
volumes:
data:
driver: local
s3_data:
driver: local
grafana_data:
driver: local
tempo_data:
driver: local
prometheus_data:
driver: local
networks:
lobe-network:
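
Note: the observability services above are gated behind Compose profiles, so a plain docker compose up leaves them out. A minimal usage sketch, assuming this is the compose file in the working directory and the referenced .env values are in place:

  # base stack plus Grafana, Tempo, Prometheus and the otel-collector
  docker compose --profile otel up -d

  # tear the same selection down again
  docker compose --profile otel down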

@@ -9,6 +9,9 @@ services:
- '9001:9001' # MinIO Console
- '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
- '${LOBE_PORT}:3210' # LobeChat
- '3000:3000' # Grafana
- '4318:4318' # otel-collector HTTP
- '4317:4317' # otel-collector gRPC
command: tail -f /dev/null
networks:
- lobe-network
@@ -58,7 +61,7 @@ services:
wait \$MINIO_PID
"
# version lock ref: https://github.com/lobehub/lobe-chat/pull/7331
# version lock ref: https://github.com/lobehub/lobe-chat/pull/7331
casdoor:
image: casbin/casdoor:v2.13.0
container_name: lobe-casdoor
@@ -162,11 +165,90 @@ services:
wait \$LOBE_PID
"
grafana:
profiles:
- otel
image: grafana/grafana:12.2.0-17419259409
container_name: lobe-grafana
network_mode: 'service:network-service'
restart: always
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/dashboards:/etc/grafana/provisioning/dashboards
- ./grafana/datasources:/etc/grafana/provisioning/datasources
depends_on:
- tempo
- prometheus
tempo:
profiles:
- otel
image: grafana/tempo:latest
container_name: lobe-tempo
network_mode: 'service:network-service'
restart: always
volumes:
- ./tempo/tempo.yaml:/etc/tempo.yaml
- tempo_data:/var/tempo
command: ['-config.file=/etc/tempo.yaml']
prometheus:
profiles:
- otel
image: prom/prometheus
container_name: lobe-prometheus
network_mode: 'service:network-service'
restart: always
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-otlp-receiver'
- '--web.enable-remote-write-receiver'
- '--enable-feature=exemplar-storage'
otel-collector:
profiles:
- otel
image: otel/opentelemetry-collector
container_name: lobe-otel-collector
network_mode: 'service:network-service'
restart: always
volumes:
- ./otel-collector/collector-config.yaml:/etc/otelcol/config.yaml
command: ['--config', '/etc/otelcol/config.yaml']
depends_on:
- tempo
- prometheus
otel-tracing-test:
profiles:
- otel-test
image: ghcr.io/grafana/xk6-client-tracing:v0.0.7
container_name: lobe-otel-tracing-test
network_mode: 'service:network-service'
restart: always
environment:
- ENDPOINT=127.0.0.1:4317
volumes:
data:
driver: local
s3_data:
driver: local
grafana_data:
driver: local
tempo_data:
driver: local
prometheus_data:
driver: local
networks:
lobe-network:
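
Note: every service added here joins the network namespace of network-service, so only the ports published on that container are reachable from the host: Grafana on 3000 and the collector's OTLP endpoints on 4318 (HTTP) and 4317 (gRPC). A quick smoke test once the otel profile is running (a sketch; the empty OTLP payload is only a reachability check):

  # Grafana health endpoint (anonymous admin access is enabled in this file)
  curl -sf http://localhost:3000/api/health

  # the collector should answer OTLP/HTTP POSTs on 4318
  curl -si -X POST http://localhost:4318/v1/traces -H 'Content-Type: application/json' -d '{}'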

@@ -0,0 +1,42 @@
# Proxy, if you need it
# HTTP_PROXY=http://localhost:7890
# HTTPS_PROXY=http://localhost:7890
# Other environment variables, as needed. You can refer to the environment variables configuration for the client version, making sure not to have ACCESS_CODE.
# OPENAI_API_KEY=sk-xxxx
# OPENAI_PROXY_URL=https://api.openai.com/v1
# OPENAI_MODEL_LIST=...
# ===========================
# ====== Preset config ======
# ===========================
# if no special requirements, no need to change
LOBE_PORT=3210
CASDOOR_PORT=8000
MINIO_PORT=9000
APP_URL=http://localhost:3210
AUTH_URL=http://localhost:3210/api/auth
# Postgres related, which are the necessary environment variables for DB
LOBE_DB_NAME=lobechat
POSTGRES_PASSWORD=uWNZugjBqixf8dxC
AUTH_CASDOOR_ISSUER=http://localhost:8000
# Casdoor secret
AUTH_CASDOOR_ID=a387a4892ee19b1a2249
AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354
CASDOOR_WEBHOOK_SECRET=casdoor-secret
# MinIO S3 configuration
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD
# Configure the bucket information of MinIO
S3_PUBLIC_DOMAIN=http://localhost:9000
S3_ENDPOINT=http://localhost:9000
MINIO_LOBE_BUCKET=lobe
# Configure for casdoor
origin=http://localhost:8000

@@ -0,0 +1,42 @@
# Proxy如果你需要的话比如你使用 GitHub 作为鉴权服务提供商)
# HTTP_PROXY=http://localhost:7890
# HTTPS_PROXY=http://localhost:7890
# 其他环境变量,视需求而定,可以参照客户端版本的环境变量配置,注意不要有 ACCESS_CODE
# OPENAI_API_KEY=sk-xxxx
# OPENAI_PROXY_URL=https://api.openai.com/v1
# OPENAI_MODEL_LIST=...
# ===================
# ===== 预设配置 =====
# ===================
# 如没有特殊需要不用更改
LOBE_PORT=3210
CASDOOR_PORT=8000
MINIO_PORT=9000
APP_URL=http://localhost:3210
AUTH_URL=http://localhost:3210/api/auth
# Postgres 相关,也即 DB 必须的环境变量
LOBE_DB_NAME=lobechat
POSTGRES_PASSWORD=uWNZugjBqixf8dxC
AUTH_CASDOOR_ISSUER=http://localhost:8000
# Casdoor secret
AUTH_CASDOOR_ID=a387a4892ee19b1a2249
AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354
CASDOOR_WEBHOOK_SECRET=casdoor-secret
# MinIO S3 配置
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD
# 在下方配置 minio 中添加的桶
S3_PUBLIC_DOMAIN=http://localhost:9000
S3_ENDPOINT=http://localhost:9000
MINIO_LOBE_BUCKET=lobe
# 为 casdoor 配置
origin=http://localhost:8000

@@ -0,0 +1,251 @@
name: lobe-chat-database
services:
network-service:
image: alpine
container_name: lobe-network
restart: always
ports:
- '${MINIO_PORT}:${MINIO_PORT}' # MinIO API
- '9001:9001' # MinIO Console
- '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
- '${LOBE_PORT}:3210' # LobeChat
- '3000:3000' # Grafana
- '4318:4318' # otel-collector HTTP
- '4317:4317' # otel-collector gRPC
command: tail -f /dev/null
networks:
- lobe-network
postgresql:
image: pgvector/pgvector:pg17
container_name: lobe-postgres
ports:
- '5432:5432'
volumes:
- './data:/var/lib/postgresql/data'
environment:
- 'POSTGRES_DB=${LOBE_DB_NAME}'
- 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U postgres']
interval: 5s
timeout: 5s
retries: 5
restart: always
networks:
- lobe-network
minio:
image: minio/minio:RELEASE.2025-04-22T22-12-26Z
container_name: lobe-minio
network_mode: 'service:network-service'
volumes:
- './s3_data:/etc/minio/data'
environment:
- 'MINIO_API_CORS_ALLOW_ORIGIN=*'
env_file:
- .env
restart: always
entrypoint: >
/bin/sh -c "
minio server /etc/minio/data --address ':${MINIO_PORT}' --console-address ':9001' &
MINIO_PID=\$!
while ! curl -s http://localhost:${MINIO_PORT}/minio/health/live; do
echo 'Waiting for MinIO to start...'
sleep 1
done
sleep 5
mc alias set myminio http://localhost:${MINIO_PORT} ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD}
echo 'Creating bucket ${MINIO_LOBE_BUCKET}'
mc mb myminio/${MINIO_LOBE_BUCKET}
wait \$MINIO_PID
"
# version lock ref: https://github.com/lobehub/lobe-chat/pull/7331
casdoor:
image: casbin/casdoor:v2.13.0
container_name: lobe-casdoor
entrypoint: /bin/sh -c './server --createDatabase=true'
network_mode: 'service:network-service'
depends_on:
postgresql:
condition: service_healthy
environment:
httpport: ${CASDOOR_PORT}
RUNNING_IN_DOCKER: 'true'
driverName: 'postgres'
dataSourceName: 'user=postgres password=${POSTGRES_PASSWORD} host=postgresql port=5432 sslmode=disable dbname=casdoor'
runmode: 'dev'
volumes:
- ./init_data.json:/init_data.json
env_file:
- .env
searxng:
image: searxng/searxng
container_name: lobe-searxng
volumes:
- './searxng-settings.yml:/etc/searxng/settings.yml'
environment:
- 'SEARXNG_SETTINGS_FILE=/etc/searxng/settings.yml'
restart: always
networks:
- lobe-network
env_file:
- .env
grafana:
image: grafana/grafana:12.2.0-17419259409
container_name: lobe-grafana
network_mode: 'service:network-service'
restart: always
environment:
- GF_AUTH_ANONYMOUS_ENABLED=true
- GF_AUTH_ANONYMOUS_ORG_ROLE=Admin
- GF_AUTH_DISABLE_LOGIN_FORM=true
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/dashboards:/etc/grafana/provisioning/dashboards
- ./grafana/datasources:/etc/grafana/provisioning/datasources
depends_on:
- tempo
- prometheus
tempo:
image: grafana/tempo:latest
container_name: lobe-tempo
network_mode: 'service:network-service'
restart: always
volumes:
- ./tempo/tempo.yaml:/etc/tempo.yaml
- tempo_data:/var/tempo
command: ['-config.file=/etc/tempo.yaml']
prometheus:
image: prom/prometheus
container_name: lobe-prometheus
network_mode: 'service:network-service'
restart: always
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-otlp-receiver'
- '--web.enable-remote-write-receiver'
- '--enable-feature=exemplar-storage'
otel-collector:
image: otel/opentelemetry-collector
container_name: lobe-otel-collector
network_mode: 'service:network-service'
restart: always
volumes:
- ./otel-collector/collector-config.yaml:/etc/otelcol/config.yaml
command: ['--config', '/etc/otelcol/config.yaml']
depends_on:
- tempo
- prometheus
otel-tracing-test:
profiles:
- otel-test
image: ghcr.io/grafana/xk6-client-tracing:v0.0.7
container_name: lobe-otel-tracing-test
network_mode: 'service:network-service'
restart: always
environment:
- ENDPOINT=127.0.0.1:4317
lobe:
image: lobehub/lobe-chat-database
container_name: lobe-chat
network_mode: 'service:network-service'
depends_on:
postgresql:
condition: service_healthy
network-service:
condition: service_started
minio:
condition: service_started
casdoor:
condition: service_started
environment:
- 'NEXT_AUTH_SSO_PROVIDERS=casdoor'
- 'KEY_VAULTS_SECRET=Kix2wcUONd4CX51E/ZPAd36BqM4wzJgKjPtz2sGztqQ='
- 'NEXT_AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg'
- 'DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/${LOBE_DB_NAME}'
- 'S3_BUCKET=${MINIO_LOBE_BUCKET}'
- 'S3_ENABLE_PATH_STYLE=1'
- 'S3_ACCESS_KEY=${MINIO_ROOT_USER}'
- 'S3_ACCESS_KEY_ID=${MINIO_ROOT_USER}'
- 'S3_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD}'
- 'LLM_VISION_IMAGE_USE_BASE64=1'
- 'S3_SET_ACL=0'
- 'SEARXNG_URL=http://searxng:8080'
- 'OTEL_EXPORTER_OTLP_METRICS_PROTOCOL=http/protobuf'
- 'OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=http://localhost:4318/v1/metrics'
- 'OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=http/protobuf'
- 'OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:4318/v1/traces'
env_file:
- .env
restart: always
entrypoint: >
/bin/sh -c "
/bin/node /app/startServer.js &
LOBE_PID=\$!
sleep 3
if [ $(wget --timeout=5 --spider --server-response ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
echo '⚠Warning: Unable to fetch OIDC configuration from Casdoor'
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
echo '⚠️注意:无法从 Casdoor 获取 OIDC 配置'
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
else
if ! wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep 'issuer' | grep ${AUTH_CASDOOR_ISSUER}; then
printf '❌Error: The Auth issuer is conflict, Issuer in OIDC configuration is: %s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
echo ' , but the issuer in .env file is: ${AUTH_CASDOOR_ISSUER} '
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
printf '❌错误Auth 的 issuer 冲突OIDC 配置中的 issuer 是:%s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
echo ' , 但 .env 文件中的 issuer 是:${AUTH_CASDOOR_ISSUER} '
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
fi
fi
if [ $(wget --timeout=5 --spider --server-response ${S3_ENDPOINT}/minio/health/live 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
echo '⚠Warning: Unable to fetch MinIO health status'
echo 'Request URL: ${S3_ENDPOINT}/minio/health/live'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
echo '⚠️注意:无法获取 MinIO 健康状态'
echo '请求 URL: ${S3_ENDPOINT}/minio/health/live'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
fi
wait \$LOBE_PID
"
volumes:
data:
driver: local
s3_data:
driver: local
grafana_data:
driver: local
tempo_data:
driver: local
prometheus_data:
driver: local
networks:
lobe-network:
driver: bridge
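
Note: the lobe service exports metrics and traces over OTLP/HTTP to localhost:4318, which lands on the collector because both containers share the network-service namespace. To exercise the same path without the app, a hand-written OTLP/JSON span can be posted from the host; a sketch with arbitrary hex IDs and a placeholder service.name:

  curl -s -X POST http://localhost:4318/v1/traces \
    -H 'Content-Type: application/json' \
    -d '{
      "resourceSpans": [{
        "resource": { "attributes": [{ "key": "service.name", "value": { "stringValue": "curl-test" } }] },
        "scopeSpans": [{
          "spans": [{
            "traceId": "5b8aa5a2d2c872e8321cf37308d69df2",
            "spanId": "051581bf3cb55c13",
            "name": "manual-test-span",
            "kind": 1,
            "startTimeUnixNano": "1700000000000000000",
            "endTimeUnixNano": "1700000001000000000"
          }]
        }]
      }]
    }'

If the pipeline is wired correctly, the trace should become searchable in Grafana Explore via the Tempo datasource shortly afterwards.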

@@ -0,0 +1,15 @@
apiVersion: 1
prune: true
datasources:
- name: Prometheus
type: prometheus
uid: prometheus
access: proxy
orgId: 1
url: http://127.0.0.1:9090
basicAuth: false
isDefault: false
version: 1
editable: false
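
Note: Grafana reads this file from the provisioning directory mounted in the compose files above (/etc/grafana/provisioning/datasources) at startup. A quick way to confirm the datasource was provisioned, sketched under the assumption that Grafana is running with anonymous admin access (otherwise add credentials):

  curl -s http://localhost:3000/api/datasources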

@@ -0,0 +1,16 @@
apiVersion: 1
prune: true
datasources:
- name: Tempo
type: tempo
access: proxy
orgId: 1
url: http://127.0.0.1:3200
basicAuth: false
isDefault: true
version: 1
editable: false
apiVersion: 1
uid: tempo

@@ -0,0 +1,45 @@
extensions:
health_check:
endpoint: 127.0.0.1:13133
receivers:
prometheus:
config:
scrape_configs:
- job_name: otel-collector-metrics
scrape_interval: 10s
static_configs:
- targets: ["127.0.0.1:8888"]
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
exporters:
prometheusremotewrite:
endpoint: http://127.0.0.1:9090/api/v1/write
tls:
insecure: true
otlp:
endpoint: 127.0.0.1:14317
tls:
insecure: true
debug:
verbosity: detailed
service:
pipelines:
metrics:
receivers: [prometheus]
exporters: [prometheusremotewrite]
traces:
receivers: [otlp]
exporters: [otlp]
logs:
receivers: [otlp]
exporters: [debug]
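
Note: the service section defines three pipelines: metrics scraped from the collector's own telemetry endpoint are remote-written to Prometheus on 9090, OTLP traces are forwarded to Tempo on 14317, and OTLP logs only go to the debug exporter. Ports 8888 and 13133 are not published to the host, so the collector's self-metrics are easiest to read from inside the shared namespace; a sketch, assuming the otel profile is up and 8888 is the collector's default self-telemetry port:

  # collector self-telemetry, the same endpoint its prometheus receiver scrapes
  docker compose exec network-service wget -qO- http://127.0.0.1:8888/metrics | head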

@@ -0,0 +1,11 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: "prometheus"
static_configs:
- targets: ["127.0.0.1:9090"]
- job_name: "tempo"
static_configs:
- targets: ["127.0.0.1:3200"]
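
Note: this Prometheus instance only scrapes itself and Tempo; everything else arrives through the OTLP and remote-write receivers enabled on the command line in the compose files. Port 9090 is not published on the host, so ad-hoc queries go through the shared namespace; a sketch:

  docker compose exec network-service wget -qO- 'http://127.0.0.1:9090/api/v1/query?query=up'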

@@ -0,0 +1,58 @@
stream_over_http_enabled: true
server:
http_listen_port: 3200
log_level: info
query_frontend:
search:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
metadata_slo:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
trace_by_id:
duration_slo: 5s
distributor:
max_attribute_bytes: 10485760
receivers:
otlp:
protocols:
grpc:
endpoint: 127.0.0.1:14317
http:
endpoint: 127.0.0.1:14318
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
compactor:
compaction:
block_retention: 1h # overall Tempo trace retention. set for demo purposes
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /var/tempo/generator/wal
remote_write:
- url: http://127.0.0.1:9090/api/v1/write
send_exemplars: true
traces_storage:
path: /var/tempo/generator/traces
storage:
trace:
backend: local # backend configuration to use
wal:
path: /var/tempo/wal # where to store the wal locally
local:
path: /var/tempo/blocks
overrides:
defaults:
metrics_generator:
processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
generate_native_histograms: both
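
Note: Tempo receives OTLP on 14317/14318 (fed by the collector rather than by applications directly) and serves its query API on 3200, which the Grafana Tempo datasource points at. Readiness and search can be checked from inside the namespace; a sketch following Tempo's HTTP API, with my-service as a placeholder service name:

  docker compose exec network-service wget -qO- http://127.0.0.1:3200/ready
  docker compose exec network-service wget -qO- 'http://127.0.0.1:3200/api/search?tags=service.name%3Dmy-service'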

@@ -0,0 +1,44 @@
# Proxy, if you need it
# HTTP_PROXY=http://localhost:7890
# HTTPS_PROXY=http://localhost:7890
# Other environment variables, as needed. You can refer to the environment variables configuration for the client version, making sure not to have ACCESS_CODE.
# OPENAI_API_KEY=sk-xxxx
# OPENAI_PROXY_URL=https://api.openai.com/v1
# OPENAI_MODEL_LIST=...
# ===========================
# ====== Preset config ======
# ===========================
# if no special requirements, no need to change
LOBE_PORT=3210
CASDOOR_PORT=8000
MINIO_PORT=9000
APP_URL=http://localhost:3210
AUTH_URL=http://localhost:3210/api/auth
# Postgres related, which are the necessary environment variables for DB
LOBE_DB_NAME=lobechat
POSTGRES_PASSWORD=uWNZugjBqixf8dxC
AUTH_CASDOOR_ISSUER=http://localhost:8000
# Casdoor secret
AUTH_CASDOOR_ID=a387a4892ee19b1a2249
AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354
CASDOOR_WEBHOOK_SECRET=casdoor-secret
# MinIO S3 configuration
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD
# Configure the bucket information of MinIO
S3_PUBLIC_DOMAIN=http://localhost:9000
S3_ENDPOINT=http://localhost:9000
MINIO_LOBE_BUCKET=lobe
GF_SECURITY_ADMIN_PASSWORD=YOUR_GRAFANA_PASSWORD
# Configure for casdoor
origin=http://localhost:8000

@@ -0,0 +1,42 @@
# Proxy如果你需要的话比如你使用 GitHub 作为鉴权服务提供商)
# HTTP_PROXY=http://localhost:7890
# HTTPS_PROXY=http://localhost:7890
# 其他环境变量,视需求而定,可以参照客户端版本的环境变量配置,注意不要有 ACCESS_CODE
# OPENAI_API_KEY=sk-xxxx
# OPENAI_PROXY_URL=https://api.openai.com/v1
# OPENAI_MODEL_LIST=...
# ===================
# ===== 预设配置 =====
# ===================
# 如没有特殊需要不用更改
LOBE_PORT=3210
CASDOOR_PORT=8000
MINIO_PORT=9000
APP_URL=http://localhost:3210
AUTH_URL=http://localhost:3210/api/auth
# Postgres 相关,也即 DB 必须的环境变量
LOBE_DB_NAME=lobechat
POSTGRES_PASSWORD=uWNZugjBqixf8dxC
AUTH_CASDOOR_ISSUER=http://localhost:8000
# Casdoor secret
AUTH_CASDOOR_ID=a387a4892ee19b1a2249
AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354
CASDOOR_WEBHOOK_SECRET=casdoor-secret
# MinIO S3 配置
MINIO_ROOT_USER=admin
MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD
# 在下方配置 minio 中添加的桶
S3_PUBLIC_DOMAIN=http://localhost:9000
S3_ENDPOINT=http://localhost:9000
MINIO_LOBE_BUCKET=lobe
# 为 casdoor 配置
origin=http://localhost:8000

@@ -0,0 +1,249 @@
name: lobe-chat-database
services:
network-service:
image: alpine
container_name: lobe-network
restart: always
ports:
- '${MINIO_PORT}:${MINIO_PORT}' # MinIO API
- '9001:9001' # MinIO Console
- '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor
- '${LOBE_PORT}:3210' # LobeChat
- '3000:3000' # Grafana
- '4318:4318' # otel-collector HTTP
- '4317:4317' # otel-collector gRPC
command: tail -f /dev/null
networks:
- lobe-network
postgresql:
image: pgvector/pgvector:pg17
container_name: lobe-postgres
ports:
- '5432:5432'
volumes:
- './data:/var/lib/postgresql/data'
environment:
- 'POSTGRES_DB=${LOBE_DB_NAME}'
- 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}'
healthcheck:
test: ['CMD-SHELL', 'pg_isready -U postgres']
interval: 5s
timeout: 5s
retries: 5
restart: always
networks:
- lobe-network
minio:
image: minio/minio:RELEASE.2025-04-22T22-12-26Z
container_name: lobe-minio
network_mode: 'service:network-service'
volumes:
- './s3_data:/etc/minio/data'
environment:
- 'MINIO_API_CORS_ALLOW_ORIGIN=*'
env_file:
- .env
restart: always
entrypoint: >
/bin/sh -c "
minio server /etc/minio/data --address ':${MINIO_PORT}' --console-address ':9001' &
MINIO_PID=\$!
while ! curl -s http://localhost:${MINIO_PORT}/minio/health/live; do
echo 'Waiting for MinIO to start...'
sleep 1
done
sleep 5
mc alias set myminio http://localhost:${MINIO_PORT} ${MINIO_ROOT_USER} ${MINIO_ROOT_PASSWORD}
echo 'Creating bucket ${MINIO_LOBE_BUCKET}'
mc mb myminio/${MINIO_LOBE_BUCKET}
wait \$MINIO_PID
"
# version lock ref: https://github.com/lobehub/lobe-chat/pull/7331
casdoor:
image: casbin/casdoor:v2.13.0
container_name: lobe-casdoor
entrypoint: /bin/sh -c './server --createDatabase=true'
network_mode: 'service:network-service'
depends_on:
postgresql:
condition: service_healthy
environment:
httpport: ${CASDOOR_PORT}
RUNNING_IN_DOCKER: 'true'
driverName: 'postgres'
dataSourceName: 'user=postgres password=${POSTGRES_PASSWORD} host=postgresql port=5432 sslmode=disable dbname=casdoor'
runmode: 'dev'
volumes:
- ./init_data.json:/init_data.json
env_file:
- .env
searxng:
image: searxng/searxng
container_name: lobe-searxng
volumes:
- './searxng-settings.yml:/etc/searxng/settings.yml'
environment:
- 'SEARXNG_SETTINGS_FILE=/etc/searxng/settings.yml'
restart: always
networks:
- lobe-network
env_file:
- .env
grafana:
image: grafana/grafana:12.2.0-17419259409
container_name: lobe-grafana
network_mode: 'service:network-service'
restart: always
environment:
- GF_SECURITY_ADMIN_PASSWORD=${GF_SECURITY_ADMIN_PASSWORD}
- GF_FEATURE_TOGGLES_ENABLE=traceqlEditor
volumes:
- grafana_data:/var/lib/grafana
- ./grafana/dashboards:/etc/grafana/provisioning/dashboards
- ./grafana/datasources:/etc/grafana/provisioning/datasources
depends_on:
- tempo
- prometheus
tempo:
image: grafana/tempo:latest
container_name: lobe-tempo
network_mode: 'service:network-service'
restart: always
volumes:
- ./tempo/tempo.yaml:/etc/tempo.yaml
- tempo_data:/var/tempo
command: ['-config.file=/etc/tempo.yaml']
prometheus:
image: prom/prometheus
container_name: lobe-prometheus
network_mode: 'service:network-service'
restart: always
volumes:
- ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-otlp-receiver'
- '--web.enable-remote-write-receiver'
- '--enable-feature=exemplar-storage'
otel-collector:
image: otel/opentelemetry-collector
container_name: lobe-otel-collector
network_mode: 'service:network-service'
restart: always
volumes:
- ./otel-collector/collector-config.yaml:/etc/otelcol/config.yaml
command: ['--config', '/etc/otelcol/config.yaml']
depends_on:
- tempo
- prometheus
otel-tracing-test:
profiles:
- otel-test
image: ghcr.io/grafana/xk6-client-tracing:v0.0.7
container_name: lobe-otel-tracing-test
network_mode: 'service:network-service'
restart: always
environment:
- ENDPOINT=127.0.0.1:4317
lobe:
image: lobehub/lobe-chat-database
container_name: lobe-chat
network_mode: 'service:network-service'
depends_on:
postgresql:
condition: service_healthy
network-service:
condition: service_started
minio:
condition: service_started
casdoor:
condition: service_started
environment:
- 'NEXT_AUTH_SSO_PROVIDERS=casdoor'
- 'KEY_VAULTS_SECRET=Kix2wcUONd4CX51E/ZPAd36BqM4wzJgKjPtz2sGztqQ='
- 'NEXT_AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg'
- 'DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/${LOBE_DB_NAME}'
- 'S3_BUCKET=${MINIO_LOBE_BUCKET}'
- 'S3_ENABLE_PATH_STYLE=1'
- 'S3_ACCESS_KEY=${MINIO_ROOT_USER}'
- 'S3_ACCESS_KEY_ID=${MINIO_ROOT_USER}'
- 'S3_SECRET_ACCESS_KEY=${MINIO_ROOT_PASSWORD}'
- 'LLM_VISION_IMAGE_USE_BASE64=1'
- 'S3_SET_ACL=0'
- 'SEARXNG_URL=http://searxng:8080'
- 'OTEL_EXPORTER_OTLP_METRICS_PROTOCOL=http/protobuf'
- 'OTEL_EXPORTER_OTLP_METRICS_ENDPOINT=http://localhost:4318/v1/metrics'
- 'OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=http/protobuf'
- 'OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=http://localhost:4318/v1/traces'
env_file:
- .env
restart: always
entrypoint: >
/bin/sh -c "
/bin/node /app/startServer.js &
LOBE_PID=\$!
sleep 3
if [ $(wget --timeout=5 --spider --server-response ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
echo '⚠Warning: Unable to fetch OIDC configuration from Casdoor'
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
echo '⚠️注意:无法从 Casdoor 获取 OIDC 配置'
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
else
if ! wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep 'issuer' | grep ${AUTH_CASDOOR_ISSUER}; then
printf '❌Error: The Auth issuer is conflict, Issuer in OIDC configuration is: %s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
echo ' , but the issuer in .env file is: ${AUTH_CASDOOR_ISSUER} '
echo 'Request URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
printf '❌错误Auth 的 issuer 冲突OIDC 配置中的 issuer 是:%s' \$(wget -O - --timeout=5 ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration 2>&1 | grep -E 'issuer.*' | awk -F '\"' '{print \$4}')
echo ' , 但 .env 文件中的 issuer 是:${AUTH_CASDOOR_ISSUER} '
echo '请求 URL: ${AUTH_CASDOOR_ISSUER}/.well-known/openid-configuration'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
fi
fi
if [ $(wget --timeout=5 --spider --server-response ${S3_ENDPOINT}/minio/health/live 2>&1 | grep -c 'HTTP/1.1 200 OK') -eq 0 ]; then
echo '⚠Warning: Unable to fetch MinIO health status'
echo 'Request URL: ${S3_ENDPOINT}/minio/health/live'
echo 'Read more at: https://lobehub.com/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
echo '⚠️注意:无法获取 MinIO 健康状态'
echo '请求 URL: ${S3_ENDPOINT}/minio/health/live'
echo '了解更多https://lobehub.com/zh/docs/self-hosting/server-database/docker-compose#necessary-configuration'
echo ''
fi
wait \$LOBE_PID
"
volumes:
data:
driver: local
s3_data:
driver: local
grafana_data:
driver: local
tempo_data:
driver: local
prometheus_data:
driver: local
networks:
lobe-network:
driver: bridge
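
Note: compared with the variant above, Grafana here requires GF_SECURITY_ADMIN_PASSWORD from .env instead of allowing anonymous access. The optional xk6-client-tracing container (profile otel-test) pushes synthetic traces into the collector at 127.0.0.1:4317, which is useful for validating the Tempo and Grafana wiring before the application emits anything; a sketch:

  docker compose --profile otel-test up -d
  docker compose logs -f otel-tracing-test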

@@ -0,0 +1,15 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
uid: prometheus
access: proxy
orgId: 1
url: http://127.0.0.1:9090
basicAuth: false
isDefault: false
version: 1
editable: false
jsonData:
httpMethod: GET

@@ -0,0 +1,20 @@
apiVersion: 1
datasources:
- name: Tempo
type: tempo
access: proxy
orgId: 1
url: http://127.0.0.1:3200
basicAuth: false
isDefault: true
version: 1
editable: false
apiVersion: 1
uid: tempo
jsonData:
httpMethod: GET
serviceMap:
datasourceUid: prometheus
streamingEnabled:
search: true

@@ -0,0 +1,45 @@
extensions:
health_check:
endpoint: 127.0.0.1:13133
receivers:
prometheus:
config:
scrape_configs:
- job_name: otel-collector-metrics
scrape_interval: 10s
static_configs:
- targets: ["127.0.0.1:8888"]
otlp:
protocols:
grpc:
endpoint: 0.0.0.0:4317
http:
endpoint: 0.0.0.0:4318
exporters:
prometheusremotewrite:
endpoint: http://127.0.0.1:9090/api/v1/write
tls:
insecure: true
otlp:
endpoint: 127.0.0.1:14317
tls:
insecure: true
debug:
verbosity: detailed
service:
pipelines:
metrics:
receivers: [prometheus]
exporters: [prometheusremotewrite]
traces:
receivers: [otlp]
exporters: [otlp]
logs:
receivers: [otlp]
exporters: [debug]

@@ -0,0 +1,11 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: "prometheus"
static_configs:
- targets: ["127.0.0.1:9090"]
- job_name: "tempo"
static_configs:
- targets: ["127.0.0.1:3200"]

@@ -0,0 +1,58 @@
stream_over_http_enabled: true
server:
http_listen_port: 3200
log_level: info
query_frontend:
search:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
metadata_slo:
duration_slo: 5s
throughput_bytes_slo: 1.073741824e+09
trace_by_id:
duration_slo: 5s
distributor:
max_attribute_bytes: 10485760
receivers:
otlp:
protocols:
grpc:
endpoint: 127.0.0.1:14317
http:
endpoint: 127.0.0.1:14318
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
compactor:
compaction:
block_retention: 1h # overall Tempo trace retention. set for demo purposes
metrics_generator:
registry:
external_labels:
source: tempo
cluster: docker-compose
storage:
path: /var/tempo/generator/wal
remote_write:
- url: http://127.0.0.1:9090/api/v1/write
send_exemplars: true
traces_storage:
path: /var/tempo/generator/traces
storage:
trace:
backend: local # backend configuration to use
wal:
path: /var/tempo/wal # where to store the wal locally
local:
path: /var/tempo/blocks
overrides:
defaults:
metrics_generator:
processors: [service-graphs, span-metrics, local-blocks] # enables metrics generator
generate_native_histograms: both
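
Note: with processors set to service-graphs, span-metrics and local-blocks, Tempo's metrics generator remote-writes derived series (with exemplars) into Prometheus on 9090. Once traces are flowing, those series can be queried there; a sketch, with metric names taken from Tempo's defaults and worth double-checking against the running version:

  docker compose exec network-service wget -qO- 'http://127.0.0.1:9090/api/v1/query?query=traces_spanmetrics_calls_total'
  docker compose exec network-service wget -qO- 'http://127.0.0.1:9090/api/v1/query?query=traces_service_graph_request_total'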