#!/usr/bin/env bash
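
# Container entrypoint: starts the (optional) internal valkey, gunicorn, nginx,
# the RQ worker/scheduler and the filesystem watcher, then supervises them.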

set -o errexit           # treat errors as fatal
set -o nounset           # treat unset variables as an error
set -o pipefail          # treat errors in pipes as fatal
shopt -s inherit_errexit # command substitutions inherit errexit

LOGLEVEL="${LOGLEVEL:="INFO"}"
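# Setting LOGLEVEL=DEBUG enables the debug_log output below and, when the internal
# valkey is used, leaves its server log on stdout instead of discarding it.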

# Toggles for the inotify watcher process and the scheduled background tasks (all disabled by default)
ENABLE_RESCAN_ON_FILESYSTEM_CHANGE="${ENABLE_RESCAN_ON_FILESYSTEM_CHANGE:="false"}"
ENABLE_SCHEDULED_RESCAN="${ENABLE_SCHEDULED_RESCAN:="false"}"
ENABLE_SCHEDULED_UPDATE_LAUNCHBOX_METADATA="${ENABLE_SCHEDULED_UPDATE_LAUNCHBOX_METADATA:="false"}"
ENABLE_SCHEDULED_UPDATE_SWITCH_TITLEDB="${ENABLE_SCHEDULED_UPDATE_SWITCH_TITLEDB:="false"}"
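# Example (hypothetical docker-compose snippet) enabling the watcher and scheduled rescans:
#   environment:
#     - ENABLE_RESCAN_ON_FILESYSTEM_CHANGE=true
#     - ENABLE_SCHEDULED_RESCAN=true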

# if REDIS_HOST is set, we assume that an external redis is used
REDIS_HOST="${REDIS_HOST:=""}"
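# The related REDIS_PORT, REDIS_USERNAME, REDIS_PASSWORD, REDIS_DB and REDIS_SSL
# variables are read further down when the RQ scheduler and worker are started.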

# logger colors
RED='\033[0;31m'
LIGHTMAGENTA='\033[0;95m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
RESET='\033[0;00m'

print_banner() {
    local version
    version=$(python3 -c "exec(open('__version__.py').read()); print(__version__)")
    info_log "  _____                 __  __ "
    info_log ' |  __ \               |  \/  |'
    info_log ' | |__) |___  _ __ ___ | \  / |'
    info_log " |  _  // _ \\| '_ \` _ \\| |\\/| |"
    info_log ' | | \ \ (_) | | | | | | |  | |'
    info_log ' |_|  \_\___/|_| |_| |_|_|  |_|'
    info_log ""
    info_log "The beautiful, powerful, self-hosted Rom manager and player"
    info_log ""
    info_log "Version: ${version}"
    info_log ""
}

debug_log() {
    # print debug log output if enabled
    if [[ ${LOGLEVEL} == "DEBUG" ]]; then
        echo -e "${LIGHTMAGENTA}DEBUG: ${BLUE}[RomM]${LIGHTMAGENTA}[init]${CYAN}[$(date +"%Y-%m-%d %T")]${RESET}" "${@}" || true
    fi
}

info_log() {
    echo -e "${GREEN}INFO: ${BLUE}[RomM]${LIGHTMAGENTA}[init]${CYAN}[$(date +"%Y-%m-%d %T")]${RESET}" "${@}" || true
}

warn_log() {
    echo -e "${YELLOW}WARNING: ${BLUE}[RomM]${LIGHTMAGENTA}[init]${CYAN}[$(date +"%Y-%m-%d %T")]${RESET}" "${@}" || true
}

error_log() {
    echo -e "${RED}ERROR: ${BLUE}[RomM]${LIGHTMAGENTA}[init]${CYAN}[$(date +"%Y-%m-%d %T")]${RESET}" "${@}" || true
    exit 1
}

# Commands to run initial startup tasks
run_startup() {
    if ! PYTHONPATH="/backend:${PYTHONPATH-}" opentelemetry-instrument \
        --service_name "${OTEL_SERVICE_NAME_PREFIX-}startup" \
        python3 /backend/startup.py; then
        error_log "Startup script failed, exiting"
    fi
}
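
# Wait (up to ~30s) for the gunicorn unix socket to appear before nginx is started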
wait_for_gunicorn_socket() {
    debug_log "Waiting for gunicorn socket file..."
    local retries=60
    while [[ ! -S /tmp/gunicorn.sock && retries -gt 0 ]]; do
        sleep 0.5
        ((retries--))
    done

    if [[ -S /tmp/gunicorn.sock ]]; then
        debug_log "Gunicorn socket file found"
    else
        warn_log "Gunicorn socket file not found after waiting 30s!"
    fi
}

# Function that runs our main process and creates a corresponding PID file
start_bin_gunicorn() {
    # cleanup potentially leftover socket
    rm -f /tmp/gunicorn.sock

    # commands to start our main application and store its PID to check for crashes
    info_log "Starting backend"

    export PYTHONUNBUFFERED=1
    export PYTHONDONTWRITEBYTECODE=1
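
    # Bind both the unix socket (proxied by nginx) and a TCP port (DEV_PORT, default 5000);
    # the WEB_SERVER_* variables below tune gunicorn's worker behaviour.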
    opentelemetry-instrument \
        --service_name "${OTEL_SERVICE_NAME_PREFIX-}api" \
        gunicorn \
        --bind=0.0.0.0:"${DEV_PORT:-5000}" \
        --bind=unix:/tmp/gunicorn.sock \
        --pid=/tmp/gunicorn.pid \
        --forwarded-allow-ips="*" \
        --worker-class uvicorn_worker.UvicornWorker \
        --workers "${WEB_SERVER_CONCURRENCY:-1}" \
        --timeout "${WEB_SERVER_TIMEOUT:-300}" \
        --keep-alive "${WEB_SERVER_KEEPALIVE:-2}" \
        --max-requests "${WEB_SERVER_MAX_REQUESTS:-1000}" \
        --max-requests-jitter "${WEB_SERVER_MAX_REQUESTS_JITTER:-100}" \
        --worker-connections "${WEB_SERVER_WORKER_CONNECTIONS:-1000}" \
        --error-logfile - \
        --log-config /etc/gunicorn/logging.conf \
        main:app &
}

# Commands to start nginx (handling PID creation internally)
start_bin_nginx() {
    wait_for_gunicorn_socket

    info_log "Starting nginx"
    if [[ ${EUID} -ne 0 ]]; then
        nginx
    else
        # if container runs as root, drop permissions
        nginx -g 'user romm;'
    fi

    : "${ROMM_BASE_URL:=http://0.0.0.0:8080}"
    info_log "🚀 RomM is now available at ${ROMM_BASE_URL}"
}

# Commands to start valkey-server (PID file written by this function)
start_bin_valkey-server() {
    info_log "Starting internal valkey"

    if [[ -f /usr/local/etc/valkey/valkey.conf ]]; then
        if [[ ${LOGLEVEL} == "DEBUG" ]]; then
            valkey-server /usr/local/etc/valkey/valkey.conf &
        else
            valkey-server /usr/local/etc/valkey/valkey.conf >/dev/null 2>&1 &
        fi
    else
        if [[ ${LOGLEVEL} == "DEBUG" ]]; then
            valkey-server --dir /redis-data &
        else
            valkey-server --dir /redis-data >/dev/null 2>&1 &
        fi
    fi

    VALKEY_PID=$!
    echo "${VALKEY_PID}" >/tmp/valkey-server.pid

    local host="127.0.0.1"
    local port="6379"
    local max_retries=120
    local retry=0

    debug_log "Waiting for internal valkey to be ready..."

    # Temporarily disable errexit for this part of the script
    set +o errexit
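
    # /dev/tcp/<host>/<port> is a bash-internal pseudo path: the redirect below only
    # succeeds once something is accepting TCP connections on that host and port.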
    while ((retry < max_retries)); do
        # Attempt to check if valkey TCP port is open
        if (echo >/dev/tcp/"${host}"/"${port}") 2>/dev/null; then
            debug_log "Internal valkey is ready and accepting connections"
            set -o errexit # Re-enable errexit after success
            return 0
        fi

        sleep 0.5
        ((retry++))
    done

    error_log "Internal valkey did not become ready after $((max_retries * 500))ms"
}

# Commands to start RQ scheduler
start_bin_rq_scheduler() {
    info_log "Starting RQ scheduler"

    RQ_REDIS_HOST=${REDIS_HOST:-127.0.0.1} \
        RQ_REDIS_PORT=${REDIS_PORT:-6379} \
        RQ_REDIS_USERNAME=${REDIS_USERNAME:-""} \
        RQ_REDIS_PASSWORD=${REDIS_PASSWORD:-""} \
        RQ_REDIS_DB=${REDIS_DB:-0} \
        RQ_REDIS_SSL=${REDIS_SSL:-0} \
        rqscheduler \
        --path /backend \
        --pid /tmp/rq_scheduler.pid &
}

# Commands to start RQ worker
start_bin_rq_worker() {
    info_log "Starting RQ worker"

    # Build Redis URL properly
    local redis_url
    if [[ -n ${REDIS_PASSWORD-} ]]; then
        redis_url="redis${REDIS_SSL:+s}://${REDIS_USERNAME-}:${REDIS_PASSWORD}@${REDIS_HOST:-127.0.0.1}:${REDIS_PORT:-6379}/${REDIS_DB:-0}"
    elif [[ -n ${REDIS_USERNAME-} ]]; then
        redis_url="redis${REDIS_SSL:+s}://${REDIS_USERNAME}@${REDIS_HOST:-127.0.0.1}:${REDIS_PORT:-6379}/${REDIS_DB:-0}"
    else
        redis_url="redis${REDIS_SSL:+s}://${REDIS_HOST:-127.0.0.1}:${REDIS_PORT:-6379}/${REDIS_DB:-0}"
    fi
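    # Example result (hypothetical values): REDIS_HOST=redis.internal with a password
    # and no username yields redis://:<password>@redis.internal:6379/0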

    # Set PYTHONPATH so RQ can find the tasks module
    PYTHONPATH="/backend:${PYTHONPATH-}" rq worker \
        --path /backend \
        --pid /tmp/rq_worker.pid \
        --url "${redis_url}" \
        high default low &
}
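
# Commands to start the filesystem watcher (PID file written by this function)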
start_bin_watcher() {
    info_log "Starting watcher"
    watchfiles \
        --target-type command \
        "opentelemetry-instrument --service_name '${OTEL_SERVICE_NAME_PREFIX-}watcher' python3 watcher.py" \
        /romm/library &
    WATCHER_PID=$!
    echo "${WATCHER_PID}" >/tmp/watcher.pid
}
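
# (Re)start a process when it has no PID file or the recorded PID is no longer running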
watchdog_process_pid() {
    PROCESS=$1
    if [[ -f "/tmp/${PROCESS}.pid" ]]; then
        # Check if the pid we last wrote to our state file is actually active
        PID=$(cat "/tmp/${PROCESS}.pid") || true
        if [[ ! -d "/proc/${PID}" ]]; then
            start_bin_"${PROCESS}"
        fi
    else
        # Start the process if we don't have a corresponding PID file
        start_bin_"${PROCESS}"
    fi
}
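
# Stop a process via its PID file and wait for it to exit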
stop_process_pid() {
    PROCESS=$1
    if [[ -f "/tmp/${PROCESS}.pid" ]]; then
        PID=$(cat "/tmp/${PROCESS}.pid") || true
        if [[ -d "/proc/${PID}" ]]; then
            info_log "Stopping ${PROCESS}"
            kill "${PID}" || true
            # wait for process exit
            while [[ -e "/proc/${PID}" ]]; do sleep 0.1; done
        fi
    fi
}

shutdown() {
    # shutdown in reverse order
    stop_process_pid rq_worker
    stop_process_pid rq_scheduler
    stop_process_pid watcher
    stop_process_pid nginx
    stop_process_pid gunicorn
    stop_process_pid valkey-server
}

# switch to backend directory
cd /backend || { error_log "/backend directory doesn't seem to exist"; }

print_banner

# setup trap handler
exited=0
trap 'exited=1 && shutdown' SIGINT SIGTERM EXIT
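# Any of these signals (or a normal exit) sets exited=1 to break the main loop and
# stops the child processes in reverse start order via shutdown().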

# clear any leftover PID files
rm -f /tmp/*.pid

# Disable OpenTelemetry if no OTEL_ prefixed environment variables are set
if ! printenv | grep -q '^OTEL_'; then
    info_log "No OpenTelemetry environment variables found, disabling OpenTelemetry SDK"
    export OTEL_SDK_DISABLED=true
fi

# Set ROMM_AUTH_SECRET_KEY if not already set (default to empty here so nounset does not trip)
if [[ -z ${ROMM_AUTH_SECRET_KEY-} ]]; then
    ROMM_AUTH_SECRET_KEY=$(python3 -c "import secrets; print(secrets.token_hex(32))")
    info_log "ROMM_AUTH_SECRET_KEY not set, generating random secret key"
    export ROMM_AUTH_SECRET_KEY
fi

# Start the internal Valkey server unless REDIS_HOST is set (which means an external Redis/Valkey is used)
if [[ -z ${REDIS_HOST} ]]; then
    watchdog_process_pid valkey-server
else
    info_log "REDIS_HOST is set, not starting internal valkey-server"
fi

# Run needed database migrations once at startup
info_log "Running database migrations"
if alembic upgrade head; then
    info_log "Database migrations succeeded"
else
    error_log "Failed to run database migrations"
fi

# Startup process requires database and cache to be already available
run_startup

# main loop: supervise the child processes and restart any that have died
while ! ((exited)); do
    watchdog_process_pid gunicorn

    # only start the scheduler if enabled
    if [[ ${ENABLE_SCHEDULED_RESCAN} == "true" || ${ENABLE_SCHEDULED_UPDATE_SWITCH_TITLEDB} == "true" || ${ENABLE_SCHEDULED_UPDATE_LAUNCHBOX_METADATA} == "true" ]]; then
        watchdog_process_pid rq_scheduler
    fi

    watchdog_process_pid rq_worker

    # only start the watcher if enabled
    if [[ ${ENABLE_RESCAN_ON_FILESYSTEM_CHANGE} == "true" ]]; then
        watchdog_process_pid watcher
    fi

    watchdog_process_pid nginx

    # check for dead processes every 5 seconds
    sleep 5
done