mirror of
https://github.com/linuxserver/docker-mods.git
synced 2026-03-23 00:05:28 +08:00
swag:auto-uptime-kuma Initial release of the mod
This commit is contained in:
parent
cc80a43d42
commit
dbbeba055f
4
.github/workflows/BuildImage.yml
vendored
4
.github/workflows/BuildImage.yml
vendored
@ -5,8 +5,8 @@ on: [push, pull_request_target, workflow_dispatch]
|
||||
env:
|
||||
GITHUB_REPO: "linuxserver/docker-mods" #don't modify
|
||||
ENDPOINT: "linuxserver/mods" #don't modify
|
||||
BASEIMAGE: "replace_baseimage" #replace
|
||||
MODNAME: "replace_modname" #replace
|
||||
BASEIMAGE: "swag" #replace
|
||||
MODNAME: "auto-uptime-kuma" #replace
|
||||
|
||||
jobs:
|
||||
set-vars:
|
||||
|
||||
@ -2,7 +2,7 @@
|
||||
|
||||
FROM scratch
|
||||
|
||||
LABEL maintainer="username"
|
||||
LABEL maintainer="labmonkey"
|
||||
|
||||
# copy local files
|
||||
COPY root/ /
|
||||
|
||||
@ -1,25 +0,0 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
## Buildstage ##
|
||||
FROM ghcr.io/linuxserver/baseimage-alpine:3.17 as buildstage
|
||||
|
||||
RUN \
|
||||
echo "**** install packages ****" && \
|
||||
apk add --no-cache \
|
||||
curl && \
|
||||
echo "**** grab rclone ****" && \
|
||||
mkdir -p /root-layer && \
|
||||
curl -o \
|
||||
/root-layer/rclone.deb -L \
|
||||
"https://downloads.rclone.org/v1.47.0/rclone-v1.47.0-linux-amd64.deb"
|
||||
|
||||
# copy local files
|
||||
COPY root/ /root-layer/
|
||||
|
||||
## Single layer deployed image ##
|
||||
FROM scratch
|
||||
|
||||
LABEL maintainer="username"
|
||||
|
||||
# Add files from buildstage
|
||||
COPY --from=buildstage /root-layer/ /
|
||||
61
README.md
61
README.md
@ -1,25 +1,52 @@
|
||||
# Rsync - Docker mod for openssh-server
|
||||
# Auto Uptime Kuma - Docker mod for SWAG
|
||||
|
||||
This mod adds rsync to openssh-server, to be installed/updated during container start.
|
||||
This mod gives SWAG the ability to automatically add Uptime Kuma "Monitors" for the running containers. Ultimately it will allow you with a very low effort to setup notifications whenever any of your services goes down etc.
|
||||
|
||||
In openssh-server docker arguments, set an environment variable `DOCKER_MODS=linuxserver/mods:openssh-server-rsync`
|
||||
## Requirements
|
||||
|
||||
If adding multiple mods, enter them in an array separated by `|`, such as `DOCKER_MODS=linuxserver/mods:openssh-server-rsync|linuxserver/mods:openssh-server-mod2`
|
||||
Running [Uptime Kuma](https://github.com/louislam/uptime-kuma) instance with `username` and `password` configured. The container should be in the same [user defined bridge network](https://docs.linuxserver.io/general/swag#docker-networking) as SWAG.
|
||||
|
||||
# Mod creation instructions
|
||||
## Installation
|
||||
|
||||
* Fork the repo, create a new branch based on the branch `template`.
|
||||
* Edit the `Dockerfile` for the mod. `Dockerfile.complex` is only an example and included for reference; it should be deleted when done.
|
||||
* Inspect the `root` folder contents. Edit, add and remove as necessary.
|
||||
* After all init scripts and services are created, run `find ./ -path "./.git" -prune -o \( -name "run" -o -name "finish" -o -name "check" \) -not -perm -u=x,g=x,o=x -print -exec chmod +x {} +` to fix permissions.
|
||||
* Edit this readme with pertinent info, delete these instructions.
|
||||
* Finally edit the `.github/workflows/BuildImage.yml`. Customize the vars for `BASEIMAGE` and `MODNAME`. Set the versioning logic if needed.
|
||||
* Ask the team to create a new branch named `<baseimagename>-<modname>`. Baseimage should be the name of the image the mod will be applied to. The new branch will be based on the `template` branch.
|
||||
* Submit PR against the branch created by the team.
|
||||
In SWAG docker arguments, set an environment variable `DOCKER_MODS=linuxserver/mods:swag-auto-uptime-kuma`.
|
||||
|
||||
Add additional environment variables to the SWAG docker image:
|
||||
|
||||
## Tips and tricks
|
||||
| Name | Required | Example | Description |
|
||||
| ---------------------- | -------- | -------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `UPTIME_KUMA_URL` | Yes | `http://uptime-kuma:3001/` | The URL to the Uptime Kuma instance. Please note this cannot be the domain that is configured by SWAG, as during the initialization phase of the container those domains are not yet available. Instead use the docker container name. |
|
||||
| `UPTIME_KUMA_USERNAME` | Yes | `admin` | Your Uptime Kuma username |
|
||||
| `UPTIME_KUMA_PASSWORD` | Yes | `password` | Your Uptime Kuma password |
|
||||
|
||||
* Some images have helpers built in, these images are currently:
|
||||
* [Openvscode-server](https://github.com/linuxserver/docker-openvscode-server/pull/10/files)
|
||||
* [Code-server](https://github.com/linuxserver/docker-code-server/pull/95)
|
||||
Unfortunately Uptime Kuma does not provide API keys for its Socket.io API at the moment and Username/Password have to be used.
|
||||
|
||||
Finally, add `swag.uptime-kuma.enabled=true` label at minimum to each of your containers that you wish to monitor. More types of labels are listed in next section.
|
||||
|
||||
## Labels
|
||||
|
||||
This mod is utilizing the wonderful [Uptime Kuma API](https://github.com/lucasheld/uptime-kuma-api) library. It allows you configure nearly every property of the Monitors by defining Docker Labels. For detailed documentation of each of these properties please refer to the `add_monitor` endpoint in the [official documentation](https://uptime-kuma-api.readthedocs.io/en/latest/api.html#uptime_kuma_api.UptimeKumaApi.add_monitor).
|
||||
|
||||
| Label | Default Value | Example Value | Description |
|
||||
| -------------------------------------- | ------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `swag.uptime-kuma.enabled` | `false` | `true` | The only required label for the minimal setup. It allows the mod to detect the container and configure monitor. |
|
||||
| `swag.uptime-kuma.monitor.name` | `{containerName}` | Radarr <br> Jellyfin Public | By default the name of the Monitor will be value of the Docker container name transformed to start with uppercase letter. |
|
||||
| `swag.uptime-kuma.monitor.url` | `https://{containerName}.{domainName}` | `https://radarr.domain.com/` <br> `https://pihole.domain.com/admin/` | By default the URL of each container is built based on the actual container name (`{containerName}`) defined in docker and the value of the `URL` environment variable (`{domainName}`) defined in SWAG (as required by SWAG itself). |
|
||||
| `swag.uptime-kuma.monitor.type` | http | http | While technically possible to override the monitor type the purpose of this mod is to monitor HTTP endpoints. |
|
||||
| `swag.uptime-kuma.monitor. description` | Automatically generated by SWAG auto-uptime-kuma | My own description | The description is only for informational purposes and can be freely changed |
|
||||
| `swag.uptime-kuma.monitor.*` | | `swag.uptime-kuma.monitor. maxretries=5` <br> `swag.uptime-kuma.monitor. accepted_statuscodes= 200-299,404,501` | There are many more properties to configure. The fact that anything can be changed does not mean that it should. Some properties or combinations could not work and should be changed only if you know what you are doing. Please check the [Uptime Kuma API](https://uptime-kuma-api.readthedocs.io/en/latest/api.html#uptime_kuma_api.UptimeKumaApi.add_monitor) for more examples. Properties that are expected to be lists should be separated by comma `,` |
|
||||
|
||||
## Notifications
|
||||
|
||||
While ultimately this mod makes it easier to setup notifications for your docker containers it does not configure more than Uptime Kuma Monitors. In order to receive Notifications you should configure them manually and then either enable one type to be default for all your Monitors or specify the Notifications by using the `swag.uptime-kuma.monitor.notificationIDList` label.
|
||||
|
||||
## Known Limitations
|
||||
|
||||
- At the moment this mod does *NOT* monitor your docker containers for changes. This means that whenever you change any of your labels or remove a container and wish to no longer monitor it then the changes will *NOT* be applied to Uptime Kuma in real time. In order to reload the changes you have two options:
|
||||
- Restart the `swag` container. This will run initialization scripts again and reload all the changes (add/delete/update monitors)
|
||||
- Run the script manually which is the following command via `ssh`: `docker exec swag python3 /app/auto-uptime-kuma.py` (where `swag` is your container name of the SWAG instance).
|
||||
|
||||
- Due to limitations of the Uptime Kuma API, whenever you make changes to your container or labels that already have a Monitor setup, the **Update** action will be performed by running **Delete** followed by **Add**. What this means is that all changes will result in a new Monitor for the same container that will lose history of the heartbeats, all manual changes and get a new 'id' number.
|
||||
|
||||
## Purge data
|
||||
|
||||
For the purpose of development or simply if you feel that you want to purge all the Monitors and files created by this mod you can run following command via `ssh`: `docker exec swag python3 /app/auto-uptime-kuma.py -purge` (where `swag` is your container name of the SWAG instance).
|
||||
|
||||
68
root/app/auto-uptime-kuma.py
Normal file
68
root/app/auto-uptime-kuma.py
Normal file
@ -0,0 +1,68 @@
|
||||
from swagDocker import SwagDocker
|
||||
from swagUptimeKuma import SwagUptimeKuma
|
||||
import sys
|
||||
import argparse
|
||||
import os
|
||||
|
||||
|
||||
def parseCommandLine():
    """
    Different application behavior if executed from CLI.

    Supports a single "-purge" flag: when present, all monitors and
    config files created by this mod are removed and the process exits.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-purge', action='store_true')
    options = parser.parse_args()

    if options.purge:
        swagUptimeKuma.purgeData()
        swagUptimeKuma.disconnect()
        sys.exit(0)
|
||||
|
||||
|
||||
def addOrUpdateMonitors(domainName, swagContainers):
    """
    Create or refresh an Uptime Kuma monitor for every labelled container.

    Label values under "swag.uptime-kuma.monitor.*" are parsed into the
    monitor configuration; an existing monitor is updated, otherwise a
    new one is added.
    """
    for container in swagContainers:
        labelConfig = swagDocker.parseContainerLabels(
            container.labels, ".monitor.")
        name = container.name
        monitorData = swagUptimeKuma.parseMonitorData(
            name, domainName, labelConfig)

        if swagUptimeKuma.monitorExists(name):
            swagUptimeKuma.updateMonitor(name, domainName, monitorData)
        else:
            swagUptimeKuma.addMonitor(name, domainName, monitorData)
|
||||
|
||||
|
||||
def getMonitorsToBeRemoved(swagContainers, apiMonitors):
    """
    Return the names of monitors whose containers no longer exist.

    The Monitor <-> Container link is made by comparing the container
    name with the monitor's swag tag value.
    """
    activeContainerNames = {container.name for container in swagContainers}
    taggedMonitorNames = (swagUptimeKuma.getMonitorSwagTagValue(monitor)
                          for monitor in apiMonitors)
    return [name for name in taggedMonitorNames
            if name not in activeContainerNames]
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
url = os.environ['UPTIME_KUMA_URL']
|
||||
username = os.environ['UPTIME_KUMA_USERNAME']
|
||||
password = os.environ['UPTIME_KUMA_PASSWORD']
|
||||
domainName = os.environ['URL']
|
||||
|
||||
swagDocker = SwagDocker("swag.uptime-kuma")
|
||||
swagUptimeKuma = SwagUptimeKuma(url, username, password)
|
||||
|
||||
parseCommandLine()
|
||||
|
||||
swagContainers = swagDocker.getSwagContainers()
|
||||
|
||||
addOrUpdateMonitors(domainName, swagContainers)
|
||||
|
||||
monitorsToBeRemoved = getMonitorsToBeRemoved(
|
||||
swagContainers, swagUptimeKuma.apiMonitors)
|
||||
swagUptimeKuma.deleteMonitors(monitorsToBeRemoved)
|
||||
|
||||
swagUptimeKuma.disconnect()
|
||||
20
root/app/helpers.py
Normal file
20
root/app/helpers.py
Normal file
@ -0,0 +1,20 @@
|
||||
def has_key_with_value(dictionary, key, value):
    """Return True when `key` exists in `dictionary` and maps to `value`."""
    if key not in dictionary:
        return False
    return dictionary[key] == value
|
||||
|
||||
|
||||
def merge_dicts(*dict_args):
    """Merge dicts left to right; later dicts win on duplicate keys."""
    return {key: value
            for mapping in dict_args
            for key, value in mapping.items()}
|
||||
|
||||
|
||||
def write_file(filename, content):
    """Write `content` to `filename`, creating or truncating the file."""
    with open(filename, 'w+') as handle:
        handle.write(content)
|
||||
|
||||
|
||||
def read_file(filename):
    """Return the full text content of `filename`."""
    with open(filename, 'r') as handle:
        return handle.read()
|
||||
50
root/app/swagDocker.py
Normal file
50
root/app/swagDocker.py
Normal file
@ -0,0 +1,50 @@
|
||||
import docker
|
||||
|
||||
|
||||
class SwagDocker:
    """
    A service class for interacting with Docker containers that are used by SWAG mods.
    """

    client = None
    _containers = None
    _labelPrefix = None

    def __init__(self, labelPrefix: str):
        self._labelPrefix = labelPrefix
        self.client = docker.from_env()

    def getSwagContainers(self):
        """
        Lazily list running containers labelled "<labelPrefix>.enabled=true":

        >>> swag = SwagDocker("swag.my_mod")
        >>> containers = swag.getSwagContainers()
        """
        if self._containers is None:
            enabledFilter = {"label": [f"{self._labelPrefix}.enabled=true"]}
            self._containers = self.client.containers.list(
                filters=enabledFilter)
        return self._containers

    def parseContainerLabels(self, containerLabels, extraPrefix=""):
        """
        Collect the labels starting with "<labelPrefix><extraPrefix>" and
        strip that full prefix from the returned keys.

        Given the example container labels:
            swag.my_mod.enabled: true
            swag.my_mod.config.apple: "123"
            swag.my_mod.config.orange: "456"

        >>> swagDocker.parseContainerLabels(container.labels, ".config.")
        # returns {"apple": "123", "orange": "456"}
        # (with the default extraPrefix="" only "<labelPrefix>" is stripped,
        #  so the keys keep their leading dot, e.g. ".enabled")
        """
        fullPrefix = f"{self._labelPrefix}{extraPrefix}"
        return {
            label[len(fullPrefix):]: value
            for label, value in containerLabels.items()
            if label.startswith(fullPrefix)
        }
|
||||
185
root/app/swagUptimeKuma.py
Normal file
185
root/app/swagUptimeKuma.py
Normal file
@ -0,0 +1,185 @@
|
||||
from uptime_kuma_api.api import UptimeKumaApi, MonitorType
|
||||
from helpers import *
|
||||
import os
|
||||
|
||||
logPrefix = "[mod-auto-uptime-kuma]"
|
||||
|
||||
|
||||
class SwagUptimeKuma:
    """
    Service class that mirrors SWAG's labelled Docker containers as
    Uptime Kuma monitors.

    Monitors created here carry the "swag" tag whose value is the container
    name; that tag is the link used to find, update and delete monitors.
    A "<container>.conf" snapshot of the applied configuration is kept in
    `swagUptimeKumaConfigDir` so unchanged containers can be skipped on
    subsequent runs.
    """

    # Name of the Uptime Kuma tag marking monitors owned by this mod.
    swagTagName = "swag"
    # Directory holding one config snapshot file per monitored container.
    swagUptimeKumaConfigDir = "/auto-uptime-kuma"

    _api = None
    _apiSwagTag = None
    # Monitor list fetched at login; kept in sync when monitors are deleted.
    apiMonitors = None

    # Defaults merged into every monitor; container labels may override them.
    defaultMonitorConfig = dict(
        type=MonitorType.HTTP,
        description="Automatically generated by SWAG auto-uptime-kuma"
    )

    def __init__(self, url, username, password):
        """Connect to Uptime Kuma, log in and cache the current monitors."""
        self._api = UptimeKumaApi(url)
        self._api.login(username, password)
        self.apiMonitors = self._api.get_monitors()

        if not os.path.exists(self.swagUptimeKumaConfigDir):
            print(
                f"{logPrefix} Creating config directory '{self.swagUptimeKumaConfigDir}'")
            os.makedirs(self.swagUptimeKumaConfigDir)

    def disconnect(self):
        """
        API has to be disconnected at the end as the connection is blocking
        """
        self._api.disconnect()

    def getSwagTag(self):
        """
        The "swag" tag is used to detect in API which monitors were created
        using this script. Fetched lazily; created in the API on first use.
        """
        # If the tag was not fetched yet
        if (self._apiSwagTag == None):
            for tag in self._api.get_tags():
                if (tag['name'] == self.swagTagName):
                    self._apiSwagTag = tag
                    break

        # If the tag was not in API then it has to be created
        if (self._apiSwagTag == None):
            self._apiSwagTag = self._api.add_tag(
                name=self.swagTagName, color="#ff4f97")

        return self._apiSwagTag

    def parseMonitorData(self, containerName, domainName, monitorData):
        """
        Some of the container label values might have to be converted before
        sending to API. Additionally merge default config with label config;
        label values (monitorData) win over the generated defaults.
        """
        # Convert strings that are lists in API
        for key in ["accepted_statuscodes", "notificationIDList"]:
            if (key in monitorData and type(monitorData[key]) is str):
                monitorData[key] = monitorData[key].split(",")

        dynamicMonitorConfig = {
            "name": containerName.title(),
            "url": f"https://{containerName}.{domainName}"
        }

        return merge_dicts(self.defaultMonitorConfig, dynamicMonitorConfig, monitorData)

    def addMonitor(self, containerName, domainName, monitorData):
        """Create a monitor for the container unless one with the same name exists."""
        monitorData = self.parseMonitorData(
            containerName, domainName, monitorData)

        # BUGFIX: apiMonitors is a *list* of monitor dicts, so the original
        # has_key_with_value(self.apiMonitors, ...) call could never match and
        # the duplicate check was a no-op. Check each monitor individually.
        alreadyExists = any(
            has_key_with_value(monitor, "name", monitorData['name'])
            for monitor in self.apiMonitors)
        if (alreadyExists):
            print(
                f"{logPrefix} Uptime Kuma already contains '{monitorData['name']}' monitor, skipping...")
            return

        print(
            f"{logPrefix} Adding monitor '{monitorData['name']}'")

        monitor = self._api.add_monitor(**monitorData)

        # Tag the monitor with the container name so it can be found later.
        self._api.add_monitor_tag(
            tag_id=self.getSwagTag()['id'],
            monitor_id=monitor['monitorID'],
            value=containerName
        )

        # Persist the applied config so future runs can detect label changes.
        content = self.buildContainerConfigContent(monitorData)
        write_file(
            f"{self.swagUptimeKumaConfigDir}/{containerName}.conf", content)

    def deleteMonitor(self, containerName):
        """Delete the monitor linked to the container and drop it from the cache."""
        monitorData = self.getMonitor(containerName)
        print(
            f"{logPrefix} Deleting monitor {monitorData['id']}:{monitorData['name']}")
        self._api.delete_monitor(monitorData['id'])
        # Keep the local cache consistent so a following addMonitor() call
        # (see updateMonitor) does not treat the deleted monitor as existing.
        self.apiMonitors = [
            monitor for monitor in self.apiMonitors if monitor is not monitorData]

    def deleteMonitors(self, containerNames):
        """Delete the monitors for all given container names (list may be empty)."""
        print(f"{logPrefix} Deleting all monitors that had their containers removed")
        if (containerNames):
            for containerName in containerNames:
                self.deleteMonitor(containerName)
        else:
            print(f"{logPrefix} Nothing to remove")

    def updateMonitor(self, containerName, domainName, monitorData):
        """
        Please note that due to API limitations the "update" action is actually
        "delete" followed by "add" so that in the end the monitors are
        actually recreated
        """
        newContent = self.buildContainerConfigContent(monitorData)
        oldContent = self.readContainerConfigContent(containerName)
        existingMonitorData = self.getMonitor(containerName)

        if (oldContent != newContent):
            print(
                f"{logPrefix} Updating (Delete and Add) monitor {existingMonitorData['id']}:{existingMonitorData['name']}")
            self.deleteMonitor(containerName)
            self.addMonitor(containerName, domainName, monitorData)
        else:
            print(
                f"{logPrefix} Monitor {existingMonitorData['id']}:{existingMonitorData['name']} is unchanged, skipping...")

    def buildContainerConfigContent(self, monitorData):
        """
        In order to compare if container labels were changed the contents are
        stored in config files for each container ("key=value" per line).
        """
        content = ""
        for key, value in monitorData.items():
            content += f'{key}={value}\n'
        return content.strip()

    def readContainerConfigContent(self, containerName):
        """Return the stored config snapshot for the container, or "" if absent."""
        fileName = f"{self.swagUptimeKumaConfigDir}/{containerName}.conf"
        if (not os.path.exists(fileName)):
            return ""

        return read_file(fileName).strip()

    def getMonitor(self, containerName):
        """Find the cached monitor whose swag tag value equals the container name."""
        for monitor in self.apiMonitors:
            swagTagValue = self.getMonitorSwagTagValue(monitor)
            if (swagTagValue != None and swagTagValue == containerName):
                return monitor
        return None

    def monitorExists(self, containerName):
        """True when a monitor linked to the container exists."""
        return True if self.getMonitor(containerName) else False

    def getMonitorSwagTagValue(self, monitorData):
        """
        This value is container name itself. Used to link containers with
        monitors. Returns None for monitors not created by this mod.
        """
        # Guard against monitors that have no 'tags' key (or a None value);
        # iterating None would raise TypeError.
        for tag in (monitorData.get('tags') or []):
            if (has_key_with_value(tag, "name", self.swagTagName)):
                return tag['value']
        return None

    def purgeData(self):
        """
        Removes all of the monitors and files created with this script
        """
        print(f"{logPrefix} Purging all monitors added by swag")

        # Iterate a copy: deleteMonitor() reassigns self.apiMonitors.
        for monitor in list(self.apiMonitors):
            containerName = self.getMonitorSwagTagValue(monitor)
            if (containerName != None):
                self.deleteMonitor(containerName)

        if os.path.exists(self.swagUptimeKumaConfigDir):
            print(
                f"{logPrefix} Purging config directory '{self.swagUptimeKumaConfigDir}'")
            file_list = os.listdir(self.swagUptimeKumaConfigDir)

            for filename in file_list:
                file_path = os.path.join(
                    self.swagUptimeKumaConfigDir, filename)
                if os.path.isfile(file_path):
                    os.remove(file_path)
                    print(f"{logPrefix} Removed '{file_path}' file")

        print(f"{logPrefix} Purging finished")
|
||||
@ -1,30 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash

# This is the init file used for adding os or pip packages to install lists.
# It takes advantage of the built-in init-mods-package-install init script that comes with the baseimages.
# If using this, we need to make sure we set this init as a dependency of init-mods-package-install so this one runs first

# Silence command -v output; only the exit status matters here.
if ! command -v apprise >/dev/null 2>&1; then
    echo "**** Adding apprise and its deps to package install lists ****"
    echo "apprise" >> /mod-pip-packages-to-install.list
    ## Ubuntu
    if [ -f /usr/bin/apt ]; then
        echo "\
python3 \
python3-pip \
runc" >> /mod-repo-packages-to-install.list
    fi
    # Alpine
    # NOTE: "python3" was listed twice in the original list; deduplicated.
    if [ -f /sbin/apk ]; then
        echo "\
cargo \
libffi-dev \
openssl-dev \
python3 \
python3-dev \
py3-pip" >> /mod-repo-packages-to-install.list
    fi
else
    echo "**** apprise already installed, skipping ****"
fi
|
||||
@ -1 +0,0 @@
|
||||
oneshot
|
||||
@ -1 +0,0 @@
|
||||
/etc/s6-overlay/s6-rc.d/init-mod-imagename-modname-add-package/run
|
||||
@ -1,8 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
# This is an install script that is designed to run after init-mods-package-install
|
||||
# so it can take advantage of packages installed
|
||||
# init-mods-end depends on this script so that later init and services wait until this script exits
|
||||
|
||||
echo "**** Setting up apprise ****"
|
||||
apprise blah blah
|
||||
@ -1 +0,0 @@
|
||||
oneshot
|
||||
@ -1 +0,0 @@
|
||||
/etc/s6-overlay/s6-rc.d/init-mod-imagename-modname-install/run
|
||||
12
root/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-add-package/run
Executable file
12
root/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-add-package/run
Executable file
@ -0,0 +1,12 @@
|
||||
#!/usr/bin/with-contenv bash

# Queue the python packages required by the auto-uptime-kuma mod so the
# baseimage's init-mods-package-install step installs them.

echo "[mod-swag-auto-uptime-kuma] Installing SWAG auto-uptime-kuma packages"

# BUGFIX: the original used grep -q "uptime-kuma-api\|docker", which skipped
# the install when EITHER package was already present. Both are required, so
# check each package individually.
if ! pip list 2>&1 | grep -q "uptime-kuma-api" || ! pip list 2>&1 | grep -q "docker"; then
    echo "\
docker \
uptime-kuma-api" >> /mod-pip-packages-to-install.list
    echo "[mod-swag-auto-uptime-kuma] Successfully installed packages"
else
    echo "[mod-swag-auto-uptime-kuma] Packages already installed, skipping..."
fi
|
||||
@ -0,0 +1 @@
|
||||
oneshot
|
||||
@ -0,0 +1 @@
|
||||
/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-add-package/run
|
||||
16
root/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-install/run
Executable file
16
root/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-install/run
Executable file
@ -0,0 +1,16 @@
|
||||
#!/usr/bin/with-contenv bash

# Run the auto-uptime-kuma mod once during container init.
# Exits 0 (without failing the init chain) when the required Uptime Kuma
# credentials are not configured.

if [ -z "$UPTIME_KUMA_URL" ] || [ -z "$UPTIME_KUMA_USERNAME" ] || [ -z "$UPTIME_KUMA_PASSWORD" ]; then
    echo "[mod-swag-auto-uptime-kuma] Missing required environment variables. Please refer to the Readme, skipping..."
    exit 0
fi

echo "[mod-swag-auto-uptime-kuma] Executing SWAG auto-uptime-kuma mod"

scriptPath='/app/auto-uptime-kuma.py'

# Not strictly needed (the script is run through python3), but keeps the
# file directly executable for manual "docker exec" invocations.
if [ -e "$scriptPath" ] && [ ! -x "$scriptPath" ]; then
    chmod +x "$scriptPath"
fi

# Quote the path so the invocation is safe regardless of its value.
python3 "$scriptPath"
|
||||
@ -0,0 +1 @@
|
||||
oneshot
|
||||
@ -0,0 +1 @@
|
||||
/etc/s6-overlay/s6-rc.d/init-mod-swag-auto-uptime-kuma-install/run
|
||||
@ -1,7 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
# This is an example service that would run for the mod
|
||||
# It depends on init-services, the baseimage hook for start of all longrun services
|
||||
|
||||
exec \
|
||||
s6-setuidgid abc run my app
|
||||
@ -1 +0,0 @@
|
||||
longrun
|
||||
Loading…
x
Reference in New Issue
Block a user