Compare commits

master..4.95.3-ls246

No commits in common. "master" and "4.95.3-ls246" have entirely different histories.

20 changed files with 652 additions and 856 deletions

0  .editorconfig  Normal file → Executable file

2  .github/CONTRIBUTING.md  vendored  Normal file → Executable file

@@ -6,7 +6,7 @@
 * Read, and fill the Pull Request template
 * If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR
 * If the PR is addressing an existing issue include, closes #\<issue number>, in the body of the PR commit message
-* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://linuxserver.io/discord)
+* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://discord.gg/YWrKVTn)
 ## Common files

0  .github/FUNDING.yml  vendored  Normal file → Executable file

2  .github/ISSUE_TEMPLATE/config.yml  vendored  Normal file → Executable file

@@ -1,7 +1,7 @@
 blank_issues_enabled: false
 contact_links:
   - name: Discord chat support
-    url: https://linuxserver.io/discord
+    url: https://discord.gg/YWrKVTn
     about: Realtime support / chat with the community and the team.
   - name: Discourse discussion forum

0  .github/ISSUE_TEMPLATE/issue.bug.yml  vendored  Normal file → Executable file

0  .github/ISSUE_TEMPLATE/issue.feature.yml  vendored  Normal file → Executable file

3  .github/workflows/call_issue_pr_tracker.yml  vendored  Normal file → Executable file

@@ -8,9 +8,6 @@ on:
   pull_request_review:
     types: [submitted,edited,dismissed]
-permissions:
-  contents: read
 jobs:
   manage-project:
     permissions:

3  .github/workflows/call_issues_cron.yml  vendored  Normal file → Executable file

@@ -4,9 +4,6 @@ on:
     - cron: '31 1 * * *'
   workflow_dispatch:
-permissions:
-  contents: read
 jobs:
   stale:
     permissions:

.github/workflows/external_trigger.yml  vendored

@@ -3,9 +3,6 @@ name: External Trigger Main
 on:
   workflow_dispatch:
-permissions:
-  contents: read
 jobs:
   external-trigger-master:
     runs-on: ubuntu-latest
@@ -18,10 +15,7 @@ jobs:
 SKIP_EXTERNAL_TRIGGER: ${{ vars.SKIP_EXTERNAL_TRIGGER }}
 run: |
   printf "# External trigger for docker-code-server\n\n" >> $GITHUB_STEP_SUMMARY
-  if grep -q "^code-server_master_" <<< "${SKIP_EXTERNAL_TRIGGER}"; then
-    echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY
-    echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` contains \`code-server_master_\`; will skip trigger if version matches." >> $GITHUB_STEP_SUMMARY
-  elif grep -q "^code-server_master" <<< "${SKIP_EXTERNAL_TRIGGER}"; then
+  if grep -q "^code-server_master" <<< "${SKIP_EXTERNAL_TRIGGER}"; then
     echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
     echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` contains \`code-server_master\`; skipping trigger." >> $GITHUB_STEP_SUMMARY
     exit 0
@@ -31,11 +25,6 @@ jobs:
 printf "\n## Retrieving external version\n\n" >> $GITHUB_STEP_SUMMARY
 EXT_RELEASE=$(curl -u ${{ secrets.CR_USER }}:${{ secrets.CR_PAT }} -sX GET https://api.github.com/repos/coder/code-server/releases/latest | jq -r '.tag_name' | sed 's|^v||')
 echo "Type is \`custom_version_command\`" >> $GITHUB_STEP_SUMMARY
-if grep -q "^code-server_master_${EXT_RELEASE}" <<< "${SKIP_EXTERNAL_TRIGGER}"; then
-  echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
-  echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` matches current external release; skipping trigger." >> $GITHUB_STEP_SUMMARY
-  exit 0
-fi
 if [ -z "${EXT_RELEASE}" ] || [ "${EXT_RELEASE}" == "null" ]; then
   echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
   echo "> Can't retrieve external version, exiting" >> $GITHUB_STEP_SUMMARY
@@ -46,8 +35,8 @@
   "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
   exit 1
 fi
-EXT_RELEASE_SANITIZED=$(echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g')
-echo "Sanitized external version: \`${EXT_RELEASE_SANITIZED}\`" >> $GITHUB_STEP_SUMMARY
+EXT_RELEASE=$(echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g')
+echo "External version: \`${EXT_RELEASE}\`" >> $GITHUB_STEP_SUMMARY
 echo "Retrieving last pushed version" >> $GITHUB_STEP_SUMMARY
 image="linuxserver/code-server"
 tag="latest"
@@ -59,30 +48,13 @@
   --header "Accept: application/vnd.oci.image.index.v1+json" \
   --header "Authorization: Bearer ${token}" \
   "https://ghcr.io/v2/${image}/manifests/${tag}")
-if jq -e '.layers // empty' <<< "${multidigest}" >/dev/null 2>&1; then
-  # If there's a layer element it's a single-arch manifest so just get that digest
-  digest=$(jq -r '.config.digest' <<< "${multidigest}")
-else
-  # Otherwise it's multi-arch or has manifest annotations
-  if jq -e '.manifests[]?.annotations // empty' <<< "${multidigest}" >/dev/null 2>&1; then
-    # Check for manifest annotations and delete if found
-    multidigest=$(jq 'del(.manifests[] | select(.annotations))' <<< "${multidigest}")
-  fi
-  if [[ $(jq '.manifests | length' <<< "${multidigest}") -gt 1 ]]; then
-    # If there's still more than one digest, it's multi-arch
-    multidigest=$(jq -r ".manifests[] | select(.platform.architecture == \"amd64\").digest?" <<< "${multidigest}")
-  else
-    # Otherwise it's single arch
-    multidigest=$(jq -r ".manifests[].digest?" <<< "${multidigest}")
-  fi
-  if digest=$(curl -s \
-    --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
-    --header "Accept: application/vnd.oci.image.manifest.v1+json" \
-    --header "Authorization: Bearer ${token}" \
-    "https://ghcr.io/v2/${image}/manifests/${multidigest}"); then
-    digest=$(jq -r '.config.digest' <<< "${digest}");
-  fi
-fi
+multidigest=$(jq -r ".manifests[] | select(.platform.architecture == \"amd64\").digest?" <<< "${multidigest}")
+digest=$(curl -s \
+  --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \
+  --header "Accept: application/vnd.oci.image.manifest.v1+json" \
+  --header "Authorization: Bearer ${token}" \
+  "https://ghcr.io/v2/${image}/manifests/${multidigest}" \
+  | jq -r '.config.digest')
 image_info=$(curl -sL \
   --header "Authorization: Bearer ${token}" \
   "https://ghcr.io/v2/${image}/blobs/${digest}")
@@ -103,8 +75,8 @@
   exit 1
 fi
 echo "Last pushed version: \`${IMAGE_VERSION}\`" >> $GITHUB_STEP_SUMMARY
-if [ "${EXT_RELEASE_SANITIZED}" == "${IMAGE_VERSION}" ]; then
-  echo "Sanitized version \`${EXT_RELEASE_SANITIZED}\` already pushed, exiting" >> $GITHUB_STEP_SUMMARY
+if [ "${EXT_RELEASE}" == "${IMAGE_VERSION}" ]; then
+  echo "Version \`${EXT_RELEASE}\` already pushed, exiting" >> $GITHUB_STEP_SUMMARY
   exit 0
 elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-code-server/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then
   echo "New version \`${EXT_RELEASE}\` found; but there already seems to be an active build on Jenkins; exiting" >> $GITHUB_STEP_SUMMARY
@@ -119,8 +91,8 @@
   "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}
 else
   printf "\n## Trigger new build\n\n" >> $GITHUB_STEP_SUMMARY
-  echo "New sanitized version \`${EXT_RELEASE_SANITIZED}\` found; old version was \`${IMAGE_VERSION}\`. Triggering new build" >> $GITHUB_STEP_SUMMARY
-  if [[ "${artifacts_found}" == "true" ]]; then
+  echo "New version \`${EXT_RELEASE}\` found; old version was \`${IMAGE_VERSION}\`. Triggering new build" >> $GITHUB_STEP_SUMMARY
+  if "${artifacts_found}" == "true" ]]; then
     echo "All artifacts seem to be uploaded." >> $GITHUB_STEP_SUMMARY
   fi
   response=$(curl -iX POST \
@@ -139,7 +111,7 @@
   --data-urlencode "description=GHA external trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
   --data-urlencode "Submit=Submit"
 echo "**** Notifying Discord ****"
-TRIGGER_REASON="A version change was detected for code-server tag latest. Old version:${IMAGE_VERSION} New version:${EXT_RELEASE_SANITIZED}"
+TRIGGER_REASON="A version change was detected for code-server tag latest. Old version:${IMAGE_VERSION} New version:${EXT_RELEASE}"
 curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903,
   "description": "**Build Triggered** \n**Reason:** '"${TRIGGER_REASON}"' \n**Build URL:** '"${buildurl}display/redirect"' \n"}],
   "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }}

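The hunks above replace master's multi-arch/single-arch digest handling with a single jq lookup. For readers tracing what that lookup does against the GHCR registry API, here is a minimal, illustrative bash sketch assuming anonymous pull access to the public image; the token request and the final label extraction are not shown in the excerpt above and are assumptions based on the standard token and manifest endpoints.

#!/bin/bash
# Illustrative sketch only (not part of the diff): resolve the version label of the
# last pushed linuxserver/code-server image, roughly as the simplified ("+") side does.
image="linuxserver/code-server"
tag="latest"
# Assumption: anonymous pull token for a public GHCR image.
token=$(curl -s "https://ghcr.io/token?scope=repository:${image}:pull" | jq -r '.token')
# Fetch the multi-arch index and pick the amd64 manifest digest.
index=$(curl -s \
  --header "Accept: application/vnd.oci.image.index.v1+json" \
  --header "Authorization: Bearer ${token}" \
  "https://ghcr.io/v2/${image}/manifests/${tag}")
multidigest=$(jq -r '.manifests[] | select(.platform.architecture == "amd64").digest' <<< "${index}")
# Follow that manifest to its config blob; the blob's labels carry the image version.
digest=$(curl -s \
  --header "Accept: application/vnd.oci.image.manifest.v1+json" \
  --header "Authorization: Bearer ${token}" \
  "https://ghcr.io/v2/${image}/manifests/${multidigest}" | jq -r '.config.digest')
curl -sL --header "Authorization: Bearer ${token}" \
  "https://ghcr.io/v2/${image}/blobs/${digest}" \
  | jq -r '.config.Labels["org.opencontainers.image.version"]'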
.github/workflows/external_trigger_scheduler.yml  vendored

@@ -5,9 +5,6 @@ on:
     - cron: '32 * * * *'
   workflow_dispatch:
-permissions:
-  contents: read
 jobs:
   external-trigger-scheduler:
     runs-on: ubuntu-latest

6  .github/workflows/greetings.yml  vendored  Normal file → Executable file

@@ -2,14 +2,8 @@ name: Greetings
 on: [pull_request_target, issues]
-permissions:
-  contents: read
 jobs:
   greeting:
-    permissions:
-      issues: write
-      pull-requests: write
     runs-on: ubuntu-latest
     steps:
       - uses: actions/first-interaction@v1

.github/workflows/package_trigger_scheduler.yml  vendored

@@ -5,9 +5,6 @@ on:
     - cron: '0 20 * * 6'
   workflow_dispatch:
-permissions:
-  contents: read
 jobs:
   package-trigger-scheduler:
     runs-on: ubuntu-latest
@@ -30,18 +27,9 @@ jobs:
 fi
 printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY
 JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-code-server/${br}/jenkins-vars.yml)
-if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-code-server/${br}/Jenkinsfile >/dev/null 2>&1; then
-  echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
-  echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY
-  skipped_branches="${skipped_branches}${br} "
-elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then
+if [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then
   echo "Branch appears to be live; checking workflow." >> $GITHUB_STEP_SUMMARY
-  README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-code-server/${br}/readme-vars.yml)
-  if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then
-    echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
-    echo "> Branch appears to be deprecated; skipping trigger." >> $GITHUB_STEP_SUMMARY
-    skipped_branches="${skipped_branches}${br} "
-  elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then
+  if [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then
     echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
     echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY
     skipped_branches="${skipped_branches}${br} "
@@ -49,7 +37,7 @@ jobs:
   echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
   echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`code-server_${br}\`; skipping trigger." >> $GITHUB_STEP_SUMMARY
   skipped_branches="${skipped_branches}${br} "
-elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-code-server/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then
+elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-code-server/job/${br}/lastBuild/api/json | jq -r '.building') == "true" ]; then
   echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
   echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY
   skipped_branches="${skipped_branches}${br} "
@@ -61,11 +49,6 @@ jobs:
 response=$(curl -iX POST \
   https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-code-server/job/${br}/buildWithParameters?PACKAGE_CHECK=true \
   --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|")
-if [[ -z "${response}" ]]; then
-  echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
-  echo "> Jenkins build could not be triggered. Skipping branch."
-  continue
-fi
 echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY
 echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY
 sleep 10
@@ -73,14 +56,11 @@ jobs:
 buildurl="${buildurl%$'\r'}"
 echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY
 echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY
-if ! curl -ifX POST \
+curl -iX POST \
   "${buildurl}submitDescription" \
   --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \
   --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
-  --data-urlencode "Submit=Submit"; then
-  echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY
-  echo "> Unable to change the Jenkins job description."
-fi
+  --data-urlencode "Submit=Submit"
 sleep 20
 fi
 else
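Both sides of this workflow kick off Jenkins jobs the same way: POST to buildWithParameters and read the queue item URL from the HTTP Location header. A minimal, self-contained sketch of that call follows; the credentials and the job path are placeholders, not values taken from the diff.

#!/bin/bash
# Illustrative sketch only: trigger a parameterized Jenkins build and capture the
# queue item URL, as the workflow above does. JENKINS_USER/JENKINS_TOKEN must be set.
job_url="https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-code-server/job/master"
response=$(curl -iX POST \
  "${job_url}/buildWithParameters?PACKAGE_CHECK=true" \
  --user "${JENKINS_USER}:${JENKINS_TOKEN}" | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|")
# Strip the trailing carriage return before reusing the URL, as the workflow does.
echo "Jenkins job queue url: ${response%$'\r'}"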

0  .github/workflows/permissions.yml  vendored  Normal file → Executable file

162  Jenkinsfile  vendored

@@ -56,23 +56,11 @@ pipeline {
 steps{
   echo "Running on node: ${NODE_NAME}"
   sh '''#! /bin/bash
-  echo "Pruning builder"
-  docker builder prune -f --builder container || :
-  containers=$(docker ps -q)
+  containers=$(docker ps -aq)
   if [[ -n "${containers}" ]]; then
-    BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit')
-    for container in ${containers}; do
-      if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then
-        echo "skipping buildx container in docker stop"
-      else
-        echo "Stopping container ${container}"
-        docker stop ${container}
-      fi
-    done
+    docker stop ${containers}
   fi
-  docker system prune -f --volumes || :
-  docker image prune -af || :
-  '''
+  docker system prune -af --volumes || : '''
   script{
     env.EXIT_STATUS = ''
     env.LS_RELEASE = sh(
@@ -94,11 +82,7 @@ pipeline {
 env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/'
 env.PULL_REQUEST = env.CHANGE_ID
 env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE .editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml'
-if ( env.SYFT_IMAGE_TAG == null ) {
-  env.SYFT_IMAGE_TAG = 'latest'
-}
 }
-echo "Using syft image tag ${SYFT_IMAGE_TAG}"
 sh '''#! /bin/bash
 echo "The default github branch detected as ${GH_DEFAULT_BRANCH}" '''
 script{
@@ -208,7 +192,6 @@ pipeline {
 env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
 env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
 env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
-env.CITEST_IMAGETAG = 'latest'
 }
 }
 }
@@ -234,7 +217,6 @@ pipeline {
 env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN
 env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/'
 env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
-env.CITEST_IMAGETAG = 'develop'
 }
 }
 }
@@ -260,7 +242,6 @@ pipeline {
 env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST
 env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/'
 env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache'
-env.CITEST_IMAGETAG = 'develop'
 }
 }
 }
@@ -283,7 +264,7 @@ pipeline {
 -v ${WORKSPACE}:/mnt \
 -e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \
 -e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \
-ghcr.io/linuxserver/baseimage-alpine:3 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\
+ghcr.io/linuxserver/baseimage-alpine:3.20 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\
 apk add --no-cache python3 && \
 python3 -m venv /lsiopy && \
 pip install --no-cache-dir -U pip && \
@@ -594,7 +575,7 @@ pipeline {
 --label \"org.opencontainers.image.title=Code-server\" \
 --label \"org.opencontainers.image.description=[Code-server](https://coder.com) is VS Code running on a remote server, accessible through the browser. - Code on your Chromebook, tablet, and laptop with a consistent dev environment. - If you have a Windows or Mac workstation, more easily develop for Linux. - Take advantage of large cloud servers to speed up tests, compilations, downloads, and more. - Preserve battery life when you're on the go. - All intensive computation runs on your server. - You're no longer running excess instances of Chrome. \" \
 --no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \
---provenance=true --sbom=true --builder=container --load \
+--provenance=false --sbom=false --builder=container --load \
 --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
 sh '''#! /bin/bash
 set -e
@@ -618,17 +599,12 @@ pipeline {
 echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
 echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
 echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
 if [[ "${PACKAGE_CHECK}" != "true" ]]; then
-  declare -A pids
   IFS=',' read -ra CACHE <<< "$BUILDCACHE"
   for i in "${CACHE[@]}"; do
     docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} &
-    pids[$!]="$i"
-  done
-  for p in "${!pids[@]}"; do
-    wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; }
   done
+  wait
 fi
 '''
 }
@@ -663,7 +639,7 @@ pipeline {
 --label \"org.opencontainers.image.title=Code-server\" \
 --label \"org.opencontainers.image.description=[Code-server](https://coder.com) is VS Code running on a remote server, accessible through the browser. - Code on your Chromebook, tablet, and laptop with a consistent dev environment. - If you have a Windows or Mac workstation, more easily develop for Linux. - Take advantage of large cloud servers to speed up tests, compilations, downloads, and more. - Preserve battery life when you're on the go. - All intensive computation runs on your server. - You're no longer running excess instances of Chrome. \" \
 --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \
---provenance=true --sbom=true --builder=container --load \
+--provenance=false --sbom=false --builder=container --load \
 --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
 sh '''#! /bin/bash
 set -e
@@ -687,17 +663,12 @@ pipeline {
 echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin
 echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
 echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
 if [[ "${PACKAGE_CHECK}" != "true" ]]; then
-  declare -A pids
   IFS=',' read -ra CACHE <<< "$BUILDCACHE"
   for i in "${CACHE[@]}"; do
     docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} &
-    pids[$!]="$i"
-  done
-  for p in "${!pids[@]}"; do
-    wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; }
   done
+  wait
 fi
 '''
 }
@@ -725,7 +696,7 @@ pipeline {
 --label \"org.opencontainers.image.title=Code-server\" \
 --label \"org.opencontainers.image.description=[Code-server](https://coder.com) is VS Code running on a remote server, accessible through the browser. - Code on your Chromebook, tablet, and laptop with a consistent dev environment. - If you have a Windows or Mac workstation, more easily develop for Linux. - Take advantage of large cloud servers to speed up tests, compilations, downloads, and more. - Preserve battery life when you're on the go. - All intensive computation runs on your server. - You're no longer running excess instances of Chrome. \" \
 --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \
---provenance=true --sbom=true --builder=container --load \
+--provenance=false --sbom=false --builder=container --load \
 --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
 sh '''#! /bin/bash
 set -e
@@ -750,15 +721,11 @@ pipeline {
 echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin
 echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin
 if [[ "${PACKAGE_CHECK}" != "true" ]]; then
-  declare -A pids
   IFS=',' read -ra CACHE <<< "$BUILDCACHE"
   for i in "${CACHE[@]}"; do
     docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} &
-    pids[$!]="$i"
-  done
-  for p in "${!pids[@]}"; do
-    wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; }
   done
+  wait
 fi
 '''
 }
@@ -768,8 +735,7 @@
 if [[ -n "${containers}" ]]; then
   docker stop ${containers}
 fi
-docker system prune -f --volumes || :
-docker image prune -af || :
+docker system prune -af --volumes || :
 '''
 }
 }
@@ -795,7 +761,7 @@
 docker run --rm \
   -v /var/run/docker.sock:/var/run/docker.sock:ro \
   -v ${TEMPDIR}:/tmp \
-  ghcr.io/anchore/syft:${SYFT_IMAGE_TAG} \
+  ghcr.io/anchore/syft:latest \
   ${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt
 NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 )
 echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github"
@@ -882,7 +848,7 @@
 CI_DOCKERENV="LSIO_FIRST_PARTY=true"
 fi
 fi
-docker pull ghcr.io/linuxserver/ci:${CITEST_IMAGETAG}
+docker pull ghcr.io/linuxserver/ci:latest
 if [ "${MULTIARCH}" == "true" ]; then
   docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64
   docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
@@ -905,10 +871,7 @@
 -e WEB_AUTH=\"${CI_AUTH}\" \
 -e WEB_PATH=\"${CI_WEBPATH}\" \
 -e NODE_NAME=\"${NODE_NAME}\" \
--e SYFT_IMAGE_TAG=\"${CI_SYFT_IMAGE_TAG:-${SYFT_IMAGE_TAG}}\" \
--e COMMIT_SHA=\"${COMMIT_SHA}\" \
--e BUILD_NUMBER=\"${BUILD_NUMBER}\" \
--t ghcr.io/linuxserver/ci:${CITEST_IMAGETAG} \
+-t ghcr.io/linuxserver/ci:latest \
 python3 test_build.py'''
 }
 }
@@ -934,11 +897,9 @@
 CACHEIMAGE=${i}
 fi
 done
-docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:latest -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \
-{ if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:latest -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
 if [ -n "${SEMVER}" ]; then
-  docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \
-  { if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+  docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
 fi
 done
 '''
@@ -963,27 +924,20 @@
 CACHEIMAGE=${i}
 fi
 done
-docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-latest -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
-docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-latest -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-latest -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
+docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-latest -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
 if [ -n "${SEMVER}" ]; then
-docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
-docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER}
+docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
 fi
 done
 for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do
-docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
-docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
-docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest
+docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG}
+docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG}
 if [ -n "${SEMVER}" ]; then
-docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} || \
-{ if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; }
+docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER}
 fi
 done
 '''
@@ -1001,41 +955,23 @@
 environment name: 'EXIT_STATUS', value: ''
 }
 steps {
+  echo "Pushing New tag for current commit ${META_TAG}"
+  sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \
+  -d '{"tag":"'${META_TAG}'",\
+  "object": "'${COMMIT_SHA}'",\
+  "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\
+  "type": "commit",\
+  "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}' '''
+  echo "Pushing New release for Tag"
   sh '''#! /bin/bash
-  echo "Auto-generating release notes"
-  if [ "$(git tag --points-at HEAD)" != "" ]; then
-    echo "Existing tag points to current commit, suggesting no new LS changes"
-    AUTO_RELEASE_NOTES="No changes"
-  else
-    AUTO_RELEASE_NOTES=$(curl -fsL -H "Authorization: token ${GITHUB_TOKEN}" -H "Accept: application/vnd.github+json" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases/generate-notes \
-    -d '{"tag_name":"'${META_TAG}'",\
-    "target_commitish": "master"}' \
-    | jq -r '.body' | sed 's|## What.s Changed||')
-  fi
-  echo "Pushing New tag for current commit ${META_TAG}"
-  curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \
-  -d '{"tag":"'${META_TAG}'",\
-  "object": "'${COMMIT_SHA}'",\
-  "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\
-  "type": "commit",\
-  "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}'
-  echo "Pushing New release for Tag"
   echo "Updating to ${EXT_RELEASE_CLEAN}" > releasebody.json
-  jq -n \
-  --arg tag_name "$META_TAG" \
-  --arg target_commitish "master" \
-  --arg ci_url "${CI_URL:-N/A}" \
-  --arg ls_notes "$AUTO_RELEASE_NOTES" \
-  --arg remote_notes "$(cat releasebody.json)" \
-  '{
-  "tag_name": $tag_name,
-  "target_commitish": $target_commitish,
-  "name": $tag_name,
-  "body": ("**CI Report:**\\n\\n" + $ci_url + "\\n\\n**LinuxServer Changes:**\\n\\n" + $ls_notes + "\\n\\n**Remote Changes:**\\n\\n" + $remote_notes),
-  "draft": false,
-  "prerelease": false }' > releasebody.json.done
-  curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done
-  '''
+  echo '{"tag_name":"'${META_TAG}'",\
+  "target_commitish": "master",\
+  "name": "'${META_TAG}'",\
+  "body": "**CI Report:**\\n\\n'${CI_URL:-N/A}'\\n\\n**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n\\n**Remote Changes:**\\n\\n' > start
+  printf '","draft": false,"prerelease": false}' >> releasebody.json
+  paste -d'\\0' start releasebody.json > releasebody.json.done
+  curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done'''
 }
 }
 // Add protection to the release branch
@@ -1210,22 +1146,12 @@ EOF
 }
 cleanup {
   sh '''#! /bin/bash
-  echo "Pruning builder!!"
-  docker builder prune -f --builder container || :
-  containers=$(docker ps -q)
+  echo "Performing docker system prune!!"
+  containers=$(docker ps -aq)
   if [[ -n "${containers}" ]]; then
-    BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit')
-    for container in ${containers}; do
-      if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then
-        echo "skipping buildx container in docker stop"
-      else
-        echo "Stopping container ${container}"
-        docker stop ${container}
-      fi
-    done
+    docker stop ${containers}
   fi
-  docker system prune -f --volumes || :
-  docker image prune -af || :
+  docker system prune -af --volumes || :
   '''
   cleanWs()
 }
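One point worth pulling out of the release hunk above: the master ("-") side builds the GitHub release payload with jq -n, letting jq handle all JSON quoting, instead of pasting strings together with echo/printf/paste. A standalone, illustrative sketch of that approach, using placeholder values rather than the pipeline's real variables:

#!/bin/bash
# Illustrative sketch only: assemble a GitHub release payload the way the "-" side does.
META_TAG="4.95.3-ls246"          # placeholder tag
CI_URL="https://example.test/ci" # placeholder CI report URL
AUTO_RELEASE_NOTES="No changes"  # placeholder LinuxServer notes
echo "Updating to ${META_TAG}" > releasebody.json
jq -n \
  --arg tag_name "$META_TAG" \
  --arg target_commitish "master" \
  --arg ci_url "${CI_URL:-N/A}" \
  --arg ls_notes "$AUTO_RELEASE_NOTES" \
  --arg remote_notes "$(cat releasebody.json)" \
  '{ "tag_name": $tag_name,
     "target_commitish": $target_commitish,
     "name": $tag_name,
     "body": ("**CI Report:**\n\n" + $ci_url + "\n\n**LinuxServer Changes:**\n\n" + $ls_notes + "\n\n**Remote Changes:**\n\n" + $remote_notes),
     "draft": false,
     "prerelease": false }' > releasebody.json.done
cat releasebody.json.done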

0  LICENSE  Normal file → Executable file

README.md

@@ -3,8 +3,9 @@
 [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io)
 [![Blog](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Blog)](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!")
-[![Discord](https://img.shields.io/discord/354974912613449730.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Discord&logo=discord)](https://linuxserver.io/discord "realtime support / chat with the community and the team.")
+[![Discord](https://img.shields.io/discord/354974912613449730.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Discord&logo=discord)](https://discord.gg/YWrKVTn "realtime support / chat with the community and the team.")
 [![Discourse](https://img.shields.io/discourse/https/discourse.linuxserver.io/topics.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=discourse)](https://discourse.linuxserver.io "post on our community forum.")
+[![Fleet](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Fleet)](https://fleet.linuxserver.io "an online web interface which displays all of our maintained images.")
 [![GitHub](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub&logo=github)](https://github.com/linuxserver "view the source for all of our repositories.")
 [![Open Collective](https://img.shields.io/opencollective/all/linuxserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Supporters&logo=open%20collective)](https://opencollective.com/linuxserver "please consider helping us by either donating or contributing to our budget")
@@ -19,8 +20,9 @@ The [LinuxServer.io](https://linuxserver.io) team brings you another container r
 Find us at:
 * [Blog](https://blog.linuxserver.io) - all the things you can do with our containers including How-To guides, opinions and much more!
-* [Discord](https://linuxserver.io/discord) - realtime support / chat with the community and the team.
+* [Discord](https://discord.gg/YWrKVTn) - realtime support / chat with the community and the team.
 * [Discourse](https://discourse.linuxserver.io) - post on our community forum.
+* [Fleet](https://fleet.linuxserver.io) - an online web interface which displays all of our maintained images.
 * [GitHub](https://github.com/linuxserver) - view the source for all of our repositories.
 * [Open Collective](https://opencollective.com/linuxserver) - please consider helping us by either donating or contributing to our budget
@@ -59,6 +61,7 @@ The architectures supported by this image are:
 | :----: | :----: | ---- |
 | x86-64 | ✅ | amd64-\<version tag\> |
 | arm64 | ✅ | arm64v8-\<version tag\> |
+| armhf | ❌ | |
 ## Application Setup
@@ -75,23 +78,6 @@ git config --global user.email "email address"
 How to create the [hashed password](https://github.com/cdr/code-server/blob/master/docs/FAQ.md#can-i-store-my-password-hashed).
-## Read-Only Operation
-This image can be run with a read-only container filesystem. For details please [read the docs](https://docs.linuxserver.io/misc/read-only/).
-### Caveats
-* `/tmp` must be mounted to tmpfs
-* `sudo` will not be available
-## Non-Root Operation
-This image can be run with a non-root user. For details please [read the docs](https://docs.linuxserver.io/misc/non-root/).
-### Caveats
-* `sudo` will not be available
 ## Usage
 To help you get started creating a container from this image you can either use docker-compose or the docker cli.
@@ -117,7 +103,6 @@ services:
       - SUDO_PASSWORD_HASH= #optional
       - PROXY_DOMAIN=code-server.my.domain #optional
      - DEFAULT_WORKSPACE=/config/workspace #optional
-      - PWA_APPNAME=code-server #optional
     volumes:
       - /path/to/code-server/config:/config
     ports:
@@ -139,7 +124,6 @@ docker run -d \
   -e SUDO_PASSWORD_HASH= `#optional` \
   -e PROXY_DOMAIN=code-server.my.domain `#optional` \
   -e DEFAULT_WORKSPACE=/config/workspace `#optional` \
-  -e PWA_APPNAME=code-server `#optional` \
   -p 8443:8443 \
   -v /path/to/code-server/config:/config \
   --restart unless-stopped \
@@ -162,10 +146,7 @@ Containers are configured using parameters passed at runtime (such as those abov
 | `-e SUDO_PASSWORD_HASH=` | Optionally set sudo password via hash (takes priority over `SUDO_PASSWORD` var). Format is `$type$salt$hashed`. |
 | `-e PROXY_DOMAIN=code-server.my.domain` | If this optional variable is set, this domain will be proxied for subdomain proxying. See [Documentation](https://github.com/coder/code-server/blob/main/docs/guide.md#using-a-subdomain) |
 | `-e DEFAULT_WORKSPACE=/config/workspace` | If this optional variable is set, code-server will open this directory by default |
-| `-e PWA_APPNAME=code-server` | If this optional variable is set, the PWA app will the specified name. |
 | `-v /config` | Contains all relevant configuration files. |
-| `--read-only=true` | Run container with a read-only filesystem. Please [read the docs](https://docs.linuxserver.io/misc/read-only/). |
-| `--user=1000:1000` | Run container with a non-root user. Please [read the docs](https://docs.linuxserver.io/misc/non-root/). |
 ## Environment variables from files (Docker secrets)
@@ -329,8 +310,6 @@ Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64
 ## Versions
-* **10.08.25:** - Let server listen on both ipv4 and ipv6.
-* **03.06.25:** - Allow setting PWA name using env var `PWA_APPNAME`.
 * **13.10.24:** - Only chown config folder when change to ownership or new install is detected.
 * **09.10.24:** - Manage permissions in /config/.ssh according to file type
 * **19.08.24:** - Rebase to Ubuntu Noble.

File diff suppressed because it is too large

readme-vars.yml

@@ -13,7 +13,6 @@ project_blurb: |
   - All intensive computation runs on your server.
   - You're no longer running excess instances of Chrome.
 project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}"
-project_categories: "Programming"
 # supported architectures
 available_architectures:
   - {arch: "{{ arch_x86_64 }}", tag: "amd64-latest"}
@@ -36,14 +35,6 @@ opt_param_env_vars:
   - {env_var: "SUDO_PASSWORD_HASH", env_value: "", desc: "Optionally set sudo password via hash (takes priority over `SUDO_PASSWORD` var). Format is `$type$salt$hashed`."}
   - {env_var: "PROXY_DOMAIN", env_value: "code-server.my.domain", desc: "If this optional variable is set, this domain will be proxied for subdomain proxying. See [Documentation](https://github.com/coder/code-server/blob/main/docs/guide.md#using-a-subdomain)"}
   - {env_var: "DEFAULT_WORKSPACE", env_value: "/config/workspace", desc: "If this optional variable is set, code-server will open this directory by default"}
-  - {env_var: "PWA_APPNAME", env_value: "code-server", desc: "If this optional variable is set, the PWA app will the specified name."}
-readonly_supported: true
-readonly_message: |
-  * `/tmp` must be mounted to tmpfs
-  * `sudo` will not be available
-nonroot_supported: true
-nonroot_message: |
-  * `sudo` will not be available
 # application setup block
 app_setup_block_enabled: true
 app_setup_block: |
@@ -81,14 +72,12 @@ init_diagram: |
   init-crontab-config -> init-config-end
   init-config -> init-crontab-config
   init-mods-end -> init-custom-files
-  init-adduser -> init-device-perms
   base -> init-envfile
   base -> init-migrations
   init-config-end -> init-mods
   init-mods-package-install -> init-mods-end
   init-mods -> init-mods-package-install
   init-adduser -> init-os-end
-  init-device-perms -> init-os-end
   init-envfile -> init-os-end
   init-custom-files -> init-services
   init-services -> svc-code-server
@@ -102,8 +91,6 @@ init_diagram: |
   "code-server:latest" <- Base Images
 # changelog
 changelogs:
-  - {date: "10.08.25:", desc: "Let server listen on both ipv4 and ipv6."}
-  - {date: "03.06.25:", desc: "Allow setting PWA name using env var `PWA_APPNAME`."}
   - {date: "13.10.24:", desc: "Only chown config folder when change to ownership or new install is detected."}
   - {date: "09.10.24:", desc: "Manage permissions in /config/.ssh according to file type"}
   - {date: "19.08.24:", desc: "Rebase to Ubuntu Noble."}

root/etc/s6-overlay/s6-rc.d/init-code-server/run

@@ -3,20 +3,18 @@
 mkdir -p /config/{extensions,data,workspace,.ssh}
-if [[ -z ${LSIO_NON_ROOT_USER} ]] && [[ -z ${LSIO_READ_ONLY_FS} ]]; then
-  if [[ -n "${SUDO_PASSWORD}" ]] || [[ -n "${SUDO_PASSWORD_HASH}" ]]; then
-    echo "setting up sudo access"
-    if ! grep -q 'abc' /etc/sudoers; then
-      echo "adding abc to sudoers"
-      echo "abc ALL=(ALL:ALL) ALL" >> /etc/sudoers
-    fi
-    if [[ -n "${SUDO_PASSWORD_HASH}" ]]; then
-      echo "setting sudo password using sudo password hash"
-      sed -i "s|^abc:\!:|abc:${SUDO_PASSWORD_HASH}:|" /etc/shadow
-    else
-      echo "setting sudo password using SUDO_PASSWORD env var"
-      echo -e "${SUDO_PASSWORD}\n${SUDO_PASSWORD}" | passwd abc
-    fi
+if [[ -n "${SUDO_PASSWORD}" ]] || [[ -n "${SUDO_PASSWORD_HASH}" ]]; then
+  echo "setting up sudo access"
+  if ! grep -q 'abc' /etc/sudoers; then
+    echo "adding abc to sudoers"
+    echo "abc ALL=(ALL:ALL) ALL" >> /etc/sudoers
+  fi
+  if [[ -n "${SUDO_PASSWORD_HASH}" ]]; then
+    echo "setting sudo password using sudo password hash"
+    sed -i "s|^abc:\!:|abc:${SUDO_PASSWORD_HASH}:|" /etc/shadow
+  else
+    echo "setting sudo password using SUDO_PASSWORD env var"
+    echo -e "${SUDO_PASSWORD}\n${SUDO_PASSWORD}" | passwd abc
 fi
 fi
@@ -28,19 +26,17 @@ if [[ ! -f /config/.profile ]]; then
 cp /root/.profile /config/.profile
 fi
-if [[ -z ${LSIO_NON_ROOT_USER} ]]; then
-  # fix permissions (ignore contents of workspace)
-  PUID=${PUID:-911}
-  if [[ ! "$(stat -c %u /config/.profile)" == "${PUID}" ]]; then
-    echo "Change in ownership or new install detected, please be patient while we chown existing files"
-    echo "This could take some time"
-    find /config -path "/config/workspace" -prune -o -exec lsiown abc:abc {} +
-    lsiown abc:abc /config/workspace
-  fi
-  chmod 700 /config/.ssh
-  if [[ -n "$(ls -A /config/.ssh)" ]]; then
-    find /config/.ssh/ -type d -exec chmod 700 '{}' \;
-    find /config/.ssh/ -type f -exec chmod 600 '{}' \;
-    find /config/.ssh/ -type f -iname '*.pub' -exec chmod 644 '{}' \;
-  fi
+# fix permissions (ignore contents of workspace)
+PUID=${PUID:-911}
+if [[ ! "$(stat -c %u /config/.profile)" == "${PUID}" ]]; then
+  echo "Change in ownership or new install detected, please be patient while we chown existing files"
+  echo "This could take some time"
+  find /config -path "/config/workspace" -prune -o -exec lsiown abc:abc {} +
+  lsiown abc:abc /config/workspace
+fi
+chmod 700 /config/.ssh
+if [[ -n "$(ls -A /config/.ssh)" ]]; then
+  find /config/.ssh/ -type d -exec chmod 700 '{}' \;
+  find /config/.ssh/ -type f -exec chmod 600 '{}' \;
+  find /config/.ssh/ -type f -iname '*.pub' -exec chmod 644 '{}' \;
 fi
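The sed line above expects SUDO_PASSWORD_HASH in the crypt(3) $type$salt$hashed format used by /etc/shadow (the README's parameter table states the same format). A hedged sketch of one common way to generate such a value; openssl is an assumption here, any crypt-compatible generator works:

# Illustrative only: generate a SHA-512 crypt hash suitable for SUDO_PASSWORD_HASH.
openssl passwd -6 'your-sudo-password'
# Pass the printed $6$<salt>$<hash> string to the container, e.g.
#   docker run ... -e SUDO_PASSWORD_HASH='$6$<salt>$<hash>' ...
# (In docker-compose, remember to escape each literal $ as $$.)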

root/etc/s6-overlay/s6-rc.d/svc-code-server/run

@@ -14,33 +14,14 @@ else
 PROXY_DOMAIN_ARG="--proxy-domain=${PROXY_DOMAIN}"
 fi
-if [[ -z ${PWA_APPNAME} ]]; then
-  PWA_APPNAME="code-server"
-fi
-if [[ -z ${LSIO_NON_ROOT_USER} ]]; then
-  exec \
-    s6-notifyoncheck -d -n 300 -w 1000 -c "nc -z 127.0.0.1 8443" \
-    s6-setuidgid abc \
-    /app/code-server/bin/code-server \
-      --bind-addr 0.0.0.0:8443 \
-      --user-data-dir /config/data \
-      --extensions-dir /config/extensions \
-      --disable-telemetry \
-      --auth "${AUTH}" \
-      --app-name "${PWA_APPNAME}" \
-      "${PROXY_DOMAIN_ARG}" \
-      "${DEFAULT_WORKSPACE:-/config/workspace}"
-else
-  exec \
-    s6-notifyoncheck -d -n 300 -w 1000 -c "nc -z 127.0.0.1 8443" \
+exec \
+  s6-notifyoncheck -d -n 300 -w 1000 -c "nc -z 127.0.0.1 8443" \
+  s6-setuidgid abc \
   /app/code-server/bin/code-server \
-    --bind-addr "[::]:8443" \
+    --bind-addr 0.0.0.0:8443 \
     --user-data-dir /config/data \
     --extensions-dir /config/extensions \
     --disable-telemetry \
     --auth "${AUTH}" \
-    --app-name "${PWA_APPNAME}" \
     "${PROXY_DOMAIN_ARG}" \
     "${DEFAULT_WORKSPACE:-/config/workspace}"
-fi