add k8s deployment, improved Dockerfiles and docker-compose.yaml files

This commit is contained in:
Justin Kreller
2025-02-09 22:36:17 +00:00
parent f6a7dd78ae
commit c3be6dd63a
18 changed files with 748 additions and 263 deletions

23
charts/.helmignore Normal file
View File

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
# macOS Finder metadata
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

7
charts/Chart.yaml Normal file
View File

@ -0,0 +1,7 @@
# Helm chart metadata for the HTWKalender application.
apiVersion: v2
name: htwkalender
description: The Helm chart for the HTWKalender
# Chart (packaging) version — bump on every change to the chart itself.
version: 0.0.1
# Version of the application the chart deploys.
appVersion: "0.0.1"
type: application

262
charts/ci-build-deploy.yml Normal file
View File

@ -0,0 +1,262 @@
# GitLab CI build/package/deploy jobs for HTWKalender.
# NOTE(review): indentation reconstructed — the pasted source had lost all
# leading whitespace and was not valid YAML. Assumes the including pipeline
# defines the `build`, `package` and `deploy` stages.

# Base template: build a Docker image with docker-in-docker and push it.
.build-image:
  stage: build
  image: docker:latest
  services:
    - docker:dind
  tags:
    - image
  #variables:
  #  DOCKER_HOST: tcp://docker:2376
  #  DOCKER_TLS_CERTDIR: "/certs"
  #  DOCKER_TLS_VERIFY: 1
  #  DOCKER_CERT_PATH: "/certs/client"
  before_script:
    # --password-stdin avoids leaking the registry password in `ps` output.
    - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
  script:
    - |
      docker build --pull \
        -t $IMAGE_TAG \
        -f $DOCKERFILE \
        --target $BUILD_TARGET \
        $BUILD_PATH
    - docker push "$IMAGE_TAG"
  artifacts:
    # .env_file accumulates image names for the packaging stage.
    paths:
      - .env_file
    expire_in: 1 hour

.build-data-manager-image:
  extends: .build-image
  variables:
    BUILD_TARGET: "prod"
    BUILD_PATH: "./services"
    DOCKERFILE: "./services/data-manager/Dockerfile"
  after_script:
    - echo "export DATA_MANAGER_IMAGE=$IMAGE_TAG" >> .env_file

build-data-manager-image-dev:
  extends: .build-data-manager-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/data-manager:dev"
  rules:
    - if: $CI_COMMIT_BRANCH == "development"
      changes:
        - services/data-manager/**/*

build-data-manager-image-prod:
  extends: .build-data-manager-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/data-manager:latest"
  rules:
    - if: $CI_COMMIT_BRANCH == "main"
      changes:
        - services/data-manager/**/*

.build-ical-image:
  extends: .build-image
  variables:
    BUILD_TARGET: "prod"
    BUILD_PATH: "./services"
    DOCKERFILE: "./services/ical/Dockerfile"
  after_script:
    - echo "export ICAL_IMAGE=$IMAGE_TAG" >> .env_file

build-ical-image-dev:
  extends: .build-ical-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/ical:dev"
  # Serialized after the data-manager build so .env_file is chained through.
  needs:
    - job: build-data-manager-image-dev
  rules:
    - if: $CI_COMMIT_BRANCH == "development"
      changes:
        - services/ical/**/*

build-ical-image-prod:
  extends: .build-ical-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/ical:latest"
  needs:
    - job: build-data-manager-image-prod
  rules:
    - if: $CI_COMMIT_BRANCH == "main"
      changes:
        - services/ical/**/*

.build-frontend-image:
  extends: .build-image
  variables:
    BUILD_TARGET: "prod"
    BUILD_PATH: "./frontend"
    DOCKERFILE: "./frontend/Dockerfile"
  after_script:
    - echo "export FRONTEND_IMAGE=$IMAGE_TAG" >> .env_file

build-frontend-image-dev:
  extends: .build-frontend-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/frontend:dev"
  needs:
    - job: build-ical-image-dev
  rules:
    - if: $CI_COMMIT_BRANCH == "development"
      changes:
        - frontend/**/*

build-frontend-image-prod:
  extends: .build-frontend-image
  variables:
    IMAGE_TAG: "$CI_REGISTRY_IMAGE/frontend:latest"
  needs:
    - job: build-ical-image-prod
  rules:
    - if: $CI_COMMIT_BRANCH == "main"
      changes:
        - frontend/**/*

# Base template: inject image names/URLs into values.yaml and package the chart.
.package-helm-chart:
  stage: package
  image:
    name: alpine/helm:3
    entrypoint: [""]
  variables:
    CHARTS_DIR: $CI_PROJECT_DIR/charts
    VALUES_FILE: $CI_PROJECT_DIR/charts/values.yaml
    CHART_FILE: $CI_PROJECT_DIR/charts/Chart.yaml
  before_script:
    # gettext provides envsubst (used by the prod job).
    - apk add --no-cache gettext
    - echo "HELM_ARTIFACT_JOB_NAME=$CI_JOB_NAME" >> .env_file
    - echo "PROJECT_URL=$PROJECT_URL" >> .env_file
    - echo "PROJECT_NAME=$PROJECT_NAME" >> .env_file
    - source .env_file
    - echo "Updating deployment URLs..."
    # NOTE(review): assumes `yq` is available in the alpine/helm:3 image — verify.
    - yq e -i '(.dataManager.image.name) = env(DATA_MANAGER_IMAGE)' $VALUES_FILE
    - yq e -i '(.ical.image.name) = env(ICAL_IMAGE)' $VALUES_FILE
    - yq e -i '(.frontend.image.name) = env(FRONTEND_IMAGE)' $VALUES_FILE
    - yq e -i '(.frontend.host) = env(PROJECT_URL)' $VALUES_FILE
  after_script:
    # Expand YAML anchors so downstream tooling never sees aliases.
    - yq e -i 'explode(.)' "$VALUES_FILE"
    - helm dependency update $CHARTS_DIR
    - helm package $CHARTS_DIR --destination ./
  artifacts:
    paths:
      - "*.tgz"
      - .env_file
    expire_in: 1 hour

package-helm-chart-dev:
  extends: .package-helm-chart
  variables:
    PROJECT_URL: $PROJECT_URL_DEV
    PROJECT_NAME: $CI_PROJECT_NAME-dev
  script:
    - yq e -i '(.production) = false' $VALUES_FILE
  needs:
    - build-frontend-image-dev
  rules:
    - if: '$CI_COMMIT_BRANCH == "development"'
      changes:
        - services/data-manager/**/*
        - services/ical/**/*
        - frontend/**/*

package-helm-chart-prod:
  extends: .package-helm-chart
  variables:
    PROJECT_URL: $PROJECT_URL_PROD
    PROJECT_NAME: $CI_PROJECT_NAME
  script:
    - yq e -i '(.frontend.googleSiteVerification) = env(GOOGLE_VERIFICATION)' $VALUES_FILE
    # Generate the Google site-verification ConfigMap template; the unquoted
    # heredoc already expands $PROJECT_NAME/$GOOGLE_VERIFICATION, envsubst is
    # kept as a second pass for any remaining placeholders.
    - |
      cat <<EOF > configmap-google.yaml
      {{- if .Values.production }}
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: configmap-google
        namespace: $PROJECT_NAME
      data:
        $GOOGLE_VERIFICATION.html: |
          google-site-verification: $GOOGLE_VERIFICATION.html
      {{- end }}
      EOF
    - envsubst < configmap-google.yaml > $CHARTS_DIR/templates/configmap-google.yaml
  needs:
    - build-frontend-image-prod
  rules:
    - if: '$CI_COMMIT_BRANCH == "main"'
      changes:
        - services/data-manager/**/*
        - services/ical/**/*
        - frontend/**/*

# Kick off the downstream deploy repository's pipeline with context variables.
trigger_deploy:
  stage: deploy
  image: alpine:latest
  before_script:
    - apk add --no-cache curl
    - source .env_file
  script:
    - echo "Triggering deploy pipeline ..."
    - |
      curl -X POST \
        -F "token=$CI_DEPLOY_REPO_TRIGGER_TOKEN" \
        -F "ref=$CI_DEPLOY_REPO_REF" \
        -F "variables[UPSTREAM_PROJECT_NAME]=$PROJECT_NAME" \
        -F "variables[UPSTREAM_PROJECT_ID]=$CI_PROJECT_ID" \
        -F "variables[UPSTREAM_COMMIT_REF_NAME]=$CI_COMMIT_REF_NAME" \
        -F "variables[UPSTREAM_HELM_ARTIFACT_JOB_NAME]=$HELM_ARTIFACT_JOB_NAME" \
        -F "variables[UPSTREAM_REGISTRY_PATH]=$CI_REGISTRY_IMAGE" \
        "$CI_API_V4_URL/projects/$CI_DEPLOY_REPO_ID/trigger/pipeline"
    - "echo 'The $PROJECT_NAME can be viewed on: $PROJECT_URL'"
  rules:
    - if: '$CI_COMMIT_BRANCH == "main" || $CI_COMMIT_BRANCH == "development"'

# Legacy docker-compose deployment to the dev host over SSH.
deploy-dev:
  stage: deploy
  image: alpine:latest
  before_script:
    - apk add --no-cache openssh-client sed
    - eval $(ssh-agent -s)
    - ssh-add <(echo "$CI_SSH_KEY" | tr -d '\r')
  script:
    - sed -i -e "s|DOCKER_REGISTRY_REPO|$CI_REGISTRY_IMAGE|" docker-compose.dev.yml
    - sed -i -e "s|DEV_TAG|dev|" docker-compose.dev.yml
    - >-
      scp -P $CI_SSH_PORT -o StrictHostKeyChecking=no -o LogLevel=ERROR
      ./docker-compose.dev.yml ./reverseproxy.dev.conf
      $CI_SSH_USER@$CI_SSH_DEV_HOST:/home/$CI_SSH_USER/docker/htwkalender/
    - >-
      ssh -p $CI_SSH_PORT -o StrictHostKeyChecking=no -o LogLevel=ERROR
      $CI_SSH_USER@$CI_SSH_DEV_HOST
      "cd /home/$CI_SSH_USER/docker/htwkalender/ &&
      docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY &&
      docker compose -f ./docker-compose.dev.yml down &&
      docker compose -f ./docker-compose.dev.yml up -d --remove-orphans &&
      docker logout"
  rules:
    - if: $CI_COMMIT_BRANCH == "development"

# Legacy docker-compose deployment to the production host over SSH.
deploy-main:
  stage: deploy
  image: alpine:latest
  before_script:
    - apk add --no-cache openssh-client sed # install dependencies
    - eval $(ssh-agent -s) # set some ssh variables
    - ssh-add <(echo "$CI_SSH_KEY" | tr -d '\r')
  script:
    # replace some placeholders
    - sed -i -e "s|DOCKER_REGISTRY_REPO|$CI_REGISTRY_IMAGE|" docker-compose.prod.yml
    - sed -i -e "s|PROD_TAG|latest|" docker-compose.prod.yml
    # upload necessary files to the server
    - >
      scp -P $CI_SSH_PORT -o StrictHostKeyChecking=no -o LogLevel=ERROR ./docker-compose.prod.yml ./reverseproxy.conf
      $CI_SSH_USER@$CI_SSH_HOST:/home/$CI_SSH_USER/docker/htwkalender/
    # ssh to the server and start the service
    - >
      ssh -p $CI_SSH_PORT -o StrictHostKeyChecking=no -o LogLevel=ERROR $CI_SSH_USER@$CI_SSH_HOST
      "cd /home/$CI_SSH_USER/docker/htwkalender/ &&
      docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY &&
      docker compose -f ./docker-compose.prod.yml down && docker compose -f ./docker-compose.prod.yml up -d --remove-orphans && docker logout &&
      docker exec --user root htwkalender-htwkalender-frontend-1 /bin/sh -c \"echo 'google-site-verification: $GOOGLE_VERIFICATION.html' > ./$GOOGLE_VERIFICATION.html\" "
  rules:
    - if: $CI_COMMIT_BRANCH == "main"

View File

@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
NOTE(review): this defaults to .Release.Name rather than the usual
.Chart.Name used by the standard Helm scaffold — confirm intentional.
*/}}
{{- define "htwkalender.name" -}}
{{- default .Release.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "htwkalender.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
"+" is not a valid label character, so it is replaced with "_".
*/}}
{{- define "htwkalender.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels applied to every rendered resource.
*/}}
{{- define "htwkalender.labels" -}}
helm.sh/chart: {{ include "htwkalender.chart" . }}
{{ include "htwkalender.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels — the stable subset safe for immutable label selectors.
*/}}
{{- define "htwkalender.selectorLabels" -}}
app.kubernetes.io/name: {{ include "htwkalender.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "htwkalender.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "htwkalender.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,50 @@
{{/*
Deployment for the data-manager service.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
Fixes: `component` added to matchLabels so the three Deployments do not
share an identical (overlapping) selector; the persistence guard now wraps
the `volumeMounts:` key itself so a bare null key is never emitted.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "htwkalender.name" . }}-data-manager
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
    component: data-manager
  {{- with .Values.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.dataManager.replicas }}
  selector:
    matchLabels:
      {{- include "htwkalender.selectorLabels" . | nindent 6 }}
      component: data-manager
  template:
    metadata:
      {{- with .Values.annotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "htwkalender.labels" . | nindent 8 }}
        component: data-manager
    spec:
      # Run as a non-root UID/GID; fsGroup lets the pod write the PVC mount.
      securityContext:
        fsGroup: 1000
        runAsUser: 1000
        runAsGroup: 1000
      containers:
        - name: {{ .Chart.Name }}-data-manager
          image: "{{ .Values.dataManager.image.name }}"
          imagePullPolicy: {{ .Values.dataManager.image.pullPolicy }}
          ports:
            - name: data-manager
              containerPort: {{ .Values.dataManager.service.targetPort }}
              protocol: TCP
          {{- if .Values.persistence.enabled }}
          volumeMounts:
            - name: {{ include "htwkalender.name" . }}-storage
              mountPath: /htwkalender-data-manager/data
          {{- end }}
      {{- if .Values.persistence.enabled }}
      volumes:
        - name: {{ include "htwkalender.name" . }}-storage
          persistentVolumeClaim:
            claimName: {{ include "htwkalender.name" . }}-storage
      {{- end }}

View File

@ -0,0 +1,49 @@
{{/*
Deployment for the frontend service.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
Fix: `component` added to matchLabels so Deployment selectors don't overlap.
In production the Google site-verification file is mounted from the
configmap-google ConfigMap generated by the CI pipeline.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "htwkalender.name" . }}-frontend
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
    component: frontend
  {{- with .Values.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.frontend.replicas }}
  selector:
    matchLabels:
      {{- include "htwkalender.selectorLabels" . | nindent 6 }}
      component: frontend
  template:
    metadata:
      {{- with .Values.annotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "htwkalender.labels" . | nindent 8 }}
        component: frontend
    spec:
      containers:
        - name: {{ .Chart.Name }}-frontend
          image: "{{ .Values.frontend.image.name }}"
          imagePullPolicy: {{ .Values.frontend.image.pullPolicy }}
          ports:
            - name: frontend
              containerPort: {{ .Values.frontend.service.targetPort }}
              protocol: TCP
          {{- if .Values.production }}
          volumeMounts:
            - name: configmap-google
              mountPath: /{{ .Values.frontend.googleSiteVerification }}.html
              subPath: {{ .Values.frontend.googleSiteVerification }}.html
              readOnly: true
          {{- end }}
      {{- if .Values.production }}
      volumes:
        - name: configmap-google
          configMap:
            name: configmap-google
      {{- end }}

View File

@ -0,0 +1,38 @@
{{/*
Deployment for the ical service.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
Fixes: `component` added to matchLabels (no overlapping selectors);
the templated env value is quoted so an empty/odd service name cannot
render as a non-string scalar.
*/}}
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "htwkalender.name" . }}-ical
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
    component: ical
  {{- with .Values.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  replicas: {{ .Values.ical.replicas }}
  selector:
    matchLabels:
      {{- include "htwkalender.selectorLabels" . | nindent 6 }}
      component: ical
  template:
    metadata:
      {{- with .Values.annotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "htwkalender.labels" . | nindent 8 }}
        component: ical
    spec:
      containers:
        - name: {{ .Chart.Name }}-ical
          image: "{{ .Values.ical.image.name }}"
          imagePullPolicy: {{ .Values.ical.image.pullPolicy }}
          ports:
            - name: ical
              containerPort: {{ .Values.ical.service.targetPort }}
              protocol: TCP
          env:
            # In-cluster DNS name of the data-manager Service.
            - name: DATA_MANAGER_URL
              value: {{ .Values.dataManager.service.name | quote }}

View File

@ -0,0 +1,16 @@
{{/*
PersistentVolumeClaim backing the data-manager's data directory.
Rendered only when persistence is enabled.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
*/}}
{{- if .Values.persistence.enabled }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ include "htwkalender.name" . }}-storage
  namespace: {{ .Release.Namespace }}
spec:
  {{- with .Values.persistence.accessModes }}
  accessModes:
    {{- toYaml . | nindent 4 }}
  {{- end }}
  resources:
    requests:
      storage: {{ .Values.persistence.size }}
  storageClassName: {{ .Values.persistence.storageClassName }}
{{- end }}

View File

@ -0,0 +1,17 @@
{{/*
ClusterIP Service for the data-manager component.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.dataManager.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  # `component` narrows the selector to data-manager pods only.
  selector:
    {{- include "htwkalender.selectorLabels" . | nindent 4 }}
    component: data-manager
  ports:
    - name: data-manager
      port: {{ .Values.dataManager.service.port }}
      targetPort: {{ .Values.dataManager.service.targetPort }}
      protocol: TCP

View File

@ -0,0 +1,17 @@
{{/*
ClusterIP Service for the frontend component.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.frontend.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  # `component` narrows the selector to frontend pods only.
  selector:
    {{- include "htwkalender.selectorLabels" . | nindent 4 }}
    component: frontend
  ports:
    - name: frontend
      port: {{ .Values.frontend.service.port }}
      targetPort: {{ .Values.frontend.service.targetPort }}
      protocol: TCP

View File

@ -0,0 +1,17 @@
{{/*
ClusterIP Service for the ical component.
NOTE(review): indentation reconstructed from a whitespace-stripped paste.
*/}}
apiVersion: v1
kind: Service
metadata:
  name: {{ .Values.ical.service.name }}
  namespace: {{ .Release.Namespace }}
  labels:
    {{- include "htwkalender.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  # `component` narrows the selector to ical pods only.
  selector:
    {{- include "htwkalender.selectorLabels" . | nindent 4 }}
    component: ical
  ports:
    - name: ical
      port: {{ .Values.ical.service.port }}
      targetPort: {{ .Values.ical.service.targetPort }}
      protocol: TCP

127
charts/values.yaml Normal file
View File

@ -0,0 +1,127 @@
# Default Helm values for the HTWKalender chart.
# NOTE(review): indentation reconstructed — the pasted source had lost all
# leading whitespace. Anchors (&/* ) are kept intentionally: the CI pipeline
# rewrites image names with yq and then runs `yq e 'explode(.)'` to expand them.
# "PLACEHOLDER" values are substituted by CI before packaging.

production: true

dataManager:
  name: data-manager
  replicas: 1
  image:
    name: "PLACEHOLDER"
    pullPolicy: &PULL_POLICY Always
  service:
    name: &service_data_manager htwkalender-data-manager
    port: &service_data_manager_port 80
    targetPort: 8090

ical:
  name: ical
  replicas: 2
  image:
    name: "PLACEHOLDER"
    pullPolicy: *PULL_POLICY
  service:
    name: &service_ical htwkalender-ical
    port: &service_ical_port 80
    targetPort: 8091

frontend:
  name: frontend
  host: &frontend_host "FRONTEND_URL"
  googleSiteVerification: "PLACEHOLDER"
  replicas: 1
  image:
    name: "PLACEHOLDER"
    pullPolicy: *PULL_POLICY
  service:
    name: &service_frontend htwkalender-frontend
    port: &service_frontend_port 80
    targetPort: 8000

persistence:
  enabled: true
  accessModes:
    - ReadWriteOnce
  storageClassName: "PLACEHOLDER"
  size: 19Gi

# This is to setup the liveness and readiness probes more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
livenessProbe:
  httpGet:
    path: /
    port: http
readinessProbe:
  httpGet:
    path: /
    port: http

# Ingress routing: frontend serves the UI, ical serves the feed endpoints,
# data-manager serves the remaining API paths.
ingress:
  hosts:
    - host: *frontend_host
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_frontend
                port:
                  number: *service_frontend_port
          - path: /api/feed/room
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_ical
                port:
                  number: *service_ical_port
          - path: /api/feed
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_ical
                port:
                  number: *service_ical_port
          - path: /api
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /api/modules
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /api/events/types
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /api/rooms
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /api/schedule
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /api/courses
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port
          - path: /_
            pathType: ImplementationSpecific
            backend:
              service:
                name: *service_data_manager
                port:
                  number: *service_data_manager_port