feature: Adds more operational services.

This commit is contained in:
greysoh 2024-11-05 10:47:53 -05:00
parent 9f85ec639d
commit a704e75851
Signed by: imterah
GPG key ID: 8FA7DD57BA6CEA37
47 changed files with 1484 additions and 519 deletions

View file

@ -1,4 +0,0 @@
# IP map
* `192.168.2.11` = PostgreSQL
* `192.168.2.12` = MariaDB/MySQL
* `192.168.2.13-14` = Reserved (maybe add Redis?)

View file

@ -1,8 +0,0 @@
# Non-secret MariaDB settings, consumed via envFrom by the mariadb Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  name: mariadb-details
  labels:
    app: mariadb
data:
  # Database created on first container startup.
  MARIADB_DATABASE: mdb_db

View file

@ -1,17 +0,0 @@
# LoadBalancer Service exposing MariaDB on the pinned MetalLB address.
apiVersion: v1
kind: Service
metadata:
  name: mariadb
  labels:
    app: mariadb
  annotations:
    # Quoted so the address is always treated as a string scalar.
    metallb.universe.tf/loadBalancerIPs: "192.168.2.12"
spec:
  type: LoadBalancer
  ports:
    - protocol: TCP
      port: 3306
      targetPort: 3306
  selector:
    app: mariadb

View file

@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mariadb-deployment
spec:
  # NOTE(review): multiple mariadb pods sharing one hostPath-backed volume
  # would run two servers on the same datadir and corrupt it; keep a single
  # replica unless this moves to a replication-aware setup.
  replicas: 1
  selector:
    matchLabels:
      app: mariadb
  template:
    metadata:
      labels:
        app: mariadb
    spec:
      containers:
        - name: mariadb
          image: "mariadb:11.2.4"
          ports:
            - containerPort: 3306
          envFrom:
            # Supplies MARIADB_DATABASE (see the mariadb-details ConfigMap);
            # the duplicate literal env entry was removed.
            - configMapRef:
                name: mariadb-details
          env:
            - name: MARIADB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: password
          volumeMounts:
            # MariaDB keeps its data under /var/lib/mysql — the original
            # manifest mounted the PostgreSQL data path by mistake.
            - mountPath: /var/lib/mysql
              name: mariadb-data
      volumes:
        # Volume names must be DNS-1123 labels; underscores are rejected
        # by the API server, so mariadb_data -> mariadb-data.
        - name: mariadb-data
          persistentVolumeClaim:
            claimName: mariadb-volume-claim

View file

@ -1,42 +0,0 @@
[meta]
format_ver = 1
[mariadb_configmap]
mode = k3s
depends_on = metallb_ip_config:db_credentials
[#mariadb_configmap/k3s]
mode = install
yml_path = ./configmap.yml
[mariadb_pv]
mode = k3s
depends_on = mariadb_configmap
[#mariadb_pv/k3s]
mode = install
yml_path = ./pv.yml
[mariadb_pv_claim]
mode = k3s
depends_on = mariadb_pv
[#mariadb_pv_claim/k3s]
mode = install
yml_path = ./pv-claim.yml
[mariadb]
mode = k3s
depends_on = mariadb_pv_claim
[#mariadb/k3s]
mode = install
yml_path = ./mariadb.yml
[mariadb_svc]
mode = k3s
depends_on = mariadb
[#mariadb_svc/k3s]
mode = install
yml_path = ./mariadb-svc.yml

View file

@ -1,13 +0,0 @@
# Claim bound to the manually provisioned mariadb-volume PV.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mariadb-volume-claim
  labels:
    app: mariadb
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: mariadb-volume
  labels:
    type: local
    app: mariadb
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    # NOTE(review): hostPath is node-local — pods scheduled on different
    # nodes see different data. Confirm single-node cluster or node pinning.
    path: /var/lib/mysql

View file

@ -1,30 +0,0 @@
# ClusterIP Service plus Ingress routing pgadmin.hofers.cloud to pgAdmin.
apiVersion: v1
kind: Service
metadata:
  name: pgadmin
spec:
  ports:
    - name: web
      port: 80
      # Numeric target: the pgadmin Deployment's containerPort 80 carried
      # no name, so by-name resolution ("web") would never match a pod port.
      targetPort: 80
  selector:
    app: pgadmin
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: pgadmin-ingress
spec:
  rules:
    - host: "pgadmin.hofers.cloud"
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: pgadmin
                port:
                  name: web

View file

@ -1,30 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pgadmin
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pgadmin
  template:
    metadata:
      labels:
        app: pgadmin
    spec:
      containers:
        - name: pgadmin
          # NOTE(review): unpinned image tag (implicit :latest) — consider
          # pinning a specific pgadmin4 version for reproducible deploys.
          image: dpage/pgadmin4
          ports:
            # Named "web" so Services may reference targetPort by name.
            - name: web
              containerPort: 80
          env:
            - name: PGADMIN_DEFAULT_EMAIL
              valueFrom:
                secretKeyRef:
                  name: pgadmin-credentials
                  key: default-email
            - name: PGADMIN_DEFAULT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: pgadmin-credentials
                  key: default-password

View file

@ -1,34 +0,0 @@
[meta]
format_ver = 1
[pgadmin_pv]
mode = k3s
depends_on = traefik:postgres_svc
[#pgadmin_pv/k3s]
mode = install
yml_path = ./pv.yml
[pgadmin_pv_claim]
mode = k3s
depends_on = pgadmin_pv
[#pgadmin_pv_claim/k3s]
mode = install
yml_path = ./pv-claim.yml
[pgadmin]
mode = k3s
depends_on = pgadmin_pv_claim
[#pgadmin/k3s]
mode = install
yml_path = ./pgadmin.yml
[pgadmin_svc]
mode = k3s
depends_on = pgadmin
[#pgadmin_svc/k3s]
mode = install
yml_path = ./pgadmin-svc.yml

View file

@ -1,13 +0,0 @@
# Claim for the pgadmin-data PV. NOTE(review): the pgadmin Deployment shown
# in this commit mounts no volume — verify this claim is actually consumed.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pgadmin-volume-claim
  labels:
    app: pgadmin
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pgadmin-data
  labels:
    type: local
    app: pgadmin
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    # NOTE(review): node-local storage; see the matching note on the
    # database PVs about multi-node behavior.
    path: /var/lib/pgadmin

View file

@ -1,8 +0,0 @@
# Non-secret PostgreSQL settings, consumed via envFrom by the postgres
# Deployment.
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-db-details
  labels:
    app: postgres
data:
  # Database created on first container startup.
  POSTGRES_DB: ps_db

View file

@ -1,17 +0,0 @@
# LoadBalancer Service exposing PostgreSQL on the pinned MetalLB address.
apiVersion: v1
kind: Service
metadata:
  name: postgres
  labels:
    app: postgres
  annotations:
    # Quoted so the address is always treated as a string scalar.
    metallb.universe.tf/loadBalancerIPs: "192.168.2.11"
spec:
  type: LoadBalancer
  ports:
    - protocol: TCP
      port: 5432
      targetPort: 5432
  selector:
    app: postgres

View file

@ -1,41 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-deployment
spec:
  # NOTE(review): two postgres pods sharing one hostPath-backed PGDATA
  # would both run postmaster on the same directory and corrupt it; keep
  # a single replica unless this moves to streaming replication.
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
        - name: postgres
          image: "postgres:16"
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 5432
          envFrom:
            # Supplies POSTGRES_DB (see the postgres-db-details ConfigMap).
            - configMapRef:
                name: postgres-db-details
          env:
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: db-credentials
                  key: password
          volumeMounts:
            - mountPath: /var/lib/postgresql/data
              name: postgresdata
      volumes:
        - name: postgresdata
          persistentVolumeClaim:
            claimName: postgres-volume-claim

View file

@ -1,42 +0,0 @@
[meta]
format_ver = 1
[postgres_configmap]
mode = k3s
depends_on = metallb_ip_config:db_credentials
[#postgres_configmap/k3s]
mode = install
yml_path = ./configmap.yml
[postgres_pv]
mode = k3s
depends_on = postgres_configmap
[#postgres_pv/k3s]
mode = install
yml_path = ./pv.yml
[postgres_pv_claim]
mode = k3s
depends_on = postgres_pv
[#postgres_pv_claim/k3s]
mode = install
yml_path = ./pv-claim.yml
[postgres]
mode = k3s
depends_on = postgres_pv_claim
[#postgres/k3s]
mode = install
yml_path = ./postgres.yml
[postgres_svc]
mode = k3s
depends_on = postgres
[#postgres_svc/k3s]
mode = install
yml_path = ./postgres-svc.yml

View file

@ -1,13 +0,0 @@
# Claim bound to the manually provisioned postgres-volume PV.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-volume-claim
  labels:
    app: postgres
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 10Gi

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: postgres-volume
  labels:
    type: local
    app: postgres
spec:
  storageClassName: manual
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteMany
  hostPath:
    # NOTE(review): node-local storage; see the matching note on the
    # mariadb PV about multi-node behavior.
    path: /data/postgresql

View file

@ -1,17 +0,0 @@
[meta]
format_ver = 1
[postgres]
description = PostgreSQL
mode = include
path = ./postgresql/project.ini
[mariadb]
description = MariaDB
mode = include
path = ./mariadb/project.ini
[pgadmin]
description = pgAdmin
mode = include
path = ./pgadmin/project.ini

View file

@ -1,9 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: db-credentials
name: traefik-cf-creds
data:
# Kubernetes base64 encodes the data
# By default, this is:
username: ZGF0YWJhc2U= # database
password: ZGF0YWJhc2U= # database
cf-email: Y2xvdWRmbGFyZUBleGFtcGxlLmNvbQ== # cloudflare@example.com
cf-key: a2V5 # key

View file

@ -0,0 +1,28 @@
## Mail server configuration
passboltEnv.plain.EMAIL_DEFAULT_FROM=
passboltEnv.plain.EMAIL_DEFAULT_FROM_NAME=Passbolt
passboltEnv.plain.EMAIL_TRANSPORT_DEFAULT_HOST=smtp.gmail.com
passboltEnv.secret.EMAIL_TRANSPORT_DEFAULT_USERNAME=
passboltEnv.secret.EMAIL_TRANSPORT_DEFAULT_PASSWORD=
## GPG Information
passboltEnv.plain.APP_FULL_BASE_URL=https://passbolt.hofers.cloud
passboltEnv.plain.PASSBOLT_KEY_EMAIL=noreply@passbolt.hofers.cloud
## Misc domain configuration
ingress.hosts[0].host=passbolt.hofers.cloud
livenessProbe.httpGet.httpHeaders[0].value=passbolt.hofers.cloud
readinessProbe.httpGet.httpHeaders[0].value=passbolt.hofers.cloud
## GPG Keys
# Private key
gpgServerKeyPrivate=
# Public key
gpgServerKeyPublic=
passboltEnv.secret.PASSBOLT_GPG_SERVER_KEY_FINGERPRINT=
## JWT Information
# Private Key
jwtServerPrivate=
# Public
jwtServerPublic=

View file

@ -0,0 +1,14 @@
# App Base
gitea.config.APP_NAME=Personal Git Server
ingress.hosts[0].host=git.greysoh.dev
gitea.config.server.ROOT_URL=https://git.greysoh.dev
# User configuration
gitea.admin.username=example
gitea.admin.password=test
gitea.admin.email=greyson@hofers.cloud
gitea.admin.passwordMode=initialOnlyNoReset
# Data configuration
postgresql.primary.persistence.size=10Gi
persistence.size=32Gi

View file

@ -1,9 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: pgadmin-credentials
data:
# Kubernetes base64 encodes the data
# By default, this is:
default-email: cGdhZG1pbkBleGFtcGxlLmNvbQ== # pgadmin@example.com
default-password: ZGF0YWJhc2U= # database

View file

@ -0,0 +1 @@
adminPassword=password

View file

@ -1,16 +1,9 @@
[meta]
format_ver = 1
[db_credentials]
[traefik_cf_credentials]
mode = k3s
[#db_credentials/k3s]
[#traefik_cf_credentials/k3s]
mode = install
yml_path = ./database-credentials.yml
[pgadmin_default_credentials]
mode = k3s
[#pgadmin_default_credentials/k3s]
mode = install
yml_path = ./pgadmin-default-login.yml
yml_path = ./cloudflare-credentials.yml

View file

@ -0,0 +1,2 @@
oauth.clientId=clientId
oauth.clientSecret=tskey-client-secret

View file

@ -62,6 +62,8 @@ class HelmSettings:
repo: Optional[str]
namespace_name: Optional[str]
create_namespace: bool
options_file: Optional[str]
set_vars: Optional[str]
@dataclass
class KubeSettings:
@ -157,7 +159,9 @@ def parse_project(contents: str, workdir=os.getcwd()) -> list[Project]:
found_project["name"],
found_project["repo"] if "repo" in found_project else None,
found_project["namespace"] if "namespace" in found_project else None,
create_namespace
create_namespace,
os.path.join(workdir, found_project["options_file"]) if "options_file" in found_project else None,
os.path.join(workdir, found_project["variable_file"]) if "variable_file" in found_project else None,
)
project_obj = Project(
@ -266,7 +270,7 @@ def sort_projects(projects: list[Project]) -> list[Project]:
while project_list_staging:
n = project_list_staging.pop(0)
sorted_projects.append(n)
nodes_with_edges = list(filter(lambda x: n.name in x.depends_on, projects))
for m in nodes_with_edges:
@ -277,8 +281,9 @@ def sort_projects(projects: list[Project]) -> list[Project]:
# Check for circular dependencies/cycles
if any(project.depends_on for project in projects):
print(list(filter(lambda project: len(project.depends_on) != 0, projects)))
raise ValueError("Found circular dependency")
return sorted_projects
def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
@ -298,13 +303,14 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
changeset_meta_id = ""
for line in k3s_config_str.splitlines():
if line.strip().startswith("certificate-authority-data"):
data = line.strip()[line.strip().index(" ") + 1:]
stripped_line = line.strip()
if stripped_line.startswith("certificate-authority-data"):
data = stripped_line[stripped_line.index(" ") + 1:]
data_in_bytes = bytearray(changeset_meta_id + data, "utf-8")
changeset_meta_id = hashlib.md5(data_in_bytes).hexdigest()
base_changeset_path = f"meta/{changeset_meta_id}"
try:
os.mkdir(base_changeset_path)
except FileExistsError:
@ -312,7 +318,7 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
dir_contents = os.listdir(base_changeset_path)
changeset_path = f"{base_changeset_path}/gen_{len(dir_contents) + 1}/"
try:
shutil.copytree(f"{base_changeset_path}/gen_{len(dir_contents)}/", changeset_path)
except FileNotFoundError:
@ -320,21 +326,24 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
os.mkdir(f"{changeset_path}/k3hashes")
os.mkdir(f"{changeset_path}/helmhashes")
os.mkdir(f"{changeset_path}/shellhashes")
for project in sorted_projects:
match project.mode:
case "helm":
if project.helm_settings == None:
continue
if project.helm_settings.mode == "add_repo":
if project.helm_settings.repo == None or project.helm_settings.name == None:
print("ERROR: 'add_repo' is set but either repo or name is undefined")
exit(1)
data_in_bytes = bytearray(f"add_repo.{project.helm_settings.repo}_{project.helm_settings.name}", "utf-8")
meta_id = hashlib.md5(data_in_bytes).hexdigest()
if not os.path.isfile(f"{changeset_path}/helmhashes/{meta_id}"):
Path(f"{changeset_path}/helmhashes/{meta_id}").touch()
changeset_values[project.name] = [
f"helm repo add {project.helm_settings.name} {project.helm_settings.repo}"
]
@ -342,26 +351,81 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
if project.helm_settings.name == None or project.helm_settings.repo == None:
print("ERROR: 'upgrade' or 'install' is set but either: name, or repo, is undefined")
exit(1)
data_in_bytes = bytearray(f"install.{project.helm_settings.repo}_{project.helm_settings.name}", "utf-8")
meta_id = hashlib.md5(data_in_bytes).hexdigest()
create_namespace = "--create-namespace" if project.helm_settings.create_namespace else ""
namespace = f"--namespace {project.helm_settings.namespace_name}" if project.helm_settings.namespace_name else ""
if not os.path.isfile(f"{changeset_path}/helmhashes/{meta_id}") and project.helm_settings.mode == "install":
options_file = f"-f {project.helm_settings.options_file}" if project.helm_settings.options_file else ""
should_still_continue = False
variables = ""
if project.helm_settings.set_vars:
with open(project.helm_settings.set_vars, "r") as variable_file:
contents = variable_file.read().splitlines()
contents = list(map(lambda x: x.strip(), contents))
contents = list(filter(lambda x: not x.startswith("#") and x != "", contents))
for content in contents:
key = content[0:content.index("=")]
value = content[content.index("=")+1:]
variables += f"--set \"{key}\"=\"{value}\" "
variables = variables[:len(variables)-1]
if project.helm_settings.options_file:
data_in_bytes = bytearray(f"{project.helm_settings.options_file}", "utf-8")
options_file_meta_id = hashlib.md5(data_in_bytes).digest().hex()
if not os.path.isfile(f"{changeset_path}/helmhashes/{options_file_meta_id}"):
file_hash = ""
with open(project.helm_settings.options_file, "rb") as helm_options_file:
data = helm_options_file.read()
file_hash = hashlib.md5(data).hexdigest()
with open(f"{changeset_path}/helmhashes/{options_file_meta_id}", "w") as helm_options_metaid_file:
helm_options_metaid_file.write(file_hash)
should_still_continue = True
else:
file_hash = ""
with open(project.helm_settings.options_file, "rb") as helm_options_file:
data = helm_options_file.read()
file_hash = hashlib.md5(data).hexdigest()
with open(f"{changeset_path}/helmhashes/{options_file_meta_id}", "r+") as helm_options_metaid_file:
read_hash = helm_options_metaid_file.read()
if read_hash != file_hash:
helm_options_metaid_file.seek(0)
helm_options_metaid_file.write(file_hash)
should_still_continue = True
if (not os.path.isfile(f"{changeset_path}/helmhashes/{meta_id}") or should_still_continue) and project.helm_settings.mode == "install":
Path(f"{changeset_path}/helmhashes/{meta_id}").touch()
changeset_values[project.name] = [
f"helm repo update {project.helm_settings.repo[:project.helm_settings.repo.index("/")]}",
f"helm upgrade --install {project.helm_settings.name} {project.helm_settings.repo} {create_namespace} {namespace}"
f"helm upgrade --install {options_file} {variables} {project.helm_settings.name} \"{project.helm_settings.repo}\" {create_namespace} {namespace}"
]
elif project.helm_settings.mode == "upgrade" or mode == "update":
changeset_values[project.name] = [
f"helm repo update {project.helm_settings.repo[:project.helm_settings.repo.index("/")]}",
f"helm upgrade {project.helm_settings.name} {project.helm_settings.repo} {create_namespace} {namespace}"
f"helm upgrade {options_file} {variables} {project.helm_settings.name} \"{project.helm_settings.repo}\" {create_namespace} {namespace}"
]
case "k3s":
if project.kube_settings == None:
continue
commands_to_run = []
data_in_bytes = bytearray(f"{project.kube_settings.yml_path}", "utf-8")
meta_id = hashlib.md5(data_in_bytes).digest().hex()
@ -371,7 +435,7 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
with open(project.kube_settings.yml_path, "rb") as kube_file:
data = kube_file.read()
file_hash = hashlib.md5(data).hexdigest()
with open(f"{changeset_path}/k3hashes/{meta_id}", "w") as kube_metaid_file:
kube_metaid_file.write(file_hash)
else:
@ -380,19 +444,20 @@ def generate_change_set(projects: list[Project]) -> dict[str, list[str]]:
with open(project.kube_settings.yml_path, "rb") as kube_file:
data = kube_file.read()
file_hash = hashlib.md5(data).hexdigest()
with open(f"{changeset_path}/k3hashes/{meta_id}", "r+") as kube_metaid_file:
read_hash = kube_metaid_file.read()
if read_hash == file_hash:
continue
else:
kube_metaid_file.seek(0)
kube_metaid_file.write(file_hash)
changeset_values[project.name] = [
f"kubectl apply -f {project.kube_settings.yml_path}"
]
# commands_to_run.append(f"kubectl delete -f {project.kube_settings.yml_path}")
commands_to_run.append(f"kubectl apply -f {project.kube_settings.yml_path}")
changeset_values[project.name] = commands_to_run
case _:
raise Exception("Could not match project type?")
@ -403,12 +468,15 @@ def sigint_handler(signum, frame):
if changeset_path == None:
print("Changeset path is not set yet. Exiting...")
if signum != None:
sys.exit(0)
if changeset_path == None:
exit(2)
shutil.rmtree(changeset_path)
if signum != None:
print("Exiting...")
sys.exit(0)
@ -431,11 +499,10 @@ if not projects:
print("Generating changesets...")
change_set = generate_change_set(projects)
if not change_set:
print("No changes detected.")
exit(0)
if args.dryrun_only:
if not change_set:
print("No changes detected.")
sigint_handler(None, None)
print("Generating changeset script (writing to stderr!)")
@ -443,10 +510,14 @@ if args.dryrun_only:
for project_name in change_set:
print(f'echo "Applying changeset for \'{project_name}\'..."', file=sys.stderr)
for command in change_set[project_name]:
print(command, file=sys.stderr)
else:
if not change_set:
print("No changes detected.")
exit(0)
for project_name in change_set:
print(f"Applying changeset for '{project_name}'...")

View file

@ -4,9 +4,9 @@ metadata:
name: first-pool
spec:
addresses:
- 192.168.2.10-192.168.2.254
- 192.168.2.10-192.168.2.254
---
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
name: example
name: first-pool-advertisement

View file

@ -4,7 +4,7 @@ format_ver = 1
[traefik_role]
description = Traefik role for self
mode = k3s
depends_on = metallb_ip_config:traefik_cf_credentials
depends_on = metallb_ip_config:traefik_cf_credentials:longhorn_storage_class
[#traefik_role/k3s]
mode = install
@ -28,19 +28,10 @@ depends_on = traefik_account
mode = install
yml_path = ./role-binding.yml
[traefik_pv]
description = Traefik certificate storage
mode = k3s
depends_on = traefik_role_binding
[#traefik_pv/k3s]
mode = install
yml_path = ./pv.yml
[traefik_pv_claim]
description = Traefik certificate storage claim
mode = k3s
depends_on = traefik_pv
depends_on = traefik_role_binding
[#traefik_pv_claim/k3s]
mode = install
@ -62,4 +53,4 @@ depends_on = traefik
[#traefik_dashboard/k3s]
mode = install
yml_path = ./traefik-dashboard.yml
yml_path = ./traefik-dashboard.yml

View file

@ -2,12 +2,13 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: traefik-volume-claim
namespace: kube-system
labels:
app: traefik
spec:
storageClassName: manual
accessModes:
- ReadWriteMany
storageClassName: longhorn
resources:
requests:
storage: 5Gi
storage: 100Mi

View file

@ -1,15 +0,0 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: traefik-certs-volume
labels:
type: local
app: traefik
spec:
storageClassName: manual
capacity:
storage: 5Gi
accessModes:
- ReadWriteMany
hostPath:
path: /ssl-certs/

View file

@ -41,7 +41,7 @@ metadata:
spec:
type: LoadBalancer
ports:
- targetPort: web
- targetPort: web-tls
port: 443
selector:
app: traefik
app: traefik

View file

@ -1,12 +1,12 @@
kind: Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik-deployment
labels:
app: traefik
spec:
replicas: 2
replicas: 0
selector:
matchLabels:
app: traefik
@ -20,16 +20,19 @@ spec:
- name: traefik
image: traefik:v3.1
args:
- --api.insecure
- --providers.kubernetesingress
- --certificatesresolvers.cloudflare.acme.dnschallenge.provider=cloudflare
- --certificatesresolvers.cloudflare.acme.email=greysonhofer09@gmail.com
- --certificatesresolvers.cloudflare.acme.dnschallenge.resolvers=1.1.1.1
- --certificatesresolvers.cloudflare.acme.storage=/ssl-certs/acme-cloudflare.json
- "--entryPoints.web.address=:80"
- "--entryPoints.websecure.address=:443"
- "--entryPoints.websecure.http.tls.certresolver=myresolver"
- "--certificatesresolvers.letsencrypt.acme.email=greyson@hofers.cloud"
# - "--certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web"
- "--certificatesresolvers.letsencrypt.acme.tlschallenge=true"
- "--certificatesresolvers.letsencrypt.acme.storage=/sslcerts/cert.json"
# - "--api.insecure"
- "--providers.kubernetesingress"
ports:
- name: web
containerPort: 80
- name: web
- name: web-tls
containerPort: 443
- name: dashboard
containerPort: 8080
@ -45,9 +48,9 @@ spec:
name: traefik-cf-creds
key: cf-key
volumeMounts:
- mountPath: /ssl-certs/
- mountPath: /ssl-certs
name: cert-data
volumes:
- name: cert-data
persistentVolumeClaim:
claimName: traefik-volume-claim
claimName: traefik-volume-claim

View file

@ -0,0 +1,29 @@
[meta]
format_ver = 1
[longhorn_repo]
mode = helm
[#longhorn_repo/helm]
mode = add_repo
name = longhorn
repo = https://charts.longhorn.io
[longhorn]
mode = helm
depends_on = longhorn_repo
[#longhorn/helm]
mode = install
name = longhorn
repo = longhorn/longhorn
namespace = longhorn-system
create_namespace = true
[longhorn_storage_class]
depends_on = longhorn
mode = k3s
[#longhorn_storage_class/k3s]
mode = install
yml_path = ./storage-class.yml

View file

@ -0,0 +1,11 @@
# Default Longhorn storage class used by the service PVCs in this repo.
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: longhorn
provisioner: driver.longhorn.io
allowVolumeExpansion: true
parameters:
  numberOfReplicas: "3"
  staleReplicaTimeout: "2880" # 48 hours in minutes
  fromBackup: ""
  fsType: "ext4"

View file

@ -6,6 +6,11 @@ description = Secret Values
mode = include
path = ./secrets/project.ini
[longhorn]
description = Longhorn Distributed Storage
mode = include
path = ./longhorn/project.ini
[loadbalancer]
description = LoadBalancer Configuration
mode = include
@ -16,7 +21,7 @@ description = Various Dashboards
mode = include
path = ./dashboard/project.ini
[database]
description = Database Software
[services]
description = Services to Use
mode = include
path = ./databases/project.ini
path = ./services/project.ini

View file

@ -0,0 +1,747 @@
# Default values for gitea.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
## @section Global
#
## @param global.imageRegistry global image registry override
## @param global.imagePullSecrets global image pull secrets override; can be extended by `imagePullSecrets`
## @param global.storageClass global storage class override
## @param global.hostAliases global hostAliases which will be added to the pod's hosts files
global:
imageRegistry: ""
## E.g.
## imagePullSecrets:
## - myRegistryKeySecretName
##
imagePullSecrets: []
storageClass: "longhorn"
hostAliases: []
# - ip: 192.168.137.2
# hostnames:
# - example.com
## @param replicaCount number of replicas for the deployment
replicaCount: 1
## @section strategy
## @param strategy.type strategy type
## @param strategy.rollingUpdate.maxSurge maxSurge
## @param strategy.rollingUpdate.maxUnavailable maxUnavailable
strategy:
type: "RollingUpdate"
rollingUpdate:
maxSurge: "100%"
maxUnavailable: 0
## @param clusterDomain cluster domain
clusterDomain: cluster.local
## @section Image
## @param image.registry image registry, e.g. gcr.io,docker.io
## @param image.repository Image to start for this pod
## @param image.tag Visit: [Image tag](https://code.forgejo.org/forgejo/-/packages/container/forgejo/versions). Defaults to `appVersion` within Chart.yaml.
## @param image.digest Image digest. Allows to pin the given image tag. Useful for having control over mutable tags like `latest`
## @param image.pullPolicy Image pull policy
## @param image.rootless Wether or not to pull the rootless version of Forgejo
## @param image.fullOverride Completely overrides the image registry, path/image, tag and digest. **Adjust `image.rootless` accordingly and review [Rootless defaults](#rootless-defaults).**
image:
registry: code.forgejo.org
repository: forgejo/forgejo
# Overrides the image tag whose default is the chart appVersion.
tag: ""
digest: ""
pullPolicy: IfNotPresent
rootless: true
fullOverride: ""
## @param imagePullSecrets Secret to use for pulling the image
imagePullSecrets: []
## @section Security
# Security context is only usable with rootless image due to image design
## @param podSecurityContext.fsGroup Set the shared file system group for all containers in the pod.
podSecurityContext:
fsGroup: 1000
## @param containerSecurityContext Security context
containerSecurityContext: {}
# allowPrivilegeEscalation: false
# capabilities:
# drop:
# - ALL
# # Add the SYS_CHROOT capability for root and rootless images if you intend to
# # run pods on nodes that use the container runtime cri-o. Otherwise, you will
# # get an error message from the SSH server that it is not possible to read from
# # the repository.
# # https://gitea.com/gitea/helm-chart/issues/161
# add:
# - SYS_CHROOT
# privileged: false
# readOnlyRootFilesystem: true
# runAsGroup: 1000
# runAsNonRoot: true
# runAsUser: 1000
## @deprecated The securityContext variable has been split two:
## - containerSecurityContext
## - podSecurityContext.
## @param securityContext Run init and Forgejo containers as a specific securityContext
securityContext: {}
## @param podDisruptionBudget Pod disruption budget
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
## @section Service
service:
## @param service.http.type Kubernetes service type for web traffic
## @param service.http.port Port number for web traffic
## @param service.http.clusterIP ClusterIP setting for http autosetup for deployment is None
## @param service.http.loadBalancerIP LoadBalancer IP setting
## @param service.http.nodePort NodePort for http service
## @param service.http.externalTrafficPolicy If `service.http.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
## @param service.http.externalIPs External IPs for service
## @param service.http.ipFamilyPolicy HTTP service dual-stack policy
## @param service.http.ipFamilies HTTP service dual-stack familiy selection,for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.http.loadBalancerSourceRanges Source range filter for http loadbalancer
## @param service.http.annotations HTTP service annotations
## @param service.http.labels HTTP service additional labels
## @param service.http.loadBalancerClass Loadbalancer class
http:
type: ClusterIP
port: 3000
clusterIP: None
loadBalancerIP:
nodePort:
externalTrafficPolicy:
externalIPs:
ipFamilyPolicy:
ipFamilies:
loadBalancerSourceRanges: []
annotations: {}
labels: {}
loadBalancerClass:
## @param service.ssh.type Kubernetes service type for ssh traffic
## @param service.ssh.port Port number for ssh traffic
## @param service.ssh.clusterIP ClusterIP setting for ssh autosetup for deployment is None
## @param service.ssh.loadBalancerIP LoadBalancer IP setting
## @param service.ssh.nodePort NodePort for ssh service
## @param service.ssh.externalTrafficPolicy If `service.ssh.type` is `NodePort` or `LoadBalancer`, set this to `Local` to enable source IP preservation
## @param service.ssh.externalIPs External IPs for service
## @param service.ssh.ipFamilyPolicy SSH service dual-stack policy
## @param service.ssh.ipFamilies SSH service dual-stack familiy selection,for dual-stack parameters see official kubernetes [dual-stack concept documentation](https://kubernetes.io/docs/concepts/services-networking/dual-stack/).
## @param service.ssh.hostPort HostPort for ssh service
## @param service.ssh.loadBalancerSourceRanges Source range filter for ssh loadbalancer
## @param service.ssh.annotations SSH service annotations
## @param service.ssh.labels SSH service additional labels
## @param service.ssh.loadBalancerClass Loadbalancer class
ssh:
type: ClusterIP
port: 22
clusterIP: None
loadBalancerIP:
nodePort:
externalTrafficPolicy:
externalIPs:
ipFamilyPolicy:
ipFamilies:
hostPort:
loadBalancerSourceRanges: []
annotations: {}
labels: {}
loadBalancerClass:
## @section Ingress
## @param ingress.enabled Enable ingress
## @param ingress.className Ingress class name
## @param ingress.annotations Ingress annotations
## @param ingress.hosts[0].host Default Ingress host
## @param ingress.hosts[0].paths[0].path Default Ingress path
## @param ingress.hosts[0].paths[0].pathType Ingress path type
## @param ingress.tls Ingress tls settings
## @extra ingress.apiVersion Specify APIVersion of ingress object. Mostly would only be used for argocd.
ingress:
enabled: true
# className: nginx
className:
annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: git.example.com
paths:
- path: /
pathType: Prefix
tls: []
# - secretName: chart-example-tls
# hosts:
# - git.example.com
# Mostly for argocd or any other CI that uses `helm template | kubectl apply` or similar
# If helm doesn't correctly detect your ingress API version you can set it here.
# apiVersion: networking.k8s.io/v1
## @section deployment
#
## @param resources Kubernetes resources
resources:
{}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
## @param schedulerName Use an alternate scheduler, e.g. "stork"
schedulerName: ""
## @param nodeSelector NodeSelector for the deployment
nodeSelector: {}
## @param tolerations Tolerations for the deployment
tolerations: []
## @param affinity Affinity for the deployment
affinity: {}
## @param topologySpreadConstraints TopologySpreadConstraints for the deployment
topologySpreadConstraints: []
## @param dnsConfig dnsConfig for the deployment
dnsConfig: {}
## @param priorityClassName priorityClassName for the deployment
priorityClassName: ""
## @param deployment.env Additional environment variables to pass to containers
## @param deployment.terminationGracePeriodSeconds How long to wait until forcefully kill the pod
## @param deployment.labels Labels for the deployment
## @param deployment.annotations Annotations for the Forgejo deployment to be created
deployment:
env:
[]
# - name: VARIABLE
# value: my-value
terminationGracePeriodSeconds: 60
labels: {}
annotations: {}
## @section ServiceAccount
## @param serviceAccount.create Enable the creation of a ServiceAccount
## @param serviceAccount.name Name of the created ServiceAccount, defaults to release name. Can also link to an externally provided ServiceAccount that should be used.
## @param serviceAccount.automountServiceAccountToken Enable/disable auto mounting of the service account token
## @param serviceAccount.imagePullSecrets Image pull secrets, available to the ServiceAccount
## @param serviceAccount.annotations Custom annotations for the ServiceAccount
## @param serviceAccount.labels Custom labels for the ServiceAccount
serviceAccount:
create: false
name: ""
automountServiceAccountToken: false
imagePullSecrets: []
# - name: private-registry-access
annotations: {}
labels: {}
## @section Persistence
#
## @param persistence.enabled Enable persistent storage
## @param persistence.create Whether to create the persistentVolumeClaim for shared storage
## @param persistence.mount Whether the persistentVolumeClaim should be mounted (even if not created)
## @param persistence.claimName Use an existing claim to store repository information
## @param persistence.size Size for persistence to store repo information
## @param persistence.accessModes AccessMode for persistence
## @param persistence.labels Labels for the persistence volume claim to be created
## @param persistence.annotations.helm.sh/resource-policy Resource policy for the persistence volume claim
## @param persistence.storageClass Name of the storage class to use
## @param persistence.subPath Subdirectory of the volume to mount at
## @param persistence.volumeName Name of persistent volume in PVC
persistence:
enabled: true
create: true
mount: true
claimName: gitea-shared-storage
size: 10Gi
accessModes:
- ReadWriteOnce
labels: {}
storageClass:
subPath:
volumeName: ""
annotations:
helm.sh/resource-policy: keep
## @param extraVolumes Additional volumes to mount to the Forgejo deployment
extraVolumes: []
# - name: postgres-ssl-vol
# secret:
# secretName: gitea-postgres-ssl
## @param extraContainerVolumeMounts Mounts that are only mapped into the Forgejo runtime/main container, to e.g. override custom templates.
extraContainerVolumeMounts: []
## @param extraInitVolumeMounts Mounts that are only mapped into the init-containers. Can be used for additional preconfiguration.
extraInitVolumeMounts: []
## @deprecated The extraVolumeMounts variable has been split two:
## - extraContainerVolumeMounts
## - extraInitVolumeMounts
## As an example, can be used to mount a client cert when connecting to an external Postgres server.
## @param extraVolumeMounts **DEPRECATED** Additional volume mounts for init containers and the Forgejo main container
extraVolumeMounts: []
# - name: postgres-ssl-vol
# readOnly: true
# mountPath: "/pg-ssl"
## @section Init
## @param initPreScript Bash shell script copied verbatim to the start of the init-container.
initPreScript: ""
#
# initPreScript: |
# mkdir -p /data/git/.postgresql
# cp /pg-ssl/* /data/git/.postgresql/
# chown -R git:git /data/git/.postgresql/
# chmod 400 /data/git/.postgresql/postgresql.key
## @param initContainers.resources.limits initContainers.limits Kubernetes resource limits for init containers
## @param initContainers.resources.requests.cpu initContainers.requests.cpu Kubernetes cpu resource limits for init containers
## @param initContainers.resources.requests.memory initContainers.requests.memory Kubernetes memory resource limits for init containers
initContainers:
resources:
limits: {}
requests:
cpu: 100m
memory: 128Mi
# Configure commit/action signing prerequisites
## @section Signing
#
## @param signing.enabled Enable commit/action signing
## @param signing.gpgHome GPG home directory
## @param signing.privateKey Inline private gpg key for signed internal Git activity
## @param signing.existingSecret Use an existing secret to store the value of `signing.privateKey`
signing:
enabled: false
gpgHome: /data/git/.gnupg
privateKey: ""
# privateKey: |-
# -----BEGIN PGP PRIVATE KEY BLOCK-----
# ...
# -----END PGP PRIVATE KEY BLOCK-----
existingSecret: ""
## @section Gitea
#
gitea:
## @param gitea.admin.username Username for the Forgejo admin user
## @param gitea.admin.existingSecret Use an existing secret to store admin user credentials
## @param gitea.admin.password Password for the Forgejo admin user
## @param gitea.admin.email Email for the Forgejo admin user
## @param gitea.admin.passwordMode Mode for how to set/update the admin user password. Options are: initialOnlyNoReset, initialOnlyRequireReset, and keepUpdated
admin:
# existingSecret: gitea-admin-secret
username: gitea_admin
password: r8sA8CPHD9!bt6d
email: "gitea@local.domain"
passwordMode: keepUpdated
## @param gitea.metrics.enabled Enable Forgejo metrics
## @param gitea.metrics.serviceMonitor.enabled Enable Forgejo metrics service monitor
metrics:
enabled: false
serviceMonitor:
enabled: false
# additionalLabels:
# prometheus-release: prom1
## @param gitea.ldap LDAP configuration
ldap:
[]
# - name: "LDAP 1"
# existingSecret:
# securityProtocol:
# host:
# port:
# userSearchBase:
# userFilter:
# adminFilter:
# emailAttribute:
# bindDn:
# bindPassword:
# usernameAttribute:
# publicSSHKeyAttribute:
# Either specify inline `key` and `secret` or refer to them via `existingSecret`
## @param gitea.oauth OAuth configuration
oauth:
[]
# - name: 'OAuth 1'
# provider:
# key:
# secret:
# existingSecret:
# autoDiscoverUrl:
# useCustomUrls:
# customAuthUrl:
# customTokenUrl:
# customProfileUrl:
# customEmailUrl:
## @param gitea.additionalConfigSources Additional configuration from secret or configmap
additionalConfigSources: []
# - secret:
# secretName: gitea-app-ini-oauth
# - configMap:
# name: gitea-app-ini-plaintext
## @param gitea.additionalConfigFromEnvs Additional configuration sources from environment variables
additionalConfigFromEnvs: []
## @param gitea.podAnnotations Annotations for the Forgejo pod
podAnnotations: {}
## @param gitea.ssh.logLevel Configure OpenSSH's log level. Only available for root-based Forgejo image.
ssh:
logLevel: "INFO"
## @section `app.ini` overrides
## @descriptionStart
##
## Every value described in the [Cheat
## Sheet](https://forgejo.org/docs/latest/admin/config-cheat-sheet/) can be
## set as a Helm value. Configuration sections map to (lowercased) YAML
## blocks, while the keys themselves remain in all caps.
##
## @descriptionEnd
config:
# values in the DEFAULT section
# (https://forgejo.org/docs/latest/admin/config-cheat-sheet/#overall-default)
# are un-namespaced
## @param gitea.config.APP_NAME Application name, used in the page title
APP_NAME: "Forgejo: Beyond coding. We forge."
## @param gitea.config.RUN_MODE Application run mode, affects performance and debugging: `dev` or `prod`
RUN_MODE: prod
## @param gitea.config.repository General repository settings
repository: {}
## @param gitea.config.cors Cross-origin resource sharing settings
cors: {}
## @param gitea.config.ui User interface settings
ui: {}
## @param gitea.config.markdown Markdown parser settings
markdown: {}
## @param gitea.config.server [object] General server settings
server:
SSH_PORT: 22 # rootful image
SSH_LISTEN_PORT: 2222 # rootless image
## @param gitea.config.database Database configuration (only necessary with an [externally managed DB](https://code.forgejo.org/forgejo-helm/forgejo-helm#external-database)).
database: {}
## @param gitea.config.indexer Settings for what content is indexed and how
indexer: {}
## @param gitea.config.queue Job queue configuration
queue: {}
## @param gitea.config.admin Admin user settings
admin: {}
## @param gitea.config.security Site security settings
security: {}
## @param gitea.config.camo Settings for the [camo](https://github.com/cactus/go-camo) media proxy server (disabled by default)
camo: {}
## @param gitea.config.openid Configuration for authentication with OpenID (disabled by default)
openid: {}
## @param gitea.config.oauth2_client OAuth2 client settings
oauth2_client: {}
## @param gitea.config.service Configuration for miscellaneous Forgejo services
service: {}
## @param gitea.config.ssh.minimum_key_sizes SSH minimum key sizes
ssh.minimum_key_sizes: {}
## @param gitea.config.webhook Webhook settings
webhook: {}
## @param gitea.config.mailer Mailer configuration (disabled by default)
mailer: {}
## @param gitea.config.email.incoming Configuration for handling incoming mail (disabled by default)
email.incoming: {}
## @param gitea.config.cache Cache configuration
cache: {}
## @param gitea.config.session Session/cookie handling
session: {}
## @param gitea.config.picture User avatar settings
picture: {}
## @param gitea.config.project Project board defaults
project: {}
## @param gitea.config.attachment Issue and PR attachment configuration
attachment: {}
## @param gitea.config.log Logging configuration
log: {}
## @param gitea.config.cron Cron job configuration
cron: {}
## @param gitea.config.git Global settings for Git
git: {}
## @param gitea.config.metrics Settings for the Prometheus endpoint (disabled by default)
metrics: {}
## @param gitea.config.api Settings for the Swagger API documentation endpoints
api: {}
## @param gitea.config.oauth2 Settings for the [OAuth2 provider](https://forgejo.org/docs/latest/admin/oauth2-provider/)
oauth2: {}
## @param gitea.config.i18n Internationalization settings
i18n: {}
## @param gitea.config.markup Configuration for advanced markup processors
markup: {}
## @param gitea.config.highlight.mapping File extension to language mapping overrides for syntax highlighting
highlight.mapping: {}
## @param gitea.config.time Locale settings
time: {}
## @param gitea.config.migrations Settings for Git repository migrations
migrations: {}
## @param gitea.config.federation Federation configuration
federation: {}
## @param gitea.config.packages Package registry settings
packages: {}
## @param gitea.config.mirror Configuration for repository mirroring
mirror: {}
## @param gitea.config.lfs Large File Storage configuration
lfs: {}
## @param gitea.config.repo-avatar Repository avatar storage configuration
repo-avatar: {}
## @param gitea.config.avatar User/org avatar storage configuration
avatar: {}
## @param gitea.config.storage General storage settings
storage: {}
## @param gitea.config.proxy Proxy configuration (disabled by default)
proxy: {}
## @param gitea.config.actions Configuration for [Forgejo Actions](https://forgejo.org/docs/latest/user/actions/)
actions: {}
## @param gitea.config.other Uncategorized configuration options
other: {}
## @section LivenessProbe
#
## @param gitea.livenessProbe.enabled Enable liveness probe
## @param gitea.livenessProbe.tcpSocket.port Port to probe for liveness
## @param gitea.livenessProbe.initialDelaySeconds Initial delay before liveness probe is initiated
## @param gitea.livenessProbe.timeoutSeconds Timeout for liveness probe
## @param gitea.livenessProbe.periodSeconds Period for liveness probe
## @param gitea.livenessProbe.successThreshold Success threshold for liveness probe
## @param gitea.livenessProbe.failureThreshold Failure threshold for liveness probe
# Modify the liveness probe for your needs or completely disable it by commenting out.
livenessProbe:
enabled: true
tcpSocket:
port: http
initialDelaySeconds: 200
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
## @section ReadinessProbe
#
## @param gitea.readinessProbe.enabled Enable readiness probe
## @param gitea.readinessProbe.tcpSocket.port Port to probe for readiness
## @param gitea.readinessProbe.initialDelaySeconds Initial delay before readiness probe is initiated
## @param gitea.readinessProbe.timeoutSeconds Timeout for readiness probe
## @param gitea.readinessProbe.periodSeconds Period for readiness probe
## @param gitea.readinessProbe.successThreshold Success threshold for readiness probe
## @param gitea.readinessProbe.failureThreshold Failure threshold for readiness probe
# Modify the readiness probe for your needs or completely disable it by commenting out.
readinessProbe:
enabled: true
tcpSocket:
port: http
initialDelaySeconds: 5
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 3
# # Uncomment the startup probe to enable and modify it for your needs.
## @section StartupProbe
#
## @param gitea.startupProbe.enabled Enable startup probe
## @param gitea.startupProbe.tcpSocket.port Port to probe for startup
## @param gitea.startupProbe.initialDelaySeconds Initial delay before startup probe is initiated
## @param gitea.startupProbe.timeoutSeconds Timeout for startup probe
## @param gitea.startupProbe.periodSeconds Period for startup probe
## @param gitea.startupProbe.successThreshold Success threshold for startup probe
## @param gitea.startupProbe.failureThreshold Failure threshold for startup probe
startupProbe:
enabled: false
tcpSocket:
port: http
initialDelaySeconds: 60
timeoutSeconds: 1
periodSeconds: 10
successThreshold: 1
failureThreshold: 10
## @section Redis® Cluster
## @descriptionStart
## Redis® Cluster is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) if enabled in the values.
## Complete Configuration can be taken from their website.
## Redis cluster and [Redis](#redis) cannot be enabled at the same time.
## @descriptionEnd
#
## @param redis-cluster.enabled Enable redis cluster
## @param redis-cluster.usePassword Whether to use password authentication
## @param redis-cluster.cluster.nodes Number of redis cluster master nodes
## @param redis-cluster.cluster.replicas Number of redis cluster master node replicas
redis-cluster:
enabled: true
usePassword: false
cluster:
nodes: 3 # default: 6
replicas: 0 # default: 1
## @section Redis®
## @descriptionStart
## Redis® is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/redis) if enabled in the values.
## Complete Configuration can be taken from their website.
## Redis and [Redis cluster](#redis-cluster) cannot be enabled at the same time.
## @descriptionEnd
#
## @param redis.enabled Enable redis standalone or replicated
## @param redis.architecture Whether to use standalone or replication
## @param redis.global.redis.password Required password
## @param redis.master.count Number of Redis master instances to deploy
redis:
enabled: false
architecture: standalone
global:
redis:
password: changeme
master:
count: 1
## @section PostgreSQL HA
## @descriptionStart
## PostgreSQL HA is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) if enabled in the values.
## Complete Configuration can be taken from their website.
## @descriptionEnd
#
## @param postgresql-ha.enabled Enable PostgreSQL HA chart
## @param postgresql-ha.postgresql.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql-ha.global.postgresql.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql-ha.global.postgresql.username Name for a custom user to create (overrides `auth.username`)
## @param postgresql-ha.global.postgresql.password Name for a custom password to create (overrides `auth.password`)
## @param postgresql-ha.postgresql.repmgrPassword Repmgr Password
## @param postgresql-ha.postgresql.postgresPassword postgres Password
## @param postgresql-ha.pgpool.adminPassword pgpool adminPassword
## @param postgresql-ha.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
## @param postgresql-ha.primary.persistence.size PVC Storage Request for PostgreSQL HA volume
postgresql-ha:
global:
postgresql:
database: gitea
password: gitea
username: gitea
enabled: false
postgresql:
repmgrPassword: changeme2
postgresPassword: changeme1
password: changeme4
pgpool:
adminPassword: changeme3
service:
ports:
postgresql: 5432
primary:
persistence:
size: 10Gi
## @section PostgreSQL
## @descriptionStart
## PostgreSQL is loaded as a dependency from [Bitnami](https://github.com/bitnami/charts/tree/master/bitnami/postgresql) if enabled in the values.
## Complete Configuration can be taken from their website.
## @descriptionEnd
#
## @param postgresql.enabled Enable PostgreSQL
## @param postgresql.global.postgresql.auth.password Password for the `gitea` user (overrides `auth.password`)
## @param postgresql.global.postgresql.auth.database Name for a custom database to create (overrides `auth.database`)
## @param postgresql.global.postgresql.auth.username Name for a custom user to create (overrides `auth.username`)
## @param postgresql.global.postgresql.service.ports.postgresql PostgreSQL service port (overrides `service.ports.postgresql`)
## @param postgresql.primary.persistence.size PVC Storage Request for PostgreSQL volume
postgresql:
enabled: true
global:
postgresql:
auth:
password: gitea
database: gitea
username: gitea
service:
ports:
postgresql: 5432
primary:
persistence:
size: 10Gi
# By default, removed or moved settings that still remain in a user defined values.yaml will cause Helm to fail running the install/update.
# Set it to false to skip this basic validation check.
## @section Advanced
## @param checkDeprecation Set it to false to skip this basic validation check.
## @param test.enabled Set it to false to disable test-connection Pod.
## @param test.image.name Image name for the wget container used in the test-connection Pod.
## @param test.image.tag Image tag for the wget container used in the test-connection Pod.
checkDeprecation: true
test:
enabled: true
image:
name: busybox
tag: latest
## @param extraDeploy Array of extra objects to deploy with the release
##
extraDeploy: []

View file

@ -0,0 +1,14 @@
[meta]
format_ver = 1
[forgejo]
description = Forgejo Helm
mode = helm
depends_on = traefik:longhorn_storage_class
[#forgejo/helm]
mode = install
name = forgejo-personal
repo = oci://code.forgejo.org/forgejo-helm/forgejo
options_file = forgejo.yml
variable_file = ../../secrets/personal-forgejo.env

View file

@ -0,0 +1,366 @@
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
## Dependencies configuration parameters
## Redis dependency parameters
# -- Install redis as a depending chart
redisDependencyEnabled: true
# -- Install mariadb as a depending chart
mariadbDependencyEnabled: true
# -- Install postgresql as a depending chart
postgresqlDependencyEnabled: false
global:
imageRegistry: ""
imagePullSecrets: []
# Configure redis dependency chart
redis:
auth:
# -- Enable redis authentication
enabled: true
# -- Configure redis password
password: "P4ssb0lt"
sentinel:
# -- Enable redis sentinel
enabled: true
## MariaDB dependency parameters
# Configure mariadb as a dependency chart
mariadb:
# -- Configure mariadb architecture
architecture: replication
auth:
# -- Configure mariadb auth root password
rootPassword: root
# -- Configure mariadb auth username
username: passbolt
# -- Configure mariadb auth password
password: P4ssb0lt
# -- Configure mariadb auth database
database: passbolt
# -- Configure mariadb auth replicationPassword
replicationPassword: P4ssb0ltReplica
# -- Configure parameters for the primary instance.
primary:
# -- Configure persistence options.
persistence:
# -- Enable persistence on MariaDB primary replicas using a `PersistentVolumeClaim`. If false, use emptyDir
enabled: true
# -- Name of an existing `PersistentVolumeClaim` for MariaDB primary replicas. When it's set the rest of persistence parameters are ignored.
existingClaim: ""
# -- Subdirectory of the volume to mount at
subPath: ""
# -- Primary persistent volume storage Class
storageClass: "longhorn"
# -- Labels for the PVC
labels: {}
# -- Primary persistent volume claim annotations
annotations: {}
# -- Primary persistent volume access Modes
accessModes:
- ReadWriteOnce
# -- Primary persistent volume size
size: 8Gi
# -- Selector to match an existing Persistent Volume
selector: {}
# -- Configure parameters for the secondary instance.
secondary:
# -- Configure persistence options.
persistence:
# -- Enable persistence on MariaDB secondary replicas using a `PersistentVolumeClaim`. If false, use emptyDir
enabled: true
# -- Subdirectory of the volume to mount at
subPath: ""
# -- Secondary persistent volume storage Class
storageClass: "longhorn"
# -- Labels for the PVC
labels: {}
# -- Secondary persistent volume claim annotations
annotations: {}
# -- Secondary persistent volume access Modes
accessModes:
- ReadWriteOnce
# -- Secondary persistent volume size
size: 8Gi
# -- Selector to match an existing Persistent Volume
selector: {}
## Passbolt configuration
## Passbolt container and sidecar parameters
app:
  # -- Configure passbolt deployment init container that waits for database
  databaseInitContainer:
    # -- Toggle passbolt deployment init container that waits for database
    enabled: true
  #initImage:
  #  # -- Configure passbolt deployment init container image client for database
  #  client: mariadb
  #  registry: ""
  #  # -- Configure passbolt deployment image repository
  #  repository: mariadb
  #  # -- Configure passbolt deployment image pullPolicy
  #  pullPolicy: IfNotPresent
  #  # -- Overrides the image tag whose default is the chart appVersion.
  #  tag: latest
  image:
    # -- Configure passbolt deployment image registry
    registry: ""
    # -- Configure passbolt deployment image repository
    repository: passbolt/passbolt
    # -- Configure passbolt deployment image pullPolicy
    pullPolicy: IfNotPresent
# Allowed options: mariadb, mysql or postgresql
database:
kind: mariadb
# -- Configure ssl on mariadb/mysql clients
# -- In case this is enabled, you will be responsible for creating and mounting the certificates and
# -- additional configutions on both the client and the server.
# ssl: off
cache:
# Use CACHE_CAKE_DEFAULT_* variables to configure the connection to redis instance
# on the passboltEnv configuration section
redis:
# -- By enabling redis the chart will mount a configuration file on /etc/passbolt/app.php
# That instructs passbolt to store sessions on redis and to use it as a general cache.
enabled: true
sentinelProxy:
# -- Inject a haproxy sidecar container configured as a proxy to redis sentinel
# Make sure that CACHE_CAKE_DEFAULT_SERVER is set to '127.0.0.1' to use the proxy
enabled: true
# -- Configure redis sentinel proxy image
image:
registry: ""
# -- Configure redis sentinel image repository
repository: haproxy
# -- Configure redis sentinel image tag
tag: "latest"
# -- Configure redis sentinel container resources
resources: {}
# -- Configure the passbolt deployment resources
extraPodLabels: {}
resources: {}
tls:
# -- If autogenerate is true, the chart will generate a secret with a certificate for APP_FULL_BASE_URL hostname
# -- if autogenerate is false, existingSecret should be filled with an existing tls kind secret name
# @ignored
autogenerate: true
#existingSecret: ""
# -- Enable email cron
cronJobEmail:
enabled: true
schedule: "* * * * *"
extraPodLabels: {}
## Passbolt environment parameters
# -- Pro subscription key in base64 only if you are using pro version
# subscriptionKey:
# -- Configure passbolt subscription key path
# subscription_keyPath: /etc/passbolt/subscription_key.txt
# -- Configure passbolt gpg directory
gpgPath: /etc/passbolt/gpg
# -- Gpg server private key in base64
gpgServerKeyPrivate: ""
# -- Gpg server public key in base64
gpgServerKeyPublic: ""
# -- Name of the existing secret for the GPG server keypair. The secret must contain the `serverkey.asc` and `serverkey_private.asc` keys.
gpgExistingSecret: ""
# -- Name of the existing secret for the JWT server keypair. The secret must contain the `jwt.key` and `jwt.pem` keys.
jwtExistingSecret: ""
# -- Configure passbolt jwt directory
jwtPath: /etc/passbolt/jwt
# -- JWT server private key in base64
jwtServerPrivate: ""
# -- JWT server public key in base64
jwtServerPublic: ""
# -- Forces overwrite JWT keys
jwtCreateKeysForced: false
jobCreateJwtKeys:
extraPodLabels: {}
jobCreateGpgKeys:
extraPodLabels: {}
passboltEnv:
plain:
# -- Configure passbolt privacy url
PASSBOLT_LEGAL_PRIVACYPOLICYURL: https://www.passbolt.com/privacy
# -- Configure passbolt to force ssl
PASSBOLT_SSL_FORCE: false
# -- Toggle passbolt public registration
PASSBOLT_REGISTRATION_PUBLIC: false
# -- Configure passbolt cake cache server
CACHE_CAKE_DEFAULT_SERVER: 127.0.0.1
# -- Configure passbolt default email service port
EMAIL_TRANSPORT_DEFAULT_PORT: 587
# -- Toggle passbolt debug mode
DEBUG: false
# -- Toggle passbolt selenium mode
PASSBOLT_SELENIUM_ACTIVE: false
# -- Configure passbolt license path
PASSBOLT_PLUGINS_LICENSE_LICENSE: /etc/passbolt/subscription_key.txt
# -- Configure passbolt jwt private key path
PASSBOLT_JWT_SERVER_KEY: /var/www/passbolt/config/jwt/jwt.key
# -- Configure passbolt jwt public key path
PASSBOLT_JWT_SERVER_PEM: /var/www/passbolt/config/jwt/jwt.pem
# -- Toggle passbolt jwt authentication
PASSBOLT_PLUGINS_JWT_AUTHENTICATION_ENABLED: true
# -- Download Command for kubectl
KUBECTL_DOWNLOAD_CMD: curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
secret:
# -- Configure passbolt cake cache password
CACHE_CAKE_DEFAULT_PASSWORD: P4ssb0lt
# -- Configure passbolt default database password
DATASOURCES_DEFAULT_PASSWORD: P4ssb0lt
# -- Configure passbolt default database username
DATASOURCES_DEFAULT_USERNAME: passbolt
# -- Configure passbolt default database
DATASOURCES_DEFAULT_DATABASE: passbolt
# -- Configure passbolt server gpg key fingerprint
# PASSBOLT_GPG_SERVER_KEY_FINGERPRINT:
# -- Configure passbolt security salt.
# SECURITY_SALT:
# -- Environment variables to add to the passbolt pods
extraEnv: []
# -- Environment variables from secrets or configmaps to add to the passbolt pods
extraEnvFrom:
[]
# - secretRef:
# name: passbolt-secret
## Passbolt deployment parameters
# -- If autoscaling is disabled this will define the number of pods to run
replicaCount: 2
# Configure autoscaling on passbolt deployment
autoscaling:
  # -- Enable autoscaling on passbolt deployment
  enabled: false
  # -- Configure autoscaling minimum replicas
  minReplicas: 1
  # -- Configure autoscaling maximum replicas
  maxReplicas: 100
  # -- Configure autoscaling target CPU utilization percentage
  targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# -- Enable role based access control
rbacEnabled: true
# -- Configure passbolt container livenessProbe
livenessProbe:
# @ignore
httpGet:
port: https
scheme: HTTPS
path: /healthcheck/status.json
httpHeaders:
- name: Host
value: passbolt.hofers.cloud
initialDelaySeconds: 20
periodSeconds: 10
# -- Configure passbolt container readinessProbe
readinessProbe:
  # @ignore
  httpGet:
    port: https
    scheme: HTTPS
    httpHeaders:
      - name: Host
        value: passbolt.hofers.cloud
    path: /healthcheck/status.json
  initialDelaySeconds: 5
  periodSeconds: 10
# Configure network policies to allow ingress access passbolt pods
# networkPolicy defines which labels are allowed to reach to passbolt
# and which namespaces
networkPolicy:
# -- Enable network policies to allow ingress access passbolt pods
enabled: false
# -- Configure network policies label for ingress deployment
label: app.kubernetes.io/name
# -- Configure network policies podLabel for podSelector
podLabel: ingress-nginx
# -- Configure network policies namespaceLabel for namespaceSelector
namespaceLabel: ingress-nginx
# -- Configure image pull secrets
imagePullSecrets: []
# -- Value to override the chart name on default
nameOverride: ""
# -- Value to override the whole fullName
fullnameOverride: ""
serviceAccount:
# -- Specifies whether a service account should be created
create: true
# -- Annotations to add to the service account
annotations: {}
# -- Map of annotation for passbolt server pod
podAnnotations: {}
# -- Security Context configuration for passbolt server pod
podSecurityContext:
{}
# fsGroup: 2000
service:
# -- Configure passbolt service type
type: ClusterIP
# -- Annotations to add to the service
annotations: {}
# -- Configure the service ports
ports:
# -- Configure the HTTPS port
https:
# -- Configure passbolt HTTPS service port
port: 443
# -- Configure passbolt HTTPS service targetPort
targetPort: 443
# -- Configure passbolt HTTPS service port name
name: https
http:
# -- Configure passbolt HTTP service port
port: 80
# -- Configure passbolt HTTP service targetPort
targetPort: 80
# -- Configure passbolt HTTP service port name
name: http
ingress:
# -- Enable passbolt ingress
enabled: true
# -- Configure passbolt ingress annotations
annotations: {}
# -- Configure passbolt ingress hosts
hosts:
# @ignored
- host: passbolt.hofers.cloud
paths:
- path: /
port: http
pathType: ImplementationSpecific
# -- Configure passbolt deployment nodeSelector
nodeSelector: {}
# -- Configure passbolt deployment tolerations
tolerations: []
# -- Configure passbolt deployment affinity
affinity: {}
# -- Add additional volumes, e.g. for overwriting config files
extraVolumes: []
# -- Add additional volume mounts, e.g. for overwriting config files
extraVolumeMounts: []

View file

@ -0,0 +1,24 @@
[meta]
format_ver = 1
[passbolt_repo]
description = Passbolt Helm Repository
mode = helm
depends_on = traefik:longhorn_storage_class
[#passbolt_repo/helm]
mode = add_repo
name = passbolt
repo = https://download.passbolt.com/charts/passbolt
[passbolt]
description = Passbolt Password Manager
mode = helm
depends_on = passbolt_repo
[#passbolt/helm]
mode = install
name = mypassbolt
repo = passbolt/passbolt
options_file = passbolt.yml
variable_file = ../../secrets/passbolt.env

View file

@ -0,0 +1,15 @@
persistentVolumeClaim:
  # Persist Pi-hole configuration on the Longhorn storage class.
  enabled: true
  storageClass: longhorn
serviceWeb:
  annotations:
    # Web UI and DNS services share a single MetalLB IP via the
    # allow-shared-ip annotation (same key and IP on both services).
    metallb.universe.tf/allow-shared-ip: pihole-svc
    metallb.universe.tf/loadBalancerIPs: 192.168.2.20
  type: LoadBalancer
serviceDns:
  annotations:
    # Same shared-IP key and address as serviceWeb, so both services
    # are exposed on 192.168.2.20.
    metallb.universe.tf/allow-shared-ip: pihole-svc
    metallb.universe.tf/loadBalancerIPs: 192.168.2.20
  type: LoadBalancer

View file

@ -0,0 +1,23 @@
[meta]
format_ver = 1
[pihole_repo]
description = Pihole Helm Repository
mode = helm
depends_on = traefik:longhorn_storage_class
[#pihole_repo/helm]
mode = add_repo
name = mojo2600pihole
repo = https://mojo2600.github.io/pihole-kubernetes/
[pihole]
mode = helm
depends_on = pihole_repo
[#pihole/helm]
mode = install
name = pihole
repo = mojo2600pihole/pihole
options_file = pihole.yml
variable_file = ../../secrets/pihole.env

View file

@ -0,0 +1,18 @@
[meta]
format_ver = 1
[passbolt_project]
mode = include
path = ./passbolt/project.ini
[forgejo_personal_project]
mode = include
path = ./forgejo/project.ini
[pihole_project]
mode = include
path = ./pihole/project.ini
[tailscale_project]
mode = include
path = ./tailscale/project.ini

View file

@ -0,0 +1,12 @@
apiVersion: tailscale.com/v1alpha1
kind: Connector
metadata:
  name: ts-kube
spec:
  hostname: ts-kube
  # Advertise these local subnets to the tailnet through this connector.
  subnetRouter:
    advertiseRoutes:
      - "10.0.0.0/24"
      - "192.168.0.0/24"
      - "192.168.2.0/24"
  # Also offer this connector as a tailnet exit node.
  exitNode: true

View file

@ -0,0 +1,32 @@
[meta]
format_ver = 1
[tailscale_repo]
description = Tailscale Helm Repository
mode = helm
depends_on = traefik:longhorn_storage_class
[#tailscale_repo/helm]
mode = add_repo
name = tailscale
repo = https://pkgs.tailscale.com/helmcharts
[tailscale]
mode = helm
depends_on = tailscale_repo
[#tailscale/helm]
mode = install
name = tailscale
repo = tailscale/tailscale-operator
variable_file = ../../secrets/tailscale.env
namespace_name = tailscale
create_namespace = yes
[tailscale_connectors]
mode = k3s
depends_on = tailscale
[#tailscale_connectors/k3s]
mode = install
yml_path = ./connectors.yml

View file

@ -9,7 +9,7 @@ import socket
import json
import sys
def json_to_bytes(str: str) -> bytearray:
def json_to_bytes(str: dict[str, bool | str]) -> bytearray:
return bytearray(json.dumps(str), "utf-8")
# Who needs Flask, anyways?
@ -17,11 +17,11 @@ class HTTPHandler(http.server.BaseHTTPRequestHandler):
def send_headers(self):
self.send_header("Content-Type", "application/json")
self.end_headers()
def do_POST(self):
if self.path == "/api/installer_update_webhook":
content_length = 0
try:
content_length = int(self.headers.get('Content-Length'))
except ValueError:
@ -79,7 +79,7 @@ class HTTPHandler(http.server.BaseHTTPRequestHandler):
output_coloring = "light_yellow"
elif res == "FAIL":
output_coloring = "light_red"
result_text_component = f" {resp_decoded_data["result"]} " if "result" in resp_decoded_data else " "
final_output_text = f"{str_formatted_time} {resp_decoded_data["event_type"].upper()} {resp_decoded_data["level"]}:{result_text_component}{resp_decoded_data["name"]} ({resp_decoded_data["description"]})"
@ -103,7 +103,7 @@ class HTTPHandler(http.server.BaseHTTPRequestHandler):
"success": False,
"error": "Unknown route"
}))
def do_GET(self):
resolved_path = str(Path(self.path).resolve())
file_path = getcwd() + resolved_path
@ -124,7 +124,7 @@ class HTTPHandler(http.server.BaseHTTPRequestHandler):
}))
except () as exception:
exception.print_exception()
def log_message(self, format: str, *args):
status_code = 0
@ -136,7 +136,7 @@ class HTTPHandler(http.server.BaseHTTPRequestHandler):
# Disable logging for the /api/ endpoint for POST requests unless the error code > 400
if len(args) >= 1 and args[0].startswith("POST") and self.path.startswith("/api/") and status_code < 400:
return
super().log_message(format, *args)
port = int(sys.argv[1]) if "SERVE_DEVELOP" not in environ else 10240
@ -144,4 +144,4 @@ server = socketserver.TCPServer(("", port), HTTPHandler)
server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print("[x] started HTTP server.")
server.serve_forever()
server.serve_forever()

View file

@ -1,7 +1,7 @@
#!/usr/bin/env bash
if [ ! -f "config/.env" ]; then
echo "# NOTE: The UUID should be automatically generated, when running nix-shell. However, if it isn't, then" > .env
echo "# run uuidgen and change the below value." >> .env
echo "# NOTE: The UUID should be automatically generated, when running nix-shell. However, if it isn't, then" > config/.env
echo "# run uuidgen and change the below value." >> config/.env
cat config/.env.example >> config/.env
# Apple moment