feature: Gets Ubuntu autoinstall working.

greysoh 2024-08-02 08:13:51 -04:00
parent eb1dc99389
commit 61ccbf61d6
23 changed files with 357 additions and 523 deletions

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Role script for k3s agents: install base packages, open the firewall, install Docker,
# then join the cluster as an agent.
sudo apt update
sudo apt install -y avahi-daemon curl
# k3s API port plus the default pod (10.42.0.0/16) and service (10.43.0.0/16) CIDRs.
sudo ufw allow 6443/tcp
sudo ufw allow from 10.42.0.0/16 to any
sudo ufw allow from 10.43.0.0/16 to any
curl "https://get.docker.com/" -L | bash
# UPSTREAM_HOSTNAME and K3S_TOKEN are exported by the wrapper that merge.py prepends to this script.
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="agent --server https://$UPSTREAM_HOSTNAME:6443 --token $K3S_TOKEN" sh -s -
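
For context, merge.py (added later in this commit) prepends a small wrapper that exports these variables before the role script runs. An illustrative example of that generated header for an agent, using the placeholder values from config/infrastructure.ini and config/.env.example:
#!/usr/bin/env bash
export K3S_TOKEN="shared.secret.here"
export SERVER_NAME="kitteh-node-1/agent"
export SERVER_HOSTNAME="kitteh-node-1-k3s-agent"
export UPSTREAM_NAME="kitteh-node-1/server"
export UPSTREAM_HOSTNAME="kitteh-node-1-k3s-server"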

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Role script for the first (cluster-init) k3s server.
sudo apt update
sudo apt install -y avahi-daemon curl
sudo ufw allow 6443/tcp
sudo ufw allow from 10.42.0.0/16 to any
sudo ufw allow from 10.43.0.0/16 to any
# K3S_TOKEN is exported by the wrapper that merge.py prepends to this script.
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --cluster-init --token $K3S_TOKEN --disable servicelb" sh -s -

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
# Role script for additional k3s servers joining an existing cluster.
sudo apt update
sudo apt install -y avahi-daemon curl
sudo ufw allow 6443/tcp
sudo ufw allow from 10.42.0.0/16 to any
sudo ufw allow from 10.43.0.0/16 to any
# UPSTREAM_HOSTNAME and K3S_TOKEN are exported by the wrapper that merge.py prepends to this script.
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --server https://$UPSTREAM_HOSTNAME:6443 --token $K3S_TOKEN --disable servicelb" sh -s -
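
Once nodes installed with these role scripts have rebooted, a quick hedged check that the joins worked, run on the cluster-init server (k3s bundles kubectl):
sudo k3s kubectl get nodes -o wide   # every server and agent should eventually report Ready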

@@ -1,84 +0,0 @@
let
pkgs = import <nixpkgs> {};
k3s_token = (import ./secrets.nix).services.k3s.token;
in {
imports = [
./commons.nix
];
systemd.services.k3s = {
enable = true;
description = "KittehCluster's modified k3s service";
# From L324: https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/cluster/k3s/builder.nix
path = with pkgs; [
kmod
socat
iptables
iproute2
ipset
bridge-utils
ethtool
util-linux
conntrack-tools
runc
bash
];
serviceConfig = {
Type = "simple";
ExecStart = pkgs.writeShellScript "k3s-hack" ''
rm -rf /tmp/k3shack
# Manually recreate the symlinks. Don't @ me.
mkdir /tmp/k3shack
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/containerd
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/crictl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/ctr
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-agent
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-certificate
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-completion
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-etcd-snapshot
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-secrets-encrypt
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-server
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-token
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/kubectl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s
export PATH=/tmp/k3shack:$PATH
k3s agent --token ${k3s_token} --server https://kitteh-node-1-k3s-server:6443
'';
};
};
virtualisation.docker.enable = true;
networking.firewall = {
enable = true;
allowedTCPPorts = [
# HTTP(s)
80
443
# Docker swarm
2377
7946
4789
# K3s
6443
2379
2380
];
allowedUDPPorts = [
# Docker swarm
7946
# K3s
8472
];
};
}

@@ -1,84 +0,0 @@
let
pkgs = import <nixpkgs> {};
update_script = builtins.readFile ./update.sh;
in {
imports = [
./secrets.nix
./hardware-configuration.nix
];
swapDevices = [
{
device = "/var/lib/swapfile";
size = 4 * 1024;
}
];
boot.loader.grub.enable = true;
boot.loader.grub.device = "/dev/vda";
systemd.services.kittehclean = {
enable = true;
description = "Cleans up this Kitteh node & runs init tasks";
serviceConfig = {
Type = "simple";
ExecStart = pkgs.writeShellScript "kittehclean" ''
echo "KittehCluster: Running cleanup tasks..."
chmod -R 644 /etc/rancher 2> /dev/null > /dev/null
chmod -R 644 /var/lib/rancher 2> /dev/null > /dev/null
# Because I'm lazy (and this works), we use this method to write the file
rm -rf /home/clusteradm/update
ln -s ${pkgs.writeShellScript "update" update_script} /home/clusteradm/update
echo "Done."
'';
};
wantedBy = ["network-online.target"];
};
networking.networkmanager.enable = true;
services.openssh = {
enable = true;
settings = {
PasswordAuthentication = false;
};
};
services.avahi.enable = true;
services.avahi.openFirewall = true;
system.nssModules = pkgs.lib.optional true pkgs.nssmdns;
system.nssDatabases.hosts = pkgs.lib.optionals true (pkgs.lib.mkMerge [
(pkgs.lib.mkBefore ["mdns4_minimal [NOTFOUND=return]"]) # before resolution
(pkgs.lib.mkAfter ["mdns4"]) # after dns
]);
users.users.clusteradm = {
initialPassword = "1234";
isNormalUser = true;
extraGroups = ["sudoer" "wheel" "docker"];
};
environment.systemPackages = with pkgs; [
nano
vim
bash
htop
bottom
# Updating
git
# K3s command line tools
k3s
];
system.stateVersion = "24.05";
}

@@ -1,69 +0,0 @@
let
pkgs = import <nixpkgs> {};
k3s_token = (import ./secrets.nix).services.k3s.token;
in {
imports = [
./commons.nix
];
systemd.services.k3s = {
enable = true;
description = "KittehCluster's modified k3s service";
# From L324: https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/cluster/k3s/builder.nix
path = with pkgs; [
kmod
socat
iptables
iproute2
ipset
bridge-utils
ethtool
util-linux
conntrack-tools
runc
bash
];
serviceConfig = {
Type = "simple";
ExecStart = pkgs.writeShellScript "k3s-hack" ''
rm -rf /tmp/k3shack
# Manually recreate the symlinks. Don't @ me.
mkdir /tmp/k3shack
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/containerd
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/crictl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/ctr
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-agent
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-certificate
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-completion
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-etcd-snapshot
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-secrets-encrypt
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-server
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-token
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/kubectl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s
export PATH=/tmp/k3shack:$PATH
k3s server --token ${k3s_token} --server https://kitteh-node-1-k3s-server:6443 --disable servicelb
'';
};
};
# K3s settings
networking.firewall = {
enable = true;
allowedTCPPorts = [
6443
2379
2380
];
allowedUDPPorts = [
8472
];
};
}

@@ -0,0 +1,5 @@
K3S_TOKEN="shared.secret.here"
# NOTE: Password here is not strong! This password is '1234'.
SETUP_USERNAME="clusteradm"
SETUP_PASSWORD="\$y\$j9T\$zoVys9dfUO/jrysh2Dtim1\$ZQbbt9Qw5qXw0NNCQ7ckdOaVM.QY70sxU82/cQz.siB"
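
To replace the defaults, a hedged sketch for generating fresh values (assumes uuidgen plus mkpasswd from the whois package; openssl works as a fallback):
uuidgen                   # new K3S_TOKEN (the ./shell helper also substitutes one automatically)
mkpasswd -m yescrypt      # prompts for a password and prints a $y$... hash for SETUP_PASSWORD
openssl passwd -6         # alternative: prints a $6$... SHA-512 crypt hash, also accepted by Ubuntu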

@@ -0,0 +1,18 @@
[kitteh-node-1/server]
hostname = kitteh-node-1-k3s-server
role = server-init

[kitteh-node-1/agent]
hostname = kitteh-node-1-k3s-agent
upstream = kitteh-node-1/server
role = agent

[kitteh-node-2/server]
hostname = kitteh-node-2-k3s-server
upstream = kitteh-node-1/server
role = server

[kitteh-node-2/agent]
hostname = kitteh-node-2-k3s-agent
upstream = kitteh-node-1/server
role = agent
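
Later nodes should follow the same pattern; a hypothetical third node, reusing the existing cluster-init server as its upstream:
[kitteh-node-3/server]
hostname = kitteh-node-3-k3s-server
upstream = kitteh-node-1/server
role = server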

@@ -1,73 +0,0 @@
#!/usr/bin/env bash
export TERM="xterm-256color"
clear
echo "KittehCluster installer"
echo "Codename 'tundra'"
echo
sed -e 's/\s*\([\+0-9a-zA-Z]*\).*/\1/' << EOF | sudo fdisk /dev/vda
o # dos disk label
n # new partition
p # primary partition
1 # setup boot partition
2048 # align first sector (performance reasons?)
+500M # boot partition size
n # new partition
p # primary partition
2 # partition number 2
# default, start immediately after preceding partition
# default, extend partition to end of disk
a # make a partition bootable
1 # bootable partition is partition 1 -- /dev/vda1
w # write the partition table
q # and we're done
EOF
sudo mkfs.fat -F 32 /dev/vda1
sudo fatlabel /dev/vda1 BOOT
sudo mkfs.ext4 /dev/vda2 -L ROOT
sudo mount /dev/vda2 /mnt
sudo mkdir -p /mnt/boot
sudo mount /dev/vda1 /mnt/boot
sudo nixos-generate-config --root /mnt
sudo mv /mnt/etc/nixos/hardware-configuration.nix /tmp/hardware-configuration.nix
sudo rm -rf /mnt/etc/nixos/* /mnt/etc/nixos/.*
sudo nix-shell -p git --command "git clone $GIT_REPO /mnt/etc/nixos"
if [ ! -f "/mnt/etc/nixos/install-script.sh" ]; then
echo "DEBUG: checking out 'tundra' branch..."
sudo nix-shell -p git --command "cd /mnt/etc/nixos; git checkout tundra"
fi
sudo mv /tmp/hardware-configuration.nix /mnt/etc/nixos/nixinfra/
sudo mv $SECRETS_PATH /mnt/etc/nixos/nixinfra/secrets.nix
sudo bash -c "NIXOS_CONFIG=/mnt/etc/nixos/nixinfra/$NIX_INSTALL_PATH nixos-install"
RET=$?
if [ $RET -ne 0 ]; then
echo "Failed to install! Attempting to spawn bash for debugging..."
echo "NOTE: You will not see a bash prompt (for some reason)"
bash
echo "Bash exited."
else
echo "Successfully installed! Finishing install..."
mkdir /mnt/home/clusteradm/.bin
echo "NIX_INSTALL_PATH=/etc/nixos/nixinfra/$NIX_INSTALL_PATH" > /mnt/home/clusteradm/.bin/.env
echo 'export PATH="$PATH:/home/clusteradm/.bin"' >> /mnt/home/clusteradm/.bashrc
echo 'export PATH="$PATH:/home/clusteradm/.bin"' >> /mnt/home/clusteradm/.zshrc
sleep 60
echo "Rebooting"
sudo reboot
exit
fi
echo "Unmounting filesystems..."
sudo umount -f /mnt/boot
sudo umount -f /mnt
echo "Done."

@@ -1,35 +1,66 @@
#!/usr/bin/env bash
SSH_SERVER="$1"
SERVER_INSTALL_PATH="$1"
ssh-to-srv() {
ssh -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" nixos@$SSH_SERVER $@
}
HTTP_PORT="$((1024 + $RANDOM % 65535))"
TMPDIR="/tmp/server_http_$HTTP_PORT"
if [ "$GIT_REPO" == "" ]; then
export GIT_REPO="https://git.hofers.cloud/greysoh/kittehcluster"
fi
BASE_IPS="$(ip a | grep "inet" | grep "brd" | cut -d "/" -f 1 | cut -d " " -f 6)"
if [ "$NIX_INSTALL_PATH" == "" ]; then
echo "ERROR: the environment variable 'NIX_INSTALL_PATH' is not set!"
echo "This can be fixed by setting it to the path of the nix file, i.e:"
echo "$ NIX_INSTALL_PATH=kitteh-node-1/server.nix ./install.sh"
EXT_10_DOT_IPS="$(echo "$BASE_IPS" | grep "^10\.")"
EXT_192168_IPS="$(echo "$BASE_IPS" | grep "^192\.168\.")"
EXT_172_16_IPS="$(echo "$BASE_IPS" | grep "^172\.16\.")"
EXTERNAL_IP_FULL=$EXT_10_DOT_IPS$'\n'$EXT_192168_IPS$'\n'$EXT_172_16_IPS$'\n'
if [ "$SERVER_INSTALL_PATH" = "" ]; then
echo "You didn't pass in all the arguments! Usage:"
echo " ./install.sh \$INSTALL_KEY"
exit 1
fi
if [ ! -f "secrets.nix" ]; then
echo "ERROR: secrets.nix doesn't exit! Copy that file, and setup your secrets, please."
exit 1
./merge.py "$SERVER_INSTALL_PATH"
echo "[x] initializing..."
mkdir $TMPDIR
echo "#cloud-config" > $TMPDIR/user-data
cat /tmp/script.yml >> $TMPDIR/user-data
if [ "$(uname)" == "Linux" ]; then
echo "[x] stopping firewall (Linux)..."
sudo systemctl stop firewall
fi
echo "Initializing..."
touch $TMPDIR/meta-data
touch $TMPDIR/vendor-data
# Ugh, gotta reimplement ssh-copy-id real quick...
# TODO: see if there's a way to specify custom arguments to ssh-copy-id's SSH process
for i in ~/.ssh/id_*.pub; do
echo "Copying public key '$i'..."
ssh-to-srv bash -c "'mkdir -p ~/.ssh; touch ~/.ssh/authorized_keys; echo -n $(cat $i | base64) | base64 -d > ~/.ssh/authorized_keys'"
done
echo "[x] starting HTTP server..."
echo " - Listening on port $HTTP_PORT."
echo " - Add one of these command line options for Ubuntu (guessed local IP):"
ssh-to-srv bash -c "'echo -n $(cat secrets.nix | base64) | base64 -d > /tmp/secrets.nix'"
ssh-to-srv bash -c "'echo -n $(cat install-script.sh | base64) | base64 -d > /tmp/install.sh'"
ssh-to-srv bash -c "'GIT_REPO=$GIT_REPO NIX_INSTALL_PATH=$NIX_INSTALL_PATH SECRETS_PATH=/tmp/secrets.nix bash /tmp/install.sh'"
while IFS= read -r IP; do
# Skip blank lines left over from concatenating the per-range IP lists.
if [ "$IP" != "" ]; then
echo " - autoinstall \"ds=nocloud-net;s=http://$IP:$HTTP_PORT/\""
fi
done <<< "$EXTERNAL_IP_FULL"
echo " - Choose the right IP."
echo
SERVE_SCRIPT="$PWD/serve.py"
pushd $TMPDIR > /dev/null
python3 $SERVE_SCRIPT $HTTP_PORT
popd > /dev/null
echo "[x] running cleanup tasks..."
rm -rf $TMPDIR
if [ "$(uname)" == "Linux" ]; then
echo "[x] starting firewall (Linux)..."
sudo systemctl start firewall
fi
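
Roughly how the new installer is meant to be driven end to end, as a hedged sketch (node keys come from config/infrastructure.ini):
nix-shell                           # enters ./shell, which creates and loads config/.env
./install.sh kitteh-node-1/server   # merges the cloud-config and starts the one-shot HTTP server
# Boot the target VM from the Ubuntu Server ISO and append the printed kernel argument, e.g.:
#   autoinstall "ds=nocloud-net;s=http://<host-ip>:<port>/"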

@@ -1,10 +0,0 @@
let
pkgs = import <nixpkgs> {};
in {
imports = [
../commons.agent.nix
];
networking.hostName = "kitteh-node-1-k3s-agent";
environment.variables.NIX_BUILD_ID = "kitteh-node-1/agent";
}

@@ -1,75 +0,0 @@
# Because this behaves as cluster init, all the "commons.server.nix" separation
# isn't in here. However, normal commons is. Just FYI.
let
pkgs = import <nixpkgs> {};
k3s_token = (import ../secrets.nix).services.k3s.token;
in {
imports = [
../commons.nix
];
networking.hostName = "kitteh-node-1-k3s-server";
environment.variables.NIX_BUILD_ID = "kitteh-node-1/server";
systemd.services.k3s = {
enable = true;
description = "KittehCluster's modified k3s service";
# From L324: https://github.com/NixOS/nixpkgs/blob/master/pkgs/applications/networking/cluster/k3s/builder.nix
path = with pkgs; [
kmod
socat
iptables
iproute2
ipset
bridge-utils
ethtool
util-linux
conntrack-tools
runc
bash
];
serviceConfig = {
Type = "simple";
ExecStart = pkgs.writeShellScript "k3s-hack" ''
rm -rf /tmp/k3shack
# Manually recreate the symlinks. Don't @ me.
mkdir /tmp/k3shack
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/containerd
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/crictl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/ctr
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-agent
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-certificate
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-completion
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-etcd-snapshot
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-secrets-encrypt
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-server
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s-token
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/kubectl
ln -s ${pkgs.k3s}/bin/.k3s-wrapped /tmp/k3shack/k3s
export PATH=/tmp/k3shack:$PATH
k3s server --cluster-init --token ${k3s_token} --disable servicelb
'';
};
};
# K3s settings
networking.firewall = {
enable = true;
allowedTCPPorts = [
6443
2379
2380
];
allowedUDPPorts = [
8472
];
};
}

@@ -1,10 +0,0 @@
let
pkgs = import <nixpkgs> {};
in {
imports = [
../commons.agent.nix
];
networking.hostName = "kitteh-node-2-k3s-agent";
environment.variables.NIX_BUILD_ID = "kitteh-node-2/agent";
}

@@ -1,10 +0,0 @@
let
pkgs = import <nixpkgs> {};
in {
imports = [
../commons.server.nix
];
networking.hostName = "kitteh-node-2-k3s-server";
environment.variables.NIX_BUILD_ID = "kitteh-node-2/server";
}

serverinfra/merge.py Executable file
@@ -0,0 +1,99 @@
#!/usr/bin/env python3
from os import environ, path, listdir
from sys import argv
import configparser
import base64
import yaml

# These must be exported by the caller (install.sh / ./shell source them from config/.env).
for item in ["K3S_TOKEN", "SETUP_USERNAME", "SETUP_PASSWORD"]:
    if item not in environ:
        print(f"ERROR: .env failed to load! (missing environment variable '{item}')")
        exit(1)

if len(argv) < 2:
    print("ERROR: Missing the server name")
    exit(1)

server_name = argv[1]
server_infra_contents = ""

with open("config/infrastructure.ini", "r") as f:
    server_infra_contents = f.read()

infrastructure = configparser.ConfigParser()
infrastructure.read_string(server_infra_contents)

if server_name not in infrastructure:
    print("ERROR: Server not found in infrastructure document")
    exit(1)

infra_server = infrastructure[server_name]
ubuntu_install_contents = ""

with open("ubuntu-install.yml", "r") as f:
    ubuntu_install_contents = f.read()

yaml_install_script = yaml.load(ubuntu_install_contents, Loader=yaml.CLoader)

for item in ["hostname", "role"]:
    if item not in infra_server:
        print(f"ERROR: Missing {item} in {server_name}")
        exit(1)

# Build a wrapper script that exports this node's settings, then append the matching role script.
custom_shell_script = "#!/usr/bin/env bash\n"
custom_shell_script += f"export K3S_TOKEN=\"{environ['K3S_TOKEN']}\"\n"
custom_shell_script += f"export SERVER_NAME=\"{server_name}\"\n"
custom_shell_script += f"export SERVER_HOSTNAME=\"{infra_server['hostname']}\"\n"

if "upstream" in infra_server:
    upstream_name = infra_server["upstream"]

    if upstream_name not in infrastructure:
        print(f"ERROR: Could not find upstream server '{upstream_name}'")
        exit(1)

    upstream_server = infrastructure[infra_server["upstream"]]

    if "hostname" not in upstream_server:
        print(f"ERROR: Missing hostname in upstream '{upstream_name}'")
        exit(1)

    custom_shell_script += f"export UPSTREAM_NAME=\"{upstream_name}\"\n"
    custom_shell_script += f"export UPSTREAM_HOSTNAME=\"{upstream_server['hostname']}\"\n"

custom_shell_script += "\n"

with open(f"base-scripts/role.{infra_server['role']}.sh", "r") as base_script:
    custom_shell_script += base_script.read()

encoded_custom_shell_script = base64.b64encode(bytes(custom_shell_script, "utf-8")).decode("utf-8")

# Write the generated script into the installed system and run it via curtin once the install finishes.
yaml_install_script["autoinstall"]["late-commands"] = []
yaml_install_script["autoinstall"]["late-commands"].append(f"bash -c \"echo \"{encoded_custom_shell_script}\" | base64 -d > /target/postinstall_script\"")
yaml_install_script["autoinstall"]["late-commands"].append("curtin in-target -- bash /postinstall_script")
yaml_install_script["autoinstall"]["late-commands"].append("rm -rf /target/postinstall_script")

# Copy any local SSH public keys into the installed user's authorized keys.
yaml_install_script["autoinstall"]["ssh"]["authorized-keys"] = []
ssh_directory_contents = []

try:
    ssh_directory_contents = listdir(path.expanduser("~/.ssh/"))
except FileNotFoundError:
    pass

for file in ssh_directory_contents:
    if file.endswith(".pub"):
        with open(path.join(path.expanduser("~/.ssh/"), file), "r") as ssh_public_key:
            yaml_install_script["autoinstall"]["ssh"]["authorized-keys"].append(ssh_public_key.read())

yaml_install_script["autoinstall"]["identity"]["hostname"] = infra_server["hostname"]
yaml_install_script["autoinstall"]["identity"]["username"] = environ["SETUP_USERNAME"]
yaml_install_script["autoinstall"]["identity"]["password"] = environ["SETUP_PASSWORD"]

ubuntu_install_contents = yaml.dump(yaml_install_script, Dumper=yaml.CDumper)

with open("/tmp/script.yml", "w") as new_install_script:
    new_install_script.write(ubuntu_install_contents)
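
For debugging, merge.py can also be run on its own; a hedged example (install.sh and ./shell normally provide these variables from config/.env):
export K3S_TOKEN="shared.secret.here"
export SETUP_USERNAME="clusteradm"
export SETUP_PASSWORD='$y$...'      # crypt hash, see config/.env.example
./merge.py kitteh-node-1/server     # writes the merged cloud-config to /tmp/script.yml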

@@ -1,18 +0,0 @@
# Example secrets configuration
# There is a better way to do this, but this works.
# To get started:
# 1. Copy this file to 'secrets.nix'
# 2. Run uuidgen (or some other algorithm) to generate a shared secret, and replace services.k3s.token's value with that
# 3. Copy your SSH key(s) into the authorized_keys section.
# 4. Profit!
let
pkgs = import <nixpkgs> {};
in {
services.k3s.token = "shared.secret.here";
users.users.clusteradm.openssh.authorizedKeys.keys = [
];
}

serverinfra/serve.py Normal file
@@ -0,0 +1,29 @@
# TODO:
# Install logging over HTTP *could* be implemented (see autoinstall documentation), however
# it is not implemented here.
import socketserver
import http.server
import socket
import sys

requests = set()

# Serves the current directory; once user-data, meta-data and vendor-data have all been
# fetched by the installer, the process exits.
class HTTPHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        http.server.SimpleHTTPRequestHandler.do_GET(self)
        requests.add(self.path)

        found_meta_data = "/meta-data" in requests
        found_user_data = "/user-data" in requests
        found_vendor_data = "/vendor-data" in requests

        if found_meta_data and found_user_data and found_vendor_data:
            print("[x] sent all our data, exiting...")
            sys.exit(0)

# Bind manually so SO_REUSEADDR is applied before the socket is bound.
server = socketserver.TCPServer(("", int(sys.argv[1])), HTTPHandler, bind_and_activate=False)
server.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server.server_bind()
server.server_activate()

print("[x] started HTTP server.")
server.serve_forever()
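
serve.py is normally started by install.sh from the temporary seed directory; a hedged manual equivalent:
cd /tmp/server_http_<port>                      # must contain user-data, meta-data and vendor-data
python3 /path/to/serverinfra/serve.py <port>    # exits once all three seed files have been fetched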

serverinfra/shell Executable file
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
if [ ! -f "conifg/.env" ]; then
echo "# NOTE: The UUID should be automatically generated, when running nix-shell. However, if it isn't, then" > .env
echo "# run uuidgen and change the below value." >> .env
cat config/.env.example >> config/.env
# Apple moment
sed "s/K3S_TOKEN=\"shared.secret.here\"/K3S_TOKEN=\"$(uuidgen)\"/g" config/.env > config/.env.2
mv config/.env.2 config/.env
echo "INFO: Before running any installation scripts, you should look over the contents of the file '.env',"
echo "and modify the contents as needed."
echo
fi
echo "Installation usage:"
echo " - ./install.sh \$IP:"
echo " Installs Ubuntu Server on \$IP. You will find the correct password in Help > Help on SSH access"
echo
echo "Have fun!"
set -a
source config/.env
set +a
bash
EXIT_CODE=$?
exit $EXIT_CODE

serverinfra/shell.nix Normal file
@@ -0,0 +1,14 @@
{
pkgs ? import <nixpkgs> { },
}: pkgs.mkShell {
buildInputs = with pkgs; [
python312
# Packages
python312Packages.pyyaml
];
shellHook = ''
./shell
'';
}
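
Entering the environment is the intended starting point; a hedged sketch of what that looks like:
cd serverinfra
nix-shell   # builds the Python 3.12 + PyYAML environment, then the shellHook runs ./shell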

@@ -0,0 +1,57 @@
#cloud-config
# See the autoinstall documentation at:
# https://canonical-subiquity.readthedocs-hosted.com/en/latest/reference/autoinstall-reference.html
autoinstall:
  apt:
    disable_components: []
    fallback: offline-install
    geoip: true
    mirror-selection:
      primary:
      - country-mirror
      - arches: &id001
        - amd64
        - i386
        uri: http://archive.ubuntu.com/ubuntu/
      - arches: &id002
        - s390x
        - arm64
        - armhf
        - powerpc
        - ppc64el
        - riscv64
        uri: http://ports.ubuntu.com/ubuntu-ports
    preserve_sources_list: false
    security:
    - arches: *id001
      uri: http://security.ubuntu.com/ubuntu/
    - arches: *id002
      uri: http://ports.ubuntu.com/ubuntu-ports
  codecs:
    install: false
  drivers:
    install: false
  kernel:
    package: linux-generic
  keyboard:
    layout: us
    toggle: null
    variant: ""
  locale: en_US.UTF-8
  oem:
    install: auto
  source:
    id: ubuntu-server-minimal
    search_drivers: false
  identity:
    realname: Cluster Administrator
  ssh:
    allow-pw: false
    install-server: true
  storage:
    layout:
      name: lvm
      match:
        path: /dev/vda
  updates: security
  version: 1

@@ -1,44 +0,0 @@
nix_bld_unset_err() {
echo "ERROR: NIX_BUILD_ID is not set (should be set by default!)"
echo " Please set NIX_BUILD_ID manually. i.e:"
echo " NIX_BUILD_ID=kitteh-node-1/agent updater"
exit 1
}
if [[ "$NIX_BUILD_ID" == "" ]]; then
if [[ ! -f "/tmp/nixbuildid" ]]; then
nix_bld_unset_err
fi
source /tmp/nixbuildid
if [[ "$NIX_BUILD_ID" == "" ]]; then
nix_bld_unset_err
fi
fi
if [[ "$UID" != "0" ]]; then
# Hacky workaround for failing to read NIX_BUILD_ID when called like:
# - $: ./update
# but this works:
# - $: sudo su
# - #: ./update
# NOTE: Calling `$: sudo ./update` still doesn't work with this hack. Just use `./update`, man.
echo "NIX_BUILD_ID=$NIX_BUILD_ID" > /tmp/nixbuildid
chmod +x /tmp/nixbuildid
sudo $0 $@
STATUS_CODE=$?
rm -rf /tmp/nixbuildid
exit $STATUS_CODE
fi
pushd /etc/nixos 2> /dev/null > /dev/null
git pull
popd 2> /dev/null > /dev/null
export NIX_PATH="$(printf $NIX_PATH | sed --expression="s#/etc/nixos/configuration.nix#/etc/nixos/nixinfra/$NIX_BUILD_ID.nix#g")"
nixos-rebuild switch --upgrade