cleanup for single node k3s zurrli

This commit is contained in:
Tobias Brunner 2022-12-23 21:00:08 +01:00
parent 4f4146963e
commit 603b92f5a7
Signed by: tobru
SSH Key Fingerprint: SHA256:kywVhvCA+MIxL6eBgoQa+BfC/ROJqcfD2bpy1PR6Ebk
11 changed files with 78 additions and 1268 deletions

2
.envrc Normal file
View File

@ -0,0 +1,2 @@
# direnv: point kubectl/k8s tooling at the zurrli cluster's kubeconfig
# whenever this directory is entered.
export KUBECONFIG=~/.kube/config_zurrli

View File

@ -10,6 +10,8 @@
Secrets are encrypted using [SOPS](https://github.com/mozilla/sops) and [age](https://github.com/FiloSottile/age).
Argo CD uses [KSOPS](https://github.com/viaduct-ai/kustomize-sops) and [kustomize](https://github.com/kubernetes-sigs/kustomize/).
Install `sops` and `age` packages on Arch Linux.
Public key: `age1dfk8euu7afvw7ge5l2qek45z23hdq5anjd56cy4d7kcsf0e0e5pqfjylx8`
The installation and configuration happens in a kustomize patch in `argocd/`.
@ -18,7 +20,6 @@ A good helper to work with SOPS encrypted secrets is [vscode-sops](https://githu
The `age` key needs to be stored at `$HOME/.config/sops/age/keys.txt`
### Usage
Create a normal secret with a `.sops.yaml` file ending. Encrypt it with:

View File

@ -0,0 +1,6 @@
# Backup Tailscale/headscale pre-auth key for the subnet router.
# NOTE(review): this auth key is committed in PLAINTEXT, but the repo's own
# convention (see README) is to encrypt secrets with SOPS/age (*.sops.yaml).
# Move this into a SOPS-encrypted file and rotate/revoke the exposed key.
apiVersion: v1
kind: Secret
metadata:
  name: tailscale-auth-backup
stringData:
  TS_AUTH_KEY: 59396c31b40fb066b9de87cf9bcf1942a00f178fb6c69020

View File

@ -0,0 +1,68 @@
# Tailscale subnet router: RBAC + auth secret + Pod.
# RBAC: tailscaled persists its node state in a Secret (see TS_KUBE_SECRET
# below), so the ServiceAccount needs "create" on secrets plus
# "get"/"update" on the specific "tailscale" state secret.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: tailscale
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["create"]
  - apiGroups: [""]
    resourceNames: ["tailscale"]
    resources: ["secrets"]
    verbs: ["get", "update"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: tailscale
subjects:
  - kind: ServiceAccount
    name: tailscale
roleRef:
  kind: Role
  name: tailscale
  apiGroup: rbac.authorization.k8s.io
---
# Pre-auth key used for the initial login against the headscale server.
# NOTE(review): committed in PLAINTEXT, contradicting the repo's SOPS/age
# secret-handling convention (see README) — encrypt and rotate this key.
apiVersion: v1
kind: Secret
metadata:
  name: tailscale-auth
stringData:
  TS_AUTH_KEY: 3987bd130c13a8d01f3614185691b0bdf48599de8f2a3345
---
apiVersion: v1
kind: Pod
metadata:
  name: subnet-router
  labels:
    app: tailscale
spec:
  serviceAccountName: tailscale
  containers:
    - name: tailscale
      imagePullPolicy: Always
      image: "ghcr.io/tailscale/tailscale:latest"
      env:
        # Store the tailscaled state in a k8s secret (matches the RBAC above)
        - name: TS_KUBE_SECRET
          value: tailscale
        # Userspace networking mode (no /dev/net/tun, so no NET_ADMIN needed)
        - name: TS_USERSPACE
          value: "true"
        # optional: true lets the pod keep starting after the auth secret is
        # deleted post-enrollment — presumably intentional; state then comes
        # from the "tailscale" secret. TODO(review): confirm.
        - name: TS_AUTH_KEY
          valueFrom:
            secretKeyRef:
              name: tailscale-auth
              key: TS_AUTH_KEY
              optional: true
        # Subnets advertised into the tailnet — presumably the cluster
        # service and pod CIDRs; verify against the cluster config.
        - name: TS_ROUTES
          value: "10.96.0.0/12,10.244.0.0/16"
        # Self-hosted headscale control server instead of tailscale.com
        - name: TS_EXTRA_ARGS
          value: "--login-server https://headscale.tbrnt.ch"
      securityContext:
        runAsUser: 1000
        runAsGroup: 1000

View File

@ -1,19 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: external-snapshotter
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: system
source:
path: system/external-snapshotter
repoURL: https://git.tbrnt.ch/tobru/gitops-zurrli.git
targetRevision: HEAD
destination:
namespace: kube-system
server: https://kubernetes.default.svc
syncPolicy:
automated:
selfHeal: false

View File

@ -1,19 +0,0 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: volsync
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: system
source:
chart: volsync
repoURL: https://backube.github.io/helm-charts/
targetRevision: 0.5.0
destination:
namespace: volsync
server: https://kubernetes.default.svc
syncPolicy:
syncOptions:
- CreateNamespace=true

File diff suppressed because it is too large Load Diff

View File

@ -1,155 +0,0 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: snapshot-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- list
- delete
- update
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: snapshot-controller-runner
rules:
- apiGroups:
- ""
resources:
- persistentvolumes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- events
verbs:
- list
- watch
- create
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotclasses
verbs:
- get
- list
- watch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents
verbs:
- create
- get
- list
- watch
- update
- delete
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshotcontents/status
verbs:
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots
verbs:
- get
- list
- watch
- update
- patch
- apiGroups:
- snapshot.storage.k8s.io
resources:
- volumesnapshots/status
verbs:
- update
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: snapshot-controller-leaderelection
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: snapshot-controller-leaderelection
subjects:
- kind: ServiceAccount
name: snapshot-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: snapshot-controller-role
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: snapshot-controller-runner
subjects:
- kind: ServiceAccount
name: snapshot-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: snapshot-controller
namespace: kube-system
spec:
minReadySeconds: 15
replicas: 2
selector:
matchLabels:
app: snapshot-controller
strategy:
rollingUpdate:
maxSurge: 0
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
app: snapshot-controller
spec:
containers:
- args:
- --v=5
- --leader-election=true
image: registry.k8s.io/sig-storage/snapshot-controller:v6.1.0
imagePullPolicy: IfNotPresent
name: snapshot-controller
serviceAccountName: snapshot-controller

View File

@ -1,10 +1,8 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- storageclasses.yaml
- unattended-upgrades.yaml
- tailscale-subnet-router.yaml
- machinedeployments.yaml
- kube-cleanup-operator.yaml
generators:
- secret-generator.yaml

View File

@ -1,50 +0,0 @@
apiVersion: cluster.k8s.io/v1alpha1
kind: MachineDeployment
metadata:
annotations:
k8c.io/operating-system-profile: osp-ubuntu
machinedeployment.clusters.k8s.io/revision: "1"
name: zurrli-pool1
spec:
minReadySeconds: 0
progressDeadlineSeconds: 600
replicas: 2
revisionHistoryLimit: 1
selector:
matchLabels:
workerset: zurrli-pool1
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
template:
metadata:
labels:
workerset: zurrli-pool1
namespace: kube-system
spec:
metadata:
labels:
workerset: zurrli-pool1
providerSpec:
value:
caPublicKey: ""
cloudProvider: hetzner
cloudProviderSpec:
image: ubuntu-20.04
labels:
kubeone_cluster_name: zurrli
zurrli-workers: pool1
location: nbg1
networks:
- "2074236"
serverType: cx21
operatingSystem: ubuntu
operatingSystemSpec:
distUpgradeOnBoot: false
sshPublicKeys:
- |
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBAHdMd0oZvl5GrXP9wTJYVsY8oH+4naSE5C7y/tbacZ tobru
versions:
kubelet: 1.24.6

View File

@ -1,10 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
namespace: kube-system
name: hcloud-volumes
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.hetzner.cloud
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true