diff --git a/ops/sso/prod.jsonnet b/ops/sso/prod.jsonnet
new file mode 100644
index 00000000..07f152e4
--- /dev/null
+++ b/ops/sso/prod.jsonnet
@@ -0,0 +1,15 @@
+local sso = import "sso.libsonnet";
+
+{
+    sso: sso {
+        cfg+: {
+            domain: "sso.hackerspace.pl",
+            database+: {
+                host: "public.crdb-waw1.svc.cluster.local",
+                name: "sso",
+                username: "sso",
+                tlsSecret: "client-sso-certificate",
+            },
+        },
+    },
+}
diff --git a/ops/sso/sso.libsonnet b/ops/sso/sso.libsonnet
new file mode 100644
index 00000000..3ac30027
--- /dev/null
+++ b/ops/sso/sso.libsonnet
@@ -0,0 +1,126 @@
+# kubectl create secret generic sso --from-literal=secret_key=$(pwgen 24 1) --from-literal=ldap_bind_password=...
+
+local kube = import "../../kube/kube.libsonnet";
+
+{
+    local app = self,
+    local cfg = app.cfg,
+
+    cfg:: {
+        namespace: "sso",
+        image: "registry.k0.hswaw.net/informatic/sso-v2@sha256:a44055a4f1d2a4e0708838b571f3a3c018f3b97adfea71ae0cf1df98246bf6cf",
+        domain: error "domain must be set",
+        database: {
+            host: error "database.host must be set",
+            name: error "database.name must be set",
+            username: error "database.username must be set",
+            port: 26257,
+            tlsSecret: error "database.tlsSecret must be set",
+        },
+    },
+
+    ns: kube.Namespace(app.cfg.namespace),
+
+    deployment: app.ns.Contain(kube.Deployment("sso")) {
+        spec+: {
+            replicas: 1,
+            template+: {
+                spec+: {
+                    volumes_: {
+                        crdb: {
+                            secret: {
+                                secretName: cfg.database.tlsSecret,
+                                defaultMode: std.parseOctal("0600"),
+                            },
+                        },
+                        tlscopy: kube.EmptyDirVolume(), # see initContainers_.secretCopy
+                    },
+                    securityContext: {
+                        runAsUser: 100,
+                        runAsGroup: 101,
+                        fsGroup: 101,
+                    },
+                    initContainers_: {
+                        # psycopg2 / libpq wants its TLS secret keys to be only
+                        # readable by running process. As k8s exposes
+                        # secrets/configmaps as symlinks, libpq gets confused
+                        # and refuses to start, unless we dereference these into
+                        # a local copy with proper permissions.
+                        secretCopy: kube.Container("secret-copy") {
+                            image: cfg.image,
+                            command: ["sh", "-c", "cp -fv /tls-orig/* /tls && chmod 0400 /tls/*"],
+                            volumeMounts_: {
+                                crdb: { mountPath: "/tls-orig" },
+                                tlscopy: { mountPath: "/tls" },
+                            },
+                        },
+                    },
+                    containers_: {
+                        web: kube.Container("sso") {
+                            image: cfg.image,
+                            ports_: {
+                                http: { containerPort: 5000 },
+                            },
+                            env_: {
+                                DATABASE_URI: "cockroachdb://%s@%s:%d/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s" % [
+                                    cfg.database.username,
+                                    cfg.database.host,
+                                    cfg.database.port,
+                                    cfg.database.name,
+                                    "/tls/ca.crt",
+                                    "/tls/tls.crt",
+                                    "/tls/tls.key",
+                                ],
+
+                                LDAP_BIND_PASSWORD: { secretKeyRef: { name: "sso", key: "ldap_bind_password" } },
+                                SECRET_KEY: { secretKeyRef: { name: "sso", key: "secret_key" } },
+                                LOGGING_LEVEL: "DEBUG",
+                            },
+                            volumeMounts_: {
+                                tlscopy: { mountPath: "/tls" },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+    },
+
+    svc: app.ns.Contain(kube.Service("sso")) {
+        target_pod:: app.deployment.spec.template,
+        spec+: {
+            ports: [
+                { name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
+            ],
+            type: "ClusterIP",
+        },
+    },
+
+    ingress: app.ns.Contain(kube.Ingress("sso")) {
+        metadata+: {
+            annotations+: {
+                "kubernetes.io/tls-acme": "true",
+                "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
+                "nginx.ingress.kubernetes.io/proxy-body-size": "0",
+            },
+        },
+        spec+: {
+            tls: [
+                {
+                    hosts: [cfg.domain],
+                    secretName: "sso-tls",
+                },
+            ],
+            rules: [
+                {
+                    host: cfg.domain,
+                    http: {
+                        paths: [
+                            { path: "/", backend: app.svc.name_port },
+                        ]
+                    },
+                }
+            ],
+        },
+    },
+}