forked from hswaw/hscloud

Change in sso-v2 unifies id_token and userinfo endpoint handling: the groups, nickname, email and preferred_username keys are now present in id_tokens as well.

https://code.hackerspace.pl/informatic/sso-v2/commit/?id=c4c810cd255a7bfcab5ced3fb88c8b311b518c34
Change-Id: Ib22994edc067fd83701590182f8096f6fca692ba
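For illustration, a decoded id_token issued after this change might carry claims along these lines (all values and the standard OIDC claims are hypothetical; only the groups, nickname, email and preferred_username keys come from the change description above):

{
    "iss": "https://sso.example.org",
    "sub": "jdoe",
    "aud": "example-client",
    "exp": 1600000000,
    "iat": 1599996400,
    "nickname": "jdoe",
    "email": "jdoe@example.org",
    "preferred_username": "jdoe",
    "groups": ["staff", "members"],
}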
# kubectl create secret generic sso --from-literal=secret_key=$(pwgen 24 1) --from-literal=ldap_bind_password=...
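# Both keys of that secret are consumed below: secret_key via the SECRET_KEY
# environment variable and ldap_bind_password via LDAP_BIND_PASSWORD, each
# referenced through a secretKeyRef on the "sso" secret.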
local kube = import "../../../kube/kube.libsonnet";

{
    local app = self,
    local cfg = app.cfg,

    cfg:: {
        namespace: "sso",
        image: "registry.k0.hswaw.net/informatic/sso-v2@sha256:3b277a8e2b3c3225d7da10aee37774266f9eb2aa536e7a390160f550b3556087",
        domain: error "domain must be set",

        database: {
            host: error "database.host must be set",
            name: error "database.name must be set",
            username: error "database.username must be set",
            port: 26257,
            tlsSecret: error "database.tlsSecret must be set",
        },
    },

    ns: kube.Namespace(app.cfg.namespace),

    deployment: app.ns.Contain(kube.Deployment("sso")) {
        spec+: {
            replicas: 1,
            template+: {
                spec+: {
                    volumes_: {
                        crdb: {
                            secret: {
                                secretName: cfg.database.tlsSecret,
                                defaultMode: std.parseOctal("0600"),
                            },
                        },
                        tlscopy: kube.EmptyDirVolume(),  # see initContainers_.secretCopy
                    },
                    securityContext: {
                        runAsUser: 100,
                        runAsGroup: 101,
                        fsGroup: 101,
                    },
                    initContainers_: {
                        # psycopg2 / libpq wants its TLS secret keys to be
                        # readable only by the running process. As k8s exposes
                        # secrets/configmaps as symlinks, libpq gets confused
                        # and refuses to start unless we dereference these into
                        # a local copy with proper permissions.
                        secretCopy: kube.Container("secret-copy") {
                            image: cfg.image,
                            command: ["sh", "-c", "cp -fv /tls-orig/* /tls && chmod 0400 /tls/*"],
                            volumeMounts_: {
                                crdb: { mountPath: "/tls-orig" },
                                tlscopy: { mountPath: "/tls" },
                            },
                        },
                    },
                    containers_: {
                        web: kube.Container("sso") {
                            image: cfg.image,
                            ports_: {
                                http: { containerPort: 5000 },
                            },
                            env_: {
                                DATABASE_URI: "cockroachdb://%s@%s:%d/%s?sslmode=require&sslrootcert=%s&sslcert=%s&sslkey=%s" % [
                                    cfg.database.username,
                                    cfg.database.host,
                                    cfg.database.port,
                                    cfg.database.name,
                                    "/tls/ca.crt",
                                    "/tls/tls.crt",
                                    "/tls/tls.key",
                                ],
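                                # With placeholder values (username "sso", host "crdb.example.svc",
                                # database "sso"; none of these come from this file), the URI above
                                # renders roughly to:
                                #   cockroachdb://sso@crdb.example.svc:26257/sso?sslmode=require&sslrootcert=/tls/ca.crt&sslcert=/tls/tls.crt&sslkey=/tls/tls.key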

                                LDAP_BIND_PASSWORD: { secretKeyRef: { name: "sso", key: "ldap_bind_password" } },
                                SECRET_KEY: { secretKeyRef: { name: "sso", key: "secret_key" } },
                                LOGGING_LEVEL: "DEBUG",
                            },
                            volumeMounts_: {
                                tlscopy: { mountPath: "/tls" },
                            },
                        },
                    },
                },
            },
        },
    },

    svc: app.ns.Contain(kube.Service("sso")) {
        target_pod:: app.deployment.spec.template,
        spec+: {
            ports: [
                { name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
            ],
            type: "ClusterIP",
        },
    },

    ingress: app.ns.Contain(kube.Ingress("sso")) {
        metadata+: {
            annotations+: {
                "kubernetes.io/tls-acme": "true",
                "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
                "nginx.ingress.kubernetes.io/proxy-body-size": "0",
            },
        },
        spec+: {
            tls: [
                {
                    hosts: [cfg.domain],
                    secretName: "sso-tls",
                },
            ],
            rules: [
                {
                    host: cfg.domain,
                    http: {
                        paths: [
                            { path: "/", backend: app.svc.name_port },
                        ],
                    },
                },
            ],
        },
    },
}
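A consumer of this library would typically instantiate it with environment-specific overrides. The following is a minimal sketch only; the import path, domain and database values are placeholders and do not come from this repository:

local sso = import "sso.libsonnet";

sso {
    cfg+: {
        domain: "sso.example.org",
        database+: {
            host: "crdb.example.svc",
            name: "sso",
            username: "sso",
            tlsSecret: "sso-client-tls",
        },
    },
}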