kube: standardize on a `local top = self` convention

This introduces a convention: declare `local top = self` at the top of an app/service/component's jsonnet, naming the top-level object. The reasoning is as follows:

- `top` is more universal and unambiguous than `app`
- `top` is usually shorter than a per-app name ($NAME, e.g. `factorio` or `speedtest`)
- a conventional `top` instead of $NAME (coupled with other conventions introduced) makes app jsonnets wonderfully copy-pasteable, aiding in learning and in quickly building new apps; a minimal sketch follows
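
A minimal sketch of the resulting shape (a hypothetical app; the resource and cfg names are illustrative, not taken from any one file in this change):

    local kube = import "../../../kube/hscloud.libsonnet";
    {
        local top = self,
        local cfg = top.cfg,

        cfg:: {
            namespace: error "cfg.namespace must be set",
            image: error "cfg.image must be set",
        },

        local ns = kube.Namespace(cfg.namespace),

        // Siblings are referenced through `top`, never through a
        // per-app local like `factorio` or `gerrit`.
        deployment: ns.Contain(kube.Deployment("web")) {
            spec+: {
                template+: {
                    spec+: {
                        containers_: {
                            default: kube.Container("default") {
                                image: cfg.image,
                            },
                        },
                    },
                },
            },
        },

        svc: ns.Contain(kube.Service("web")) {
            target:: top.deployment,
        },
    }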

Change-Id: I7ece83ce7e97021ad98a6abb3500fb9839936811
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/1805
Reviewed-by: q3k <q3k@hackerspace.pl>
Branch: master
Author: radex
Date: 2023-11-24 12:01:49 +01:00
Parent: 99ed6a7abb
Commit: c995c212d2
19 changed files with 224 additions and 224 deletions

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/hscloud.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "cfg.namespace must be set",
@@ -36,22 +36,22 @@ local kube = import "../../../kube/hscloud.libsonnet";
 WEB_DOMAIN: cfg.webDomain,
 BIND_ADDR: "0.0.0.0:8080",
-//DB_HOST: app.postgres.svc.host,
+//DB_HOST: top.postgres.svc.host,
 DB_HOST: "boston-packets.hackerspace.pl",
 DB_USER: "mailman",
 DB_NAME: "mailman-web",
-DB_PASS: kube.SecretKeyRef(app.config, "postgres-pass"),
+DB_PASS: kube.SecretKeyRef(top.config, "postgres-pass"),
 DB_PORT: "5432",
 SMTP_HOST: "mail.hackerspace.pl",
 SMTP_PORT: "587",
 SMTP_USER: "postorius",
-SMTP_PASSWORD: kube.SecretKeyRef(app.config, "smtp-password"),
+SMTP_PASSWORD: kube.SecretKeyRef(top.config, "smtp-password"),
-SECRET_KEY: kube.SecretKeyRef(app.config, "django-secret-key"),
-MAILMAN_REST_API_PASS: kube.SecretKeyRef(app.config, 'mailman-api-password'),
-MAILMAN_ARCHIVER_KEY: kube.SecretKeyRef(app.config, 'mailman-archiver-key'),
+SECRET_KEY: kube.SecretKeyRef(top.config, "django-secret-key"),
+MAILMAN_REST_API_PASS: kube.SecretKeyRef(top.config, 'mailman-api-password'),
+MAILMAN_ARCHIVER_KEY: kube.SecretKeyRef(top.config, 'mailman-archiver-key'),
 },
@@ -66,19 +66,19 @@ local kube = import "../../../kube/hscloud.libsonnet";
 initContainers_: {
 migrate: kube.Container("migrate") {
 image: cfg.images.web,
-env_: app.env,
+env_: top.env,
 args: [
 "manage", "migrate",
 ],
 },
 },
 volumes_: {
-config: kube.SecretVolume(app.wireproxyConfig),
+config: kube.SecretVolume(top.wireproxyConfig),
 },
 containers_: {
 default: kube.Container("default") {
 image: cfg.images.web,
-env_: app.env,
+env_: top.env,
 args: ["serve"],
 ports_: {
 web: { containerPort: 8080 },
@@ -160,7 +160,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 svcWeb: ns.Contain(kube.Service("web")) {
-target:: app.web,
+target:: top.web,
 spec+: {
 # hax
 type: "LoadBalancer",
@@ -171,7 +171,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 #ingress: ns.Contain(kube.SimpleIngress("mailman")) {
 # hosts:: [cfg.webDomain],
-# target_service:: app.svcWeb,
+# target_service:: top.svcWeb,
 #},
 config: ns.Contain(kube.Secret("config")) {

View File

@@ -3,8 +3,8 @@ local postgres = import "../../../kube/postgres_v.libsonnet";
 local redis = import "../../../kube/redis.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "cfg.namespace must be set",
@@ -64,33 +64,33 @@ local redis = import "../../../kube/redis.libsonnet";
 // REDIS_PASS is not used directly by the apps, it's just used to embed
 // a secret fragment into REDIS_URL.
-REDIS_PASS: kube.SecretKeyRef(app.config, "redis-pass"),
-REDIS_URL: "redis://:$(REDIS_PASS)@%s" % [app.redis.svc.host_colon_port],
+REDIS_PASS: kube.SecretKeyRef(top.config, "redis-pass"),
+REDIS_URL: "redis://:$(REDIS_PASS)@%s" % [top.redis.svc.host_colon_port],
-DB_HOST: app.postgres.svc.host,
+DB_HOST: top.postgres.svc.host,
 DB_USER: "mastodon",
 DB_NAME: "mastodon",
-DB_PASS: kube.SecretKeyRef(app.config, "postgres-pass"),
+DB_PASS: kube.SecretKeyRef(top.config, "postgres-pass"),
 DB_PORT: "5432",
 ES_ENABLED: "false",
-SECRET_KEY_BASE: kube.SecretKeyRef(app.config, "secret-key-base"),
-OTP_SECRET: kube.SecretKeyRef(app.config, "otp-secret"),
+SECRET_KEY_BASE: kube.SecretKeyRef(top.config, "secret-key-base"),
+OTP_SECRET: kube.SecretKeyRef(top.config, "otp-secret"),
-VAPID_PRIVATE_KEY: kube.SecretKeyRef(app.config, "vapid-private"),
-VAPID_PUBLIC_KEY: kube.SecretKeyRef(app.config, "vapid-public"),
+VAPID_PRIVATE_KEY: kube.SecretKeyRef(top.config, "vapid-private"),
+VAPID_PUBLIC_KEY: kube.SecretKeyRef(top.config, "vapid-public"),
 SMTP_SERVER: "mail.hackerspace.pl",
 SMTP_PORT: "587",
 SMTP_LOGIN: "mastodon",
-SMTP_PASSWORD: kube.SecretKeyRef(app.config, "smtp-password"),
+SMTP_PASSWORD: kube.SecretKeyRef(top.config, "smtp-password"),
 SMTP_FROM_ADDRESS: "mastodon-noreply@hackerspace.pl",
 S3_ENABLED: "true",
 S3_BUCKET: cfg.objectStorage.bucket,
-AWS_ACCESS_KEY_ID: kube.SecretKeyRef(app.config, "object-access-key-id"),
-AWS_SECRET_ACCESS_KEY: kube.SecretKeyRef(app.config, "object-secret-access-key"),
+AWS_ACCESS_KEY_ID: kube.SecretKeyRef(top.config, "object-access-key-id"),
+AWS_SECRET_ACCESS_KEY: kube.SecretKeyRef(top.config, "object-secret-access-key"),
 S3_HOSTNAME: "object.ceph-waw3.hswaw.net",
 S3_ENDPOINT: "https://object.ceph-waw3.hswaw.net",
@@ -106,7 +106,7 @@ local redis = import "../../../kube/redis.libsonnet";
 OIDC_CLIENT_ID: cfg.oidc.clientId,
 OIDC_REDIRECT_URI: "https://%s/auth/auth/openid_connect/callback" % [cfg.webDomain],
 OIDC_SECURITY_ASSUME_EMAIL_IS_VERIFIED: "true",
-OIDC_CLIENT_SECRET: kube.SecretKeyRef(app.config, "oidc-client-secret"),
+OIDC_CLIENT_SECRET: kube.SecretKeyRef(top.config, "oidc-client-secret"),
 OIDC_AUTH_ENDPOINT: "https://sso.hackerspace.pl/oauth/authorize",
 OIDC_TOKEN_ENDPOINT: "https://sso.hackerspace.pl/oauth/token",
 OIDC_USER_INFO_ENDPOINT: "https://sso.hackerspace.pl/api/1/userinfo",
@@ -126,7 +126,7 @@ local redis = import "../../../kube/redis.libsonnet";
 database: "mastodon",
 username: "mastodon",
 prefix: "waw3-",
-password: kube.SecretKeyRef(app.config, "postgres-pass"),
+password: kube.SecretKeyRef(top.config, "postgres-pass"),
 storageClassName: "waw-hdd-redundant-3",
 storageSize: "100Gi",
 opts: { wal_level: "logical" },
@@ -139,7 +139,7 @@ local redis = import "../../../kube/redis.libsonnet";
 appName: "mastodon",
 storageClassName: "waw-hdd-redundant-3",
 prefix: "waw3-",
-password: kube.SecretKeyRef(app.config, "redis-pass"),
+password: kube.SecretKeyRef(top.config, "redis-pass"),
 },
 },
@@ -152,7 +152,7 @@ local redis = import "../../../kube/redis.libsonnet";
 initContainers_: {
 migrate: kube.Container("migrate") {
 image: cfg.images.mastodon,
-env_: app.env {
+env_: top.env {
 //That's confusing one - all the random "how to mastodon in docker" tutorials
 //say you need to set it. However, with this set, the web dashboard was sad
 //about unfinished migrations.
@@ -169,7 +169,7 @@ local redis = import "../../../kube/redis.libsonnet";
 containers_: {
 default: kube.Container("default") {
 image: cfg.images.mastodon,
-env_: app.env,
+env_: top.env,
 command: [
 "bundle", "exec",
 "rails", "s", "-p", "3000",
@@ -211,7 +211,7 @@ local redis = import "../../../kube/redis.libsonnet";
 containers_: {
 default: kube.Container("default") {
 image: cfg.images.mastodon,
-env_: app.env,
+env_: top.env,
 command: [
 "bundle", "exec",
 "sidekiq",
@@ -241,7 +241,7 @@ local redis = import "../../../kube/redis.libsonnet";
 containers_: {
 default: kube.Container("default") {
 image: cfg.images.mastodon,
-env_: app.env {
+env_: top.env {
 "STREAMING_CLUSTER_NUM": "1",
 },
 command: [
@@ -276,11 +276,11 @@ local redis = import "../../../kube/redis.libsonnet";
 },
 svcWeb: ns.Contain(kube.Service("web")) {
-target:: app.web,
+target:: top.web,
 },
 svcStreaming: ns.Contain(kube.Service("streaming")) {
-target:: app.streaming,
+target:: top.streaming,
 },
@@ -305,8 +305,8 @@ local redis = import "../../../kube/redis.libsonnet";
 host: cfg.webDomain,
 http: {
 paths: [
-{ path: "/", backend: app.svcWeb.name_port },
-{ path: "/api/v1/streaming", backend: app.svcStreaming.name_port },
+{ path: "/", backend: top.svcWeb.name_port },
+{ path: "/api/v1/streaming", backend: top.svcStreaming.name_port },
 ],
 },
 },

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image: error "cfg.image must be set",
@@ -13,7 +13,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 ns:: error "ns needs to be a kube.Namespace object",
-local ns = app.ns,
+local ns = top.ns,
 deployment: ns.Contain(kube.Deployment("oauth2-cas-proxy")) {
 spec+: {
@@ -44,6 +44,6 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 svc: ns.Contain(kube.Service("oauth2-cas-proxy")) {
-target:: app.deployment,
+target:: top.deployment,
 },
 }

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image: error "cfg.image must be set",
 realm: error "cfg.realm must be set",
@@ -15,7 +15,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 ns:: error "ns needs to be provided",
-local ns = app.ns,
+local ns = top.ns,
 configMap: ns.Contain(kube.ConfigMap("coturn")) {
 data: {
@@ -71,8 +71,8 @@ local kube = import "../../../kube/kube.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-config: kube.ConfigMapVolume(app.configMap),
-data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+config: kube.ConfigMapVolume(top.configMap),
+data: kube.PersistentVolumeClaimVolume(top.dataVolume),
 },
 containers_: {
 coturn: kube.Container("coturn") {
@@ -131,7 +131,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 svcTCP: ns.Contain(kube.Service("coturn-tcp")) {
-target:: app.deployment,
+target:: top.deployment,
 metadata+: {
 annotations+: {
 "metallb.universe.tf/allow-shared-ip": "coturn",
@@ -151,7 +151,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 svcUDP: ns.Contain(kube.Service("coturn-udp")) {
-target:: app.deployment,
+target:: top.deployment,
 metadata+: {
 annotations+: {
 "metallb.universe.tf/allow-shared-ip": "coturn",

View File

@@ -98,8 +98,8 @@ local mediaRepo = import "./media-repo.libsonnet";
 local coturn = import "./coturn.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "cfg.namespace must be set",
 # webDomain is the domain name at which element will run
@@ -336,16 +336,16 @@ local coturn = import "./coturn.libsonnet";
 synapse: synapse {
 ns: ns,
-postgres: if cfg.postgres.enable then app.postgres3 else {
+postgres: if cfg.postgres.enable then top.postgres3 else {
 # If not using on-cluster postgres, pass the config postgres object
 # as the postgres object into the synapse lib. It's a bit ugly (we
 # should have some common 'config' type instead) but it's good
 # enough.
 cfg: cfg.postgres,
-}, redis: app.redis,
-appservices: app.appservices,
-cfg+: app.cfg {
-image: app.cfg.images.synapse,
+}, redis: top.redis,
+appservices: top.appservices,
+cfg+: top.cfg {
+image: top.cfg.images.synapse,
 macaroonSecretKey: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
 registrationSharedSecret: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
@@ -361,7 +361,7 @@ local coturn = import "./coturn.libsonnet";
 // until it spits you a registration YAML and you feed that to a secret.
 appservices: {},
-ingress: app.namespace.Contain(kube.Ingress("matrix")) {
+ingress: top.namespace.Contain(kube.Ingress("matrix")) {
 metadata+: {
 annotations+: {
 "kubernetes.io/tls-acme": "true",
@@ -382,19 +382,19 @@ local coturn = import "./coturn.libsonnet";
 host: cfg.webDomain,
 http: {
 paths: [
-{ path: path, backend: app.synapse.genericWorker.svc.name_port }
-for path in app.synapse.genericWorker.paths
+{ path: path, backend: top.synapse.genericWorker.svc.name_port }
+for path in top.synapse.genericWorker.paths
 ] + [
-{ path: "/", backend: app.riot.svc.name_port },
-{ path: "/_matrix/media/", backend: if cfg.mediaRepo.route then app.mediaRepo.svc.name_port else app.synapse.mediaWorker.svc.name_port },
-{ path: "/_matrix/", backend: app.synapse.main.svc.name_port },
+{ path: "/", backend: top.riot.svc.name_port },
+{ path: "/_matrix/media/", backend: if cfg.mediaRepo.route then top.mediaRepo.svc.name_port else top.synapse.mediaWorker.svc.name_port },
+{ path: "/_matrix/", backend: top.synapse.main.svc.name_port },
 # Used by OpenID Connect login flow
-{ path: "/_synapse/", backend: app.synapse.main.svc.name_port },
+{ path: "/_synapse/", backend: top.synapse.main.svc.name_port },
 ] + (if cfg.cas.enable then [
-{ path: "/_cas", backend: app.cas.svc.name_port },
+{ path: "/_cas", backend: top.cas.svc.name_port },
 ] else []) + (if cfg.wellKnown then [
-{ path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
+{ path: "/.well-known/matrix", backend: top.wellKnown.svc.name_port },
 ] else [])
 },
 }

View File

@@ -32,8 +32,8 @@ local kube = import "../../../kube/hscloud.libsonnet";
 local postgres = import "../../../kube/postgres.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "cfg.namespace must be set",
 # webDomain is the domain name at which element will run
@@ -96,7 +96,7 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") {
-metadata+: app.metadata("synapse-data"),
+metadata+: top.metadata("synapse-data"),
 storage:: "50Gi",
 storageClass:: cfg.storageClassName,
 },
@@ -112,7 +112,7 @@ local postgres = import "../../../kube/postgres.libsonnet";
 signing_key_path: "/data/%s.signing.key" % [cfg.serverName],
 app_service_config_files: [
 "/appservices/%s/registration.yaml" % [k]
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 ],
 } + (if cfg.cas.enable then {
 cas_config: {
@@ -123,15 +123,15 @@ local postgres = import "../../../kube/postgres.libsonnet";
 } else {}),
 synapseConfigMap: kube.ConfigMap("synapse") {
-metadata+: app.metadata("synapse"),
+metadata+: top.metadata("synapse"),
 data: {
-"homeserver.yaml": std.manifestYamlDoc(app.synapseConfig),
+"homeserver.yaml": std.manifestYamlDoc(top.synapseConfig),
 "log.config": importstr "synapse/log.config",
 },
 },
 casDeployment: if cfg.cas.enable then kube.Deployment("oauth2-cas-proxy") {
-metadata+: app.metadata("oauth2-cas-proxy"),
+metadata+: top.metadata("oauth2-cas-proxy"),
 spec+: {
 replicas: 1,
 template+: {
@@ -160,22 +160,22 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 casSvc: if cfg.cas.enable then kube.Service("oauth2-cas-proxy") {
-metadata+: app.metadata("oauth2-cas-proxy"),
-target:: app.casDeployment,
+metadata+: top.metadata("oauth2-cas-proxy"),
+target:: top.casDeployment,
 },
 synapseDeployment: kube.Deployment("synapse") {
-metadata+: app.metadata("synapse"),
+metadata+: top.metadata("synapse"),
 spec+: {
 replicas: 1,
 template+: {
 spec+: {
 volumes_: {
-data: kube.PersistentVolumeClaimVolume(app.dataVolume),
-config: kube.ConfigMapVolume(app.synapseConfigMap),
+data: kube.PersistentVolumeClaimVolume(top.dataVolume),
+config: kube.ConfigMapVolume(top.synapseConfigMap),
 } + {
 [k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 },
 containers_: {
 web: kube.Container("synapse") {
@@ -203,7 +203,7 @@ local postgres = import "../../../kube/postgres.libsonnet";
 config: { mountPath: "/conf", },
 } + {
 [k]: { mountPath: "/appservices/%s" % [k] }
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 },
 },
 },
@@ -218,8 +218,8 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 synapseSvc: kube.Service("synapse") {
-metadata+: app.metadata("synapse"),
-target:: app.synapseDeployment,
+metadata+: top.metadata("synapse"),
+target:: top.synapseDeployment,
 },
 riotConfig:: {
@@ -253,9 +253,9 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 riotConfigMap: kube.ConfigMap("riot-web-config") {
-metadata+: app.metadata("riot-web-config"),
+metadata+: top.metadata("riot-web-config"),
 data: {
-"config.json": std.manifestJsonEx(app.riotConfig, ""),
+"config.json": std.manifestJsonEx(top.riotConfig, ""),
 // Standard nginx.conf, made to work when running as unprivileged user.
 "nginx.conf": |||
 worker_processes auto;
@@ -304,13 +304,13 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 riotDeployment: kube.Deployment("riot-web") {
-metadata+: app.metadata("riot-web"),
+metadata+: top.metadata("riot-web"),
 spec+: {
 replicas: 1,
 template+: {
 spec+: {
 volumes_: {
-config: kube.ConfigMapVolume(app.riotConfigMap),
+config: kube.ConfigMapVolume(top.riotConfigMap),
 },
 containers_: {
 web: kube.Container("riot-web") {
@@ -343,13 +343,13 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 riotSvc: kube.Service("riot-web") {
-metadata+: app.metadata("riot-web"),
-target:: app.riotDeployment,
+metadata+: top.metadata("riot-web"),
+target:: top.riotDeployment,
 },
 wellKnown: if cfg.wellKnown then {
 deployment: kube.Deployment("wellknown") {
-metadata+: app.metadata("wellknown"),
+metadata+: top.metadata("wellknown"),
 spec+: {
 replicas: 1,
 template+: {
@@ -373,8 +373,8 @@ local postgres = import "../../../kube/postgres.libsonnet";
 },
 },
 svc: kube.Service("wellknown") {
-metadata+: app.metadata("wellknown"),
-target:: app.wellKnown.deployment,
+metadata+: top.metadata("wellknown"),
+target:: top.wellKnown.deployment,
 },
 } else {},
@@ -388,14 +388,14 @@ local postgres = import "../../../kube/postgres.libsonnet";
 ingress: kube.SimpleIngress("matrix") {
 hosts:: [cfg.webDomain],
-target_service:: app.riotSvc,
-metadata+: app.metadata("matrix"),
+target_service:: top.riotSvc,
+metadata+: top.metadata("matrix"),
 extra_paths:: [
-{ path: "/_matrix", backend: app.synapseSvc.name_port },
+{ path: "/_matrix", backend: top.synapseSvc.name_port },
 ] + (if cfg.cas.enable then [
-{ path: "/_cas", backend: app.casSvc.name_port },
+{ path: "/_cas", backend: top.casSvc.name_port },
 ] else []) + (if cfg.wellKnown then [
-{ path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port },
+{ path: "/.well-known/matrix", backend: top.wellKnown.svc.name_port },
 ] else [])
 },
 }

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image: error "cfg.image needs to be set",
@@ -27,7 +27,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 ns:: error "ns needs to be a kube.Namespace object",
-local ns = app.ns,
+local ns = top.ns,
 config:: {
 repo: {
@@ -63,7 +63,7 @@ local kube = import "../../../kube/kube.libsonnet";
 configSecret: ns.Contain(kube.Secret("media-repo-config")) {
 data_: {
-"config.yaml": std.manifestJsonEx(app.config, ""),
+"config.yaml": std.manifestJsonEx(top.config, ""),
 },
 },
@@ -73,7 +73,7 @@ local kube = import "../../../kube/kube.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-config: kube.SecretVolume(app.configSecret),
+config: kube.SecretVolume(top.configSecret),
 tempdir: kube.EmptyDirVolume(),
 },
 containers_: {
@@ -144,7 +144,7 @@ local kube = import "../../../kube/kube.libsonnet";
 "/app/matrix/media-repo-proxy",
 "-downstream_host", downstreamHost,
 "-upstream_host", upstreamHost,
-"-upstream", app.internalSvc.host_colon_port,
+"-upstream", top.internalSvc.host_colon_port,
 "-listen", ":8080",
 ],
 ports_: {
@@ -159,10 +159,10 @@ local kube = import "../../../kube/kube.libsonnet";
 } else {},
 internalSvc: ns.Contain(kube.Service("media-repo-internal")) {
-target:: app.deployment,
+target:: top.deployment,
 },
 svc: if std.length(needProxying) > 0 then ns.Contain(kube.Service("media-repo")) {
-target:: app.proxies.deployment,
-} else app.internalSvc,
+target:: top.proxies.deployment,
+} else top.internalSvc,
 }

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 # webDomain is the domain name at which element will run
 webDomain: error "cfg.webDomain must be set",
@@ -12,7 +12,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 ns:: error "ns needs to be a kube.Namespace object",
-local ns = app.ns,
+local ns = top.ns,
 config:: {
 "default_hs_url": "https://%s" % [cfg.webDomain],
@@ -46,7 +46,7 @@ local kube = import "../../../kube/kube.libsonnet";
 configMap: ns.Contain(kube.ConfigMap("riot-web-config")) {
 data: {
-"config.json": std.manifestJsonEx(app.config, ""),
+"config.json": std.manifestJsonEx(top.config, ""),
 // Standard nginx.conf, made to work when running as unprivileged user.
 "nginx.conf": importstr "riot/nginx.conf",
 },
@@ -58,7 +58,7 @@ local kube = import "../../../kube/kube.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-config: kube.ConfigMapVolume(app.configMap),
+config: kube.ConfigMapVolume(top.configMap),
 },
 containers_: {
 web: kube.Container("riot-web") {
@@ -91,6 +91,6 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 svc: ns.Contain(kube.Service("riot-web")) {
-target:: app.deployment,
+target:: top.deployment,
 },
 }

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image: error "cfg.image needs to be set",
 storageClassName: error "cfg.storrageClassName needs to be set",
@@ -24,7 +24,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 ns:: error "ns needs to be provided",
-local ns = app.ns,
+local ns = top.ns,
 postgres:: error "postgres needs to be provided",
 redis:: error "redis needs to be provided",
@@ -45,7 +45,7 @@ local kube = import "../../../kube/kube.libsonnet";
 signing_key_path: "/secrets/homeserver_signing_key",
 app_service_config_files: [
 "/appservices/%s/registration.yaml" % [k]
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 ],
 notify_appservices: cfg.appserviceWorker == false,
@@ -54,8 +54,8 @@ local kube = import "../../../kube/kube.libsonnet";
 # *some* federation, needs investigation...
 #send_federation: cfg.federationWorker == false,
 #federation_sender_instances: if cfg.federationWorker then [
-# "%s-%s" % [app.federationSenderWorker.deployment.metadata.name, idx]
-# for idx in std.range(0, app.federationSenderWorker.deployment.spec.replicas)
+# "%s-%s" % [top.federationSenderWorker.deployment.metadata.name, idx]
+# for idx in std.range(0, top.federationSenderWorker.deployment.spec.replicas)
 #] else [],
 } + (if cfg.cas.enable then {
 cas_config: {
@@ -74,7 +74,7 @@ local kube = import "../../../kube/kube.libsonnet";
 configMap: ns.Contain(kube.ConfigMap("synapse")) {
 data: {
-"homeserver.yaml": std.manifestYamlDoc(app.config),
+"homeserver.yaml": std.manifestYamlDoc(top.config),
 "log.config": importstr "synapse/log.config",
 },
 },
@@ -121,13 +121,13 @@ local kube = import "../../../kube/kube.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-config: kube.ConfigMapVolume(app.configMap),
+config: kube.ConfigMapVolume(top.configMap),
 secrets: { secret: { secretName: "synapse" } },
 } + {
 [k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 } + if worker.cfg.mountData then {
-data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+data: kube.PersistentVolumeClaimVolume(top.dataVolume),
 } else {},
 containers_: {
 web: kube.Container("synapse") {
@@ -153,18 +153,18 @@ local kube = import "../../../kube/kube.libsonnet";
 SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
 WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
-POSTGRES_PASSWORD: app.postgres.cfg.password,
-POSTGRES_USER: app.postgres.cfg.username,
-POSTGRES_DB: app.postgres.cfg.database,
-POSTGRES_HOST: app.postgres.cfg.host,
-POSTGRES_PORT: app.postgres.cfg.port,
+POSTGRES_PASSWORD: top.postgres.cfg.password,
+POSTGRES_USER: top.postgres.cfg.username,
+POSTGRES_DB: top.postgres.cfg.database,
+POSTGRES_HOST: top.postgres.cfg.host,
+POSTGRES_PORT: top.postgres.cfg.port,
-REDIS_PASSWORD: app.redis.cfg.password,
+REDIS_PASSWORD: top.redis.cfg.password,
 POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
 OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
 TURN_SHARED_SECRET: if cfg.coturn.enable then cfg.coturn.config.authSecret else "",
-X_SECRETS_CONFIG: std.manifestYamlDoc(app.secretsConfig),
+X_SECRETS_CONFIG: std.manifestYamlDoc(top.secretsConfig),
 X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
 },
 volumeMounts_: {
@@ -172,7 +172,7 @@ local kube = import "../../../kube/kube.libsonnet";
 secrets: { mountPath: "/secrets" },
 } + {
 [k]: { mountPath: "/appservices/%s" % [k] }
-for k in std.objectFields(app.appservices)
+for k in std.objectFields(top.appservices)
 } + if worker.cfg.mountData then {
 data: { mountPath: "/data" },
 } else {},
@@ -206,14 +206,14 @@ local kube = import "../../../kube/kube.libsonnet";
 # Synapse main process
 main: {
-deployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
+deployment: top.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
 cfg+: {
 localConfig: {
 # Following configuration values need to cause master
 # process restart.
-notify_appservices: app.config.notify_appservices,
-# send_federation: app.config.send_federation,
-# federation_sender_instances: app.config.federation_sender_instances,
+notify_appservices: top.config.notify_appservices,
+# send_federation: top.config.send_federation,
+# federation_sender_instances: top.config.federation_sender_instances,
 },
 resources+: {
@@ -231,10 +231,10 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 },
 svc: ns.Contain(kube.Service("synapse")) {
-target:: app.main.deployment,
+target:: top.main.deployment,
 },
 replicationSvc: ns.Contain(kube.Service("synapse-replication-master")) {
-target:: app.main.deployment,
+target:: top.main.deployment,
 spec+: {
 ports: [
 { port: 9093, name: 'replication', targetPort: 9093 },
@@ -245,7 +245,7 @@ local kube = import "../../../kube/kube.libsonnet";
 genericWorker: {
 # Synapse generic worker deployment
-deployment: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
+deployment: top.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
 cfg+: {
 localConfig+: {
 worker_listeners: [{
@@ -263,7 +263,7 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 },
 svc: ns.Contain(kube.Service("synapse-generic")) {
-target:: app.genericWorker.deployment,
+target:: top.genericWorker.deployment,
 },
 # Following paths can be handled by generic workers.
@@ -368,7 +368,7 @@ local kube = import "../../../kube/kube.libsonnet";
 # Synapse media worker. This handles access to uploads and media stored in app.dataVolume
 mediaWorker: {
-deployment: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
+deployment: top.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
 cfg+: {
 mountData: true,
 localConfig+: {
@@ -387,13 +387,13 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 },
 svc: ns.Contain(kube.Service("synapse-media")) {
-target:: app.mediaWorker.deployment,
+target:: top.mediaWorker.deployment,
 },
 },
 appserviceWorker: if cfg.appserviceWorker then {
 # Worker responsible for sending traffic to registered appservices
-deployment: app.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
+deployment: top.SynapseWorker("synapse-appservice", "synapse.app.appservice", kube.StatefulSet) {
 cfg+: {
 localConfig+: {
 worker_listeners: [{
@@ -413,7 +413,7 @@ local kube = import "../../../kube/kube.libsonnet";
 } else null,
 federationSenderWorker: if cfg.federationWorker then {
-deployment: app.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
+deployment: top.SynapseWorker("synapse-federation-sender", "synapse.app.federation_sender", kube.StatefulSet) {
 cfg+: {
 localConfig+: {
 worker_listeners: [{

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image: error "cfg.image must be set",
@@ -37,6 +37,6 @@ local kube = import "../../../kube/kube.libsonnet";
 },
 },
 svc: ns.Contain(kube.Service("wellknown")) {
-target:: app.deployment,
+target:: top.deployment,
 },
 }

View File

@@ -7,8 +7,8 @@ local policies = import "../../kube/policies.libsonnet";
 {
 onlyoffice:: {
-local oo = self,
-local cfg = oo.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "cfg.namespace must be set",
 image: "onlyoffice/documentserver:7.0.0.132",
@@ -58,7 +58,7 @@ local policies = import "../../kube/policies.libsonnet";
 },
 },
 volumes_: {
-data: kube.PersistentVolumeClaimVolume(oo.pvc),
+data: kube.PersistentVolumeClaimVolume(top.pvc),
 },
 },
 },
@@ -66,12 +66,12 @@ local policies = import "../../kube/policies.libsonnet";
 },
 svc: ns.Contain(kube.Service("documentserver")) {
-target:: oo.deploy,
+target:: top.deploy,
 },
 ingress: ns.Contain(kube.SimpleIngress("office")) {
 hosts:: [cfg.domain],
-target_service:: oo.svc,
+target_service:: top.svc,
 },
 // Needed because the documentserver runs its own supervisor, and:

View File

@@ -1,8 +1,8 @@
 local kube = import '../../../kube/hscloud.libsonnet';
 {
-local internet = self,
-local cfg = internet.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: "internet",
 appName: "internet-landing",
@@ -35,7 +35,7 @@ local kube = import '../../../kube/hscloud.libsonnet';
 },
 deployment: kube.Deployment("nginx") {
-metadata+: internet.metadata("nginx"),
+metadata+: top.metadata("nginx"),
 spec+: {
 replicas: 1,
 template+: {
@@ -55,13 +55,13 @@ local kube = import '../../../kube/hscloud.libsonnet';
 },
 svc: kube.Service("frontend") {
-metadata+: internet.metadata("frontend"),
-target:: internet.deployment,
+metadata+: top.metadata("frontend"),
+target:: top.deployment,
 },
 ingress: kube.SimpleIngress("frontend") {
 hosts:: [cfg.domain],
-target_service:: internet.svc,
-metadata+: internet.metadata("frontend"),
+target_service:: top.svc,
+metadata+: top.metadata("frontend"),
 },
 }

View File

@@ -1,8 +1,8 @@
 local kube = import '../../../kube/hscloud.libsonnet';
 {
-local speedtest = self,
-local cfg = speedtest.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: "speedtest",
 appName: "speedtest",
@@ -34,7 +34,7 @@ local kube = import '../../../kube/hscloud.libsonnet';
 },
 deployment: kube.Deployment("backend") {
-metadata+: speedtest.metadata("backend"),
+metadata+: top.metadata("backend"),
 spec+: {
 replicas: 1,
 template+: {
@@ -54,13 +54,13 @@ local kube = import '../../../kube/hscloud.libsonnet';
 },
 svc: kube.Service("public") {
-metadata+: speedtest.metadata("public"),
-target:: speedtest.deployment,
+metadata+: top.metadata("public"),
+target:: top.deployment,
 },
 ingress: kube.SimpleIngress("public") {
 hosts:: [cfg.domain],
-target_service:: speedtest.svc,
-metadata+: speedtest.metadata("public"),
+target_service:: top.svc,
+metadata+: top.metadata("public"),
 },
 }

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/hscloud.libsonnet";
 {
-local gerrit = self,
-local cfg = gerrit.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "namespace must be set",
@@ -63,8 +63,8 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 },
-configmap: kube.ConfigMap(gerrit.name("gerrit")) {
-metadata+: gerrit.metadata("configmap"),
+configmap: kube.ConfigMap(top.name("gerrit")) {
+metadata+: top.metadata("configmap"),
 data: {
 "gerrit.config": |||
 [gerrit]
@@ -139,8 +139,8 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 volumes: {
-[name]: kube.PersistentVolumeClaim(gerrit.name(name)) {
-metadata+: gerrit.metadata("storage"),
+[name]: kube.PersistentVolumeClaim(top.name(name)) {
+metadata+: top.metadata("storage"),
 storage:: cfg.storageSize[name],
 storageClass:: cfg.storageClassName,
 }
@@ -156,14 +156,14 @@ local kube = import "../../../kube/hscloud.libsonnet";
 // SecureSecret gets mounted here
 secure: { mountPath: "/var/gerrit-secure" },
 },
-keys: kube.Secret(gerrit.name("keys")) {
-metadata+: gerrit.metadata("deployment"),
+keys: kube.Secret(top.name("keys")) {
+metadata+: top.metadata("deployment"),
 //data_: {
 // FORGEJO_TOKEN: "fill me when deploying, TODO(q3k): god damn secrets",
 //},
 },
-deployment: kube.Deployment(gerrit.name("gerrit")) {
-metadata+: gerrit.metadata("deployment"),
+deployment: kube.Deployment(top.name("gerrit")) {
+metadata+: top.metadata("deployment"),
 spec+: {
 replicas: 1,
 template+: {
@@ -172,21 +172,21 @@ local kube = import "../../../kube/hscloud.libsonnet";
 fsGroup: 1000, # gerrit uid
 },
 volumes_: {
-config: kube.ConfigMapVolume(gerrit.configmap),
+config: kube.ConfigMapVolume(top.configmap),
 secure: { secret: { secretName: cfg.secureSecret} },
 } {
-[name]: kube.PersistentVolumeClaimVolume(gerrit.volumes[name])
+[name]: kube.PersistentVolumeClaimVolume(top.volumes[name])
 for name in ["etc", "git", "index", "cache", "db"]
 },
 containers_: {
-gerrit: kube.Container(gerrit.name("gerrit")) {
+gerrit: kube.Container(top.name("gerrit")) {
 image: cfg.image,
 ports_: {
 http: { containerPort: 8080 },
 ssh: { containerPort: 29418 },
 },
 env_: {
-FORGEJO_TOKEN: { secretKeyRef: { name: gerrit.keys.metadata.name, key: "FORGEJO_TOKEN" }},
+FORGEJO_TOKEN: { secretKeyRef: { name: top.keys.metadata.name, key: "FORGEJO_TOKEN" }},
 },
 resources: cfg.resources,
 volumeMounts_: volumeMounts,
@@ -206,9 +206,9 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 },
-svc: kube.Service(gerrit.name("gerrit")) {
-metadata+: gerrit.metadata("service"),
-target:: gerrit.deployment,
+svc: kube.Service(top.name("gerrit")) {
+metadata+: top.metadata("service"),
+target:: top.deployment,
 spec+: {
 ports: [
 { name: "http", port: 80, targetPort: 8080, protocol: "TCP" },
@@ -218,9 +218,9 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 },
-ingress: kube.SimpleIngress(gerrit.name("gerrit")) {
+ingress: kube.SimpleIngress(top.name("gerrit")) {
 hosts:: [cfg.domain],
-target_service:: gerrit.svc,
-metadata+: gerrit.metadata("ingress"),
+target_service:: top.svc,
+metadata+: top.metadata("ingress"),
 },
 }

View File

@@ -2,8 +2,8 @@ local kube = import "../../kube/hscloud.libsonnet";
 local postgres = import "../../kube/postgres.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: "redmine",
@@ -53,7 +53,7 @@ local postgres = import "../../kube/postgres.libsonnet";
 # (ie. removes surrounding quotes)
 rubyYaml(obj, symbols):: std.foldr(function (symbol, str) std.strReplace(str, '"%s"' % symbol, symbol), symbols, std.manifestYamlDoc(obj)),
-local ns = kube.Namespace(app.cfg.namespace),
+local ns = kube.Namespace(cfg.namespace),
 postgres: postgres {
 cfg+: {
@@ -108,7 +108,7 @@ local postgres = import "../../kube/postgres.libsonnet";
 REDMINE_S3_REGION: cfg.storage.region,
 REDMINE_MAILING_PASSWORD: cfg.mailing.password,
-X_EXTRA_CONFIGURATION: app.rubyYaml({
+X_EXTRA_CONFIGURATION: top.rubyYaml({
 production: {
 email_delivery: {
 delivery_method: ":smtp",
@@ -127,12 +127,12 @@ local postgres = import "../../kube/postgres.libsonnet";
 },
 svc: ns.Contain(kube.Service("redmine")) {
-target:: app.deployment,
+target:: top.deployment,
 },
 ingress: ns.Contain(kube.SimpleIngress("redmine")) {
 hosts:: [cfg.domain],
-target_service:: app.svc,
+target_service:: top.svc,
 },
 b: (if std.length(cfg.b.domains) > 0 then {
@@ -157,11 +157,11 @@ local postgres = import "../../kube/postgres.libsonnet";
 },
 },
 svc: ns.Contain(kube.Service("b")) {
-target:: app.b.deployment,
+target:: top.b.deployment,
 },
 ingress: ns.Contain(kube.SimpleIngress("b")) {
 hosts:: cfg.b.domains,
-target_service:: app.b.svc,
+target_service:: top.b.svc,
 },
 } else {}),

View File

@@ -4,8 +4,8 @@ local kube = import "../../../kube/kube.libsonnet";
 local proxy = import "proxy.libsonnet";
 {
-local factorio = self,
-local cfg = factorio.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: error "namespace must be set",
@@ -75,26 +75,26 @@ local proxy = import "proxy.libsonnet";
 metadata:: {
 namespace: cfg.namespace,
 labels: {
-"app.kubernetes.io/name": factorio.makeName("factorio"),
+"app.kubernetes.io/name": top.makeName("factorio"),
 "app.kubernetes.io/managed-by": "kubecfg",
 "app.kubernetes.io/component": "factorio",
 },
 },
-volumeClaimMods: kube.PersistentVolumeClaim(factorio.makeName("factorio-mods")) {
-metadata+: factorio.metadata,
+volumeClaimMods: kube.PersistentVolumeClaim(top.makeName("factorio-mods")) {
+metadata+: top.metadata,
 storage:: "1Gi",
 storageClass:: cfg.storageClassName,
 },
-volumeClaimData: kube.PersistentVolumeClaim(factorio.makeName("factorio")) {
-metadata+: factorio.metadata,
+volumeClaimData: kube.PersistentVolumeClaim(top.makeName("factorio")) {
+metadata+: top.metadata,
 storage:: "5Gi",
 storageClass:: cfg.storageClassName,
 },
-configMap: kube.ConfigMap(factorio.makeName("config")) {
-metadata+: factorio.metadata,
+configMap: kube.ConfigMap(top.makeName("config")) {
+metadata+: top.metadata,
 data: {
 "mods.pb.text": std.join("\n", [
 "mod { name: \"%s\" version: \"%s\" }" % [m.name, m.version],
@@ -106,16 +106,16 @@ local proxy = import "proxy.libsonnet";
 },
 },
-deployment: kube.Deployment(factorio.makeName("factorio")) {
-metadata+: factorio.metadata,
+deployment: kube.Deployment(top.makeName("factorio")) {
+metadata+: top.metadata,
 spec+: {
 replicas: 1,
 template+: {
 spec+: {
 volumes_: {
-data: kube.PersistentVolumeClaimVolume(factorio.volumeClaimData),
-mods: kube.PersistentVolumeClaimVolume(factorio.volumeClaimMods),
-config: kube.ConfigMapVolume(factorio.configMap),
+data: kube.PersistentVolumeClaimVolume(top.volumeClaimData),
+mods: kube.PersistentVolumeClaimVolume(top.volumeClaimMods),
+config: kube.ConfigMapVolume(top.configMap),
 },
 initContainers_: {
 modproxy: kube.Container("modproxy") {
@@ -134,7 +134,7 @@ local proxy = import "proxy.libsonnet";
 },
 },
 containers_: {
-factorio: kube.Container(factorio.makeName("factorio")) {
+factorio: kube.Container(top.makeName("factorio")) {
 image: cfg.image,
 args: [
 "/entrypoint.sh",
@@ -160,14 +160,14 @@ local proxy = import "proxy.libsonnet";
 },
 },
 },
-svc: kube.Service(factorio.makeName("factorio")) {
-metadata+: factorio.metadata {
+svc: kube.Service(top.makeName("factorio")) {
+metadata+: top.metadata {
 // hack - have to keep existing naming scheme otherwise we'd lose addresses
 labels: {
 "app.kubernetes.io/name": cfg.appName,
 },
 },
-target:: factorio.deployment,
+target:: top.deployment,
 spec+: {
 ports: [
 { name: "client", port: 34197, targetPort: 34197, protocol: "UDP" },

View File

@@ -1,8 +1,8 @@
 local kube = import "../../../kube/kube.libsonnet";
 {
-local proxy = self,
-local cfg = proxy.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 image:: "registry.k0.hswaw.net/games/factorio/modproxy:1589157915-eafe7be328477e8a6590c4210466ef12901f1b9a",
@@ -24,7 +24,7 @@ local kube = import "../../../kube/kube.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-cas: kube.PersistentVolumeClaimVolume(proxy.pvc),
+cas: kube.PersistentVolumeClaimVolume(top.pvc),
 },
 containers_: {
 proxy: kube.Container("proxy") {
@@ -51,7 +51,7 @@ local kube = import "../../../kube/kube.libsonnet";
 metadata+: {
 namespace: "factorio",
 },
-target:: proxy.deploy,
+target:: top.deploy,
 spec+: {
 ports: [
 { name: "client", port: 4200, targetPort: 4200, protocol: "TCP" },

View File

@@ -11,7 +11,7 @@ local postgres = import "../../kube/postgres.libsonnet";
 local redis = import "../../kube/redis.libsonnet";
 {
-local app = self,
+local top = self,
 local cfg = self.cfg,
 cfg:: {
@@ -69,7 +69,7 @@ local redis = import "../../kube/redis.libsonnet";
 template+: {
 spec+: {
 volumes_: {
-data: kube.PersistentVolumeClaimVolume(app.dataVolume),
+data: kube.PersistentVolumeClaimVolume(top.dataVolume),
 },
 securityContext: {
@@ -131,13 +131,13 @@ local redis = import "../../kube/redis.libsonnet";
 PAPERLESS_SECRET_KEY: { secretKeyRef: { name: "paperless", key: "secret_key" } },
-A_REDIS_PASSWORD: app.redis.cfg.password,
+A_REDIS_PASSWORD: top.redis.cfg.password,
 PAPERLESS_REDIS: "redis://:$(A_REDIS_PASSWORD)@redis:6379",
 PAPERLESS_DBHOST: "postgres",
-PAPERLESS_DBNAME: app.postgres.cfg.database,
-PAPERLESS_DBUSER: app.postgres.cfg.username,
-PAPERLESS_DBPASS: app.postgres.cfg.password,
+PAPERLESS_DBNAME: top.postgres.cfg.database,
+PAPERLESS_DBUSER: top.postgres.cfg.username,
+PAPERLESS_DBPASS: top.postgres.cfg.password,
 PAPERLESS_ENABLE_HTTP_REMOTE_USER: "true",
 PAPERLESS_HTTP_REMOTE_USER_HEADER_NAME: "HTTP_X_FORWARDED_USER",
@@ -162,11 +162,11 @@ local redis = import "../../kube/redis.libsonnet";
 },
 service: ns.Contain(kube.Service("paperless")) {
-target:: app.deploy,
+target:: top.deploy,
 },
 ingress: ns.Contain(kube.SimpleIngress("paperless")) {
 hosts:: [cfg.domain],
-target_service:: app.service,
+target_service:: top.service,
 },
 }

View File

@@ -3,8 +3,8 @@
 local kube = import "../../../kube/hscloud.libsonnet";
 {
-local app = self,
-local cfg = app.cfg,
+local top = self,
+local cfg = top.cfg,
 cfg:: {
 namespace: "sso",
@@ -19,7 +19,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 },
-local ns = kube.Namespace(app.cfg.namespace),
+local ns = kube.Namespace(top.cfg.namespace),
 deployment: ns.Contain(kube.Deployment("sso")) {
 spec+: {
@@ -95,7 +95,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 svc: ns.Contain(kube.Service("sso")) {
-target:: app.deployment,
+target:: top.deployment,
 spec+: {
 ports: [
 { name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
@@ -106,6 +106,6 @@ local kube = import "../../../kube/hscloud.libsonnet";
 ingress: ns.Contain(kube.SimpleIngress("sso")) {
 hosts:: [cfg.domain],
-target_service:: app.svc,
+target_service:: top.svc,
 },
 }