
app/matrix: split matrix-ng into submodules, use kube.Namespace.Contain

Splitting matrix-ng into multiple submodules changes some keys that
might have been used for homeserver/riot configuration customization.

Migration to kube.Namespace.Contain has also changed Deployment
selectors (immutable fields), so the affected Deployments need to be
deleted manually first.

This is, as always, documented in the lib/matrix-ng.libsonnet header.

Change-Id: I39a745ee27e3c55ec748818b9cf9b4e8ba1d2df5
master
informatic 2021-01-31 10:35:38 +01:00
parent 8ec865728e
commit b67ae4893c
6 changed files with 510 additions and 404 deletions
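As a sketch, assuming a deployment that customized the old top-level
keys, the renames documented in this commit translate roughly like
this (override values are hypothetical):

    // before this commit
    matrix {
        riotConfig+:: { default_theme: "dark" },
        synapseConfig+:: { enable_registration: false },
    }

    // after this commit
    matrix {
        riot+: { config+:: { default_theme: "dark" } },
        synapse+: { config+:: { enable_registration: false } },
    }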

app/matrix/lib/cas.libsonnet

@@ -0,0 +1,48 @@
local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
image: error "cfg.image must be set",
# webDomain is the domain name at which matrix instance/cas proxy is served
webDomain: error "cfg.webDomain must be set",
oauth2: error "cfg.oauth2 must be set",
},
ns:: error "ns needs to be a kube.Namespace object",
deployment: app.ns.Contain(kube.Deployment("oauth2-cas-proxy")) {
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
proxy: kube.Container("oauth2-cas-proxy") {
image: cfg.image,
ports_: {
http: { containerPort: 5000 },
},
env_: {
BASE_URL: "https://%s" % [cfg.webDomain],
SERVICE_URL: "https://%s" % [cfg.webDomain],
OAUTH2_CLIENT: cfg.oauth2.clientID,
OAUTH2_SECRET: cfg.oauth2.clientSecret,
OAUTH2_SCOPE: cfg.oauth2.scope,
OAUTH2_AUTHORIZE: cfg.oauth2.authorizeURL,
OAUTH2_TOKEN: cfg.oauth2.tokenURL,
OAUTH2_USERINFO: cfg.oauth2.userinfoURL,
},
},
},
},
},
},
},
svc: app.ns.Contain(kube.Service("oauth2-cas-proxy")) {
target_pod:: app.deployment.spec.template,
},
}
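A minimal usage sketch of this new submodule (all values hypothetical;
ns.Contain is assumed to inject metadata.namespace plus the standard
labels previously set by the removed app.metadata() helper):

    local kube = import "../../../kube/kube.libsonnet";
    local cas = import "cas.libsonnet";

    cas {
        ns: kube.Namespace("matrix"),
        cfg+: {
            image: "registry.example.com/oauth2-cas-proxy:v1",
            webDomain: "matrix.example.com",
            oauth2: {
                clientID: "oauth2-cas-proxy",
                clientSecret: "some-client-secret",
                scope: "openid profile email",
                authorizeURL: "https://sso.example.com/oauth/authorize",
                tokenURL: "https://sso.example.com/oauth/token",
                userinfoURL: "https://sso.example.com/oauth/userinfo",
            },
        },
    }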

app/matrix/lib/matrix-ng.libsonnet

@@ -15,6 +15,14 @@
# kubectl -n $ns edit secret synapse
# # ...add homeserver_signing_key, redis_password and worker_replication_secret keys
#
# Additionally some resources need to be explicitly removed due to
# label/annotation changes:
# kubectl -n $ns delete deployment riot-web oauth2-cas-proxy wellknown synapse
#
# Some service configuration customization fields have been renamed:
# .riotConfig → .riot.config
# .synapseConfig → .synapse.config
#
# Sequencing appservices is fun. The appservice needs to run first (for
# instance, via a bootstrap job), and on startup it will spit out a
# registration file. This registration file then needs to be fed to synapse -
@@ -32,6 +40,11 @@ local kube = import "../../../kube/kube.libsonnet";
local postgres = import "../../../kube/postgres.libsonnet";
local redis = import "../../../kube/redis.libsonnet";
local riot = import "./riot.libsonnet";
local cas = import "./cas.libsonnet";
local wellKnown = import "./wellknown.libsonnet";
local synapse = import "./synapse.libsonnet";
{
local app = self,
local cfg = app.cfg,
@@ -104,15 +117,6 @@ local redis = import "../../../kube/redis.libsonnet";
wellKnown: false,
},
metadata(component):: {
namespace: cfg.namespace,
labels: {
"app.kubernetes.io/name": "matrix",
"app.kubernetes.io/managed-by": "kubecfg",
"app.kubernetes.io/component": component,
},
},
namespace: kube.Namespace(cfg.namespace),
postgres3: postgres {
@@ -138,405 +142,47 @@ local redis = import "../../../kube/redis.libsonnet";
},
},
dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") {
metadata+: app.metadata("synapse-data"),
spec+: {
storageClassName: cfg.storageClassName,
accessModes: [ "ReadWriteOnce" ],
resources: {
requests: {
storage: "50Gi",
},
},
riot: riot {
ns: app.namespace,
cfg+: {
webDomain: cfg.webDomain,
serverName: cfg.serverName,
image: cfg.images.riot,
},
},
// homeserver.yaml that will be used to run synapse (in synapseConfigMap).
// This is based off of //app/matrix/lib/synapse/homeserver.yaml with some fields overridden per
// deployment.
synapseConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
server_name: cfg.serverName,
public_baseurl: "https://%s" % [cfg.webDomain],
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
for k in std.objectFields(app.appservices)
],
} + (if cfg.cas.enable then {
cas_config: {
enabled: true,
server_url: "https://%s/_cas" % [cfg.webDomain],
service_url: "https://%s" % [cfg.webDomain],
},
} else {}),
synapseConfigMap: kube.ConfigMap("synapse") {
metadata+: app.metadata("synapse"),
data: {
"homeserver.yaml": std.manifestYamlDoc(app.synapseConfig),
"log.config": importstr "synapse/log.config",
},
},
// homeserver-secrets.yaml contains all the templated secret variables from
// base homeserver.yaml passed as yaml-encoded environment variable.
// $(ENVVAR)-encoded variables are resolved by Kubernetes on pod startup
synapseSecretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
} + (if cfg.oidc.enable then {
oidc_config: cfg.oidc.config {
enabled: true,
client_secret: "$(OIDC_CLIENT_SECRET)",
},
} else {}),
cas: if cfg.cas.enable && cfg.oidc.enable then error "cfg.cas.enable and cfg.oidc.enable options are exclusive"
else if cfg.cas.enable then {
deployment: kube.Deployment("oauth2-cas-proxy") {
metadata+: app.metadata("oauth2-cas-proxy"),
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
proxy: kube.Container("oauth2-cas-proxy") {
image: cfg.images.casProxy,
ports_: {
http: { containerPort: 5000 },
},
env_: {
BASE_URL: "https://%s" % [cfg.webDomain],
SERVICE_URL: "https://%s" % [cfg.webDomain],
OAUTH2_CLIENT: cfg.cas.oauth2.clientID,
OAUTH2_SECRET: cfg.cas.oauth2.clientSecret,
OAUTH2_SCOPE: cfg.cas.oauth2.scope,
OAUTH2_AUTHORIZE: cfg.cas.oauth2.authorizeURL,
OAUTH2_TOKEN: cfg.cas.oauth2.tokenURL,
OAUTH2_USERINFO: cfg.cas.oauth2.userinfoURL,
},
},
},
},
},
},
},
svc: kube.Service("oauth2-cas-proxy") {
metadata+: app.metadata("oauth2-cas-proxy"),
target_pod:: app.cas.deployment.spec.template,
},
},
# Synapse process Deployment/StatefulSet base resource.
SynapseWorker(name, workerType, builder):: builder(name) {
local worker = self,
cfg:: {
# Configuration customization. Can contain environment substitution
# syntax, as used in worker_name value.
localConfig: {
worker_app: workerType,
worker_name: "$(POD_NAME)",
# The replication listener on the main synapse process.
worker_replication_host: "synapse-replication-master",
worker_replication_http_port: 9093,
},
# Mount app.dataVolume in /data
mountData: false,
},
metadata+: app.metadata(name),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.synapseConfigMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: kube.PersistentVolumeClaimVolume(app.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
image: cfg.images.synapse,
command: [
"/bin/sh", "-c", |||
set -e
echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
|||
],
ports_: {
http: { containerPort: 8008 },
metrics: { containerPort: 9092 },
replication: { containerPort: 9093 },
},
env_: {
SYNAPSE_WORKER: workerType,
SYNAPSE_MACAROON_SECRET_KEY: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
SYNAPSE_REGISTRATION_SHARED_SECRET: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
WORKER_REPLICATION_SECRET: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } },
POSTGRES_PASSWORD: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
REDIS_PASSWORD: { secretKeyRef: { name: "synapse", key: "redis_password" } },
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
X_SECRETS_CONFIG: std.manifestYamlDoc(app.synapseSecretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
config: { mountPath: "/conf", },
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
},
},
securityContext: {
runAsUser: 991,
runAsGroup: 991,
fsGroup: 991,
},
},
},
},
},
# Synapse main process
synapseDeployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
else if cfg.cas.enable then cas {
ns: app.namespace,
cfg+: {
# Main process doesn't need any configuration customization
localConfig: {}
},
},
synapseSvc: kube.Service("synapse") {
metadata+: app.metadata("synapse"),
target_pod:: app.synapseDeployment.spec.template,
},
synapseReplicationSvc: kube.Service("synapse-replication-master") {
metadata+: app.metadata("synapse-replication-master"),
target_pod:: app.synapseDeployment.spec.template,
spec+: {
ports: [
{ port: 9093, name: 'replication', targetPort: 9093 },
],
image: cfg.images.casProxy,
webDomain: cfg.webDomain,
oauth2: cfg.cas.oauth2,
},
},
# Synapse generic worker deployment
synapseGenericWorker: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
wellKnown: if cfg.wellKnown then wellKnown {
ns: app.namespace,
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["client", "federation"]}],
}],
},
},
},
synapseGenericSvc: kube.Service("synapse-generic") {
metadata+: app.metadata("synapse-generic"),
target_pod:: app.synapseGenericWorker.spec.template,
},
# Following paths can be handled by generic workers.
# See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
synapseGenericWorkerPaths:: [
"/_matrix/client/(v2_alpha|r0)/sync",
"/_matrix/client/(api/v1|v2_alpha|r0)/events",
"/_matrix/client/(api/v1|r0)/initialSync",
"/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync",
"/_matrix/client/(api/v1|r0|unstable)/publicRooms",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state",
"/_matrix/client/(api/v1|r0|unstable)/account/3pid",
"/_matrix/client/(api/v1|r0|unstable)/keys/query",
"/_matrix/client/(api/v1|r0|unstable)/keys/changes",
"/_matrix/client/versions",
"/_matrix/client/(api/v1|r0|unstable)/voip/turnServer",
"/_matrix/client/(api/v1|r0|unstable)/joined_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups/",
# Blocked by https://github.com/matrix-org/synapse/issues/8966
# "/_matrix/client/(api/v1|r0|unstable)/login",
# "/_matrix/client/(r0|unstable)/register",
# "/_matrix/client/(r0|unstable)/auth/.*/fallback/web",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
"/_matrix/client/(api/v1|r0|unstable)/join/",
"/_matrix/client/(api/v1|r0|unstable)/profile/",
"/_matrix/federation/v1/event/",
"/_matrix/federation/v1/state/",
"/_matrix/federation/v1/state_ids/",
"/_matrix/federation/v1/backfill/",
"/_matrix/federation/v1/get_missing_events/",
"/_matrix/federation/v1/publicRooms",
"/_matrix/federation/v1/query/",
"/_matrix/federation/v1/make_join/",
"/_matrix/federation/v1/make_leave/",
"/_matrix/federation/v1/send_join/",
"/_matrix/federation/v2/send_join/",
"/_matrix/federation/v1/send_leave/",
"/_matrix/federation/v2/send_leave/",
"/_matrix/federation/v1/invite/",
"/_matrix/federation/v2/invite/",
"/_matrix/federation/v1/query_auth/",
"/_matrix/federation/v1/event_auth/",
"/_matrix/federation/v1/exchange_third_party_invite/",
"/_matrix/federation/v1/user/devices/",
"/_matrix/federation/v1/get_groups_publicised",
"/_matrix/key/v2/query",
"/_matrix/federation/v1/send/",
],
# Synapse media worker. This handles access to uploads and media stored in app.dataVolume
synapseMediaWorker: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["media"]}],
}],
},
},
},
synapseMediaSvc: kube.Service("synapse-media") {
metadata+: app.metadata("synapse-media"),
target_pod:: app.synapseMediaWorker.spec.template,
},
riotConfig:: {
"default_hs_url": "https://%s" % [cfg.webDomain],
"disable_custom_urls": false,
"disable_guests": false,
"disable_login_language_selector": false,
"disable_3pid_login": true,
"brand": "Riot",
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html",
"bug_report_endpoint_url": "https://riot.im/bugreports/submit",
"features": {
"feature_groups": "labs",
"feature_pinning": "labs",
"feature_reactions": "labs"
},
"default_federate": true,
"default_theme": "light",
"roomDirectory": {
"servers": [
cfg.serverName,
]
},
"welcomeUserId": "@riot-bot:matrix.org",
"enable_presence_by_hs_url": {
"https://matrix.org": false
}
},
riotConfigMap: kube.ConfigMap("riot-web-config") {
metadata+: app.metadata("riot-web-config"),
data: {
"config.json": std.manifestJsonEx(app.riotConfig, ""),
// Standard nginx.conf, made to work when running as unprivileged user.
"nginx.conf": importstr "riot-nginx.conf",
},
},
riotDeployment: kube.Deployment("riot-web") {
metadata+: app.metadata("riot-web"),
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.riotConfigMap),
},
containers_: {
web: kube.Container("riot-web") {
image: cfg.images.riot,
ports_: {
http: { containerPort: 8080 },
},
volumeMounts: [
{
name: "config",
mountPath: "/app/config.json",
subPath: "config.json",
},
{
name: "config",
mountPath: "/etc/nginx/nginx.conf",
subPath: "nginx.conf",
},
],
},
},
securityContext: {
// nginx:nginx
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
riotSvc: kube.Service("riot-web") {
metadata+: app.metadata("riot-web"),
target_pod:: app.riotDeployment.spec.template,
},
wellKnown: if cfg.wellKnown then {
deployment: kube.Deployment("wellknown") {
metadata+: app.metadata("wellknown"),
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
web: kube.Container("wellknown") {
image: cfg.images.wellKnown,
ports_: {
http: { containerPort: 8080 },
},
command: ["/app/matrix/wellknown"],
args: ["-hspki_disable", "-domain", cfg.webDomain],
},
},
securityContext: {
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
svc: kube.Service("wellknown") {
metadata+: app.metadata("wellknown"),
target_pod:: app.wellKnown.deployment.spec.template,
image: cfg.images.wellKnown,
webDomain: cfg.webDomain,
},
} else {},
synapse: synapse {
ns: app.namespace,
postgres: app.postgres3,
redis: app.redis,
appservices: app.appservices,
cfg+: app.cfg {
image: app.cfg.images.synapse,
macaroonSecretKey: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } },
registrationSharedSecret: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } },
workerReplicationSecret: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } },
},
},
// Any appservice you add here will require an appservice-X-registration
// secret containing a registration.yaml file. Adding something to this
// dictionary will cause Synapse to not start until that secret is
@@ -545,8 +191,8 @@ local redis = import "../../../kube/redis.libsonnet";
// until it spits you a registration YAML and you feed that to a secret.
appservices: {},
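// For example (hypothetical appservice name), declaring:
//   appservices: { telegram: {} },
// requires a secret named appservice-telegram-registration whose
// registration.yaml ends up mounted at
// /appservices/telegram/registration.yaml.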
ingress: kube.Ingress("matrix") {
metadata+: app.metadata("matrix") {
ingress: app.namespace.Contain(kube.Ingress("matrix")) {
metadata+: {
annotations+: {
"kubernetes.io/tls-acme": "true",
"certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
@@ -566,15 +212,15 @@ local redis = import "../../../kube/redis.libsonnet";
host: cfg.webDomain,
http: {
paths: [
{ path: path, backend: app.synapseGenericSvc.name_port }
for path in app.synapseGenericWorkerPaths
{ path: path, backend: app.synapse.genericWorker.svc.name_port }
for path in app.synapse.genericWorker.paths
] + [
{ path: "/", backend: app.riotSvc.name_port },
{ path: "/_matrix/media/", backend: app.synapseMediaSvc.name_port },
{ path: "/_matrix/", backend: app.synapseSvc.name_port },
{ path: "/", backend: app.riot.svc.name_port },
{ path: "/_matrix/media/", backend: app.synapse.mediaWorker.svc.name_port },
{ path: "/_matrix/", backend: app.synapse.main.svc.name_port },
# Used by OpenID Connect login flow
{ path: "/_synapse/", backend: app.synapseSvc.name_port },
{ path: "/_synapse/", backend: app.synapse.main.svc.name_port },
] + (if cfg.cas.enable then [
{ path: "/_cas", backend: app.cas.svc.name_port },
] else []) + (if cfg.wellKnown then [

app/matrix/lib/riot.libsonnet

@@ -0,0 +1,95 @@
local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
# webDomain is the domain name at which element will run
webDomain: error "cfg.webDomain must be set",
# serverName is the server part of the MXID this homeserver will cover
serverName: error "cfg.serverName must be set",
image: error "cfg.image must be set",
},
ns:: error "ns needs to be a kube.Namespace object",
config:: {
"default_hs_url": "https://%s" % [cfg.webDomain],
"disable_custom_urls": false,
"disable_guests": false,
"disable_login_language_selector": false,
"disable_3pid_login": true,
"brand": "Riot",
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html",
"bug_report_endpoint_url": "https://riot.im/bugreports/submit",
"features": {
"feature_groups": "labs",
"feature_pinning": "labs",
"feature_reactions": "labs"
},
"default_federate": true,
"default_theme": "light",
"roomDirectory": {
"servers": [
cfg.serverName,
]
},
"welcomeUserId": "@riot-bot:matrix.org",
"enable_presence_by_hs_url": {
"https://matrix.org": false
}
},
configMap: app.ns.Contain(kube.ConfigMap("riot-web-config")) {
data: {
"config.json": std.manifestJsonEx(app.config, ""),
// Standard nginx.conf, made to work when running as unprivileged user.
"nginx.conf": importstr "riot/nginx.conf",
},
},
deployment: app.ns.Contain(kube.Deployment("riot-web")) {
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.configMap),
},
containers_: {
web: kube.Container("riot-web") {
image: cfg.image,
ports_: {
http: { containerPort: 8080 },
},
volumeMounts: [
{
name: "config",
mountPath: "/app/config.json",
subPath: "config.json",
},
{
name: "config",
mountPath: "/etc/nginx/nginx.conf",
subPath: "nginx.conf",
},
],
},
},
securityContext: {
// nginx:nginx
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
svc: app.ns.Contain(kube.Service("riot-web")) {
target_pod:: app.deployment.spec.template,
},
}
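A usage sketch (hypothetical values) showing a per-deployment tweak via
the hidden config field, i.e. the .riot.config path that the
matrix-ng.libsonnet header documents as the successor of .riotConfig:

    local kube = import "../../../kube/kube.libsonnet";
    local riot = import "riot.libsonnet";

    riot {
        ns: kube.Namespace("matrix"),
        cfg+: {
            webDomain: "matrix.example.com",
            serverName: "example.com",
            image: "registry.example.com/riot-web:v1",
        },
        config+:: {
            default_theme: "dark",
        },
    }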

app/matrix/lib/synapse.libsonnet

@@ -0,0 +1,276 @@
local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
image: error "cfg.image needs to be set",
storageClassName: error "cfg.storageClassName needs to be set",
# webDomain is the domain name at which synapse instance will run
webDomain: error "cfg.webDomain must be set",
# serverName is the server part of the MXID this homeserver will cover
serverName: error "cfg.serverName must be set",
cas: { enable: false },
oidc: { enable: false },
macaroonSecretKey: error "cfg.macaroonSecretKey needs to be set",
registrationSharedSecret: error "cfg.registrationSharedSecret needs to be set",
workerReplicationSecret: error "cfg.workerReplicationSecret needs to be set",
},
ns:: error "ns needs to be provided",
postgres:: error "postgres needs to be provided",
redis:: error "redis needs to be provided",
// See matrix-ng.libsonnet for description
appservices:: error "appservices need to be provided",
dataVolume: app.ns.Contain(kube.PersistentVolumeClaim("synapse-data-waw3")) {
spec+: {
storageClassName: cfg.storageClassName,
accessModes: [ "ReadWriteOnce" ],
resources: {
requests: {
storage: "50Gi",
},
},
},
},
// homeserver.yaml that will be used to run synapse (in configMap).
// This is based off of //app/matrix/lib/synapse/homeserver.yaml with some fields overridden per
// deployment.
config:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] {
server_name: cfg.serverName,
public_baseurl: "https://%s" % [cfg.webDomain],
signing_key_path: "/secrets/homeserver_signing_key",
app_service_config_files: [
"/appservices/%s/registration.yaml" % [k]
for k in std.objectFields(app.appservices)
],
} + (if cfg.cas.enable then {
cas_config: {
enabled: true,
server_url: "https://%s/_cas" % [cfg.webDomain],
service_url: "https://%s" % [cfg.webDomain],
},
} else {}),
configMap: app.ns.Contain(kube.ConfigMap("synapse")) {
data: {
"homeserver.yaml": std.manifestYamlDoc(app.config),
"log.config": importstr "synapse/log.config",
},
},
// homeserver-secrets.yaml contains all the templated secret variables from
// base homeserver.yaml passed as yaml-encoded environment variable.
// $(ENVVAR)-encoded variables are resolved by Kubernetes on pod startup
secretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] {
} + (if cfg.oidc.enable then {
oidc_config: cfg.oidc.config {
enabled: true,
client_secret: "$(OIDC_CLIENT_SECRET)",
},
} else {}),
# Synapse process Deployment/StatefulSet base resource.
SynapseWorker(name, workerType, builder):: app.ns.Contain(builder(name)) {
local worker = self,
cfg:: {
# Configuration customization. Can contain environment substitution
# syntax, as used in worker_name value.
localConfig: {
worker_app: workerType,
worker_name: "$(POD_NAME)",
# The replication listener on the main synapse process.
worker_replication_host: "synapse-replication-master",
worker_replication_http_port: 9093,
},
# Mount app.dataVolume in /data
mountData: false,
},
spec+: {
replicas: 1,
template+: {
spec+: {
volumes_: {
config: kube.ConfigMapVolume(app.configMap),
secrets: { secret: { secretName: "synapse" } },
} + {
[k]: { secret: { secretName: "appservice-%s-registration" % [k] } }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: kube.PersistentVolumeClaimVolume(app.dataVolume),
} else {},
containers_: {
web: kube.Container("synapse") {
image: cfg.image,
command: [
"/bin/sh", "-c", |||
set -e
echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml
echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml
exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml
|||
],
ports_: {
http: { containerPort: 8008 },
metrics: { containerPort: 9092 },
replication: { containerPort: 9093 },
},
env_: {
SYNAPSE_WORKER: workerType,
SYNAPSE_MACAROON_SECRET_KEY: cfg.macaroonSecretKey,
SYNAPSE_REGISTRATION_SHARED_SECRET: cfg.registrationSharedSecret,
WORKER_REPLICATION_SECRET: cfg.workerReplicationSecret,
POSTGRES_PASSWORD: app.postgres.cfg.password,
REDIS_PASSWORD: app.redis.cfg.password,
POD_NAME: { fieldRef: { fieldPath: "metadata.name" } },
OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "",
X_SECRETS_CONFIG: std.manifestYamlDoc(app.secretsConfig),
X_LOCAL_CONFIG: std.manifestYamlDoc(worker.cfg.localConfig),
},
volumeMounts_: {
config: { mountPath: "/conf", },
secrets: { mountPath: "/secrets" },
} + {
[k]: { mountPath: "/appservices/%s" % [k] }
for k in std.objectFields(app.appservices)
} + if worker.cfg.mountData then {
data: { mountPath: "/data" },
} else {},
},
},
securityContext: {
runAsUser: 991,
runAsGroup: 991,
fsGroup: 991,
},
},
},
},
},
# Synapse main process
main: {
deployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) {
cfg+: {
# Main process doesn't need any configuration customization
localConfig: {}
},
},
svc: app.ns.Contain(kube.Service("synapse")) {
target_pod:: app.main.deployment.spec.template,
},
replicationSvc: app.ns.Contain(kube.Service("synapse-replication-master")) {
target_pod:: app.main.deployment.spec.template,
spec+: {
ports: [
{ port: 9093, name: 'replication', targetPort: 9093 },
],
},
},
},
genericWorker: {
# Synapse generic worker deployment
deployment: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) {
cfg+: {
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["client", "federation"]}],
}],
},
},
},
svc: app.ns.Contain(kube.Service("synapse-generic")) {
target_pod:: app.genericWorker.deployment.spec.template,
},
# Following paths can be handled by generic workers.
# See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md
paths:: [
"/_matrix/client/(v2_alpha|r0)/sync",
"/_matrix/client/(api/v1|v2_alpha|r0)/events",
"/_matrix/client/(api/v1|r0)/initialSync",
"/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync",
"/_matrix/client/(api/v1|r0|unstable)/publicRooms",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state",
"/_matrix/client/(api/v1|r0|unstable)/account/3pid",
"/_matrix/client/(api/v1|r0|unstable)/keys/query",
"/_matrix/client/(api/v1|r0|unstable)/keys/changes",
"/_matrix/client/versions",
"/_matrix/client/(api/v1|r0|unstable)/voip/turnServer",
"/_matrix/client/(api/v1|r0|unstable)/joined_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups",
"/_matrix/client/(api/v1|r0|unstable)/publicised_groups/",
# Blocked by https://github.com/matrix-org/synapse/issues/8966
# "/_matrix/client/(api/v1|r0|unstable)/login",
# "/_matrix/client/(r0|unstable)/register",
# "/_matrix/client/(r0|unstable)/auth/.*/fallback/web",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/",
"/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)",
"/_matrix/client/(api/v1|r0|unstable)/join/",
"/_matrix/client/(api/v1|r0|unstable)/profile/",
"/_matrix/federation/v1/event/",
"/_matrix/federation/v1/state/",
"/_matrix/federation/v1/state_ids/",
"/_matrix/federation/v1/backfill/",
"/_matrix/federation/v1/get_missing_events/",
"/_matrix/federation/v1/publicRooms",
"/_matrix/federation/v1/query/",
"/_matrix/federation/v1/make_join/",
"/_matrix/federation/v1/make_leave/",
"/_matrix/federation/v1/send_join/",
"/_matrix/federation/v2/send_join/",
"/_matrix/federation/v1/send_leave/",
"/_matrix/federation/v2/send_leave/",
"/_matrix/federation/v1/invite/",
"/_matrix/federation/v2/invite/",
"/_matrix/federation/v1/query_auth/",
"/_matrix/federation/v1/event_auth/",
"/_matrix/federation/v1/exchange_third_party_invite/",
"/_matrix/federation/v1/user/devices/",
"/_matrix/federation/v1/get_groups_publicised",
"/_matrix/key/v2/query",
"/_matrix/federation/v1/send/",
],
},
# Synapse media worker. This handles access to uploads and media stored in app.dataVolume
mediaWorker: {
deployment: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) {
cfg+: {
mountData: true,
localConfig+: {
worker_listeners: [{
type: "http",
port: 8008,
x_forwarded: true,
bind_addresses: ["::"],
resources: [{ names: ["media"]}],
}],
},
},
},
svc: app.ns.Contain(kube.Service("synapse-media")) {
target_pod:: app.mediaWorker.deployment.spec.template,
},
},
}
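The SynapseWorker constructor above should also cover further worker
types; a sketch (hypothetical, not part of this commit) following the
genericWorker pattern, using synapse's federation sender worker app:

    federationSender: {
        deployment: app.SynapseWorker("synapse-federation-sender",
                "synapse.app.federation_sender", kube.StatefulSet) {
            // Federation senders serve no client/federation paths, so
            // the default localConfig (worker name and replication
            // settings) is enough; no worker_listeners are added here.
        },
    },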

app/matrix/lib/wellknown.libsonnet

@@ -0,0 +1,41 @@
local kube = import "../../../kube/kube.libsonnet";
{
local app = self,
local cfg = app.cfg,
cfg:: {
image: error "cfg.image must be set",
# webDomain is the domain name of matrix homeserver to be served
webDomain: error "cfg.webDomain must be set",
},
ns:: error "ns needs to be a kube.Namespace object",
deployment: app.ns.Contain(kube.Deployment("wellknown")) {
spec+: {
replicas: 1,
template+: {
spec+: {
containers_: {
web: kube.Container("wellknown") {
image: cfg.image,
ports_: {
http: { containerPort: 8080 },
},
command: ["/app/matrix/wellknown"],
args: ["-hspki_disable", "-domain", cfg.webDomain],
},
},
securityContext: {
runAsUser: 101,
runAsGroup: 101,
},
},
},
},
},
svc: app.ns.Contain(kube.Service("wellknown")) {
target_pod:: app.deployment.spec.template,
},
}