From 8ec865728eea47d5c39606c6d65b11be5d0d6c81 Mon Sep 17 00:00:00 2001
From: Piotr Dobrowolski
Date: Sat, 30 Jan 2021 13:06:07 +0100
Subject: [PATCH] app/matrix: matrix-ng - synapse deployment cleanup

This is a major revamp of our matrix/synapse deployment as a separate
.libsonnet module.

* synapse version bump to 1.25.0
* riot-web version bump to 1.7.18
* Replaced the synapse migration hack we used to template configuration
  with environment variable replacement done by Kubernetes itself
* Implemented support for OpenID Connect; migration from CAS has been
  verified to work with some additional configuration options
* Moved the homeserver signing key into a k8s secret, thus making it
  possible to run synapse processes without a single data volume
* Split synapse into a main process, a generic worker and a media
  repository worker (the latter is the only container using the data
  volume). Both the generic worker and the media repository worker run
  as a single replica until we get proper HTTP routing/loadbalancing
* Riot nginx.conf has been extracted into an external file loaded using
  importstr.

Change-Id: I6c4d34bf41e148a302d1cbe725608a5aeb7b87ba
---
 app/matrix/lib/matrix-ng.libsonnet | 588 ++++++++++++++++++
 app/matrix/lib/riot-nginx.conf | 42 ++
 app/matrix/lib/synapse/homeserver-ng.yaml | 134 ++++
 .../lib/synapse/homeserver-secrets.yaml | 22 +
 4 files changed, 786 insertions(+)
 create mode 100644 app/matrix/lib/matrix-ng.libsonnet
 create mode 100644 app/matrix/lib/riot-nginx.conf
 create mode 100644 app/matrix/lib/synapse/homeserver-ng.yaml
 create mode 100644 app/matrix/lib/synapse/homeserver-secrets.yaml

diff --git a/app/matrix/lib/matrix-ng.libsonnet b/app/matrix/lib/matrix-ng.libsonnet
new file mode 100644
index 00000000..08c8153e
--- /dev/null
+++ b/app/matrix/lib/matrix-ng.libsonnet
@@ -0,0 +1,588 @@
+# Matrix server (synapse).
+# This needs a secret provisioned; create it with:
+#   ns=matrix
+#
+#   SIGNING_KEY="$(kubectl run -n $ns -i --quiet --restart=Never --rm synapse-generate-config --image=matrixdotorg/synapse:v1.19.2 --env SYNAPSE_SERVER_NAME=dummy --env SYNAPSE_REPORT_STATS=no -o yaml --command -- sh -c '/start.py generate >/dev/null && cat /data/*.signing.key')"
+#   kubectl -n $ns create secret generic synapse --from-literal=postgres_password=$(pwgen 24 1) --from-literal=macaroon_secret_key=$(pwgen 32 1) --from-literal=registration_shared_secret=$(pwgen 32 1) --from-literal=homeserver_signing_key="$SIGNING_KEY" --from-literal=redis_password=$(pwgen 32 1) --from-literal=worker_replication_secret=$(pwgen 32 1)
+#   kubectl -n $ns create secret generic oauth2-cas-proxy --from-literal=oauth2_secret=...
+#
+# When migrating from matrix.libsonnet, the instance signing key, redis
+# password and worker replication secret need to be added to the existing
+# synapse secret:
+#
+#   echo "homeserver_signing_key: $(kubectl -n $ns exec deploy/synapse -- sh -c 'cat /data/*.signing.key' | base64 -w0)"
+#   echo "redis_password: $(pwgen 32 1 | tr -d '\n' | base64 -w0)"
+#   echo "worker_replication_secret: $(pwgen 32 1 | tr -d '\n' | base64 -w0)"
+#   kubectl -n $ns edit secret synapse
+#   # ...add homeserver_signing_key, redis_password and worker_replication_secret keys
+#
+# Sequencing appservices is fun. The appservice needs to run first (for
+# instance, via a bootstrap job), and on startup it will spit out a
+# registration file. This registration file then needs to be fed to synapse -
+# this is done via specially named secrets (appservice-X-registration, for each
+# key X in the appservices object).
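+#
+# A hypothetical per-deployment instantiation of this module (the import path
+# and domain values below are illustrative, not taken from any real
+# deployment) could look like:
+#
+#   local matrix = import "lib/matrix-ng.libsonnet";
+#
+#   {
+#     matrix: matrix {
+#       cfg+: {
+#         namespace: "matrix",
+#         webDomain: "matrix.example.com",
+#         serverName: "example.com",
+#       },
+#     },
+#   }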
+# +# For appservice-irc instances, you can use this oneliner magic to get the +# registration YAML from logs. +# kubectl -n matrix create secret generic appservice-irc-freenode-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-irc-freenode-bootstrap | tail -n +4 | sed -r 's/(.*aliases:.*)/ group_id: "+freenode:hackerspace.pl"\n\1/') +# +# For appservice-telegram instances, you can use this oneliner magic: +# kubectl -n matrix create secret generic appservice-telegram-prod-registration --from-file=registration.yaml=<(kubectl -n matrix logs job/appservice-telegram-prod-bootstrap | grep -A 100 SNIPSNIP | grep -v SNIPSNIP) + +local kube = import "../../../kube/kube.libsonnet"; +local postgres = import "../../../kube/postgres.libsonnet"; +local redis = import "../../../kube/redis.libsonnet"; + +{ + local app = self, + local cfg = app.cfg, + cfg:: { + namespace: error "cfg.namespace must be set", + # webDomain is the domain name at which element will run + webDomain: error "cfg.webDomain must be set", + # serverName is the server part of the MXID this homeserver will cover + serverName: error "cfg.serverName must be set", + storageClassName: "waw-hdd-redundant-3", + + images: { + synapse: "matrixdotorg/synapse:v1.25.0", + riot: "vectorim/riot-web:v1.7.18", + casProxy: "registry.k0.hswaw.net/q3k/oauth2-cas-proxy:0.1.4", + appserviceIRC: "matrixdotorg/matrix-appservice-irc:release-0.17.1", + # That's v0.8.2 - we just don't trust that host to not re-tag images. + appserviceTelegram: "dock.mau.dev/tulir/mautrix-telegram@sha256:9e68eaa80c9e4a75d9a09ec92dc4898b12d48390e01efa4de40ce882a6f7e330", + wellKnown: "registry.k0.hswaw.net/q3k/wellknown:1611960794-adbf560851a46ad0e58b42f0daad7ef19535687c", + }, + + # OpenID Connect provider configuration. + # Currently only client_secret can be provided as a secretKeyRef. + # + # https://${cfg.webDomain}/_synapse/oidc/callback needs to be set as + # allowed OAuth2/OpenID Connect callback URL + # + # See: https://github.com/matrix-org/synapse/blob/v1.25.0/docs/openid.md + oidc: { + enable: false, + config: { + issuer: error "oidc.config.issuer must be set", + client_id: error "oidc.config.client_id must be set", + client_secret: error "oidc.config.client_secret must be set", + + # Set this to true when migrating from existing CAS deployment + allow_existing_users: false, + user_mapping_provider: { + config: { + localpart_template: '{{ user.sub }}', + display_name_template: '{{ user.sub }}', + }, + }, + + # Extra configuration required when migrating from + # oauth2-cas-proxy bound to https://sso.hackerspace.pl + # user_profile_method: "userinfo_endpoint", + # client_auth_method: "client_secret_post", + }, + }, + + # Central Authentication Scheme, a single-sign-on system. Note: this flow is now called 'SSO' in Matrix, we keep this name for legacy reasons. + # Refer to https://matrix.org/docs/spec/client_server/r0.6.1#sso-client-login + cas: { + # whether to enable the CAS proxy (ie. 
connect to hswaw sso via OAuth) + enable: false, + # generate client ID and secret in with your OAuth2 provider, refer to https://www.oauth.com/oauth2-servers/client-registration/client-id-secret/ + oauth2: { + clientID: error "cas.oauth2.clientID must be set", + clientSecret: error "cas.oauth2.clientSecret must be set", + scope: error "cas.oauth2.scope must be set", + authorizeURL: error "cas.oauth2.authorizeURL must be set", + tokenURL: error "cas.oauth2.tokenURL must be set", + userinfoURL: error "cas.oauth2.userinfoURL must be set", + }, + }, + + # Serve /.well-known/matrix configuration endpoints required when using + # cfg.webDomain directly as mxid. + wellKnown: false, + }, + + metadata(component):: { + namespace: cfg.namespace, + labels: { + "app.kubernetes.io/name": "matrix", + "app.kubernetes.io/managed-by": "kubecfg", + "app.kubernetes.io/component": component, + }, + }, + + namespace: kube.Namespace(cfg.namespace), + + postgres3: postgres { + cfg+: { + namespace: cfg.namespace, + appName: "synapse", + database: "synapse", + username: "synapse", + prefix: "waw3-", + password: { secretKeyRef: { name: "synapse", key: "postgres_password" } }, + storageClassName: cfg.storageClassName, + storageSize: "100Gi", + initdbArgs: "--encoding='UTF8' --lc-collate='C' --lc-ctype='C'", + }, + }, + + redis: redis { + cfg+: { + namespace: cfg.namespace, + appName: "synapse", + storageClassName: cfg.storageClassName, + password: { secretKeyRef: { name: "synapse", key: "redis_password" } }, + }, + }, + + dataVolume: kube.PersistentVolumeClaim("synapse-data-waw3") { + metadata+: app.metadata("synapse-data"), + spec+: { + storageClassName: cfg.storageClassName, + accessModes: [ "ReadWriteOnce" ], + resources: { + requests: { + storage: "50Gi", + }, + }, + }, + }, + + // homeserver.yaml that will be used to run synapse (in synapseConfigMap). + // This is based off of //app/matrix/lib/synapse/homeserver.yaml with some fields overriden per + // deployment. + synapseConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-ng.yaml")[0] { + server_name: cfg.serverName, + public_baseurl: "https://%s" % [cfg.webDomain], + signing_key_path: "/secrets/homeserver_signing_key", + app_service_config_files: [ + "/appservices/%s/registration.yaml" % [k] + for k in std.objectFields(app.appservices) + ], + } + (if cfg.cas.enable then { + cas_config: { + enabled: true, + server_url: "https://%s/_cas" % [cfg.webDomain], + service_url: "https://%s" % [cfg.webDomain], + }, + } else {}), + + synapseConfigMap: kube.ConfigMap("synapse") { + metadata+: app.metadata("synapse"), + data: { + "homeserver.yaml": std.manifestYamlDoc(app.synapseConfig), + "log.config": importstr "synapse/log.config", + }, + }, + + // homeserver-secrets.yaml contains all the templated secret variables from + // base homeserver.yaml passed as yaml-encoded environment variable. 
+ // $(ENVVAR)-encoded variables are resolved by Kubernetes on pod startup + synapseSecretsConfig:: (std.native("parseYaml"))(importstr "synapse/homeserver-secrets.yaml")[0] { + } + (if cfg.oidc.enable then { + oidc_config: cfg.oidc.config { + enabled: true, + client_secret: "$(OIDC_CLIENT_SECRET)", + }, + } else {}), + + cas: if cfg.cas.enable && cfg.oidc.enable then error "cfg.cas.enable and cfg.oidc.enable options are exclusive" + else if cfg.cas.enable then { + deployment: kube.Deployment("oauth2-cas-proxy") { + metadata+: app.metadata("oauth2-cas-proxy"), + spec+: { + replicas: 1, + template+: { + spec+: { + containers_: { + proxy: kube.Container("oauth2-cas-proxy") { + image: cfg.images.casProxy, + ports_: { + http: { containerPort: 5000 }, + }, + env_: { + BASE_URL: "https://%s" % [cfg.webDomain], + SERVICE_URL: "https://%s" % [cfg.webDomain], + OAUTH2_CLIENT: cfg.cas.oauth2.clientID, + OAUTH2_SECRET: cfg.cas.oauth2.clientSecret, + OAUTH2_SCOPE: cfg.cas.oauth2.scope, + OAUTH2_AUTHORIZE: cfg.cas.oauth2.authorizeURL, + OAUTH2_TOKEN: cfg.cas.oauth2.tokenURL, + OAUTH2_USERINFO: cfg.cas.oauth2.userinfoURL, + }, + }, + }, + }, + }, + }, + }, + + svc: kube.Service("oauth2-cas-proxy") { + metadata+: app.metadata("oauth2-cas-proxy"), + target_pod:: app.cas.deployment.spec.template, + }, + }, + + # Synapse process Deployment/StatefulSet base resource. + SynapseWorker(name, workerType, builder):: builder(name) { + local worker = self, + cfg:: { + # Configuration customization. Can contain environment substitution + # syntax, as used in worker_name value. + localConfig: { + worker_app: workerType, + worker_name: "$(POD_NAME)", + + # The replication listener on the main synapse process. + worker_replication_host: "synapse-replication-master", + worker_replication_http_port: 9093, + }, + + # Mount app.dataVolume in /data + mountData: false, + }, + + metadata+: app.metadata(name), + spec+: { + replicas: 1, + template+: { + spec+: { + volumes_: { + config: kube.ConfigMapVolume(app.synapseConfigMap), + secrets: { secret: { secretName: "synapse" } }, + } + { + [k]: { secret: { secretName: "appservice-%s-registration" % [k] } } + for k in std.objectFields(app.appservices) + } + if worker.cfg.mountData then { + data: kube.PersistentVolumeClaimVolume(app.dataVolume), + } else {}, + containers_: { + web: kube.Container("synapse") { + image: cfg.images.synapse, + command: [ + "/bin/sh", "-c", ||| + set -e + echo "${X_SECRETS_CONFIG}" > /tmp/secrets.yaml + echo "${X_LOCAL_CONFIG}" > /tmp/local.yaml + exec python -m ${SYNAPSE_WORKER} --config-path /conf/homeserver.yaml --config-path /tmp/secrets.yaml --config-path /tmp/local.yaml + ||| + ], + ports_: { + http: { containerPort: 8008 }, + metrics: { containerPort: 9092 }, + replication: { containerPort: 9093 }, + }, + env_: { + SYNAPSE_WORKER: workerType, + + SYNAPSE_MACAROON_SECRET_KEY: { secretKeyRef: { name: "synapse", key: "macaroon_secret_key" } }, + SYNAPSE_REGISTRATION_SHARED_SECRET: { secretKeyRef: { name: "synapse", key: "registration_shared_secret" } }, + WORKER_REPLICATION_SECRET: { secretKeyRef: { name: "synapse", key: "worker_replication_secret" } }, + POSTGRES_PASSWORD: { secretKeyRef: { name: "synapse", key: "postgres_password" } }, + REDIS_PASSWORD: { secretKeyRef: { name: "synapse", key: "redis_password" } }, + POD_NAME: { fieldRef: { fieldPath: "metadata.name" } }, + OIDC_CLIENT_SECRET: if cfg.oidc.enable then cfg.oidc.config.client_secret else "", + + X_SECRETS_CONFIG: std.manifestYamlDoc(app.synapseSecretsConfig), + X_LOCAL_CONFIG: 
std.manifestYamlDoc(worker.cfg.localConfig), + }, + volumeMounts_: { + config: { mountPath: "/conf", }, + secrets: { mountPath: "/secrets" }, + } + { + [k]: { mountPath: "/appservices/%s" % [k] } + for k in std.objectFields(app.appservices) + } + if worker.cfg.mountData then { + data: { mountPath: "/data" }, + } else {}, + }, + }, + securityContext: { + runAsUser: 991, + runAsGroup: 991, + fsGroup: 991, + }, + }, + }, + }, + }, + + # Synapse main process + synapseDeployment: app.SynapseWorker("synapse", "synapse.app.homeserver", kube.Deployment) { + cfg+: { + # Main process doesn't need any configuration customization + localConfig: {} + }, + }, + synapseSvc: kube.Service("synapse") { + metadata+: app.metadata("synapse"), + target_pod:: app.synapseDeployment.spec.template, + }, + synapseReplicationSvc: kube.Service("synapse-replication-master") { + metadata+: app.metadata("synapse-replication-master"), + target_pod:: app.synapseDeployment.spec.template, + spec+: { + ports: [ + { port: 9093, name: 'replication', targetPort: 9093 }, + ], + }, + }, + + # Synapse generic worker deployment + synapseGenericWorker: app.SynapseWorker("synapse-generic", "synapse.app.generic_worker", kube.StatefulSet) { + cfg+: { + localConfig+: { + worker_listeners: [{ + type: "http", + port: 8008, + x_forwarded: true, + bind_addresses: ["::"], + resources: [{ names: ["client", "federation"]}], + }], + }, + }, + }, + synapseGenericSvc: kube.Service("synapse-generic") { + metadata+: app.metadata("synapse-generic"), + target_pod:: app.synapseGenericWorker.spec.template, + }, + + # Following paths can be handled by generic workers. + # See: https://github.com/matrix-org/synapse/blob/master/docs/workers.md + synapseGenericWorkerPaths:: [ + "/_matrix/client/(v2_alpha|r0)/sync", + "/_matrix/client/(api/v1|v2_alpha|r0)/events", + "/_matrix/client/(api/v1|r0)/initialSync", + "/_matrix/client/(api/v1|r0)/rooms/[^/]+/initialSync", + "/_matrix/client/(api/v1|r0|unstable)/publicRooms", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/joined_members", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/context/.*", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/members", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state", + "/_matrix/client/(api/v1|r0|unstable)/account/3pid", + "/_matrix/client/(api/v1|r0|unstable)/keys/query", + "/_matrix/client/(api/v1|r0|unstable)/keys/changes", + "/_matrix/client/versions", + "/_matrix/client/(api/v1|r0|unstable)/voip/turnServer", + "/_matrix/client/(api/v1|r0|unstable)/joined_groups", + "/_matrix/client/(api/v1|r0|unstable)/publicised_groups", + "/_matrix/client/(api/v1|r0|unstable)/publicised_groups/", + # Blocked by https://github.com/matrix-org/synapse/issues/8966 + # "/_matrix/client/(api/v1|r0|unstable)/login", + # "/_matrix/client/(r0|unstable)/register", + # "/_matrix/client/(r0|unstable)/auth/.*/fallback/web", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/send", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/state/", + "/_matrix/client/(api/v1|r0|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)", + "/_matrix/client/(api/v1|r0|unstable)/join/", + "/_matrix/client/(api/v1|r0|unstable)/profile/", + "/_matrix/federation/v1/event/", + "/_matrix/federation/v1/state/", + "/_matrix/federation/v1/state_ids/", + "/_matrix/federation/v1/backfill/", + "/_matrix/federation/v1/get_missing_events/", + "/_matrix/federation/v1/publicRooms", + "/_matrix/federation/v1/query/", + "/_matrix/federation/v1/make_join/", + "/_matrix/federation/v1/make_leave/", + 
"/_matrix/federation/v1/send_join/", + "/_matrix/federation/v2/send_join/", + "/_matrix/federation/v1/send_leave/", + "/_matrix/federation/v2/send_leave/", + "/_matrix/federation/v1/invite/", + "/_matrix/federation/v2/invite/", + "/_matrix/federation/v1/query_auth/", + "/_matrix/federation/v1/event_auth/", + "/_matrix/federation/v1/exchange_third_party_invite/", + "/_matrix/federation/v1/user/devices/", + "/_matrix/federation/v1/get_groups_publicised", + "/_matrix/key/v2/query", + "/_matrix/federation/v1/send/", + ], + + # Synapse media worker. This handles access to uploads and media stored in app.dataVolume + synapseMediaWorker: app.SynapseWorker("synapse-media", "synapse.app.media_repository", kube.StatefulSet) { + cfg+: { + mountData: true, + localConfig+: { + worker_listeners: [{ + type: "http", + port: 8008, + x_forwarded: true, + bind_addresses: ["::"], + resources: [{ names: ["media"]}], + }], + }, + }, + }, + synapseMediaSvc: kube.Service("synapse-media") { + metadata+: app.metadata("synapse-media"), + target_pod:: app.synapseMediaWorker.spec.template, + }, + + riotConfig:: { + "default_hs_url": "https://%s" % [cfg.webDomain], + "disable_custom_urls": false, + "disable_guests": false, + "disable_login_language_selector": false, + "disable_3pid_login": true, + "brand": "Riot", + "integrations_ui_url": "https://scalar.vector.im/", + "integrations_rest_url": "https://scalar.vector.im/api", + "integrations_jitsi_widget_url": "https://scalar.vector.im/api/widgets/jitsi.html", + + "bug_report_endpoint_url": "https://riot.im/bugreports/submit", + "features": { + "feature_groups": "labs", + "feature_pinning": "labs", + "feature_reactions": "labs" + }, + "default_federate": true, + "default_theme": "light", + "roomDirectory": { + "servers": [ + cfg.serverName, + ] + }, + "welcomeUserId": "@riot-bot:matrix.org", + "enable_presence_by_hs_url": { + "https://matrix.org": false + } + }, + + riotConfigMap: kube.ConfigMap("riot-web-config") { + metadata+: app.metadata("riot-web-config"), + data: { + "config.json": std.manifestJsonEx(app.riotConfig, ""), + // Standard nginx.conf, made to work when running as unprivileged user. 
+ "nginx.conf": importstr "riot-nginx.conf", + }, + }, + + riotDeployment: kube.Deployment("riot-web") { + metadata+: app.metadata("riot-web"), + spec+: { + replicas: 1, + template+: { + spec+: { + volumes_: { + config: kube.ConfigMapVolume(app.riotConfigMap), + }, + containers_: { + web: kube.Container("riot-web") { + image: cfg.images.riot, + ports_: { + http: { containerPort: 8080 }, + }, + volumeMounts: [ + { + name: "config", + mountPath: "/app/config.json", + subPath: "config.json", + }, + { + name: "config", + mountPath: "/etc/nginx/nginx.conf", + subPath: "nginx.conf", + }, + ], + }, + }, + securityContext: { + // nginx:nginx + runAsUser: 101, + runAsGroup: 101, + }, + }, + }, + }, + }, + + riotSvc: kube.Service("riot-web") { + metadata+: app.metadata("riot-web"), + target_pod:: app.riotDeployment.spec.template, + }, + + wellKnown: if cfg.wellKnown then { + deployment: kube.Deployment("wellknown") { + metadata+: app.metadata("wellknown"), + spec+: { + replicas: 1, + template+: { + spec+: { + containers_: { + web: kube.Container("wellknown") { + image: cfg.images.wellKnown, + ports_: { + http: { containerPort: 8080 }, + }, + command: ["/app/matrix/wellknown"], + args: ["-hspki_disable", "-domain", cfg.webDomain], + }, + }, + securityContext: { + runAsUser: 101, + runAsGroup: 101, + }, + }, + }, + }, + }, + svc: kube.Service("wellknown") { + metadata+: app.metadata("wellknown"), + target_pod:: app.wellKnown.deployment.spec.template, + }, + } else {}, + + // Any appservice you add here will require an appservice-X-registration + // secret containing a registration.yaml file. Adding something to this + // dictionary will cause Synapse to not start until that secret is + // available - so change things carefully! + // If bootstrapping a new appservice, just keep it out of this dictionary + // until it spits you a registration YAML and you feed that to a secret. + appservices: {}, + + ingress: kube.Ingress("matrix") { + metadata+: app.metadata("matrix") { + annotations+: { + "kubernetes.io/tls-acme": "true", + "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod", + "nginx.ingress.kubernetes.io/proxy-body-size": "0", + "nginx.ingress.kubernetes.io/use-regex": "true", + }, + }, + spec+: { + tls: [ + { + hosts: [cfg.webDomain], + secretName: "synapse-tls", + }, + ], + rules: [ + { + host: cfg.webDomain, + http: { + paths: [ + { path: path, backend: app.synapseGenericSvc.name_port } + for path in app.synapseGenericWorkerPaths + ] + [ + { path: "/", backend: app.riotSvc.name_port }, + { path: "/_matrix/media/", backend: app.synapseMediaSvc.name_port }, + { path: "/_matrix/", backend: app.synapseSvc.name_port }, + + # Used by OpenID Connect login flow + { path: "/_synapse/", backend: app.synapseSvc.name_port }, + ] + (if cfg.cas.enable then [ + { path: "/_cas", backend: app.cas.svc.name_port }, + ] else []) + (if cfg.wellKnown then [ + { path: "/.well-known/matrix", backend: app.wellKnown.svc.name_port }, + ] else []) + }, + } + ], + }, + }, +} diff --git a/app/matrix/lib/riot-nginx.conf b/app/matrix/lib/riot-nginx.conf new file mode 100644 index 00000000..84276f2d --- /dev/null +++ b/app/matrix/lib/riot-nginx.conf @@ -0,0 +1,42 @@ +# Standard nginx.conf, made to work when running as unprivileged user. 
+ +worker_processes auto; + +error_log /dev/stderr warn; +pid /tmp/nginx.pid; + +events { + worker_connections 1024; +} + +http { + client_body_temp_path /tmp/nginx_client_temp; + proxy_temp_path /tmp/nginx_proxy_temp; + fastcgi_temp_path /tmp/nginx_fastcgi_temp; + uwsgi_temp_path /tmp/nginx_uwsgi_temp; + scgi_temp_path /tmp/nginx_scgi_temp; + + include /etc/nginx/mime.types; + default_type application/octet-stream; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + access_log /dev/stdout main; + sendfile on; + keepalive_timeout 65; + + server { + listen 8080; + server_name localhost; + + location / { + root /usr/share/nginx/html; + index index.html index.htm; + } + + error_page 500 502 503 504 /50x.html; + location = /50x.html { + root /usr/share/nginx/html; + } + } +} diff --git a/app/matrix/lib/synapse/homeserver-ng.yaml b/app/matrix/lib/synapse/homeserver-ng.yaml new file mode 100644 index 00000000..c57d650b --- /dev/null +++ b/app/matrix/lib/synapse/homeserver-ng.yaml @@ -0,0 +1,134 @@ +# vim:ft=yaml + +## Server ## + +server_name: "example.com" +public_baseurl: "https://example.com" +pid_file: /homeserver.pid +web_client: False +soft_file_limit: 0 +log_config: "/conf/log.config" +worker_log_config: "/conf/log.config" + +## Ports ## + +listeners: + - port: 8008 + tls: false + bind_addresses: ['::'] + type: http + x_forwarded: true + + resources: + - names: [client] + compress: true + - names: [federation] + compress: false + + # Metrics + - port: 9092 + type: metrics + bind_address: '0.0.0.0' + + # The HTTP replication port + - port: 9093 + bind_addresses: ['::'] + type: http + resources: + - names: [replication] + +## Performance ## + +event_cache_size: "10K" + +## Ratelimiting ## + +rc_messages_per_second: 0.2 +rc_message_burst_count: 10.0 +federation_rc_window_size: 1000 +federation_rc_sleep_limit: 10 +federation_rc_sleep_delay: 500 +federation_rc_reject_limit: 50 +federation_rc_concurrent: 3 + +## Files ## + +media_store_path: "/data/media" +uploads_path: "/data/uploads" +max_upload_size: "10M" +max_image_pixels: "32M" +dynamic_thumbnails: false + +# List of thumbnail to precalculate when an image is uploaded. +thumbnail_sizes: +- width: 32 + height: 32 + method: crop +- width: 96 + height: 96 + method: crop +- width: 320 + height: 240 + method: scale +- width: 640 + height: 480 + method: scale +- width: 800 + height: 600 + method: scale + +url_preview_enabled: False +max_spider_size: "10M" + + +## Registration ## + +enable_registration: False +bcrypt_rounds: 12 +allow_guest_access: True +enable_group_creation: true + +# The list of identity servers trusted to verify third party +# identifiers by this server. +# +# Also defines the ID server which will be called when an account is +# deactivated (one will be picked arbitrarily). +trusted_third_party_id_servers: + - matrix.org + - vector.im + +## Metrics ### + +enable_metrics: True +report_stats: False + + +## API Configuration ## + +room_invite_state_types: + - "m.room.join_rules" + - "m.room.canonical_alias" + - "m.room.avatar" + - "m.room.name" + +expire_access_token: False + +## Signing Keys ## + +signing_key_path: "/data/hackerspace.pl.signing.key" +old_signing_keys: {} +key_refresh_interval: "1d" # 1 Day. + +# The trusted servers to download signing keys from. 
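+#
+# (Signing keys of remote homeservers that cannot be fetched directly are
+# retrieved through these notary servers and validated against the verify key
+# pinned below.)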
+perspectives: + servers: + "matrix.org": + verify_keys: + "ed25519:auto": + key: "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw" +suppress_key_server_warning: true + +password_config: + enabled: false + +enable_media_repo: False diff --git a/app/matrix/lib/synapse/homeserver-secrets.yaml b/app/matrix/lib/synapse/homeserver-secrets.yaml new file mode 100644 index 00000000..1f6221f4 --- /dev/null +++ b/app/matrix/lib/synapse/homeserver-secrets.yaml @@ -0,0 +1,22 @@ +## Common secrets ## +registration_shared_secret: "$(SYNAPSE_REGISTRATION_SHARED_SECRET)" +macaroon_secret_key: "$(SYNAPSE_MACAROON_SECRET_KEY)" +worker_replication_secret: "$(WORKER_REPLICATION_SECRET)" + +## Database ## +database: + name: "psycopg2" + args: + user: "synapse" + password: "$(POSTGRES_PASSWORD)" + database: "synapse" + host: "waw3-postgres" + port: "5432" + cp_min: 5 + cp_max: 10 + +## Replication Redis ## +redis: + enabled: true + host: "redis" + password: "$(REDIS_PASSWORD)"
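+
+# Each $(VAR) reference above is expanded by the kubelet from an environment
+# variable of the same name defined on the synapse containers - see the env_
+# block of SynapseWorker in matrix-ng.libsonnet, e.g.:
+#
+#   POSTGRES_PASSWORD: { secretKeyRef: { name: "synapse", key: "postgres_password" } },
+#
+# The rendered file itself is passed to the container as the X_SECRETS_CONFIG
+# environment variable and written out to /tmp/secrets.yaml at startup.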