// k0.hswaw.net kubernetes cluster
// This defines the cluster as a single object.
// Use the sibling k0*.jsonnet 'view' files to actually apply the configuration.

local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";

local cluster = import "cluster.libsonnet";

local admitomatic = import "lib/admitomatic.libsonnet";
local cockroachdb = import "lib/cockroachdb.libsonnet";
local registry = import "lib/registry.libsonnet";
local rook = import "lib/rook.libsonnet";

{
    k0: {
        local k0 = self,
        cluster: cluster.Cluster("k0", "hswaw.net") {
            cfg+: {
                storageClassNameParanoid: k0.ceph.waw3Pools.blockRedundant.name,
            },
            metallb+: {
                cfg+: {
                    // Peer with calico running on the same node.
                    peers: [
                        {
                            "peer-address": "127.0.0.1",
                            "peer-asn": 65003,
                            "my-asn": 65002,
                        },
                    ],
                    // Public IP address pools. Keep in sync with k0.calico.yaml.
                    addressPools: [
                        {
                            name: "public-v4-1",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.48/28",
                            ],
                        },
                        {
                            name: "public-v4-2",
                            protocol: "bgp",
                            addresses: [
                                "185.236.240.112/28",
                            ],
                        },
                    ],
                },
            },
        },

        // Docker registry
        registry: registry.Environment {
            cfg+: {
                domain: "registry.%s" % [k0.cluster.fqdn],
                storageClassName: k0.cluster.cfg.storageClassNameParanoid,
                objectStorageName: "waw-hdd-redundant-3-object",
            },
        },

        // CockroachDB, running on bc01n02, dcr01s22 and dcr01s24.
        cockroach: {
            waw2: cockroachdb.Cluster("crdb-waw1") {
                cfg+: {
                    topology: [
                        { name: "bc01n02", node: "bc01n02.hswaw.net" },
                        { name: "dcr01s22", node: "dcr01s22.hswaw.net" },
                        { name: "dcr01s24", node: "dcr01s24.hswaw.net" },
                    ],
                    // Host path on SSD.
                    hostPath: "/var/db/crdb-waw1",
                    extraDNS: [
                        "crdb-waw1.hswaw.net",
                    ],
                },
                initJob:: null,
            },
            clients: {
                cccampix: k0.cockroach.waw2.Client("cccampix"),
                cccampixDev: k0.cockroach.waw2.Client("cccampix-dev"),
                buglessDev: k0.cockroach.waw2.Client("bugless-dev"),
                sso: k0.cockroach.waw2.Client("sso"),
                herpDev: k0.cockroach.waw2.Client("herp-dev"),
                gitea: k0.cockroach.waw2.Client("gitea"),
                issues: k0.cockroach.waw2.Client("issues"),
                dns: k0.cockroach.waw2.Client("dns"),
            },
        },

        ceph: {
            // waw1 cluster - dead as of 2019/08/06, data corruption
            // waw2 cluster - dead as of 2021/01/22, torn down (horrible M610 RAID controllers are horrible)

            // waw3: 6TB SAS 3.5" HDDs, internal Rook cluster.
            //
            // Suffers from rook going apeshit and nuking all mons when just
            // enough of the control plane is up for rook to run, but nodes are
            // unavailable to the point where it decides that no mon exists and
            // should create new ones, fully nuking the monmap and making
            // recovery a pain.
            //
            // Supposedly newer versions of Rook slowly fix these issues, but
            // q3k doesn't personally trust this codebase anymore. He'd rather
            // manage the actual Ceph cluster himself; we don't need all of this
            // magic.
            //
            // See: b.hswaw.net/6
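            //
            // Note on the device lists below: disks are referenced by stable
            // WWN-based /dev/disk/by-id/ paths. On a storage node these can be
            // enumerated with something like `ls -l /dev/disk/by-id/ | grep wwn-`,
            // and the model/serial comments next to each entry cross-checked
            // with e.g. `smartctl -i` against the corresponding block device.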
            waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
                spec: {
                    mon: {
                        count: 3,
                        allowMultiplePerNode: false,
                    },
                    resources: {
                        osd: {
                            requests: {
                                cpu: "2",
                                memory: "6G",
                            },
                            limits: {
                                cpu: "2",
                                memory: "8G",
                            },
                        },
                    },
                    storage: {
                        useAllNodes: false,
                        useAllDevices: false,
                        config: {
                            databaseSizeMB: "1024",
                            journalSizeMB: "1024",
                        },
                        nodes: [
                            {
                                name: "dcr01s22.hswaw.net",
                                devices: [
                                    { name: "/dev/disk/by-id/wwn-0x" + id }
                                    for id in [
                                        "5000c5008508c433",  # ST6000NM0034 Z4D40QZR0000R629ME1B
                                        "5000c500850989cf",  # ST6000NM0034 Z4D40JRL0000R63008A2
                                        "5000c5008508baf7",  # ST6000NM0034 Z4D40M380000R630V00M
                                        "5000c5008508f843",  # ST6000NM0034 Z4D40LGP0000R630UVTD
                                        "5000c500850312cb",  # ST6000NM0034 Z4D3ZAAX0000R629NW31
                                        "5000c500850293e3",  # ST6000NM0034 Z4D3Z5TD0000R629MF7P
                                        "5000c5008508e3ef",  # ST6000NM0034 Z4D40LM50000R630V0W3
                                        "5000c5008508e23f",  # ST6000NM0034 Z4D40QMX0000R629MD3C
                                    ]
                                ],
                            },
                            {
                                name: "dcr01s24.hswaw.net",
                                devices: [
                                    { name: "/dev/disk/by-id/wwn-0x" + id }
                                    for id in [
                                        "5000c5008508c9ef",  # ST6000NM0034 Z4D40LY40000R630UZCE
                                        "5000c5008508df33",  # ST6000NM0034 Z4D40QQ00000R629MB25
                                        "5000c5008508dd3b",  # ST6000NM0034 Z4D40QQJ0000R630RBY6
                                        "5000c5008509199b",  # ST6000NM0034 Z4D40QG10000R630V0X9
                                        "5000c5008508ee03",  # ST6000NM0034 Z4D40LHH0000R630UYP0
                                        "5000c50085046abf",  # ST6000NM0034 Z4D3ZF1B0000R629NV9P
                                        "5000c5008502929b",  # ST6000NM0034 Z4D3Z5WG0000R629MF14
                                    ]
                                ],
                            },
                        ],
                    },
                },
                benji:: {
                    metadataStorageClass: "waw-hdd-redundant-3",
                    encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
                    pools: [
                        "waw-hdd-redundant-3",
                        "waw-hdd-redundant-3-metadata",
                        "waw-hdd-yolo-3",
                    ],
                    s3Configuration: {
                        awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
                        awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
                        bucketName: "benji-k0-backups-waw3",
                        endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
                    },
                },
            },
            waw3Pools: {
                // redundant block storage
                blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },
                // q3k's personal pool, used externally from k8s.
                q3kRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-q3k-3") {
                    metadataReplicas: 2,
                    spec: {
                        failureDomain: "host",
                        replicated: {
                            size: 2,
                        },
                    },
                },

                object: {
                    local poolSpec = {
                        failureDomain: "host",
                        replicated: { size: 2 },
                    },

                    realm: rook.S3ObjectRealm(k0.ceph.waw3, "hscloud"),
                    zonegroup: rook.S3ObjectZoneGroup(self.realm, "eu"),
                    // This is serving at object.ceph-waw3.hswaw.net, but
                    // internally to Ceph it is known as
                    // waw-hdd-redundant-3-object (name of radosgw zone).
                    store: rook.S3ObjectStore(self.zonegroup, "waw-hdd-redundant-3-object") {
                        cfg+: {
                            // Override so that this radosgw serves on
                            // object.ceph-{waw3,eu}.hswaw.net instead of
                            // ceph-{waw-hdd-redundant-3-object,eu}.
                            domainParts: [
                                "waw3",
                                "eu",
                            ],
                        },
                        spec: {
                            metadataPool: poolSpec,
                            dataPool: poolSpec,
                        },
                    },
                },
            },

            // Clients for S3/radosgw storage.
            clients: {
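                # Each CephObjectStoreUser below results in Rook provisioning
                # radosgw credentials and (assuming upstream Rook defaults for
                # the version in use) publishing them in a Secret named
                # rook-ceph-object-user-<store>-<user> in the ceph-waw3
                # namespace. For example, for the nextcloud user, something
                # like:
                #   kubectl -n ceph-waw3 get secret \
                #     rook-ceph-object-user-waw-hdd-redundant-3-object-nextcloud -o yaml
                # should show the AccessKey/SecretKey pair to hand to the client.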
                # Used for owncloud.hackerspace.pl, which for now lives on boston-packets.hackerspace.pl.
                nextcloudWaw3: kube.CephObjectStoreUser("nextcloud") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nextcloud",
                    },
                },

                # issues.hackerspace.pl (redmine) attachments bucket
                issuesWaw3: kube.CephObjectStoreUser("issues") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "issues",
                    },
                },

                # matrix.hackerspace.pl media storage bucket
                matrixWaw3: kube.CephObjectStoreUser("matrix") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "matrix",
                    },
                },

                # tape staging temporary storage
                tapeStaging: kube.CephObjectStoreUser("tape-staging") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "tape-staging",
                    },
                },

                # nuke@hackerspace.pl's personal storage.
                nukePersonalWaw3: kube.CephObjectStoreUser("nuke-personal") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "nuke-personal",
                    },
                },

                # patryk@hackerspace.pl's ArmA3 mod bucket.
                cz2ArmaModsWaw3: kube.CephObjectStoreUser("cz2-arma3mods") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz2-arma3mods",
                    },
                },

                # implr's personal user
                implrSparkWaw3: kube.CephObjectStoreUser("implr") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "implr",
                    },
                },

                # q3k's personal user
                q3kWaw3: kube.CephObjectStoreUser("q3k") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "q3k",
                    },
                },

                # woju's personal user
                wojuWaw3: kube.CephObjectStoreUser("woju") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "woju",
                    },
                },

                # cz3's (patryk@hackerspace.pl) personal user
                cz3Waw3: kube.CephObjectStoreUser("cz3") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "cz3",
                    },
                },

                # informatic's personal user
                informaticWaw3: kube.CephObjectStoreUser("informatic") {
                    metadata+: {
                        namespace: "ceph-waw3",
                    },
                    spec: {
                        store: "waw-hdd-redundant-3-object",
                        displayName: "informatic",
                    },
                },

                # mastodon qa and prod
                mastodonWaw3: {
                    qa: kube.CephObjectStoreUser("mastodon-qa") {
                        metadata+: {
                            namespace: "ceph-waw3",
                        },
                        spec: {
                            store: "waw-hdd-redundant-3-object",
                            displayName: "mastodon-qa",
                        },
                    },
                    prod: kube.CephObjectStoreUser("mastodon-prod") {
                        metadata+: {
                            namespace: "ceph-waw3",
                        },
                        spec: {
                            store: "waw-hdd-redundant-3-object",
                            displayName: "mastodon-prod",
                        },
                    },
                },
            },
        },

        # These are policies allowing for Insecure pods in some namespaces.
        # A lot of them are spurious and come from the fact that we deployed
        # these namespaces before we deployed the draconian PodSecurityPolicy
        # we have now. This should be fixed by setting up some more granular
        # policies, or fixing the workloads to not need some of the permission
        # bits they use, whatever those might be.
        # TODO(q3k): fix this?
        unnecessarilyInsecureNamespaces: [
            policies.AllowNamespaceInsecure("ceph-waw3"),
            policies.AllowNamespaceInsecure("matrix"),
            policies.AllowNamespaceInsecure("registry"),
            policies.AllowNamespaceInsecure("internet"),
            # TODO(implr): restricted policy with CAP_NET_ADMIN and tuntap, but no full root
            policies.AllowNamespaceInsecure("implr-vpn"),
        ],

        # Admission controller that permits non-privileged users to manage
        # their namespaces without danger of hijacking important URLs.
        admitomatic: admitomatic.Environment {
            cfg+: {
                proto: {
                    // Domains allowed in given namespaces. If a domain exists
                    // anywhere, ingresses will only be permitted to be created
                    // within namespaces in which it appears here. This works
                    // the same way for wildcards: if a wildcard exists in this
                    // list, it blocks all unauthorized uses of that domain
                    // elsewhere.
                    //
                    // See //cluster/admitomatic for more information.
                    //
                    // Or, tl;dr:
                    //
                    // If you do a wildcard CNAME onto the k0 ingress, you
                    // should explicitly state *.your.name.com here.
                    //
                    // If you just want to protect your host from being
                    // hijacked by other cluster users, you should also state
                    // it here (either as a wildcard, or as individual
                    // non-wildcard domains).
                    allow_domain: [
                        { namespace: "covid-formity", dns: "covid19.hackerspace.pl" },
                        { namespace: "covid-formity", dns: "covid.hackerspace.pl" },
                        { namespace: "covid-formity", dns: "www.covid.hackerspace.pl" },
                        { namespace: "inventory", dns: "inventory.hackerspace.pl" },
                        { namespace: "ldapweb", dns: "profile.hackerspace.pl" },
                        { namespace: "devtools-prod", dns: "hackdoc.hackerspace.pl" },
                        { namespace: "devtools-prod", dns: "cs.hackerspace.pl" },
                        { namespace: "engelsystem-prod", dns: "engelsystem.hackerspace.pl" },
                        { namespace: "gerrit", dns: "gerrit.hackerspace.pl" },
                        { namespace: "gitea-prod", dns: "gitea.hackerspace.pl" },
                        { namespace: "hswaw-prod", dns: "*.hackerspace.pl" },
                        { namespace: "hswaw-prod", dns: "*.hswaw.net" },
                        { namespace: "internet", dns: "internet.hackerspace.pl" },
                        { namespace: "matrix", dns: "matrix.hackerspace.pl" },
                        { namespace: "onlyoffice-prod", dns: "office.hackerspace.pl" },
                        { namespace: "paperless", dns: "paperless.hackerspace.pl" },
                        { namespace: "redmine", dns: "issues.hackerspace.pl" },
                        { namespace: "redmine", dns: "b.hackerspace.pl" },
                        { namespace: "redmine", dns: "b.hswaw.net" },
                        { namespace: "redmine", dns: "xn--137h.hackerspace.pl" },
                        { namespace: "redmine", dns: "xn--137h.hswaw.net" },
                        { namespace: "speedtest", dns: "speedtest.hackerspace.pl" },
                        { namespace: "sso", dns: "sso.hackerspace.pl" },
                        { namespace: "mastodon-hackerspace-qa", dns: "social-qa-2.hackerspace.pl" },
                        { namespace: "mastodon-hackerspace-prod", dns: "social.hackerspace.pl" },
                        { namespace: "ceph-waw3", dns: "ceph-waw3.hswaw.net" },
                        { namespace: "ceph-waw3", dns: "object.ceph-waw3.hswaw.net" },
                        { namespace: "ceph-waw3", dns: "object.ceph-eu.hswaw.net" },
                        { namespace: "monitoring-global-k0", dns: "*.hswaw.net" },
                        { namespace: "registry", dns: "*.hswaw.net" },
                        // q3k's legacy namespace (pre-prodvider)
                        { namespace: "q3k", dns: "*.q3k.org" },
                        { namespace: "personal-q3k", dns: "*.q3k.org" },
                    ],
                    anything_goes_namespace: [
                        // sourcegraph ingress wants a config snippet to set a header.
                        "devtools-prod",
                    ],
                },
            },
        },
    },
}
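
// How this object gets applied (sketch only, not part of the configuration):
// assuming this file is saved as k0.libsonnet, a sibling k0*.jsonnet 'view'
// file selects a subtree of the object above and hands it to whatever
// jsonnet-aware apply tooling the repository uses (e.g. kubecfg), roughly:
//
//   // k0-ceph.jsonnet (hypothetical example, not an actual sibling file)
//   (import "k0.libsonnet").k0.ceph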