cluster: add ceph-waw3, move metallb to bgp

Change-Id: Iebf369f9a02e44be163ef4afc2e0f23c4b009898
q3k 2019-11-01 18:43:45 +01:00
parent e67f6fec98
commit c33ebcc79f
4 changed files with 178 additions and 42 deletions


@@ -138,8 +138,10 @@ local Cluster(short, realm) = {
allowInsecureNamespaces: [
policies.AllowNamespaceInsecure("kube-system"),
policies.AllowNamespaceInsecure("metallb-system"),
# TODO(q3k): fix this?
policies.AllowNamespaceInsecure("ceph-waw2"),
policies.AllowNamespaceInsecure("ceph-waw3"),
policies.AllowNamespaceInsecure("matrix"),
policies.AllowNamespaceInsecure("registry"),
policies.AllowNamespaceInsecure("internet"),
@@ -173,8 +175,28 @@ local Cluster(short, realm) = {
// Metal Load Balancer
metallb: metallb.Environment {
cfg+: {
peers: [
{
"peer-address": "185.236.240.33",
"peer-asn": 65001,
"my-asn": 65002,
},
],
addressPools: [
{ name: "public-v4-1", protocol: "layer2", addresses: ["185.236.240.50-185.236.240.63"] },
{
name: "public-v4-1",
protocol: "bgp",
addresses: [
"185.236.240.48/28",
],
},
{
name: "public-v4-2",
protocol: "bgp",
addresses: [
"185.236.240.112/28"
],
},
],
},
},
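
(Illustration only, not part of the change: with the pools now announced over BGP, a LoadBalancer Service can still pin itself to a specific pool through MetalLB's address-pool annotation. Only the pool name below comes from the config above; the service name, namespace and ports are hypothetical.)

local exampleSvc = {
  apiVersion: "v1",
  kind: "Service",
  metadata: {
    name: "example-lb",      // hypothetical
    namespace: "internet",   // hypothetical
    annotations: {
      "metallb.universe.tf/address-pool": "public-v4-1",
    },
  },
  spec: {
    type: "LoadBalancer",
    selector: { app: "example" },
    ports: [ { port: 80, targetPort: 8080 } ],
  },
};
exampleSvc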
@@ -231,7 +253,7 @@ local Cluster(short, realm) = {
local k0 = self,
cluster: Cluster("k0", "hswaw.net") {
cfg+: {
storageClassNameParanoid: k0.ceph.blockParanoid.name,
storageClassNameParanoid: k0.ceph.waw2Pools.blockParanoid.name,
},
},
cockroach: {
@@ -302,41 +324,10 @@ local Cluster(short, realm) = {
}
},
},
// redundant block storage
blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
spec: {
failureDomain: "host",
erasureCoded: {
dataChunks: 2,
codingChunks: 1,
},
},
},
// paranoid block storage (3 replicas)
blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
spec: {
failureDomain: "host",
replicated: {
size: 3,
},
},
},
// yolo block storage (no replicas!)
blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
spec: {
failureDomain: "host",
replicated: {
size: 1,
},
},
},
objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
spec: {
metadataPool: {
failureDomain: "host",
replicated: { size: 3 },
},
dataPool: {
waw2Pools: {
// redundant block storage
blockRedundant: rook.ECBlockPool(k0.ceph.waw2, "waw-hdd-redundant-2") {
spec: {
failureDomain: "host",
erasureCoded: {
dataChunks: 2,
@@ -344,6 +335,139 @@ local Cluster(short, realm) = {
},
},
},
// paranoid block storage (3 replicas)
blockParanoid: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-paranoid-2") {
spec: {
failureDomain: "host",
replicated: {
size: 3,
},
},
},
// yolo block storage (no replicas!)
blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw2, "waw-hdd-yolo-2") {
spec: {
failureDomain: "host",
replicated: {
size: 1,
},
},
},
objectRedundant: rook.S3ObjectStore(k0.ceph.waw2, "waw-hdd-redundant-2-object") {
spec: {
metadataPool: {
failureDomain: "host",
replicated: { size: 3 },
},
dataPool: {
failureDomain: "host",
erasureCoded: {
dataChunks: 2,
codingChunks: 1,
},
},
},
},
},
waw3: rook.Cluster(k0.cluster.rook, "ceph-waw3") {
spec: {
mon: {
count: 3,
allowMultiplePerNode: false,
},
storage: {
useAllNodes: false,
useAllDevices: false,
config: {
databaseSizeMB: "1024",
journalSizeMB: "1024",
},
nodes: [
{
name: "dcr01s22.hswaw.net",
location: "rack=dcr01 host=dcr01s22",
devices: [
// https://github.com/rook/rook/issues/1228
//{ name: "disk/by-id/wwan-0x" + wwan }
//for wwan in [
// "5000c5008508c433",
// "5000c500850989cf",
// "5000c5008508f843",
// "5000c5008508baf7",
//]
{ name: "sdn" },
{ name: "sda" },
{ name: "sdb" },
{ name: "sdc" },
],
},
{
name: "dcr01s24.hswaw.net",
location: "rack=dcr01 host=dcr01s22",
devices: [
// https://github.com/rook/rook/issues/1228
//{ name: "disk/by-id/wwan-0x" + wwan }
//for wwan in [
// "5000c5008508ee03",
// "5000c5008508c9ef",
// "5000c5008508df33",
// "5000c5008508dd3b",
//]
{ name: "sdm" },
{ name: "sda" },
{ name: "sdb" },
{ name: "sdc" },
],
},
],
},
benji:: {
metadataStorageClass: "waw-hdd-paranoid-3",
encryptionPassword: std.split((importstr "../secrets/plain/k0-benji-encryption-password"), '\n')[0],
pools: [
],
s3Configuration: {
awsAccessKeyId: "RPYZIROFXNLQVU2WJ4R3",
awsSecretAccessKey: std.split((importstr "../secrets/plain/k0-benji-secret-access-key"), '\n')[0],
bucketName: "benji-k0-backups-waw3",
endpointUrl: "https://s3.eu-central-1.wasabisys.com/",
},
}
},
},
waw3Pools: {
// redundant block storage
blockRedundant: rook.ECBlockPool(k0.ceph.waw3, "waw-hdd-redundant-3") {
metadataReplicas: 2,
spec: {
failureDomain: "host",
replicated: {
size: 2,
},
},
},
// yolo block storage (low usage, no host redundancy)
blockYolo: rook.ReplicatedBlockPool(k0.ceph.waw3, "waw-hdd-yolo-3") {
spec: {
failureDomain: "osd",
erasureCoded: {
dataChunks: 12,
codingChunks: 4,
},
},
},
objectRedundant: rook.S3ObjectStore(k0.ceph.waw3, "waw-hdd-redundant-3-object") {
spec: {
metadataPool: {
failureDomain: "host",
replicated: { size: 2 },
},
dataPool: {
failureDomain: "host",
replicated: { size: 2 },
},
},
},
},
},
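
(Sketch of how the new waw3 pools get consumed, assuming the rook library exposes each block pool as a StorageClass of the same name, as the storageClassNameParanoid wiring above suggests; the claim name, namespace and size are made up for the example.)

local exampleClaim = {
  apiVersion: "v1",
  kind: "PersistentVolumeClaim",
  metadata: { name: "example-data", namespace: "ceph-waw3" },  // hypothetical
  spec: {
    storageClassName: "waw-hdd-redundant-3",
    accessModes: ["ReadWriteOnce"],
    resources: { requests: { storage: "16Gi" } },
  },
};
exampleClaim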


@@ -25,9 +25,10 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
cfg:: {
namespace: "metallb-system",
namespaceCreate: true,
version:: "v0.7.3",
version:: "v0.8.3",
imageController: "metallb/controller:" + cfg.version,
imageSpeaker: "metallb/speaker:" + cfg.version,
//imageSpeaker: "metallb/speaker:" + cfg.version,
imageSpeaker: "derq3k/metallb-speaker:20191101-180123",
addressPools: error "addressPools must be set in config",
},
@@ -76,6 +77,11 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
resources: ["services", "endpoints", "nodes"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["create", "patch"],
},
],
},
@@ -165,6 +171,7 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
args: [ "--port=7472", "--config=config" ],
env_: {
METALLB_NODE_NAME: kube.FieldRef("spec.nodeName"),
METALLB_HOST: kube.FieldRef("status.hostIP"),
},
ports: [
{ name: "monitoring", containerPort: 7472 },
@@ -174,7 +181,7 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
},
securityContext: {
allowPrivilegeEscalation: false,
capabilities: { drop: [ "all" ], add: [ "net_raw" ] },
capabilities: { drop: [ "all" ], add: [ "NET_ADMIN", "NET_RAW", "SYS_ADMIN" ] },
readOnlyRootFilesystem: true,
},
},
@@ -192,6 +199,7 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
data: {
config: std.manifestYamlDoc({
"address-pools": cfg.addressPools,
"peers": cfg.peers,
}),
},
},
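
(For reference, a minimal sketch of what the ConfigMap body evaluates to, using one pool and the peer from the cluster config above; the exact quoting in manifestYamlDoc output may differ.)

std.manifestYamlDoc({
  "address-pools": [
    { name: "public-v4-1", protocol: "bgp", addresses: ["185.236.240.48/28"] },
  ],
  "peers": [
    { "peer-address": "185.236.240.33", "peer-asn": 65001, "my-asn": 65002 },
  ],
})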


@@ -381,7 +381,7 @@ local policies = import "../../../kube/policies.libsonnet";
image: "ceph/ceph:v14.2.2-20190830",
allowUnsupported: true,
},
dataDirHostPath: "/var/lib/rook",
dataDirHostPath: if name == "ceph-waw2" then "/var/lib/rook" else "/var/lib/rook-%s" % [name],
dashboard: {
ssl: false,
enabled: true,
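
(The conditional keeps the pre-existing waw2 path stable while every newer cluster gets its own host directory; a tiny sketch of how it evaluates.)

local dataDirHostPath(name) =
  if name == "ceph-waw2" then "/var/lib/rook" else "/var/lib/rook-%s" % [name];
[
  dataDirHostPath("ceph-waw2"),  // "/var/lib/rook"
  dataDirHostPath("ceph-waw3"),  // "/var/lib/rook-ceph-waw3"
]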
@@ -709,6 +709,7 @@ local policies = import "../../../kube/policies.libsonnet";
ECBlockPool(cluster, name):: {
local pool = self,
name:: name,
metadataReplicas:: 3,
spec:: error "spec must be specified",
@@ -721,7 +722,7 @@ local policies = import "../../../kube/policies.libsonnet";
spec: {
failureDomain: "host",
replicated: {
size: 3,
size: pool.metadataReplicas,
},
},
},
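
(The new metadataReplicas knob keeps the old default of 3; callers that want a lighter metadata pool override it, as ceph-waw3 does above. A minimal hypothetical use, where rook is the library modified here and someCluster stands in for any rook.Cluster object.)

rook.ECBlockPool(someCluster, "example-pool") {
  metadataReplicas: 2,  // metadata pool becomes 2-way replicated
  spec: {
    failureDomain: "host",
    erasureCoded: { dataChunks: 2, codingChunks: 1 },
  },
}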


@@ -14,6 +14,9 @@ local kube = import "kube.libsonnet";
allowedCapabilities: ['*'],
volumes: ['*'],
hostNetwork: true,
hostPorts: [
{ max: 40000, min: 1 },
],
hostIPC: true,
hostPID: true,
runAsUser: {