# Top level cluster configuration.
local kube = import "../../kube/kube.libsonnet";
|
2019-01-13 23:02:59 +00:00
|
|
|
local coredns = import "lib/coredns.libsonnet";
|
2019-01-17 17:57:19 +00:00
|
|
|
local metrics = import "lib/metrics.libsonnet";
|
|
|
|
local calico = import "lib/calico.libsonnet";
|
2019-01-18 08:40:59 +00:00
|
|
|
local metallb = import "lib/metallb.libsonnet";
|
2019-04-01 15:56:28 +00:00
|
|
|
local nginx = import "lib/nginx.libsonnet";
|
2019-04-01 16:40:50 +00:00
|
|
|
local rook = import "lib/rook.libsonnet";
|
2019-04-02 11:20:15 +00:00
|
|
|
local certmanager = import "lib/cert-manager.libsonnet";
|
2019-01-13 21:06:33 +00:00
|
|
|
|
|
|
|
// Cluster emits all Kubernetes objects needed to stand up a single
// production cluster, identified by its FQDN (used both for naming and
// as the CN of the API server's client certificate).
local Cluster(fqdn) = {
    local cluster = self,

    // These are required to let the API Server contact kubelets.
    crAPIServerToKubelet: kube.ClusterRole("system:kube-apiserver-to-kubelet") {
        metadata+: {
            annotations+: {
                "rbac.authorization.kubernetes.io/autoupdate": "true",
            },
            labels+: {
                // BUGFIX: was "kubernets.io/bootstrapping" — a typo of the
                // standard Kubernetes RBAC bootstrapping label (compare the
                // correctly-spelled annotation key above).
                "kubernetes.io/bootstrapping": "rbac-defaults",
            },
        },
        rules: [
            {
                apiGroups: [""],
                // Kubelet API subresources the API server needs (exec/logs/
                // port-forward all proxy through these).
                resources: ["nodes/%s" % r for r in [ "proxy", "stats", "log", "spec", "metrics" ]],
                verbs: ["*"],
            },
        ],
    },

    // Bind the above role to the API server's identity.
    crbAPIServer: kube.ClusterRoleBinding("system:kube-apiserver") {
        roleRef: {
            apiGroup: "rbac.authorization.k8s.io",
            kind: "ClusterRole",
            name: cluster.crAPIServerToKubelet.metadata.name,
        },
        subjects: [
            {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "User",
                // A cluster API Server authenticates with a certificate whose CN is == to the FQDN of the cluster.
                name: fqdn,
            },
        ],
    },

    // Calico network fabric
    calico: calico.Environment {},

    // CoreDNS for this cluster.
    dns: coredns.Environment {},

    // Metrics Server
    metrics: metrics.Environment {},

    // Metal Load Balancer
    metallb: metallb.Environment {
        cfg+: {
            addressPools: [
                { name: "public-v4-1", protocol: "layer2", addresses: ["185.236.240.50-185.236.240.63"] },
            ],
        },
    },

    // Main nginx Ingress Controller
    nginx: nginx.Environment {},

    // cert-manager operator plus a production Let's Encrypt issuer.
    certmanager: certmanager.Environment {},
    issuer: certmanager.ClusterIssuer("letsencrypt-prod") {
        spec: {
            acme: {
                server: "https://acme-v02.api.letsencrypt.org/directory",
                email: "bofh@hackerspace.pl",
                privateKeySecretRef: {
                    name: "letsencrypt-prod"
                },
                http01: {},
            },
        },
    },

    // Rook Ceph storage
    rook: rook.Operator {},

    // waw1 ceph cluster
    cephWaw1: rook.Cluster(cluster.rook, "ceph-waw1") {
        spec: {
            mon: {
                count: 3,
                allowMultiplePerNode: false,
            },
            storage: {
                // Explicit node/device list — do not auto-consume disks.
                useAllNodes: false,
                useAllDevices: false,
                config: {
                    databaseSizeMB: "1024",
                    journalSizeMB: "1024",
                },
                nodes: [
                    {
                        name: "bc01n01.hswaw.net",
                        location: "rack=dcr01 chassis=bc01 host=bc01n01",
                        devices: [ { name: "sda" } ],
                    },
                    {
                        name: "bc01n02.hswaw.net",
                        location: "rack=dcr01 chassis=bc01 host=bc01n02",
                        devices: [ { name: "sda" } ],
                    },
                    {
                        name: "bc01n03.hswaw.net",
                        location: "rack=dcr01 chassis=bc01 host=bc01n03",
                        devices: [ { name: "sda" } ],
                    },
                ],
            },
        },
    },

    // redundant block storage
    cephWaw1Redundant: rook.ECBlockPool(cluster.cephWaw1, "waw-hdd-redundant-1") {
        spec: {
            failureDomain: "host",
            // 2+1 erasure coding: tolerates loss of one host.
            erasureCoded: {
                dataChunks: 2,
                codingChunks: 1,
            },
        },
    },

    // yolo block storage (no replicas!)
    cephWaw1Yolo: rook.ReplicatedBlockPool(cluster.cephWaw1, "waw-hdd-yolo-1") {
        spec: {
            failureDomain: "host",
            replicated: {
                size: 1,
            },
        },
    },

    // S3-compatible object store backed by the redundant pool layout.
    cephWaw1Object: rook.S3ObjectStore(cluster.cephWaw1, "waw-hdd-redundant-1-object") {
        spec: {
            metadataPool: {
                failureDomain: "host",
                replicated: { size: 3 },
            },
            dataPool: {
                failureDomain: "host",
                erasureCoded: {
                    dataChunks: 2,
                    codingChunks: 1,
                },
            },
        },
    },
};
|
|
|
|
|
2019-01-13 23:02:59 +00:00
|
|
|
|
2019-01-13 21:06:33 +00:00
|
|
|
// Output: one entry per production cluster.
{
    // k0 — the primary hswaw production cluster.
    k0: Cluster("k0.hswaw.net"),
}
|