hscloud/cluster/kube/cluster.libsonnet
Serge Bazanski 4f0468fa26 cluster/kube: remove ceph diff against k0 production
This now has a zero diff against prod.

location fields in CephCluster.storage.nodes seem to have been removed
from the CRD at some point. Not sure how the CRUSH tree now gets
populated, but whatever, it's been working like this for a while
already. Same for CephObjectStore.gateway.type.

The Rook Operator has been zero-scaled for a while now due to b/6.

Change-Id: I30a836f273f4c1529f60fa9297c96b7aac412f59
2021-09-11 12:43:53 +00:00

# Common cluster configuration.
# This defines what Kubernetes resources are required to turn a bare k8s
# deployment into a fully working cluster.
# These assume that you're running on bare metal, and using the corresponding
# NixOS deployment that we use.
local kube = import "../../kube/kube.libsonnet";
local policies = import "../../kube/policies.libsonnet";
local calico = import "lib/calico.libsonnet";
local certmanager = import "lib/cert-manager.libsonnet";
local coredns = import "lib/coredns.libsonnet";
local identd = import "lib/identd.libsonnet";
local metallb = import "lib/metallb.libsonnet";
local metrics = import "lib/metrics.libsonnet";
local nginx = import "lib/nginx.libsonnet";
local prodvider = import "lib/prodvider.libsonnet";
local rook = import "lib/rook.libsonnet";
local pki = import "lib/pki.libsonnet";
{
    Cluster(short, realm):: {
        local cluster = self,
        local cfg = cluster.cfg,

        short:: short,
        realm:: realm,
        fqdn:: "%s.%s" % [cluster.short, cluster.realm],

        cfg:: {
            // Storage class used for internal services (like registry). This
            // must be set to a valid storage class: either a cloud provider
            // class (when running on GKE &co) or a storage class created
            // using rook.
            storageClassNameRedundant: error "storageClassNameRedundant must be set",
        },
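
        // Example (hypothetical short name, realm and storage class name): a
        // concrete cluster definition would instantiate this library as:
        //
        //   local cluster = import "cluster.libsonnet";
        //   cluster.Cluster("k0", "hswaw.net") {
        //       cfg+: {
        //           storageClassNameRedundant: "waw-hdd-redundant-3",
        //       },
        //   }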

        // These are required to let the API Server contact kubelets.
        crAPIServerToKubelet: kube.ClusterRole("system:kube-apiserver-to-kubelet") {
            metadata+: {
                annotations+: {
                    "rbac.authorization.kubernetes.io/autoupdate": "true",
                },
                labels+: {
                    "kubernetes.io/bootstrapping": "rbac-defaults",
                },
            },
            rules: [
                {
                    apiGroups: [""],
                    resources: ["nodes/%s" % r for r in ["proxy", "stats", "log", "spec", "metrics"]],
                    verbs: ["*"],
                },
            ],
        },
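        // In practice, these node subresources are what `kubectl logs` and
        // `kubectl exec` depend on: both are proxied by the API Server to the
        // kubelet via nodes/proxy.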
        crbAPIServer: kube.ClusterRoleBinding("system:kube-apiserver") {
            roleRef: {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "ClusterRole",
                name: cluster.crAPIServerToKubelet.metadata.name,
            },
            subjects: [
                {
                    apiGroup: "rbac.authorization.k8s.io",
                    kind: "User",
                    # A cluster's API Server authenticates with a certificate whose CN equals the cluster's FQDN.
                    name: cluster.fqdn,
                },
            ],
        },

        // This ClusterRole is bound to all humans that log in via prodaccess/prodvider/SSO.
        // It should allow viewing of non-sensitive data for debuggability and openness.
        crViewer: kube.ClusterRole("system:viewer") {
            rules: [
                {
                    apiGroups: [""],
                    resources: [
                        "nodes",
                        "namespaces",
                        "pods",
                        "configmaps",
                        "services",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["metrics.k8s.io"],
                    resources: [
                        "nodes",
                        "pods",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["apps"],
                    resources: [
                        "statefulsets",
                    ],
                    verbs: ["list"],
                },
                {
                    apiGroups: ["extensions"],
                    resources: [
                        "deployments",
                        "ingresses",
                    ],
                    verbs: ["list"],
                },
            ],
        },
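
        // This role is bound to humans outside of this object (per the comment
        // above, at prodaccess/prodvider/SSO login); a manual equivalent
        // (hypothetical, not part of this config) would look like:
        //
        //   crbViewers: kube.ClusterRoleBinding("system:viewers") {
        //       roleRef_: cluster.crViewer,
        //       subjects: [
        //           { apiGroup: "rbac.authorization.k8s.io", kind: "User", name: "someone@hackerspace.pl" },
        //       ],
        //   },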

        // This ClusterRole is applied (scoped to their personal namespace) to all humans.
        crFullInNamespace: kube.ClusterRole("system:admin-namespace") {
            rules: [
                {
                    apiGroups: ["", "extensions", "apps"],
                    resources: ["*"],
                    verbs: ["*"],
                },
                {
                    apiGroups: ["batch"],
                    resources: ["jobs", "cronjobs"],
                    verbs: ["*"],
                },
                {
                    apiGroups: ["networking.k8s.io"],
                    resources: ["ingresses"],
                    verbs: ["*"],
                },
            ],
        },
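        // The namespace scoping works because a ClusterRole referenced from a
        // namespaced RoleBinding only grants access within that RoleBinding's
        // namespace; the per-user RoleBinding itself is created outside of
        // this object (e.g. by prodvider at login).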

        // This ClusterRoleBinding gives cluster admins root access.
        crbAdmins: kube.ClusterRoleBinding("system:admins") {
            roleRef: {
                apiGroup: "rbac.authorization.k8s.io",
                kind: "ClusterRole",
                name: "cluster-admin",
            },
            subjects: [
                {
                    apiGroup: "rbac.authorization.k8s.io",
                    kind: "User",
                    name: user + "@hackerspace.pl",
                }
                for user in [
                    "q3k",
                    "implr",
                    "informatic",
                ]
            ],
        },

        podSecurityPolicies: policies.Cluster {},

        allowInsecureNamespaces: [
            policies.AllowNamespaceInsecure("kube-system"),
            policies.AllowNamespaceInsecure("metallb-system"),
        ],

        // Allow all service accounts (thus all controllers) to create secure pods.
        crbAllowServiceAccountsSecure: kube.ClusterRoleBinding("policy:allow-all-secure") {
            roleRef_: cluster.podSecurityPolicies.secureRole,
            subjects: [
                {
                    kind: "Group",
                    apiGroup: "rbac.authorization.k8s.io",
                    name: "system:serviceaccounts",
                },
            ],
        },
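        // The system:serviceaccounts group covers every ServiceAccount in
        // every namespace, so all controller-managed pods get the secure PSP.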

        // Calico network fabric
        calico: calico.Environment {},

        // CoreDNS for this cluster.
        dns: coredns.Environment {
            cfg+: {
                cluster_domains: [
                    "cluster.local",
                    cluster.fqdn,
                ],
            },
        },
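        // With both domains configured, a Service foo in namespace bar is
        // resolvable as foo.bar.svc.cluster.local and, assuming an fqdn of
        // e.g. k0.hswaw.net, as foo.bar.svc.k0.hswaw.net; the latter form is
        // what prodvider's apiEndpoint below relies on.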

        // Metrics Server
        metrics: metrics.Environment {},

        // Metal Load Balancer
        metallb: metallb.Environment {},

        // Main nginx Ingress Controller
        nginx: nginx.Environment {},

        // Cert-manager (Let's Encrypt, CA, ...)
        certmanager: certmanager.Environment {},

        issuer: kube.ClusterIssuer("letsencrypt-prod") {
            spec: {
                acme: {
                    server: "https://acme-v02.api.letsencrypt.org/directory",
                    email: "bofh@hackerspace.pl",
                    privateKeySecretRef: {
                        name: "letsencrypt-prod",
                    },
                    http01: {},
                },
            },
        },
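        // Workloads obtain certificates from this issuer via the cert-manager
        // cluster-issuer Ingress annotation; depending on the cert-manager
        // version deployed this is cert-manager.io/cluster-issuer or, in older
        // releases, certmanager.k8s.io/cluster-issuer, set to "letsencrypt-prod".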

        // Ident service
        identd: identd.Environment {},

        // Rook Ceph storage operator.
        rook: rook.Operator {
            operator+: {
                spec+: {
                    replicas: 0,
                },
            },
        },
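        // With the operator zero-scaled (b/6, see commit message), existing
        // Rook/Ceph workloads keep running but CRs are no longer reconciled;
        // restoring replicas: 1 (or dropping the override) would resume
        // reconciliation.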

        // TLS PKI machinery (compatibility with mirko)
        pki: pki.Environment(cluster.short, cluster.realm),

        // Prodvider
        prodvider: prodvider.Environment {
            cfg+: {
                apiEndpoint: "kubernetes.default.svc.%s" % [cluster.fqdn],
            },
        },
    },
}