From 12f176c1eb68dca457b653a669615e0499708a44 Mon Sep 17 00:00:00 2001
From: Bartosz Stebel
Date: Fri, 18 Jun 2021 13:12:41 +0200
Subject: [PATCH] calico 3.14 -> 3.15

Change-Id: I9eceaf26017e483235b97c8d08717d2750fabe25
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/995
Reviewed-by: q3k
---
 WORKSPACE                         |  9 +++++
 cluster/kube/lib/calico.libsonnet | 66 ++++++++++++++++---------------
 cluster/tools/BUILD               | 10 ++---
 cluster/tools/calicoctl.sh        |  2 +-
 4 files changed, 47 insertions(+), 40 deletions(-)

diff --git a/WORKSPACE b/WORKSPACE
index 19e93041..2586f19d 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -327,3 +327,12 @@ filegroup(
     strip_prefix = "noVNC-1.3.0-beta",
     urls = ["https://github.com/novnc/noVNC/archive/refs/tags/v1.3.0-beta.tar.gz"],
 )
+
+# temporarily use upstream binary, see //cluster/tools/BUILD
+http_file(
+    name = "calicoctl_3_15",
+    downloaded_file_path = "calicoctl",
+    urls = ["https://github.com/projectcalico/calicoctl/releases/download/v3.15.5/calicoctl"],
+    sha256 = "f49e9e8d25108f7f22d5a51c756b2fe40cbe36347ad297e31a767376172f2845",
+    executable = True,
+)
diff --git a/cluster/kube/lib/calico.libsonnet b/cluster/kube/lib/calico.libsonnet
index 1e2d5038..6a9b7999 100644
--- a/cluster/kube/lib/calico.libsonnet
+++ b/cluster/kube/lib/calico.libsonnet
@@ -23,11 +23,11 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
         local cfg = env.cfg,
         cfg:: {
             namespace: "kube-system",
-            version: "v3.14.0",
+            version: "v3.15.5",
             imageController: "calico/kube-controllers:" + cfg.version,
             imageCNI: "calico/cni:" + cfg.version,
             imageNode: "calico/node:" + cfg.version,
-            // TODO(q3k): Separate etcd for calico
+            // TODO(implr): migrate calico from etcd to apiserver
             etcd: {
                 endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)],
                 ca: importstr "../../certs/ca-etcd.crt",
@@ -135,16 +135,6 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
                     resources: ["nodes/status"],
                     verbs: ["patch", "update"],
                 },
-                {
-                    apiGroups: ["networking.k8s.io"],
-                    resources: ["networkpolicies"],
-                    verbs: ["watch", "list"],
-                },
-                {
-                    apiGroups: [""],
-                    resources: ["pods", "namespaces", "serviceaccounts"],
-                    verbs: ["watch", "list"],
-                },
                 {
                     apiGroups: [""],
                     resources: ["pods/status"],
@@ -186,8 +176,8 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
         controller: kube.Deployment("calico-kube-controllers") {
             metadata+: {
                 namespace: cfg.namespace,
-                annotations+: {
-                    "scheduler.alpha.kubernetes.io/critical-pod": "",
+                labels+: {
+                    "k8s-app": "calico-kube-controllers",
                 },
             },
             spec+: {
@@ -195,14 +185,16 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
                 strategy: { type: "Recreate" },
                 template+: {
                     spec+: {
-                        hostNetwork: true,
+                        nodeSelector: {
+                            "kubernetes.io/os": "linux"
+                        },
                         tolerations: [
                             { key: "CriticalAddonsOnly", operator: "Exists" },
+                            { key: "node-role.kubernetes.io/master", effect: "NoSchedule" },
                         ],
                         serviceAccountName: env.saController.metadata.name,
-                        volumes_: {
-                            secrets: kube.SecretVolume(env.secrets),
-                        },
+                        priorityClassName: "system-cluster-critical",
+                        hostNetwork: true,
                         containers_: {
                             "calico-kube-controllers": kube.Container("calico-kube-controllers") {
                                 image: cfg.imageController,
@@ -225,6 +217,9 @@
                                 },
                             },
                         },
+                        volumes_: {
+                            secrets: kube.SecretVolume(env.secrets),
+                        },
                     },
                 },
             },
@@ -244,46 +239,49 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
         nodeDaemon: kube.DaemonSet("calico-node") {
             metadata+: {
                 namespace: cfg.namespace,
+                labels+: {
+                    "k8s-app": "calico-node",
+                },
             },
             spec+: {
                 template+: {
-                    metadata+: {
-                        annotations+: {
-                            "scheduler.alpha.kubernetes.io/critical-pod": "",
-                        },
-                    },
                     spec+: {
+                        nodeSelector: {
+                            "kubernetes.io/os": "linux"
+                        },
                         hostNetwork: true,
                         tolerations: [
+                            { effect: "NoSchedule", operator: "Exists" },
                             { key: "CriticalAddonsOnly", operator: "Exists" },
                             { effect: "NoExecute", operator: "Exists" },
-                            { effect: "NoSchedule", operator: "Exists" },
                         ],
                         serviceAccountName: env.saNode.metadata.name,
                         terminationGracePeriodSeconds: 0,
+                        priorityClassName: "system-cluster-critical",
                         volumes_: {
+                            lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
+                            var_run_calico: kube.HostPathVolume("/var/run/calico"),
+                            var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
+                            xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
                             cni_bin: kube.HostPathVolume("/opt/cni/bin"),
                             cni_config: kube.HostPathVolume("/opt/cni/conf"),
                             secrets: kube.SecretVolume(env.secrets),
-                            lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
-                            xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
-                            var_run_calico: kube.HostPathVolume("/var/run/calico"),
-                            var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
                             bird_cfg_template: kube.ConfigMapVolume(env.calicoMetallbBird),
+                            # TODO flexvol-driver-host, policysync
                         },
                         initContainers_: {
                             installCNI: kube.Container("install-cni") {
                                 image: cfg.imageCNI,
                                 command: ["/install-cni.sh"],
                                 env_: {
-                                    ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
                                     CNI_CONF_NAME: "10-calico.conflist",
                                     CNI_NETWORK_CONFIG: kube.ConfigMapRef(env.cm, "cni_network_config"),
+                                    ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
+                                    CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+                                    # TODO(implr) needed?
                                     CNI_CONF_ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
                                     CNI_CONF_ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
                                     CNI_CONF_ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
-                                    CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
-                                    CNI_NET_DIR: "/opt/cni/conf",
                                     SLEEP: "false",
                                     KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } },
                                 },
@@ -292,6 +290,9 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
                                     cni_config: { mountPath: "/host/etc/cni/net.d" },
                                     secrets: { mountPath: env.cm.secretPrefix },
                                 },
+                                securityContext: {
+                                    privileged: true,
+                                },
                             },
                         },
                         containers_: {
@@ -310,11 +311,12 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
                                     IP_AUTODETECTION_METHOD: "can-reach=185.236.240.1",
                                     CALICO_IPV4POOL_IPIP: "Always",
                                     FELIX_IPINIPMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
+                                    FELIX_WIREGUARDMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
                                     CALICO_IPV4POOL_CIDR: "10.10.24.0/21",
                                     CALICO_DISABLE_FILE_LOGGING: "true",
                                     FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT",
-                                    FELIX_IPV6SUPPORT: "false",
                                     FELIX_LOGSEVERITYSCREEN: "info",
+                                    FELIX_IPV6SUPPORT: "false",
                                     FELIX_HEALTHENABLED: "true",
                                     FELIX_HEALTHHOST: "127.0.0.1",
                                     CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24",
diff --git a/cluster/tools/BUILD b/cluster/tools/BUILD
index a63245eb..e43a809a 100644
--- a/cluster/tools/BUILD
+++ b/cluster/tools/BUILD
@@ -13,17 +13,13 @@ copy_go_binary(
     visibility = ["//visibility:public"],
 )
 
-copy_go_binary(
-    name = "calicoctl.bin",
-    src = "@com_github_projectcalico_calicoctl//calicoctl:calicoctl",
-    visibility = ["//visibility:public"],
-)
-
 sh_binary(
     name = "calicoctl",
     srcs = ["calicoctl.sh"],
     data = [
-        ":calicoctl.bin",
+        # it's not worth the effort to bazelify all intermediary versions of calicoctl
+        # just to use them once, so until we finish upgrading use upstream binaries
+        "@calicoctl_3_15//file",
         "//tools:secretstore",
         "//tools/hscloud",
     ],
diff --git a/cluster/tools/calicoctl.sh b/cluster/tools/calicoctl.sh
index 86e1097e..5f1390e6 100755
--- a/cluster/tools/calicoctl.sh
+++ b/cluster/tools/calicoctl.sh
@@ -16,7 +16,7 @@ function main() {
         $(hscloud::must_rlocation hscloud/tools/secretstore) decrypt "$ws/cluster/secrets/cipher/etcd-calico.key" "$ETCD_KEY_FILE"
     fi
 
-    "$(hscloud::must_rlocation hscloud/cluster/tools/calicoctl.bin)" "$@"
+    "$(hscloud::must_rlocation calicoctl_3_15/file/calicoctl)" "$@"
 }
 
 main "$@"
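
A minimal usage sketch of the wrapper after this change, assuming it is invoked
through Bazel from a hscloud checkout ("version" and "get nodes" are ordinary
calicoctl subcommands, shown here only as examples):

    # Runs cluster/tools/calicoctl.sh, which decrypts the etcd key via
    # secretstore, resolves the prebuilt upstream binary from its runfiles,
    # and forwards all arguments to it.
    bazel run //cluster/tools:calicoctl -- version
    bazel run //cluster/tools:calicoctl -- get nodes

The runfiles path calicoctl_3_15/file/calicoctl used in calicoctl.sh follows
from the http_file rule in WORKSPACE: Bazel exposes the downloaded file as the
//file target of that external repository, named after downloaded_file_path.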