calico 3.14 -> 3.15

Change-Id: I9eceaf26017e483235b97c8d08717d2750fabe25
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/995
Reviewed-by: q3k <q3k@hackerspace.pl>
changes/95/995/7
implr 2021-06-18 13:12:41 +02:00 committed by implr
parent e999b4f726
commit 12f176c1eb
4 changed files with 47 additions and 40 deletions

View File

@ -327,3 +327,12 @@ filegroup(
strip_prefix = "noVNC-1.3.0-beta", strip_prefix = "noVNC-1.3.0-beta",
urls = ["https://github.com/novnc/noVNC/archive/refs/tags/v1.3.0-beta.tar.gz"], urls = ["https://github.com/novnc/noVNC/archive/refs/tags/v1.3.0-beta.tar.gz"],
) )
# temporarily use upstream binary, see //cluster/tools/BUILD
http_file(
name = "calicoctl_3_15",
downloaded_file_path = "calicoctl",
urls = ["https://github.com/projectcalico/calicoctl/releases/download/v3.15.5/calicoctl"],
sha256 = "f49e9e8d25108f7f22d5a51c756b2fe40cbe36347ad297e31a767376172f2845",
executable = True,
)

View File

@ -23,11 +23,11 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
local cfg = env.cfg, local cfg = env.cfg,
cfg:: { cfg:: {
namespace: "kube-system", namespace: "kube-system",
version: "v3.14.0", version: "v3.15.5",
imageController: "calico/kube-controllers:" + cfg.version, imageController: "calico/kube-controllers:" + cfg.version,
imageCNI: "calico/cni:" + cfg.version, imageCNI: "calico/cni:" + cfg.version,
imageNode: "calico/node:" + cfg.version, imageNode: "calico/node:" + cfg.version,
// TODO(q3k): Separate etcd for calico // TODO(implr): migrate calico from etcd to apiserver
etcd: { etcd: {
endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)], endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)],
ca: importstr "../../certs/ca-etcd.crt", ca: importstr "../../certs/ca-etcd.crt",
@ -135,16 +135,6 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
resources: ["nodes/status"], resources: ["nodes/status"],
verbs: ["patch", "update"], verbs: ["patch", "update"],
}, },
{
apiGroups: ["networking.k8s.io"],
resources: ["networkpolicies"],
verbs: ["watch", "list"],
},
{
apiGroups: [""],
resources: ["pods", "namespaces", "serviceaccounts"],
verbs: ["watch", "list"],
},
{ {
apiGroups: [""], apiGroups: [""],
resources: ["pods/status"], resources: ["pods/status"],
@ -186,8 +176,8 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
controller: kube.Deployment("calico-kube-controllers") { controller: kube.Deployment("calico-kube-controllers") {
metadata+: { metadata+: {
namespace: cfg.namespace, namespace: cfg.namespace,
annotations+: { labels+: {
"scheduler.alpha.kubernetes.io/critical-pod": "", "k8s-app": "calico-kube-controllers",
}, },
}, },
spec+: { spec+: {
@ -195,14 +185,16 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
strategy: { type: "Recreate" }, strategy: { type: "Recreate" },
template+: { template+: {
spec+: { spec+: {
hostNetwork: true, nodeSelector: {
"kubernetes.io/os": "linux"
},
tolerations: [ tolerations: [
{ key: "CriticalAddonsOnly", operator: "Exists" }, { key: "CriticalAddonsOnly", operator: "Exists" },
{ key: "node-role.kubernetes.io/master", effect: "NoSchedule" },
], ],
serviceAccountName: env.saController.metadata.name, serviceAccountName: env.saController.metadata.name,
volumes_: { priorityClassName: "system-cluster-critical",
secrets: kube.SecretVolume(env.secrets), hostNetwork: true,
},
containers_: { containers_: {
"calico-kube-controllers": kube.Container("calico-kube-controllers") { "calico-kube-controllers": kube.Container("calico-kube-controllers") {
image: cfg.imageController, image: cfg.imageController,
@ -225,6 +217,9 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
}, },
}, },
}, },
volumes_: {
secrets: kube.SecretVolume(env.secrets),
},
}, },
}, },
}, },
@ -244,46 +239,49 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
nodeDaemon: kube.DaemonSet("calico-node") { nodeDaemon: kube.DaemonSet("calico-node") {
metadata+: { metadata+: {
namespace: cfg.namespace, namespace: cfg.namespace,
labels+: {
"k8s-app": "calico-node",
},
}, },
spec+: { spec+: {
template+: { template+: {
metadata+: {
annotations+: {
"scheduler.alpha.kubernetes.io/critical-pod": "",
},
},
spec+: { spec+: {
nodeSelector: {
"kubernetes.io/os": "linux"
},
hostNetwork: true, hostNetwork: true,
tolerations: [ tolerations: [
{ effect: "NoSchedule", operator: "Exists" },
{ key: "CriticalAddonsOnly", operator: "Exists" }, { key: "CriticalAddonsOnly", operator: "Exists" },
{ effect: "NoExecute", operator: "Exists" }, { effect: "NoExecute", operator: "Exists" },
{ effect: "NoSchedule", operator: "Exists" },
], ],
serviceAccountName: env.saNode.metadata.name, serviceAccountName: env.saNode.metadata.name,
terminationGracePeriodSeconds: 0, terminationGracePeriodSeconds: 0,
priorityClassName: "system-cluster-critical",
volumes_: { volumes_: {
lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
var_run_calico: kube.HostPathVolume("/var/run/calico"),
var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
cni_bin: kube.HostPathVolume("/opt/cni/bin"), cni_bin: kube.HostPathVolume("/opt/cni/bin"),
cni_config: kube.HostPathVolume("/opt/cni/conf"), cni_config: kube.HostPathVolume("/opt/cni/conf"),
secrets: kube.SecretVolume(env.secrets), secrets: kube.SecretVolume(env.secrets),
lib_modules: kube.HostPathVolume("/run/current-system/kernel-modules/lib/modules"),
xtables_lock: kube.HostPathVolume("/run/xtables.lock"),
var_run_calico: kube.HostPathVolume("/var/run/calico"),
var_lib_calico: kube.HostPathVolume("/var/lib/calico"),
bird_cfg_template: kube.ConfigMapVolume(env.calicoMetallbBird), bird_cfg_template: kube.ConfigMapVolume(env.calicoMetallbBird),
# TODO flexvol-driver-host, policysync
}, },
initContainers_: { initContainers_: {
installCNI: kube.Container("install-cni") { installCNI: kube.Container("install-cni") {
image: cfg.imageCNI, image: cfg.imageCNI,
command: ["/install-cni.sh"], command: ["/install-cni.sh"],
env_: { env_: {
ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
CNI_CONF_NAME: "10-calico.conflist", CNI_CONF_NAME: "10-calico.conflist",
CNI_NETWORK_CONFIG: kube.ConfigMapRef(env.cm, "cni_network_config"), CNI_NETWORK_CONFIG: kube.ConfigMapRef(env.cm, "cni_network_config"),
ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
# TODO(implr) needed?
CNI_CONF_ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"), CNI_CONF_ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
CNI_CONF_ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"), CNI_CONF_ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
CNI_CONF_ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"), CNI_CONF_ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
CNI_NET_DIR: "/opt/cni/conf",
SLEEP: "false", SLEEP: "false",
KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } }, KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } },
}, },
@ -292,6 +290,9 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
cni_config: { mountPath: "/host/etc/cni/net.d" }, cni_config: { mountPath: "/host/etc/cni/net.d" },
secrets: { mountPath: env.cm.secretPrefix }, secrets: { mountPath: env.cm.secretPrefix },
}, },
securityContext: {
privileged: true,
},
}, },
}, },
containers_: { containers_: {
@ -310,11 +311,12 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
IP_AUTODETECTION_METHOD: "can-reach=185.236.240.1", IP_AUTODETECTION_METHOD: "can-reach=185.236.240.1",
CALICO_IPV4POOL_IPIP: "Always", CALICO_IPV4POOL_IPIP: "Always",
FELIX_IPINIPMTU: kube.ConfigMapRef(env.cm, "veth_mtu"), FELIX_IPINIPMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
FELIX_WIREGUARDMTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
CALICO_IPV4POOL_CIDR: "10.10.24.0/21", CALICO_IPV4POOL_CIDR: "10.10.24.0/21",
CALICO_DISABLE_FILE_LOGGING: "true", CALICO_DISABLE_FILE_LOGGING: "true",
FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT", FELIX_DEFAULTENDPOINTTOHOSTACTION: "ACCEPT",
FELIX_IPV6SUPPORT: "false",
FELIX_LOGSEVERITYSCREEN: "info", FELIX_LOGSEVERITYSCREEN: "info",
FELIX_IPV6SUPPORT: "false",
FELIX_HEALTHENABLED: "true", FELIX_HEALTHENABLED: "true",
FELIX_HEALTHHOST: "127.0.0.1", FELIX_HEALTHHOST: "127.0.0.1",
CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24", CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24",

View File

@ -13,17 +13,13 @@ copy_go_binary(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )
copy_go_binary(
name = "calicoctl.bin",
src = "@com_github_projectcalico_calicoctl//calicoctl:calicoctl",
visibility = ["//visibility:public"],
)
sh_binary( sh_binary(
name = "calicoctl", name = "calicoctl",
srcs = ["calicoctl.sh"], srcs = ["calicoctl.sh"],
data = [ data = [
":calicoctl.bin", # it's not worth the effort to bazelify all intermediary versions of calicoctl
# just to use them once, so until we finish upgrading use upstream binaries
"@calicoctl_3_15//file",
"//tools:secretstore", "//tools:secretstore",
"//tools/hscloud", "//tools/hscloud",
], ],

View File

@ -16,7 +16,7 @@ function main() {
$(hscloud::must_rlocation hscloud/tools/secretstore) decrypt "$ws/cluster/secrets/cipher/etcd-calico.key" "$ETCD_KEY_FILE" $(hscloud::must_rlocation hscloud/tools/secretstore) decrypt "$ws/cluster/secrets/cipher/etcd-calico.key" "$ETCD_KEY_FILE"
fi fi
"$(hscloud::must_rlocation hscloud/cluster/tools/calicoctl.bin)" "$@" "$(hscloud::must_rlocation calicoctl_3_15/file/calicoctl)" "$@"
} }
main "$@" main "$@"