calico: upgrade to 3.14, fix calicoctl

We still use etcd as the data store (and as such didn't set up k8s CRDs
for Calico), but that's okay for now.

Change-Id: If6d66f505c6b40f2646ffae7d33d0d641d34a963
Branch: master
Author: q3k, 2020-05-28 16:38:52 +02:00
Parent: d13df642c5
Commit: d81bf72d7f
2 changed files with 59 additions and 17 deletions
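Since the datastore stays on etcd, both calico-node and calicoctl keep talking to the etcd cluster directly rather than going through Kubernetes CRDs. A minimal post-upgrade sanity check, assuming kubectl access and that the DaemonSet rendered from the jsonnet below is named calico-node (as in the upstream manifests):

# Wait for the upgraded calico-node pods to finish rolling out.
kubectl -n kube-system rollout status daemonset/calico-node

# Confirm the pods are running the new v3.14.0 images.
kubectl -n kube-system describe daemonset calico-node | grep Image: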


@@ -23,10 +23,10 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
local cfg = env.cfg,
cfg:: {
namespace: "kube-system",
version: "v3.4.0",
imageController: "quay.io/calico/kube-controllers:" + cfg.version,
imageCNI: "quay.io/calico/cni:" + cfg.version,
imageNode: "quay.io/calico/node:" + cfg.version,
version: "v3.14.0",
imageController: "calico/kube-controllers:" + cfg.version,
imageCNI: "calico/cni:" + cfg.version,
imageNode: "calico/node:" + cfg.version,
// TODO(q3k): Separate etcd for calico
etcd: {
endpoints: ["https://bc01n%02d.hswaw.net:2379" % n for n in std.range(1, 3)],
@@ -54,10 +54,15 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
calico_backend: "bird",
veth_mtu: "1440",
typha_service_name: "none",
# Existing nodes are already named without an FQDN (just the local part, before .hswaw.net),
# while future ones will hopefully use the full FQDN instead.
# At some point, we might want to port the existing calico nodes to their full FQDN as well.
cni_network_config: |||
{
"name": "k8s-pod-network",
"cniVersion": "0.3.0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
@@ -66,6 +71,8 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
"etcd_key_file": "__ETCD_KEY_FILE__",
"etcd_cert_file": "__ETCD_CERT_FILE__",
"etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__",
"datastore_type": "etcdv3",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
@@ -81,6 +88,10 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
},
{
"type": "bandwidth",
"capabilities": {"bandwidth": true}
}
]
}
@@ -116,13 +127,38 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
{
apiGroups: [""],
resources: ["endpoints", "services"],
verbs: ["watch", "list"],
verbs: ["watch", "list", "get"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get"],
},
{
apiGroups: [""],
resources: ["nodes/status"],
verbs: ["patch", "update"],
},
{
apiGroups: ["networking.k8s.io"],
resources: ["networkpolicies"],
verbs: ["watch", "list"],
},
{
apiGroups: [""],
resources: ["pods", "namespaces", "serviceaccounts"],
verbs: ["watch", "list"],
},
{
apiGroups: [""],
resources: ["pods/status"],
verbs: ["patch"],
},
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get", "list", "watch"],
},
],
},
@@ -138,8 +174,13 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
rules: [
{
apiGroups: [""],
resources: ["pods", "nodes", "namespaces", "serviceaccounts"],
verbs: ["watch", "list"],
resources: ["nodes"],
verbs: ["watch", "list", "get"],
},
{
apiGroups: [""],
resources: ["pods"],
verbs: ["get"],
},
{
apiGroups: ["networking.k8s.io"],
@@ -241,6 +282,7 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
CNI_MTU: kube.ConfigMapRef(env.cm, "veth_mtu"),
CNI_NET_DIR: "/opt/cni/conf",
SLEEP: "false",
KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } },
},
volumeMounts_: {
cni_bin: { mountPath: "/host/opt/cni/bin" },
@@ -253,12 +295,13 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
calicoNode: kube.Container("calico-node") {
image: cfg.imageNode,
env_: {
DATASTORE_TYPE: "etcdv3",
ETCD_ENDPOINTS: kube.ConfigMapRef(env.cm, "etcd_endpoints"),
ETCD_CA_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_ca"),
ETCD_KEY_FILE: kube.ConfigMapRef(env.cm, "etcd_key"),
ETCD_CERT_FILE: kube.ConfigMapRef(env.cm, "etcd_cert"),
CALICO_K8S_NODE_REF: kube.FieldRef("spec.nodeName"),
CALICO_NETWORK_BACKEND: kube.ConfigMapRef(env.cm, "calico_backend"),
CALICO_NETWORKING_BACKEND: kube.ConfigMapRef(env.cm, "calico_backend"),
CLUSTER_TYPE: "k8s,bgp",
IP: "autodetect",
IP_AUTODETECTION_METHOD: "can-reach=185.236.240.1",
@@ -272,6 +315,7 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
FELIX_HEALTHENABLED: "true",
FELIX_HEALTHHOST: "127.0.0.1",
CALICO_ADVERTISE_CLUSTER_IPS: "10.10.12.0/24",
KUBERNETES_NODE_NAME: { fieldRef: { fieldPath: "spec.nodeName" } },
},
securityContext: {
privileged: true,
@@ -280,10 +324,8 @@ local bindServiceAccountClusterRole(sa, cr) = kube.ClusterRoleBinding(cr.metadat
requests: { cpu: "250m" },
},
livenessProbe: {
httpGet: {
path: "/liveness",
port: 9099,
host: "127.0.0.1",
exec: {
command: ["/bin/calico-node", "-bird-live", "-felix-live"],
},
periodSeconds: 10,
initialDelaySeconds: 10,
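The liveness probe also changes: instead of an HTTP GET against felix's health port, it now execs the health subcommands built into the calico-node binary. The same check can be run by hand against a live pod, e.g.:

# Run the same liveness check the kubelet performs (pod name is illustrative).
kubectl -n kube-system exec calico-node-xxxxx -- /bin/calico-node -bird-live -felix-live

The second diff below fixes the calicoctl wrapper script, pointing it at the renamed etcd-calico key and certificate files.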


@@ -8,12 +8,12 @@ if [ -z "$hscloud_root" ]; then
fi
ETCD_ENDPOINTS="https://bc01n01.hswaw.net:2379,https://bc01n02.hswaw.net:2379,https://bc01n03.hswaw.net:2379"
ETCD_KEY_FILE="$hscloud_root/cluster/secrets/plain/kube-calico.key"
ETCD_CERT_FILE="$hscloud_root/cluster/certs/kube-calico.crt"
ETCD_CA_CERT_FILE="$hscloud_root/cluster/certs/ca.crt"
ETCD_KEY_FILE="$hscloud_root/cluster/secrets/plain/etcd-calico.key"
ETCD_CERT_FILE="$hscloud_root/cluster/certs/etcd-calico.cert"
ETCD_CA_CERT_FILE="$hscloud_root/cluster/certs/ca-etcd.crt"
if [ ! -f "$ETCD_KEY_FILE" ] ; then
secretstore decrypt "$hscloud_root/cluster/secrets/cipher/kube-calico.key" > "$ETCD_KEY_FILE"
secretstore decrypt "$hscloud_root/cluster/secrets/cipher/etcd-calico.key" > "$ETCD_KEY_FILE"
fi
export ETCD_ENDPOINTS
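With ETCD_ENDPOINTS and the corrected ETCD_*_FILE paths exported (the rest of the wrapper is not shown in this diff, but presumably exports the remaining variables the same way), calicoctl talks to etcd directly; etcdv3 is its default datastore when DATASTORE_TYPE is unset. A quick smoke test:

# List Calico node objects and IP pools straight from etcd.
calicoctl get nodes
calicoctl get ippool -o wide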