# forked from hswaw/hscloud
{ config, pkgs, lib, ... }:

with (import ./toplevel.nix);
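
# toplevel.nix is expected, judging by the identifiers used below, to provide
# at least `domain`, `k8sapi`, `acmeEmail`, the `ports.*` and `pki.*`
# attribute sets, and a `nodes` list whose entries look roughly like
# { fqdn = "..."; ipAddr = "..."; diskBoot = "/dev/..."; }.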

let
  fqdn = config.networking.hostName + domain;
  node = (builtins.head (builtins.filter (n: n.fqdn == fqdn) nodes));
  otherNodes = (builtins.filter (n: n.fqdn != fqdn) nodes);

  # Pin for k8s packages. This is so that upgrading the system will not upgrade the k8s control or data planes.
  k8spkgs = import (fetchGit {
    name = "nixos-unstable-2019-04-12";
    url = "https://github.com/nixos/nixpkgs/";
    rev = "1fc591f9a5bd1b016b5d66dfab29560073955a14";
  }) {};
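
  # Only the Kubernetes services below reference k8spkgs; everything else in
  # this configuration keeps following the regular system nixpkgs.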

in rec {
  imports =
    [ # Include the results of the hardware scan.
      ./hardware-configuration.nix
    ];

  # Use the GRUB 2 boot loader.
  boot.loader.grub.enable = true;
  boot.loader.grub.version = 2;
  boot.loader.grub.device = node.diskBoot;

  boot.kernelPackages = pkgs.linuxPackages_5_1;
  boot.kernelParams = [ "boot.shell_on_fail" ];

  time.timeZone = "Europe/Warsaw";

  # List packages installed in system profile. To search, run:
  # $ nix search wget
  environment.systemPackages = with pkgs; [
    wget vim htop tcpdump
    rxvt_unicode.terminfo
  ];

  # Some programs need SUID wrappers, can be configured further or are
  # started in user sessions.
  programs.mtr.enable = true;

  # List services that you want to enable:
  virtualisation.docker.enable = true;
  virtualisation.docker.extraOptions = "--iptables=false --ip-masq=false --ip-forward=true";
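
  # Docker's own iptables and IP masquerading are disabled above; pod
  # networking is presumably left to the externally provided CNI plugins
  # (see the kubelet CNI notes at the bottom of this file).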

  # Docker 1.13 sets iptables FORWARD to DROP. Unfuck this.
  systemd.services."docker-iptables-unfuck" = {
    enable = true;
    wantedBy = [ "kubernetes.target" ];
    description = "Docker iptable Unfuck";
    after = [ "docker.service" ];
    requires = [ "docker.service" ];
    path = [ pkgs.iptables ];
    script = ''
      iptables -P FORWARD ACCEPT
    '';
    serviceConfig.Type = "oneshot";
  };

  # Otherwise fetchGit nixpkgs pin fails.
  systemd.services.nixos-upgrade.path = [ pkgs.git ];

  # Enable the OpenSSH daemon.
  services.openssh.enable = true;
  users.users.root.openssh.authorizedKeys.keys = [
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDD4VJXAXEHEXZk2dxNwehneuJcEGkfXG/U7z4fO79vDVIENdedtXQUyLyhZJc5RTEfHhQj66FwIqzl7mzBHd9x9PuDp6QAYXrkVNMj48s6JXqZqBvF6H/weRqFMf4a2TZv+hG8D0kpvmLheCwWAVRls7Jofnp/My+yDd57GMdsbG/yFEf6WPMiOnA7hxdSJSVihCsCSw2p8PD4GhBe8CVt7xIuinhutjm9zYBjV78NT8acjDUfJh0B1ODTjs7nuW1CC4jybSe2j/OU3Yczj4AxRxBNWuFxUq+jBo9BfpbKLh+Tt7re+zBkaicM77KM/oV6943JJxgHNBBOsv9scZE7 q3k@amnesia"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQb3YQoiYFZLKwvHYKbu1bMqzNeDCAszQhAe1+QI5SLDOotclyY/vFmOReZOsmyMFl71G2d7d+FbYNusUnNNjTxRYQ021tVc+RkMdLJaORRURmQfEFEKbai6QSFTwErXzuoIzyEPK0lbsQuGgqT9WaVnRzHJ2Q/4+qQbxAS34PuR5NqEkmn4G6LMo3OyJ5mwPkCj9lsqz4BcxRaMWFO3mNcwGDfSW+sqgc3E8N6LKrTpZq3ke7xacpQmcG5DU9VO+2QVPdltl9jWbs3gXjmF92YRNOuKPVfAOZBBsp8JOznfx8s9wDgs7RwPmDpjIAJEyoABqW5hlXfqRbTnfnMvuR informatic@InformaticPC"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDGkMgEVwQM8yeuFUYL2TwlJIq9yUNBmHnwce46zeL2PK2CkMz7sxT/om7sp/K5XDiqeD05Nioe+Dr3drP6B8uI33S5NgxPIfaqQsRS+CBEgk6cqFlcdlKETU/DT+/WsdoO173n7mgGeafPInEuQuGDUID0Fl099kIxtqfAhdeZFMM6/szAZEZsElLJ8K6dp1Ni/jmnXCZhjivZH3AZUlnqrmtDG7FY1bgcOfDXAal45LItughGPtrdiigXe9DK2fW3+9DBZZduh5DMJTNlphAZ+nfSrbyHVKUg6WsgMSprur4KdU47q1QwzqqvEj75JcdP1jOWoZi4F6VJDte9Wb9lhD1jGgjxY9O6Gs4CH35bx15W7CN9hgNa0C8NbPJe/fZYIeMZmJ1m7O2xmnYwP8j+t7RNJWu7Pa3Em4mOEXvhBF07Zfq+Ye/4SluoRgADy5eII2x5fFo5EBhInxK0/X8wF6XZvysalVifoCh7T4Edejoi91oAxFgYAxbboXGlod0eEHIi2hla8SM9+IBHOChmgawKBYp2kzAJyAmHNBF+Pah9G4arVCj/axp/SJZDZbJQoI7UT/fJzEtvlb5RWrHXRq+y6IvjpUq4pzpDWW04+9UMqEEXRmhWOakHfEVM9rN8h3aJBflLUBBnh0Z/hVsKNh8bCRHaKtah8TrD9i+wMw== patryk.jakuszew@gmail.com"
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC33naG1ptCvUcRWX9cj9wXM1nW1lyQC4SvMJzWlr9aMD96O8hQ2JMkuIUgUJvorAY02QRplQ2BuoVoVkdkzwjMyi1bL3OdgcKo7Z1yByClGTTocqNJYY0lcUb6EJH8+6e6F9ydrQlSxNzL1uCaA7phZr+yPcmAmWbSfioXn98yXNkE0emHxzJv/nypJY56sDCMC2IXDRd8L2goDtPwgPEW7bWfAQdIFMJ75xOidZOTxJ8eqyXLw/kxY5UlyX66jdoYz1sE5XUHuoQl1AOG9UdlMo0aMhUvP4pX5l7r7EnA9OttKMFB3oWqkVK/R6ynZ52YNOU5BZ9V+Ppaj34W0xNu+p0mbHcCtXYCTrf/OU0hcZDbDaNTjs6Vtcm2wYw9iAKX7Tex+eOMwUwlrlcyPNRV5BTot7lGNYfauHCSIuWJKN4NhCLR/NtVNh4/94eKkPTwJsY6XqDcS7q49wPAs4DAH7BJgsbHPOqygVHrY0YYEfz3Pj0HTxJHQMCP/hQX4fXEGt0BjgoVJbXPAQtPyeg0JuxiUg+b4CgVVfQ6R060MlM1BZzhmh+FY5MJH6nJppS0aHYCvSg8Z68NUlCPKy0jpcyfuAIWQWwSGG1O010WShQG2ELsvNdg5/4HVdCGNl5mmoom6JOd72FOZyQlHDFfeQUQRn9HOeCq/c51rK99SQ== bartek@IHM"
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAICTR292kx/2CNuWYIsZ6gykQ036aBGrmheIuZa6S1D2x implr@thonk"
  ];

  networking.firewall.enable = false;
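
  # The host firewall is disabled entirely; iptables on these machines is
  # presumably managed by kube-proxy (iptables mode) and the CNI plugins.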

  # Point k8s apiserver address at ourselves, as every node runs an apiserver with this cert name.
  networking.extraHosts = ''
    127.0.0.1 ${k8sapi}
  '';

  security.acme.certs = {
    host = {
      email = acmeEmail;
      domain = fqdn;
      webroot = services.nginx.virtualHosts.host.root;
    };
  };

  services.nginx = {
    enable = true;
    virtualHosts.host = {
      serverName = fqdn;
      root = "/var/www/${fqdn}";
    };
  };

  services.etcd = {
    enable = true;
    name = fqdn;
    listenClientUrls = ["https://0.0.0.0:2379"];
    advertiseClientUrls = ["https://${fqdn}:2379"];
    listenPeerUrls = ["https://0.0.0.0:2380"];
    initialAdvertisePeerUrls = ["https://${fqdn}:2380"];
    initialCluster = (map (n: "${n.fqdn}=https://${n.fqdn}:2380") nodes);
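    # Every node advertises the full node list (including itself) as the
    # initial cluster, so all members share the same static bootstrap config.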

    clientCertAuth = true;
    trustedCaFile = pki.etcd.server.ca;
    certFile = pki.etcd.server.cert;
    keyFile = pki.etcd.server.key;

    peerClientCertAuth = true;
    peerTrustedCaFile = pki.etcdPeer.ca;
    peerCertFile = pki.etcdPeer.cert;
    peerKeyFile = pki.etcdPeer.key;

    extraConf = {
      PEER_CLIENT_CERT_AUTH = "true";
    };
  };

  services.kubernetes = {
    # Pin to specific k8s package.
    package = k8spkgs.kubernetes;
    roles = []; # We do not use any nixpkgs predefined roles for k8s. Instead,
                # we enable k8s components manually.

    caFile = pki.kube.apiserver.ca;
    clusterCidr = "10.10.16.0/20";
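    # clusterCidr is the pod network; the service network (10.10.12.0/24) is
    # configured separately on the apiserver and controller manager below.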

    path = [ pkgs.e2fsprogs ]; # kubelet wants to mkfs.ext4 when mounting pvcs

    addons.dns.enable = false;
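    # The nixpkgs DNS addon is disabled; cluster DNS is presumably deployed
    # separately, with kubelet pointed at clusterDns 10.10.12.254 below.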

    apiserver = rec {
      enable = true;
      insecurePort = ports.k8sAPIServerPlain;
      securePort = ports.k8sAPIServerSecure;
      advertiseAddress = "${node.ipAddr}";

      etcd = {
        servers = (map (n: "https://${n.fqdn}:2379") nodes);
        caFile = pki.etcd.kube.ca;
        keyFile = pki.etcd.kube.key;
        certFile = pki.etcd.kube.cert;
      };

      tlsCertFile = pki.kube.apiserver.cert;
      tlsKeyFile = pki.kube.apiserver.key;

      clientCaFile = pki.kube.apiserver.ca;

      kubeletHttps = true;
      kubeletClientCaFile = pki.kube.apiserver.ca;
      kubeletClientCertFile = pki.kube.apiserver.cert;
      kubeletClientKeyFile = pki.kube.apiserver.key;

      serviceAccountKeyFile = pki.kube.serviceaccounts.key;

      allowPrivileged = true;
      serviceClusterIpRange = "10.10.12.0/24";
      runtimeConfig = "api/all,authentication.k8s.io/v1beta1";
      authorizationMode = ["Node" "RBAC"];
      enableAdmissionPlugins = ["Initializers" "NamespaceLifecycle" "NodeRestriction" "LimitRanger" "ServiceAccount" "DefaultStorageClass" "ResourceQuota"];
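
      # The proxy-client-* and requestheader-* flags below configure the
      # aggregation layer front proxy, using the separate kubeFront PKI.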
      extraOpts = ''
        --apiserver-count=3 \
        --proxy-client-cert-file=${pki.kubeFront.apiserver.cert} \
        --proxy-client-key-file=${pki.kubeFront.apiserver.key} \
        --requestheader-allowed-names= \
        --requestheader-client-ca-file=${pki.kubeFront.apiserver.ca} \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-username-headers=X-Remote-User \
        -v=5
      '';
    };

    controllerManager = {
      enable = true;
      bindAddress = "0.0.0.0";
      insecurePort = ports.k8sControllerManagerPlain;
      leaderElect = true;
      serviceAccountKeyFile = pki.kube.serviceaccounts.key;
      rootCaFile = pki.kube.ca;
      extraOpts = ''
        --service-cluster-ip-range=10.10.12.0/24 \
        --use-service-account-credentials=true \
        --secure-port=${toString ports.k8sControllerManagerSecure}
      '';
      kubeconfig = pki.kube.controllermanager.config;
    };

    scheduler = {
      enable = true;
      address = "0.0.0.0";
      port = 0;
      leaderElect = true;
      kubeconfig = pki.kube.scheduler.config;
    };

    proxy = {
      enable = true;
      kubeconfig = pki.kube.proxy.config;
      extraOpts = ''
        --hostname-override=${fqdn} \
        --proxy-mode=iptables
      '';
    };

    kubelet = {
      enable = true;
      unschedulable = false;
      allowPrivileged = true;
      hostname = fqdn;
      tlsCertFile = pki.kube.kubelet.cert;
      tlsKeyFile = pki.kube.kubelet.key;
      clientCaFile = pki.kube.kubelet.ca;
      nodeIp = node.ipAddr;
      networkPlugin = "cni";
      clusterDns = "10.10.12.254";
      kubeconfig = pki.kube.kubelet.config;
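      # The CNI conf/bin directories point at /opt/cni, which is populated
      # externally (via containers with host mounts, see the kubelet preStart
      # override at the bottom of this file) rather than by nixpkgs.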
      extraOpts = ''
        --cni-conf-dir=/opt/cni/conf \
        --cni-bin-dir=/opt/cni/bin
      '';
    };
  };

  # https://github.com/NixOS/nixpkgs/issues/60687
  systemd.services.kube-control-plane-online = {
    preStart = pkgs.lib.mkForce "";
  };

  # this seems to depend on flannel
  # TODO(q3k): file issue
  systemd.services.kubelet-online = {
    script = pkgs.lib.mkForce "sleep 1";
  };

  # The default kubelet preStart script removes all CNI plugins and replaces
  # them with nix-defined ones. Since we bring our own CNI plugins via
  # containers with host mounts, this causes them to be removed on kubelet
  # restart.
  # TODO(https://github.com/NixOS/nixpkgs/issues/53601): fix when resolved
  systemd.services.kubelet = {
    preStart = pkgs.lib.mkForce "sleep 1";
  };
}