
cluster/clustercfg: add multi-cluster support

Change-Id: I01fa3c40ac00cfa022d438163ba9e2d3ef66ac72
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/2090
Reviewed-by: q3k <q3k@hackerspace.pl>
radex 2025-01-08 15:23:53 +01:00
parent d112420352
commit 274e70f557
9 changed files with 108 additions and 23 deletions

View file

@@ -11,6 +11,7 @@ go_library(
visibility = ["//visibility:private"],
deps = [
"//cluster/clustercfg/certs",
"//cluster/clustercfg/clusters",
"//go/workspace",
"@com_github_spf13_cobra//:cobra",
"@io_k8s_client_go//tools/clientcmd",

View file

@@ -9,4 +9,7 @@ go_library(
],
importpath = "code.hackerspace.pl/hscloud/cluster/clustercfg/certs",
visibility = ["//visibility:public"],
deps = [
"//cluster/clustercfg/clusters",
],
)

View file

@@ -1,8 +1,10 @@
package certs
import (
"net"
"path/filepath"
"time"
"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
)
// Certificates is the set of certificates required to run our Kubernetes
@@ -167,11 +169,13 @@ func mkCA(root, name, cn string) *Certificate {
}
// Prepare builds our Certificates structure at a given location on the
// filesystem, for the given nodes.
// filesystem, for the given cluster definition and nodes.
//
// Calling Ensure() on the returned Certificates will actually engage
// generation logic. Before that, no disk accesses are performed.
func Prepare(root string, fqdns []string) Certificates {
func Prepare(clustersRoot string, cluster clusters.Cluster, fqdns []string) Certificates {
root := filepath.Join(clustersRoot, cluster.PathPrefix)
certs := Certificates{
CAs: CAs{
EtcdPeer: mkCA(root, "ca-etcdpeer", "etcd peer ca"),
@@ -203,14 +207,12 @@ func Prepare(root string, fqdns []string) Certificates {
name: "kube-apiserver",
root: root,
kind: kindClientServer,
cn: "k0.hswaw.net",
cn: cluster.Fqdn(),
san: []string{
"k0.hswaw.net",
"kubernetes.default.svc.k0.hswaw.net",
},
ips: []net.IP{
{10, 10, 12, 1},
cluster.Fqdn(),
"kubernetes.default.svc." + cluster.Fqdn(),
},
ips: cluster.ApiserverIps,
issuer: certs.CAs.Kube,
},
KubeControllerManager: &Certificate{
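
For reference, a minimal sketch of how the new Prepare signature is consumed by gencerts and admincreds after this change: the certificate root is derived from the cluster's PathPrefix, and Ensure() still performs the actual generation. The workspace path and node FQDN below are hypothetical.

```go
package main

import (
	"log"
	"path/filepath"

	"code.hackerspace.pl/hscloud/cluster/clustercfg/certs"
	"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
)

func main() {
	ws := "/home/user/hscloud" // hypothetical workspace root
	cluster := clusters.Clusters["k1"]

	// Certificates for k1 land under <ws>/cluster/k1 (its PathPrefix);
	// k0's empty PathPrefix keeps the existing //cluster layout.
	c := certs.Prepare(filepath.Join(ws, "cluster"), cluster, []string{"node1.hswaw.net"})

	// No disk access happens until Ensure() is called.
	if err := c.Ensure(); err != nil {
		log.Fatalf("Failed: %v", err)
	}
}
```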

View file

@@ -0,0 +1,10 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "clusters",
srcs = [
"clusters.go",
],
importpath = "code.hackerspace.pl/hscloud/cluster/clustercfg/clusters",
visibility = ["//visibility:public"],
)

View file

@@ -0,0 +1,42 @@
package clusters
import (
"net"
"net/url"
)
type Cluster struct {
// Name of the cluster in shorthand form (e.g. k0, k1, etc), must be url-compatible
Name string
// PathPrefix is the subfolder within //cluster where the cluster's certs and secrets are stored
PathPrefix string
ApiserverIps []net.IP
}
var Clusters = map[string]Cluster{
"k0": {
Name: "k0",
PathPrefix: "",
ApiserverIps: []net.IP{
{10, 10, 12, 1},
},
},
"k1": {
Name: "k1",
PathPrefix: "k1",
ApiserverIps: []net.IP{
// TODO: fill in once we have a k1 cluster
},
},
}
func (c *Cluster) Fqdn() string {
return c.Name + ".hswaw.net"
}
func (c *Cluster) ApiServerUrl() *url.URL {
return &url.URL{
Scheme: "https",
Host: c.Fqdn() + ":4001",
}
}
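
A minimal sketch of how this registry is used by the commands below: resolve the value of --cluster against clusters.Clusters, then derive the FQDN and apiserver URL from the resulting Cluster.

```go
package main

import (
	"fmt"
	"log"

	"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
)

func main() {
	cluster, ok := clusters.Clusters["k0"]
	if !ok {
		log.Fatalf("Unknown cluster: %q", "k0")
	}
	fmt.Println(cluster.Fqdn())                  // k0.hswaw.net
	fmt.Println(cluster.ApiServerUrl().String()) // https://k0.hswaw.net:4001
}
```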

View file

@@ -12,6 +12,7 @@ import (
clientapi "k8s.io/client-go/tools/clientcmd/api"
"code.hackerspace.pl/hscloud/cluster/clustercfg/certs"
"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
"code.hackerspace.pl/hscloud/go/workspace"
)
@@ -31,6 +32,12 @@ for auditing of accesses to apiservers.
log.Fatalf("Could not figure out workspace: %v", err)
}
clusterName := flagCluster
cluster, ok := clusters.Clusters[clusterName]
if !ok {
log.Fatalf("Unknown cluster: %q", clusterName)
}
uname := "UNKNOWN"
if u, err := user.Current(); err == nil {
uname = u.Username
@@ -42,8 +49,8 @@ for auditing of accesses to apiservers.
breadcrumb := fmt.Sprintf("%s@%s", uname, hostname)
root := filepath.Join(ws, "cluster")
path := filepath.Join(ws, ".kubectl", "admincreds")
c := certs.Prepare(root, nil)
path := filepath.Join(ws, ".kubectl", "admincreds-"+cluster.Name)
c := certs.Prepare(root, cluster, nil)
creds := c.MakeKubeEmergencyCreds(path, breadcrumb)
_ = creds
@@ -51,7 +58,7 @@ for auditing of accesses to apiservers.
log.Printf("WARNING WARNING WARNING WARNING WARNING WARNING")
log.Printf("===============================================")
log.Printf("")
log.Printf("You are requesting ADMIN credentials.")
log.Printf("You are requesting ADMIN credentials for %s.", cluster.Name)
log.Printf("")
log.Printf("You likely shouldn't be doing this, and")
log.Printf("instead should be using `prodaccess`.")
@@ -67,15 +74,16 @@ for auditing of accesses to apiservers.
log.Printf("Configuring kubectl...")
caPath, certPath, keyPath := creds.Paths()
if err := installKubeletConfig(caPath, certPath, keyPath, "emergency.k0"); err != nil {
contextName := "emergency." + cluster.Name
if err := installKubeletConfig(caPath, certPath, keyPath, cluster, contextName); err != nil {
log.Fatalf("Failed: %v", err)
}
log.Fatalf("Done. Use kubectl --context=emergency.k0")
log.Fatalf("Done. Use kubectl --context=%s", contextName)
},
}
func installKubeletConfig(caPath, certPath, keyPath, configName string) error {
func installKubeletConfig(caPath, certPath, keyPath string, cluster clusters.Cluster, configName string) error {
ca := clientcmd.NewDefaultPathOptions()
config, err := ca.GetStartingConfig()
if err != nil {
@@ -89,7 +97,7 @@ func installKubeletConfig(caPath, certPath, keyPath, configName string) error {
config.Clusters[configName] = &clientapi.Cluster{
CertificateAuthority: caPath,
Server: "https://k0.hswaw.net:4001",
Server: cluster.ApiServerUrl().String(),
}
config.Contexts[configName] = &clientapi.Context{
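
A hedged sketch of the per-cluster kubeconfig entries installKubeletConfig now writes. The Context/AuthInfo wiring and the final ModifyConfig call are not visible in this hunk and are assumed from client-go's clientcmd API; credential paths are hypothetical.

```go
package main

import (
	"log"

	"k8s.io/client-go/tools/clientcmd"
	clientapi "k8s.io/client-go/tools/clientcmd/api"

	"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
)

func main() {
	cluster := clusters.Clusters["k1"]
	name := "emergency." + cluster.Name // emergency.k1

	po := clientcmd.NewDefaultPathOptions()
	config, err := po.GetStartingConfig()
	if err != nil {
		log.Fatalf("GetStartingConfig: %v", err)
	}
	config.Clusters[name] = &clientapi.Cluster{
		CertificateAuthority: "/path/to/ca.crt",               // hypothetical
		Server:               cluster.ApiServerUrl().String(), // https://k1.hswaw.net:4001
	}
	config.Contexts[name] = &clientapi.Context{Cluster: name, AuthInfo: name}
	config.AuthInfos[name] = &clientapi.AuthInfo{
		ClientCertificate: "/path/to/client.crt", // hypothetical
		ClientKey:         "/path/to/client.key", // hypothetical
	}
	// Persist the merged config back to the default kubeconfig location.
	if err := clientcmd.ModifyConfig(po, *config, true); err != nil {
		log.Fatalf("ModifyConfig: %v", err)
	}
}
```

With entries like these in place, `kubectl --context=emergency.k1` talks to the k1 apiserver using the emergency credentials.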

View file

@@ -8,6 +8,7 @@ import (
"github.com/spf13/cobra"
"code.hackerspace.pl/hscloud/cluster/clustercfg/certs"
"code.hackerspace.pl/hscloud/cluster/clustercfg/clusters"
"code.hackerspace.pl/hscloud/go/workspace"
)
@@ -15,10 +16,10 @@ var flagFQDNs []string
var gencertsCmd = &cobra.Command{
Use: "gencerts",
Short: "(re)generate keys/certs for k0 cluster",
Short: "(re)generate keys/certs for a hscloud cluster",
Long: `
If you're adding a new cluster node, run this. It will populate //cluster/secrets
and //cluster/certificates with new certs/keys.
If you're adding a new cluster node, run this. It will populate the appropriate
certs/secrets folders with new certs/keys.
By default, the nodes to generate certificates for are automatically discovered
by querying the local Nix machines defined in //ops, looking for anything that
@@ -30,11 +31,21 @@ node names, set --fqdn (either comma-separate them or repeat flags).
if err != nil {
log.Fatalf("Could not figure out workspace: %v", err)
}
path := filepath.Join(ws, "cluster")
clusterName := flagCluster
cluster, ok := clusters.Clusters[clusterName]
if !ok {
log.Fatalf("Unknown cluster: %q", clusterName)
}
fqdns := flagFQDNs
if len(fqdns) == 0 {
log.Printf("--fqdn not set, figuring out machines from Nix...")
if clusterName != "k0" {
log.Fatalf("Only k0 cluster supported for automatic machine discovery.")
}
err = workspace.EvalHscloudNix(cmd.Context(), &fqdns, "ops.exports.kubeMachineNames")
if err != nil {
log.Fatalf("Could not figure out Kubernetes machine FQDNs: %v", err)
@@ -48,8 +59,11 @@ node names, set --fqdn (either comma-separate them or repeat flags).
}
}
log.Printf("Cluster: -c %s", clusterName)
log.Printf("Machines: --fqdn %s", strings.Join(fqdns, ","))
c := certs.Prepare(path, fqdns)
path := filepath.Join(ws, "cluster")
c := certs.Prepare(path, cluster, fqdns)
if err := c.Ensure(); err != nil {
log.Fatalf("Failed: %v", err)
}
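
The help text's note that --fqdn accepts either comma-separated values or repeated flags is standard pflag string-slice behaviour. A standalone, hypothetical illustration (the flag registration below is not part of this change):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	var fqdns []string
	cmd := &cobra.Command{
		Use: "gencerts",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println(fqdns) // [a.hswaw.net b.hswaw.net c.hswaw.net]
		},
	}
	cmd.Flags().StringSliceVar(&fqdns, "fqdn", nil, "node FQDNs to generate certs for")
	// Comma-separated values and repeated flags can be mixed freely:
	cmd.SetArgs([]string{"--fqdn", "a.hswaw.net,b.hswaw.net", "--fqdn", "c.hswaw.net"})
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```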

View file

@@ -7,12 +7,17 @@ import (
"github.com/spf13/cobra"
)
var flagCluster string
var rootCmd = &cobra.Command{
Use: "clustercfg",
Short: "admin management tool for k0 cluster",
Short: "admin management tool for hscloud clusters",
}
func main() {
rootCmd.PersistentFlags().StringVarP(&flagCluster, "cluster", "c", "", "Name of the hscloud cluster to manage (required)")
rootCmd.MarkPersistentFlagRequired("cluster")
if err := rootCmd.Execute(); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)

View file

@@ -30,7 +30,7 @@ Provisioning nodes
- bring up a new node with nixos, the configuration doesn't matter and will be
nuked anyway
- add machine to cluster/machines and ops/machines.nix
- generate certs with `bazel run //cluster/clustercfg gencerts`
- generate certs with `bazel run //cluster/clustercfg gencerts --cluster k0`
- deploy using ops (see ops/README.md)
Applying kubecfg state