
smsgw: productionize, implement kube/mirko

This productionizes smsgw.

We also add some jsonnet machinery to provide a unified service for Go
micro/mirkoservices.

This machinery provides all the nice stuff:
 - a deployment
 - a service for all your ports
 - TLS certificates for HSPKI

We also update and test hspki for a new name scheme.
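
For illustration only (not part of this change): a minimal consumer of the mirko
machinery, based on the kube/mirko.libsonnet and hswaw/kube/hswaw.jsonnet files
below; the foo component, its image and its DNS name are hypothetical.

local mirko = import "kube/mirko.libsonnet";
{
  env: mirko.Environment("foo-prod") {
    local env = self,
    components: {
      foo: mirko.Component(env, "foo") {
        local foo = self,
        cfg+: {
          image: "registry.k0.hswaw.net/example/foo:latest", // hypothetical image
          container: foo.GoContainer("main", "/foo/foo"),
          ports+: {
            publicHTTP: {
              // Exposed via the Ingress that mirko generates automatically.
              web: { port: 8080, dns: "foo.hswaw.net" },
            },
          },
        },
      },
    },
  },
}

This yields a Namespace, Deployment, Service, ServiceAccount and HSPKI
Certificate, plus an Ingress for the declared public HTTP port.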

Change-Id: I292d00f858144903cbc8fe0c1c26eb1180d636bc
master
q3k 2019-10-02 20:46:48 +02:00
parent d186e9468d
commit 6f773e0004
13 changed files with 599 additions and 36 deletions


@@ -4,3 +4,4 @@ test --host_force_python=PY2
run --host_force_python=PY2
build --workspace_status_command=./bzl/workspace-status.sh
test --build_tests_only
test --test_output=errors


@@ -13,11 +13,16 @@ local nginx = import "lib/nginx.libsonnet";
local prodvider = import "lib/prodvider.libsonnet";
local registry = import "lib/registry.libsonnet";
local rook = import "lib/rook.libsonnet";
local pki = import "lib/pki.libsonnet";
local Cluster(fqdn) = {
local Cluster(short, realm) = {
local cluster = self,
local cfg = cluster.cfg,
short:: short,
realm:: realm,
fqdn:: "%s.%s" % [cluster.short, cluster.realm],
cfg:: {
// Storage class used for internal services (like registry). This must
// be set to a valid storage class. This can either be a cloud provider class
@@ -54,7 +59,7 @@ local Cluster(fqdn) = {
apiGroup: "rbac.authorization.k8s.io",
kind: "User",
# A cluster API Server authenticates with a certificate whose CN is == to the FQDN of the cluster.
name: fqdn,
name: cluster.fqdn,
},
],
},
@@ -159,7 +164,7 @@ local Cluster(fqdn) = {
cfg+: {
cluster_domains: [
"cluster.local",
fqdn,
cluster.fqdn,
],
},
},
@@ -203,12 +208,15 @@ local Cluster(fqdn) = {
// Docker registry
registry: registry.Environment {
cfg+: {
domain: "registry.%s" % [fqdn],
domain: "registry.%s" % [cluster.fqdn],
storageClassName: cfg.storageClassNameParanoid,
objectStorageName: "waw-hdd-redundant-2-object",
},
},
// TLS PKI machinery
pki: pki.Environment(cluster.short, cluster.realm),
// Prodvider
prodvider: prodvider.Environment {
cfg+: {
@@ -221,7 +229,7 @@ local Cluster(fqdn) = {
{
k0: {
local k0 = self,
cluster: Cluster("k0.hswaw.net") {
cluster: Cluster("k0", "hswaw.net") {
cfg+: {
storageClassNameParanoid: k0.ceph.blockParanoid.name,
},


@@ -0,0 +1,48 @@
local kube = import "../../../kube/kube.libsonnet";
{
Environment(clusterShort, realm): {
local env = self,
realm:: realm,
clusterShort:: clusterShort,
clusterFQDN:: "%s.%s" % [clusterShort, realm],
namespace:: "cert-manager", // https://github.com/jetstack/cert-manager/issues/2130
// An issuer that self-signs certificates, used for the CA certificate.
selfSignedIssuer: kube.Issuer("pki-selfsigned") {
metadata+: {
namespace: env.namespace,
},
spec: {
selfSigned: {},
},
},
// CA keypair, self-signed by the above issuer.
selfSignedCert: kube.Certificate("pki-selfsigned") {
metadata+: {
namespace: env.namespace,
},
spec: {
secretName: "pki-selfsigned-cert",
duration: "43800h0m0s", // 5 years
isCA: true,
issuerRef: {
name: env.selfSignedIssuer.metadata.name,
},
commonName: "pki-ca",
},
},
// CA issuer, used to issue certificates signed by the CA.
issuer: kube.ClusterIssuer("pki-ca") {
spec: {
ca: {
secretName: env.selfSignedCert.spec.secretName,
},
},
},
},
}
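
The pki-ca ClusterIssuer name is a contract consumed elsewhere in this change
(kube/mirko.libsonnet, hswaw/kube/hswaw.jsonnet). For illustration, a minimal
certificate request against it; the example names are hypothetical.

local kube = import "kube/kube.libsonnet";
kube.Certificate("example-cert") {
  metadata+: { namespace: "example" }, // hypothetical namespace
  spec: {
    secretName: "example-cert",
    duration: "35040h0m0s", // 4 years
    issuerRef: {
      // Contract with the pki-ca ClusterIssuer defined above.
      name: "pki-ca",
      kind: "ClusterIssuer",
    },
    // HSPKI service naming scheme: <job>.<namespace>.svc.<cluster-short>.<realm>.
    commonName: "foo.example.svc.k0.hswaw.net",
  },
}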


@@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
@@ -15,3 +15,10 @@ go_library(
"@org_golang_x_net//trace:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["grpc_test.go"],
embed = [":go_default_library"],
deps = ["@com_github_go_test_deep//:go_default_library"],
)


@@ -24,36 +24,49 @@ Concepts
All certs for mutual auth have the following CN/SAN format:
<job>.<principal>.<realm>
<job>.<principal>.svc.<cluster-short>.<realm>
or
<principal>.person.<realm>
or
<principal>.external.<realm>
For example, if principal maps into a 'group' and job into a 'user':
Where in addition we define `<cluster>` as being `<realm>` plus its next left-side member.
arista-proxy-dcr01u23.cluster-management-prod.c.example.com
For example, for kubernetes jobs:
job = arista-proxy-dcr01u23
principal = cluster-management-prod
realm = c.example.com
foo.bar.svc.k0.hswaw.net
job = foo
principal = bar.svc
cluster = k0.hswaw.net
realm = hswaw.net
Where foo is the name of a Kubernetes service, bar is the name of the namespace it's in, and
k0.hswaw.net is the cluster running them.
For people and external services:
q3k.person.hswaw.net
job =
principal = q3k
cluster = person.hswaw.net
realm = hswaw.net
The Realm is a DNS name that is global to all jobs that need mutual authentication.
The Principal is any name that carries significance for logical grouping of jobs.
It can, but doesn't need to, group jobs by similar permissions.
The Principal is any name that carries significance for an authentication principal,
i.e. a unit that gives information about the identity of an element. In the case of Kubernetes
it's a namespace (as we split authentication/authorization into namespaces). In the case of external
services and people it's the name of the service or person.
The Job is any name that identifies uniquely (within the principal) a security
endpoint that describes a single security policy for a gRPC endpoint.
The Job is a name that makes the Principal more specific, if possible. If set, the Principal
can be treated as a group of Jobs.
The entire CN should be DNS resolvable into an IP address that would respond to
gRPC requests on port 42000 (with a server TLS certificate that represents this CN) if the
job represents a service.
This maps nicely to the Kubernetes Cluster DNS format if you set `realm` to `svc.cluster.local`.
Then, `principal` maps to a Kubernetes namespace, and `job` maps into a Kubernetes service.
arista-proxy-dcr01u23.infrastructure-prod.svc.cluster.local
job/service = arista-proxy-dcr01u23
principal/namespace = infrastructure-prod
realm = svc.cluster.local
ACL, or How do I restrict access to my service?
-----------------------------------------------
@@ -84,8 +97,10 @@ Flags
Once linked into your program, the following flags will be automatically present:
-hspki_realm string
-hspki_cluster string
FQDN of cluster on which this service runs (default "local.hswaw.net")
PKI realm (default "svc.cluster.local")
-hspki_realm string
PKI realm (default "cluster.local")
-hspki_tls_ca_path string
Path to PKI CA certificate (default "pki/ca.pem")
-hspki_tls_certificate_path string


@@ -36,6 +36,7 @@ var (
flagCAPath string
flagCertificatePath string
flagKeyPath string
flagPKICluster string
flagPKIRealm string
flagPKIDisable bool
@@ -53,7 +54,8 @@ func init() {
flag.StringVar(&flagCAPath, "hspki_tls_ca_path", "pki/ca.pem", "Path to PKI CA certificate")
flag.StringVar(&flagCertificatePath, "hspki_tls_certificate_path", "pki/service.pem", "Path to PKI service certificate")
flag.StringVar(&flagKeyPath, "hspki_tls_key_path", "pki/service-key.pem", "Path to PKI service private key")
flag.StringVar(&flagPKIRealm, "hspki_realm", "svc.cluster.local", "PKI realm")
flag.StringVar(&flagPKICluster, "hspki_cluster", "local.hswaw.net", "FQDN of cluster on which this service runs")
flag.StringVar(&flagPKIRealm, "hspki_realm", "hswaw.net", "Cluster realm (top level from which we accept foreign cluster certs)")
flag.BoolVar(&flagPKIDisable, "hspki_disable", false, "Disable PKI entirely (insecure!)")
}
@@ -81,14 +83,39 @@ func parseClientName(name string) (*ClientInfo, error) {
if !strings.HasSuffix(name, "."+flagPKIRealm) {
return nil, fmt.Errorf("invalid realm")
}
service := strings.TrimSuffix(name, "."+flagPKIRealm)
parts := strings.Split(service, ".")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid job/principal format")
inRealm := strings.TrimSuffix(name, "."+flagPKIRealm)
special := []string{"person", "external"}
for _, s := range special {
// Special case for people running jobs from workstations, or for non-cluster services.
if strings.HasSuffix(inRealm, "."+s) {
asPerson := strings.TrimSuffix(inRealm, "."+s)
parts := strings.Split(asPerson, ".")
if len(parts) != 1 {
return nil, fmt.Errorf("invalid person fqdn")
}
return &ClientInfo{
Cluster: fmt.Sprintf("%s.%s", s, flagPKIRealm),
Principal: parts[0],
Job: "",
}, nil
}
}
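// Anything else must be an in-cluster service name of the form
// <job>.<namespace>.svc.<cluster-short>.<realm>, i.e. exactly four labels
// in front of the realm.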
parts := strings.Split(inRealm, ".")
if len(parts) != 4 {
return nil, fmt.Errorf("invalid job/principal format for in-cluster")
}
if parts[2] != "svc" {
return nil, fmt.Errorf("can only refer to services within cluster")
}
clusterShort := parts[3]
return &ClientInfo{
Realm: flagPKIRealm,
Principal: parts[1],
Cluster: fmt.Sprintf("%s.%s", clusterShort, flagPKIRealm),
Principal: fmt.Sprintf("%s.svc", parts[1]),
Job: parts[0],
}, nil
}
@@ -137,15 +164,24 @@ func grpcInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServe
// ClientInfo contains information about the HSPKI authentication data of the
// gRPC client that has made the request.
type ClientInfo struct {
Realm string
Cluster string
Principal string
Job string
}
// String returns a human-readable representation of the ClientInfo in the
// form "job=foo, principal=bar, realm=baz".
// form "job=foo, principal=bar.svc, cluster=baz.hswaw.net".
func (c *ClientInfo) String() string {
return fmt.Sprintf("job=%q, principal=%q, realm=%q", c.Job, c.Principal, c.Realm)
return fmt.Sprintf("job=%q, principal=%q, cluster=%q", c.Job, c.Principal, c.Cluster)
}
// Person returns a reference to a person's ID if the ClientInfo describes a person.
// Otherwise, it returns an empty string.
func (c *ClientInfo) Person() string {
if c.Cluster != fmt.Sprintf("person.%s", flagPKIRealm) {
return ""
}
return c.Principal
}
// ClientInfoFromContext returns ClientInfo from a gRPC service context.

go/pki/grpc_test.go (new file)

@@ -0,0 +1,69 @@
package pki
import (
"testing"
"github.com/go-test/deep"
)
func TestParseClient(t *testing.T) {
flagPKIRealm = "hswaw.net"
tests := []struct {
name string
want *ClientInfo
}{
// Local cluster
{"foo.bar.svc.k0.hswaw.net", &ClientInfo{Cluster: "k0.hswaw.net", Principal: "bar.svc", Job: "foo"}},
{"foo.bar.k0.hswaw.net", nil},
// Foreign cluster
{"foo.bar.svc.k1.hswaw.net", &ClientInfo{Cluster: "k1.hswaw.net", Principal: "bar.svc", Job: "foo"}},
{"foo.bar.k1.hswaw.net", nil},
// Human admins (admins, as we know, don't have a real job)
{"q3k.person.hswaw.net", &ClientInfo{Cluster: "person.hswaw.net", Principal: "q3k", Job: ""}},
// External services
{"kasownik.external.hswaw.net", &ClientInfo{Cluster: "external.hswaw.net", Principal: "kasownik", Job: ""}},
// Broken.
{"foo.hswaw.net", nil},
{"ldap.hackerspace.pl", nil},
{"", nil},
{"..what..plz...don.t.hack.me.hswaw.net", nil},
}
for i, te := range tests {
res, err := parseClientName(te.name)
if err != nil {
if te.want != nil {
t.Errorf("#%d: wanted result, got err %v", i, err)
}
continue
}
if te.want == nil {
t.Errorf("#%d: wanted err, got %+v", i, res)
continue
}
if diff := deep.Equal(*te.want, *res); diff != nil {
t.Errorf("#%d: res diff: %v", i, diff)
continue
}
}
}
func TestCheckPerson(t *testing.T) {
flagPKIRealm = "hswaw.net"
res, err := parseClientName("q3k.person.hswaw.net")
if err != nil {
t.Fatalf("err: %v", err)
}
if want, got := "q3k", res.Person(); want != got {
t.Fatalf("wanted %q, got %q", want, got)
}
}

hswaw/kube/hswaw.jsonnet (new file)

@@ -0,0 +1,96 @@
local mirko = import "../../kube/mirko.libsonnet";
local kube = import "../../kube/kube.libsonnet";
{
hswaw(name):: mirko.Environment(name) {
local env = self,
local cfg = self.cfg,
cfg+: {
smsgw: {
secret: {
twilio_token: error "twilio_token must be set",
},
image: "registry.k0.hswaw.net/q3k/smsgs:1570049853-05c5b491c45de6d960979d4aee8635768f3178e9",
webhookFQDN: error "webhookFQDN must be set",
},
},
components: {
smsgw: mirko.Component(env, "smsgw") {
local smsgw = self,
cfg+: {
image: cfg.smsgw.image,
container: smsgw.GoContainer("main", "/smsgw/smsgw") {
env_: {
TWILIO_TOKEN: kube.SecretKeyRef(smsgw.secret, "twilio_token"),
},
command+: [
"-twilio_friendly_phone", "48732168371",
"-twilio_sid", "AC806ed4bf4b6c80c8f8ea686379b69518",
"-twilio_token", "$(TWILIO_TOKEN)",
"-webhook_listen", "0.0.0.0:5000",
"-webhook_public", "https://%s/" % [ env.cfg.smsgw.webhookFQDN ],
],
},
ports+: {
publicHTTP: {
webhook: {
port: 5000,
dns: env.cfg.smsgw.webhookFQDN,
}
},
},
},
secret: kube.Secret("smsgw") {
metadata+: smsgw.metadata,
data: env.cfg.smsgw.secret,
},
// Temporary machinery to access gRPC from outside.
// In the future, this will be handled by a proxy/API gateway.
// For now, we need this running.
// TODO(q3k): remove this when we have an API GW or proxy.
stopgap: {
rpcLB: kube.Service("smsgw-tcp-rpc") {
metadata+: smsgw.metadata,
target_pod: smsgw.deployment.spec.template,
spec+: {
type: "LoadBalancer",
ports: [
{ name: "grpc-external", port: 443, targetPort: 4200 },
],
},
},
rpcCertificate: kube.Certificate("smsgw-tcp-rpc-consumer") {
metadata+: smsgw.metadata,
spec: {
secretName: "smsgw-tcp-rpc-consumer",
duration: "35040h0m0s", // 4 years
issuerRef: {
// Contract with cluster/lib/pki.libsonnet.
// Copied over.
name: "pki-ca",
kind: "ClusterIssuer",
},
commonName: "kasownik.external.hswaw.net",
},
},
}
},
},
},
prod: self.hswaw("hswaw-prod") {
cfg+: {
smsgw+: {
secret+: {
twilio_token: std.base64(std.split(importstr "secrets/plain/prod-twilio-token", "\n")[0]),
},
webhookFQDN: "smsgw-webhook-prod.hswaw.net",
}
},
},
}

hswaw/kube/secrets/.gitignore (vendored, new file)

@@ -0,0 +1 @@
plain


@@ -0,0 +1,40 @@
-----BEGIN PGP MESSAGE-----
hQEMAzhuiT4RC8VbAQf+PEa/FRUTq0HXd5LfuDDr47lajUW7UrBPbpHuRcmY0p7O
+w9uPXP0+ceckH+zmVAJm4wATKlXlOlUOzUNhmyUC04npJWDiru+AA7faVcnU1cZ
8rQ7qBZgG441oceYfn0HJrDa18dvnRQN1YOB2V2xVKFpLil4Kd/loWXmhdBSr8pD
8gLLZKlYauC4TyNMmOBgAqCRiDxFn2O0vLNz5OOgtqB6siXdHs8t4/8bBW/Inj29
g7n4beESkg1BbbSgJABIHmMiWNuOPxeY8k101p9f8wt5kZAKV1QwXgLEFztMF6zb
nnSbFhBAaJRDxERl674acQxuY5VJRkLzRd1ELK4w+4UBDANcG2tp6fXqvgEH/j63
azGWI2hvhV6ZmkknqJR4Iy9JDAgD9HSlFQYswxEVhXP5//lId2Aof4k69IJ3qWgF
RbMZSpuYSOM5aX6yw2A2gG4b5NQ2CDP3ls+tFm48mLrJfbNxcDu6K5Au3WgtyO6l
B6MNchnKXLN8lUfYKDrtv3hZV+N3EwtMbVshMTpKwCEJ+RTq7gf/DSz2VMq8c2Em
LpeUb9mPuAawYVFLrp4D3T1CoPHIcq+ApsVoHuhAv1SGotC51coSMFzfTPeQ3cYY
/p+vu+3z5lGbk9O/h+IG5lUAEAL9u+AHMfhkw36uGQTQEdnS8mF+iVHaFb09mgwN
SUk9QhZJFb+aj88Oyt6FAgwDodoT8VqRl4UBD/92bm8kXNTcFml/IAlgp+FVrEFk
2FuZ0qyUobLt93uoSzq8SeVBxatqmRWXBdVb0ccaYYJxT3aWamZJwy3tAb3Ki/uS
ZFruA3DbtQNXtAOrXi1n81CT2S7c/Kw6iM3a+5S+XnUWB2fnfY+mY7HRmVDlu0uO
F7qxrFbF9fYUNZunrkA/hA38kfdWY3p3F8TTDfifdR6At4yRi1hv/M4/hxubgew1
f6NoNj8g2Bjf99kfS5z5ibyQNg1hWLAJUr5nSYJRTcdC+zQhPMnHtH5+ybXIinfG
v1IwSKmtqLsTn2DZ3xXhBs7k98+1rKUcKEJLU6dOwRo6aKkMYaufz8VxUOWdpC4f
MFxtNo/1uPkPraKuB/AuStQsbz4cCxWMgxze3UkSqL4ZqklxfiUCQE/5ZdWmCers
5XRiqxdCVsgmCirvZOoEvZ1Ghn64OTxfDXY2yUVkNKffKahGDJc0r0epTZuqlneY
rYqECIfU9Xuzjy0JXp7bm0ufIVaDXJRsfsDzM9u+TKhFaM6hT0bnkxSr+/UTtrhA
l5x67YgkXI5MYNlkG6CL/wbTUeq5hozwjvV8e7pyf5QmSTxK8RijGgAZ/bbyEOWQ
+rvWMKMFiwB254LOT426z8HQoAKo43XnnyHZIa+RzFs6mzTjuT4vMMkb4Ruttac7
UmsRh/LHSIMWA4bSDoUCDAPiA8lOXOuz7wEP/j5QJgjue6ikh4OL4zPu5PSvmb0j
4voRFsIZbmCla/e0snD9SIYKZZNylnn85A497KePSw0Gz6Q9SRVMlqh8jXAtmc3R
f2KDYDkoHIbsru+PBpucBdILM0ThiuZtT6YnHd7rTo5tqF10vXULZ1kM9mBcSHS1
6yhzpp/rpYk/sqqlUtNA2CP7vXP/ySgcCh+ZXQFeFQEBIUiMFa5zgevjp+ClwUTV
GVxPMgavvZY52oa9E4dlvi8pTtrhsG/ME/2kwT5FUJzmIvfWVIB3GT+yjUih8YGY
OzVDnZ8X4NqJU9qGuSMqzGIA1zacy4GtuFxKVf1soJUS+8a1o9PhC53cIANeh5S8
zNHYRB3sqlblm40uSTtAWkpdW8nBG1Ky+omv/I5ljvHAYrW0kxoIZ+1YWck+IWIX
9SDQ5r5Juv4FadJVTQZEdGR+0zou9PIv6W1bKqSAzTKtGeBspsu0/M+KzT5ywOLP
KoLxxiIHscIMX9gfRgnxI2Kpo0fID4X6bGdfk7ZqDj1zBx65L0CoEWUpi5hvg9l1
uq6Z55broSQ9EFhQTtb9e0UqT09Jb6JJ07elTRVBQw3hIKwXFGsL/NZLjHxwk+h3
PtFD4+rSnVcy8n7cLfsZmP4ufN5VHWX5C8fVowTgg39YDeZJFRtZe8mcSawbSwa6
nQBhGKJxZGMtoR9b0m0BusPAbAy7FLxLEC1yStg6x9k71iWgf3PcGCNWHsMdGoGl
grkDD88kQYQujIwMzrWkMOeCJmvGemGinNUZjkDQv5WDR1RoTisMf/xgdplFSZq7
jDM7tUYE8kNa/hBa1S7vX11NJLjRoE+P0d1p
=WIU6
-----END PGP MESSAGE-----


@@ -1,3 +1,4 @@
load("@io_bazel_rules_docker//container:container.bzl", "container_image", "container_layer", "container_push")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library", "go_test")
go_library(
@@ -29,3 +30,28 @@ go_test(
srcs = ["dispatcher_test.go"],
embed = [":go_default_library"],
)
container_layer(
name = "layer_bin",
files = [
":smsgw",
],
directory = "/smsgw/",
)
container_image(
name = "runtime",
base = "@prodimage-bionic//image",
layers = [
":layer_bin",
],
)
container_push(
name = "push",
image = ":runtime",
format = "Docker",
registry = "registry.k0.hswaw.net",
repository = "q3k/smsgs",
tag = "{BUILD_TIMESTAMP}-{STABLE_GIT_COMMIT}",
)


@@ -76,7 +76,7 @@ func ensureWebhook(ctx context.Context, t *twilio) {
glog.Infof("Webhook not yet ready, currently %s %q", pn.SMSMethod, pn.SMSURL)
time.Sleep(5 * time.Second)
}
glog.Infof("Webhook verifier")
glog.Infof("Webhook verified")
} else {
glog.Infof("Webhook up to date")
}

kube/mirko.libsonnet (new file)

@@ -0,0 +1,216 @@
# Mirko, an abstraction layer for hscloud kubernetes services.
local kube = import "kube.libsonnet";
{
Environment(name): {
local env = self,
local cfg = env.cfg,
cfg:: {
name: name,
namespace: cfg.name,
},
namespace: kube.Namespace(cfg.namespace),
components: {}, // type: mirko.Component
// Currently hardcoded!
// This might end up being something passed in as part of kubecfg evaluation,
// when we get to supporting multiple/federated clusters.
// For now, this is good enough.
pkiRealm:: "hswaw.net",
pkiClusterFQDN:: "k0.hswaw.net",
// Generate an ingress if we have any public ports.
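// Each element is a { service, port, dns } record describing one public
// HTTP port of one component.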
publicHTTPPorts:: std.flattenArrays([
[
{
local component = env.components[c],
service: component.svc,
port: component.cfg.ports.publicHTTP[p].port,
dns: component.cfg.ports.publicHTTP[p].dns,
}
for p in std.objectFields(env.components[c].cfg.ports.publicHTTP)
]
for c in std.objectFields(env.components)
]),
ingress: if std.length(env.publicHTTPPorts) > 0 then kube.Ingress("mirko-public") {
metadata+: {
namespace: env.cfg.namespace,
labels: {
"app.kubernetes.io/name": cfg.name,
"app.kubernetes.io/managed-by": "kubecfg-mirko",
"app.kubernetes.io/component": cfg.name,
"mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
"mirko.hscloud.hackerspace.pl/component": "mirko-public-ingress",
},
annotations+: {
"kubernetes.io/tls-acme": "true",
"certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
},
},
spec+: {
tls: [
{
hosts: [p.dns for p in env.publicHTTPPorts],
secretName: "mirko-public-tls",
},
],
rules: [
{
host: p.dns,
http: {
paths: [
{ path: "/", backend: { serviceName: p.service.metadata.name, servicePort: p.port }},
],
},
}
for p in env.publicHTTPPorts
],
},
} else {}
},
Component(env, name): {
local component = self,
local cfg = component.cfg,
makeName(suffix):: "%s%s%s" % [cfg.prefix, cfg.name, suffix],
metadata:: {
namespace: env.cfg.namespace,
labels: {
"app.kubernetes.io/name": env.cfg.name,
"app.kubernetes.io/managed-by": "kubecfg-mirko",
"app.kubernetes.io/component": cfg.name,
"mirko.hscloud.hackerspace.pl/environment": env.cfg.name,
"mirko.hscloud.hackerspace.pl/component": cfg.name,
},
},
# Tunables for users.
cfg:: {
name: name,
prefix:: "",
image:: env.image,
volumes:: {},
containers:: {
main: cfg.container,
},
container:: error "container(s) must be set",
ports:: {
publicHTTP: {}, // name -> { port: no, dns: fqdn }
grpc: { main: 4200 }, // name -> port no
},
},
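// Canonical port name -> port number map, merging gRPC and public HTTP
// ports. Used both for container ports and for the Service below.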
allPorts:: {
['grpc-' + p]: cfg.ports.grpc[p]
for p in std.objectFields(cfg.ports.grpc)
} + {
['pubhttp-' + p] : cfg.ports.publicHTTP[p].port
for p in std.objectFields(cfg.ports.publicHTTP)
},
Container(name):: kube.Container(component.makeName(name)) {
image: cfg.image,
volumeMounts_: {
pki: { mountPath: "/mnt/pki" },
},
ports_: {
[p]: { containerPort: component.allPorts[p] }
for p in std.objectFields(component.allPorts)
},
resources: {
requests: {
cpu: "25m",
memory: "64Mi",
},
limits: {
cpu: "500m",
memory: "128Mi",
},
},
},
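// GoContainer wires in the flags expected by Go mirkoservices: HSPKI
// certificate paths (under the pki volume mounted by Container above) and
// the default gRPC listen address.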
GoContainer(name, binary):: component.Container(name) {
command: [
binary,
"-hspki_realm", env.pkiRealm,
"-hspki_cluster", env.pkiClusterFQDN,
"-hspki_tls_ca_path", "/mnt/pki/ca.crt",
"-hspki_tls_certificate_path", "/mnt/pki/tls.crt",
"-hspki_tls_key_path", "/mnt/pki/tls.key",
"-logtostderr",
"-listen_address", "0.0.0.0:4200",
],
},
deployment: kube.Deployment(component.makeName("-main")) {
metadata+: component.metadata,
spec+: {
template+: {
spec+: {
volumes_: {
pki: {
secret: { secretName: component.pki.cert.spec.secretName },
},
} + cfg.volumes,
containers_: cfg.containers,
serviceAccountName: component.sa.metadata.name,
},
},
},
},
svc: kube.Service(component.makeName("")) { // No suffix, name part of DNS entry.
metadata+: component.metadata,
target_pod:: component.deployment.spec.template,
spec+: {
ports: [
{
name: p,
port: component.allPorts[p],
targetPort: component.allPorts[p],
}
for p in std.objectFields(component.allPorts)
],
},
},
sa: kube.ServiceAccount(component.makeName("-main")) {
metadata+: component.metadata,
},
pki: {
cert: kube.Certificate(component.makeName("-cert")) {
metadata+: component.metadata,
spec: {
secretName: component.makeName("-cert"),
duration: "35040h0m0s", // 4 years
issuerRef: {
// Contract with cluster/lib/pki.libsonnet.
name: "pki-ca",
kind: "ClusterIssuer",
},
commonName: "%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
dnsNames: [
"%s" % [component.svc.metadata.name ],
"%s.%s" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc.cluster.local" % [component.svc.metadata.name, component.svc.metadata.namespace ],
"%s.%s.svc.%s" % [component.svc.metadata.name, component.svc.metadata.namespace, env.pkiClusterFQDN ],
],
},
},
},
},
}