kube: clean up (various)

Change-Id: Idc11cf70fa7fd0360f63438270748ef1d9bad989
Reviewed-on: https://gerrit.hackerspace.pl/c/hscloud/+/1810
Reviewed-by: q3k <q3k@hackerspace.pl>
changes/10/1810/4
radex 2023-11-24 13:20:10 +01:00
parent d45584aa6d
commit 7a4c27d28c
7 changed files with 19 additions and 51 deletions

View File

@@ -3,7 +3,6 @@
 // kubectl -n onlyoffice-prod create secret generic documentserver-jwt --from-literal=jwt=$(pwgen 32 1)
 local kube = import "../../kube/hscloud.libsonnet";
-local policies = import "../../kube/policies.libsonnet";
 {
 onlyoffice:: {
@@ -77,11 +76,6 @@ local policies = import "../../kube/policies.libsonnet";
 hosts:: [cfg.domain],
 target:: top.svc,
 },
-// Needed because the documentserver runs its own supervisor, and:
-// - rabbitmq wants to mkdir in /run, which starts out with the wrong permissions
-// - nginx wants to bind to port 80
-insecure: policies.AllowNamespaceInsecure(cfg.namespace),
 },
 prod: self.onlyoffice {

View File

@@ -116,11 +116,6 @@ local kube = import "../../../kube/hscloud.libsonnet";
 svc: kube.Service(ix.name("octorpki")) {
 metadata+: ix.metadata("octorpki"),
 target:: ix.octorpki.deployment,
-spec+: {
-ports: [
-{ name: "client", port: 8080, targetPort: 8080, protocol: "TCP" },
-],
-},
 },
 },

View File

@@ -318,6 +318,10 @@ local admins = import "lib/admins.libsonnet";
 policies.AllowNamespaceInsecure("implr-vpn"),
 // For SourceGraph's tini container mess.
 policies.AllowNamespaceMostlySecure("sourcegraph"),
+// Needed because the documentserver runs its own supervisor, and:
+// - rabbitmq wants to mkdir in /run, which starts out with the wrong permissions
+// - nginx wants to bind to port 80
+policies.AllowNamespaceInsecure("onlyoffice-prod"),
 ],
 # Admission controller that permits non-privileged users to manage

View File

@@ -3,23 +3,19 @@ local kube = import "../../../kube/kube.libsonnet";
 {
 local top = self,
 local cfg = top.cfg,
+local ns = kube.Namespace(cfg.namespace),
 cfg:: {
 image:: "registry.k0.hswaw.net/games/factorio/modproxy:1589157915-eafe7be328477e8a6590c4210466ef12901f1b9a",
+namespace: error "namespace must be set",
 },
-pvc: kube.PersistentVolumeClaim("proxy-cas") {
-metadata+: {
-namespace: cfg.namespace,
-},
+pvc: ns.Contain(kube.PersistentVolumeClaim("proxy-cas")) {
 storage:: "32Gi",
 storageClass:: "waw-hdd-redundant-3",
 },
-deploy: kube.Deployment("proxy") {
-metadata+: {
-namespace: "factorio",
-},
+deploy: ns.Contain(kube.Deployment("proxy")) {
 spec+: {
 template+: {
 spec+: {
@@ -47,15 +43,8 @@
 },
 },
 },
-svc: kube.Service("proxy") {
-metadata+: {
-namespace: "factorio",
-},
+svc: ns.Contain(kube.Service("proxy")) {
 target:: top.deploy,
-spec+: {
-ports: [
-{ name: "client", port: 4200, targetPort: 4200, protocol: "TCP" },
-],
-},
 },
 }
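
Note: the hunks in this change swap hand-written metadata+: { namespace: ... } overrides for the namespace's Contain helper from kube.libsonnet. A minimal sketch of that pattern follows, assuming Contain behaves roughly like the boilerplate it replaces (the authoritative definition lives in kube/kube.libsonnet; the "factorio" namespace name here is only an illustrative stand-in):

local kube = import "../../../kube/kube.libsonnet";

// Hypothetical illustration: ns.Contain(obj) is assumed to act roughly like
//   obj { metadata+: { namespace: ns.metadata.name } },
// stamping the namespace onto the object so each resource no longer needs
// its own metadata+ override.
local ns = kube.Namespace("factorio");

{
    // Same shape as the pvc in the hunk above, namespaced via Contain.
    pvc: ns.Contain(kube.PersistentVolumeClaim("proxy-cas")) {
        storage:: "32Gi",
        storageClass:: "waw-hdd-redundant-3",
    },
}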

View File

@@ -15,6 +15,7 @@ local redis = import "../../kube/redis.libsonnet";
 local cfg = self.cfg,
 cfg:: {
+name: "paperless",
 namespace: "paperless",
 domain: "paperless.hackerspace.pl",
@@ -71,7 +72,7 @@ local redis = import "../../kube/redis.libsonnet";
 storageClass:: cfg.storageClassName,
 },
-deploy: ns.Contain(kube.Deployment("paperless")) {
+deploy: ns.Contain(kube.Deployment(cfg.name)) {
 spec+: {
 replicas: 1,
 template+: {
@@ -169,11 +170,11 @@ local redis = import "../../kube/redis.libsonnet";
 },
 },
-service: ns.Contain(kube.Service("paperless")) {
+service: ns.Contain(kube.Service(cfg.name)) {
 target:: top.deploy,
 },
-ingress: ns.Contain(kube.SimpleIngress("paperless")) {
+ingress: ns.Contain(kube.SimpleIngress(cfg.name)) {
 hosts:: [cfg.domain],
 target:: top.service,
 },

View File

@@ -87,6 +87,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 spec+: {
 template+: {
 spec+: {
+default_container: "vmauth",
 containers_: {
 default: kube.Container("default") {
 image: cfg.images.victoria,
@@ -119,12 +120,6 @@ local kube = import "../../../kube/hscloud.libsonnet";
 serviceAPI: ns.Contain(kube.Service("victoria-api")) {
 target:: victoria.deploy,
-spec+: {
-ports: [
-{ name: "api", port: 8427, targetPort: 8427, protocol: "TCP" },
-],
-type: "ClusterIP",
-},
 },
 ingressAPI: ns.Contain(kube.SimpleIngress("victoria-api")) {
@@ -241,11 +236,6 @@ local kube = import "../../../kube/hscloud.libsonnet";
 service: ns.Contain(kube.Service("grafana-public")) {
 target:: grafana.deploy,
-spec+: {
-ports: [
-{ name: "public", port: 3000, targetPort: 3000, protocol: "TCP" },
-],
-},
 },
 ingress: ns.Contain(kube.SimpleIngress("grafana-public")) {

View File

@@ -7,6 +7,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 local cfg = top.cfg,
 cfg:: {
+name: 'sso',
 namespace: "sso",
 image: "registry.k0.hswaw.net/informatic/sso-v2@sha256:1118effa697489028c3cd5a6786d3f94f16dbbe2810b1bf1b0f65ea15bac1914",
 domain: error "domain must be set",
@@ -26,7 +27,7 @@ local kube = import "../../../kube/hscloud.libsonnet";
 local ns = kube.Namespace(top.cfg.namespace),
-deployment: ns.Contain(kube.Deployment("sso")) {
+deployment: ns.Contain(kube.Deployment(cfg.name)) {
 spec+: {
 replicas: 1,
 template+: {
@@ -99,17 +100,11 @@ local kube = import "../../../kube/hscloud.libsonnet";
 },
 },
-svc: ns.Contain(kube.Service("sso")) {
+svc: ns.Contain(kube.Service(cfg.name)) {
 target:: top.deployment,
-spec+: {
-ports: [
-{ name: "http", port: 5000, targetPort: 5000, protocol: "TCP" },
-],
-type: "ClusterIP",
-},
 },
-ingress: ns.Contain(kube.SimpleIngress("sso")) {
+ingress: ns.Contain(kube.SimpleIngress(cfg.name)) {
 hosts:: [cfg.domain],
 target:: top.svc,
 },