# mirror of https://gerrit.hackerspace.pl/hscloud
# Deploy a per-cluster Nginx Ingress Controller
local kube = import "../../../kube/kube.libsonnet";
|
|
local policies = import "../../../kube/policies.libsonnet";
|
|
|
|
{
|
|
Environment: {
|
|
local env = self,
|
|
local cfg = env.cfg,
|
|
cfg:: {
|
|
image: "quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0",
|
|
namespace: "nginx-system",
|
|
},
|
|
|
|
metadata:: {
|
|
namespace: cfg.namespace,
|
|
labels: {
|
|
"app.kubernetes.io/name": "ingress-nginx",
|
|
"app.kubernetes.io/part-of": "ingress-nginx",
|
|
},
|
|
},
|
|
|
|
namespace: kube.Namespace(cfg.namespace),
|
|
|
|
allowInsecure: policies.AllowNamespaceInsecure(cfg.namespace),
|
|
|
|
maps: {
|
|
make(name):: kube.ConfigMap(name) {
|
|
metadata+: env.metadata,
|
|
},
|
|
configuration: env.maps.make("nginx-configuration"),
|
|
tcp: env.maps.make("tcp-services") {
|
|
data: {
|
|
"22": "gerrit/gerrit:22",
|
|
"222": "gitea-prod/gitea:22",
|
|
}
|
|
},
|
|
udp: env.maps.make("udp-services"),
|
|
},
|
|
|
|
sa: kube.ServiceAccount("nginx-ingress-serviceaccount") {
|
|
metadata+: env.metadata,
|
|
},
|
|
|
|
cr: kube.ClusterRole("nginx-ingress-clusterrole") {
|
|
metadata+: env.metadata {
|
|
namespace:: null,
|
|
},
|
|
rules: [
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["configmaps", "endpoints", "nodes", "pods", "secrets"],
|
|
verbs: ["list", "watch"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["nodes"],
|
|
verbs: ["get"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["services"],
|
|
verbs: ["get", "list", "watch"],
|
|
},
|
|
{
|
|
apiGroups: ["extensions"],
|
|
resources: ["ingresses"],
|
|
verbs: ["get", "list", "watch"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["events"],
|
|
verbs: ["create", "patch"],
|
|
},
|
|
{
|
|
apiGroups: ["extensions"],
|
|
resources: ["ingresses/status"],
|
|
verbs: ["update"],
|
|
},
|
|
],
|
|
},
|
|
|
|
crb: kube.ClusterRoleBinding("nginx-ingress-clusterrole-nisa-binding") {
|
|
metadata+: env.metadata {
|
|
namespace:: null,
|
|
},
|
|
roleRef: {
|
|
apiGroup: "rbac.authorization.k8s.io",
|
|
kind: "ClusterRole",
|
|
name: env.cr.metadata.name,
|
|
},
|
|
subjects: [
|
|
{
|
|
kind: "ServiceAccount",
|
|
name: env.sa.metadata.name,
|
|
namespace: env.sa.metadata.namespace,
|
|
},
|
|
],
|
|
},
|
|
|
|
role: kube.Role("nginx-ingress-role") {
|
|
metadata+: env.metadata,
|
|
rules : [
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["configmaps", "pods", "secrets", "namespaces"],
|
|
verbs: ["get"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["configmaps"],
|
|
resourceNames: ["ingress-controller-leader-nginx"],
|
|
verbs: ["get", "update"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["configmaps"],
|
|
verbs: ["create"],
|
|
},
|
|
{
|
|
apiGroups: [""],
|
|
resources: ["endpoints"],
|
|
verbs: ["get"],
|
|
},
|
|
],
|
|
},
|
|
|
|
roleb: kube.RoleBinding("nginx-ingress-role-nisa-binding") {
|
|
metadata+: env.metadata,
|
|
roleRef: {
|
|
apiGroup: "rbac.authorization.k8s.io",
|
|
kind: "Role",
|
|
name: env.role.metadata.name,
|
|
},
|
|
subjects: [
|
|
{
|
|
kind: "ServiceAccount",
|
|
name: env.sa.metadata.name,
|
|
namespace: env.sa.metadata.namespace,
|
|
},
|
|
],
|
|
},
|
|
|
|
service: kube.Service("ingress-nginx") {
|
|
metadata+: env.metadata,
|
|
target_pod:: env.deployment.spec.template,
|
|
spec+: {
|
|
type: "LoadBalancer",
|
|
ports: [
|
|
{ name: "ssh", port: 22, targetPort: 22, protocol: "TCP" },
|
|
{ name: "http", port: 80, targetPort: 80, protocol: "TCP" },
|
|
{ name: "https", port: 443, targetPort: 443, protocol: "TCP" },
|
|
],
|
|
},
|
|
},
|
|
|
|
serviceGitea: kube.Service("ingress-nginx-gitea") {
|
|
metadata+: env.metadata,
|
|
target_pod:: env.deployment.spec.template,
|
|
spec+: {
|
|
type: "LoadBalancer",
|
|
loadBalancerIP: "185.236.240.60",
|
|
ports: [
|
|
{ name: "ssh", port: 22, targetPort: 222, protocol: "TCP" },
|
|
{ name: "http", port: 80, targetPort: 80, protocol: "TCP" },
|
|
{ name: "https", port: 443, targetPort: 443, protocol: "TCP" },
|
|
],
|
|
},
|
|
},
|
|
|
|
deployment: kube.Deployment("nginx-ingress-controller") {
|
|
metadata+: env.metadata,
|
|
spec+: {
|
|
replicas: 5,
|
|
template+: {
|
|
spec+: {
|
|
serviceAccountName: env.sa.metadata.name,
|
|
containers_: {
|
|
controller: kube.Container("nginx-ingress-controller") {
|
|
image: cfg.image,
|
|
args: [
|
|
"/nginx-ingress-controller",
|
|
"--configmap=%s/%s" % [cfg.namespace, env.maps.configuration.metadata.name],
|
|
"--tcp-services-configmap=%s/%s" % [cfg.namespace, env.maps.tcp.metadata.name],
|
|
"--udp-services-configmap=%s/%s" % [cfg.namespace, env.maps.udp.metadata.name],
|
|
"--publish-service=%s/%s" % [cfg.namespace, env.service.metadata.name],
|
|
"--annotations-prefix=nginx.ingress.kubernetes.io",
|
|
],
|
|
env_: {
|
|
POD_NAME: kube.FieldRef("metadata.name"),
|
|
POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
|
|
},
|
|
ports_: {
|
|
http: { containerPort: 80 },
|
|
https: { containerPort: 443 },
|
|
},
|
|
livenessProbe: {
|
|
failureThreshold: 3,
|
|
httpGet: {
|
|
path: "/healthz",
|
|
port: 10254,
|
|
scheme: "HTTP",
|
|
},
|
|
initialDelaySeconds: 10,
|
|
periodSeconds: 10,
|
|
successThreshold: 1,
|
|
timeoutSeconds: 10,
|
|
},
|
|
readinessProbe: {
|
|
failureThreshold: 3,
|
|
httpGet: {
|
|
path: "/healthz",
|
|
port: 10254,
|
|
scheme: "HTTP",
|
|
},
|
|
periodSeconds: 10,
|
|
successThreshold: 1,
|
|
timeoutSeconds: 10,
|
|
},
|
|
securityContext: {
|
|
allowPrivilegeEscalation: true,
|
|
capabilities: {
|
|
drop: ["ALL"],
|
|
add: ["NET_BIND_SERVICE"],
|
|
},
|
|
runAsUser: 33,
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
},
|
|
}
|