1
0
Fork 0

bazel-cache: deploy, add waw-hdd-yolo-1 ceph pool

master
q3k 2019-05-17 18:08:48 +02:00
parent a4b3767455
commit 36cc4fb61a
4 changed files with 138 additions and 0 deletions

View File

@ -0,0 +1,108 @@
# bazel-cache.k0.hswaw.net, a Bazel Cache based on buchgr/bazel-remote.
# Once https://github.com/bazelbuild/bazel/pull/4889 gets merged, this will
# likely be replaced with just an Rados GW instance.
local kube = import "../../kube/kube.libsonnet";

{
    local app = self,
    local cfg = app.cfg,

    # Configuration knobs; hidden (::) so they do not render as Kubernetes
    # objects. Override from an evaluating file to retarget the deployment.
    cfg:: {
        namespace: "bazel-cache",
        domain: "k0.hswaw.net",
        # Backing storage class. Per cluster docs, "yolo" pools are
        # unreplicated — acceptable here because cache contents are
        # expendable and can be rebuilt.
        storageClassName: "waw-hdd-yolo-1",
    },

    # Shared object metadata: the common namespace plus app labels, with a
    # per-object component label.
    metadata(component):: {
        namespace: cfg.namespace,
        labels: {
            "app.kubernetes.io/name": "bazel-cache",
            "app.kubernetes.io/managed-by": "kubecfg",
            "app.kubernetes.io/component": component,
        },
    },

    namespace: kube.Namespace(cfg.namespace),

    # 40Gi ReadWriteOnce volume holding the cache data, mounted at /data in
    # the deployment below.
    volumeClaim: kube.PersistentVolumeClaim("bazel-cache-storage") {
        metadata+: app.metadata("bazel-cache-storage"),
        spec+: {
            storageClassName: cfg.storageClassName,
            accessModes: [ "ReadWriteOnce" ],
            resources: {
                requests: {
                    storage: "40Gi",
                },
            },
        },
    },

    # Single bazel-remote server pod; replicas is 1 and the claim above is
    # ReadWriteOnce, so do not scale this up without changing the storage.
    deployment: kube.Deployment("bazel-remote") {
        metadata+: app.metadata("bazel-cache"),
        spec+: {
            replicas: 1,
            template+: {
                spec+: {
                    volumes_: {
                        data: kube.PersistentVolumeClaimVolume(app.volumeClaim),
                    },
                    containers_: {
                        # NOTE(review): the key "auth" looks like a copy-paste
                        # leftover — this is the bazel-remote server itself,
                        # not an auth sidecar. Renaming is probably safe, but
                        # confirm first whether kube.libsonnet derives the
                        # container name from this map key or from the
                        # Container("bazel-remote") argument.
                        auth: kube.Container("bazel-remote") {
                            image: "buchgr/bazel-remote-cache",
                            volumeMounts_: {
                                data: { mountPath: "/data" },
                            },
                            ports_: {
                                http: {
                                    containerPort: 8080,
                                    protocol: "TCP",
                                },
                            },
                        },
                    },
                },
            },
        },
    },

    # ClusterIP service fronting the deployment's HTTP port.
    service: kube.Service("bazel-cache") {
        metadata+: app.metadata("bazel-cache"),
        # Hidden field; presumably consumed by kube.libsonnet to derive the
        # service selector from the deployment's pod template — verify
        # against kube.libsonnet.
        target_pod:: app.deployment.spec.template,
        spec+: {
            type: "ClusterIP",
            ports: [
                { name: "http", port: 8080, targetPort: 8080, protocol: "TCP" },
            ],
        }
    },

    # Public HTTPS ingress at bazel-cache.<cfg.domain>, with a Let's Encrypt
    # certificate provisioned via cert-manager (tls-acme annotation).
    ingress: kube.Ingress("bazel-cache") {
        metadata+: app.metadata("bazel-cache") {
            annotations+: {
                "kubernetes.io/tls-acme": "true",
                "certmanager.k8s.io/cluster-issuer": "letsencrypt-prod",
                "nginx.ingress.kubernetes.io/backend-protocol": "HTTP",
                # "0" removes nginx's request body size limit; cache
                # artifact uploads can be arbitrarily large.
                "nginx.ingress.kubernetes.io/proxy-body-size": "0",
            },
        },
        spec+: {
            tls: [
                {
                    hosts: ["bazel-cache.%s" % [cfg.domain]],
                    secretName: "bazel-cache-tls",
                },
            ],
            rules: [
                {
                    host: "bazel-cache.%s" % [cfg.domain],
                    http: {
                        paths: [
                            { path: "/", backend: app.service.name_port },
                        ]
                    },
                }
            ],
        },
    },
}

View File

@ -34,6 +34,7 @@ HDDs on bc01n0{1-3}. 3TB total capacity.
The following storage classes use this cluster:
- `waw-hdd-redundant-1` - erasure coded 2.1
- `waw-hdd-yolo-1` - unreplicated (you _will_ lose your data)
- `waw-hdd-redundant-1-object` - erasure coded 2.1 object store
A dashboard is available at https://ceph-waw1.hswaw.net/. To get the admin password, run:

View File

@ -122,6 +122,15 @@ local Cluster(fqdn) = {
},
},
},
// Unreplicated ("yolo") block storage: a single copy of each object per
// failure domain (host), so losing any one host loses data. Suitable only
// for expendable data such as build caches.
cephWaw1Yolo: rook.ReplicatedBlockPool(cluster.cephWaw1, "waw-hdd-yolo-1") {
    spec: {
        replicated: { size: 1 },
        failureDomain: "host",
    },
},
cephWaw1Object: rook.S3ObjectStore(cluster.cephWaw1, "waw-hdd-redundant-1-object") {
spec: {
metadataPool: {

View File

@ -469,6 +469,26 @@ local kube = import "../../../kube/kube.libsonnet";
}
},
// A replicated Ceph block pool in the given Rook cluster, exposed to
// Kubernetes as a StorageClass of the same name. Callers must override
// `spec` with a CephBlockPool spec (replication/failure-domain settings).
ReplicatedBlockPool(cluster, name):: {
    local this = self,
    spec:: error "spec must be specified",

    // The CephBlockPool custom resource itself.
    pool: kube._Object("ceph.rook.io/v1", "CephBlockPool", name) {
        metadata+: cluster.metadata,
        spec: this.spec,
    },

    // StorageClass provisioning ext4-formatted volumes from the pool.
    // Retain: PVs (and the underlying data) survive PVC deletion.
    storageClass: kube.StorageClass(name) {
        provisioner: "ceph.rook.io/block",
        reclaimPolicy: "Retain",
        parameters: {
            clusterNamespace: this.pool.metadata.namespace,
            blockPool: this.pool.metadata.name,
            fstype: "ext4",
        },
    },
},
ECBlockPool(cluster, name):: {
local pool = self,
spec:: error "spec must be specified",