Merge "cluster/rook: bump to 1.1.9"

changes/07/307/1
q3k 2020-05-11 18:16:24 +00:00 committed by Gerrit Code Review
commit 1743a6b4f3
2 changed files with 616 additions and 114 deletions

View File

@@ -3,12 +3,14 @@
local kube = import "../../../kube/kube.libsonnet";
local policies = import "../../../kube/policies.libsonnet";
local oa = kube.OpenAPI;
{
Operator: {
local env = self,
local cfg = env.cfg,
cfg:: {
image: "rook/ceph:v1.0.6",
image: "rook/ceph:v1.1.9",
namespace: "rook-ceph-system",
},
@@ -25,106 +27,135 @@ local policies = import "../../../kube/policies.libsonnet";
policyInsecure: policies.AllowNamespaceInsecure(cfg.namespace),
crds: {
cephclusters: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephCluster") {
# BUG: cannot control this because of:
# ERROR Error updating customresourcedefinitions cephclusters.ceph.rook.io: expected kind, but got map
# TODO(q3k): debug and fix kubecfg (it's _not_ just https://github.com/bitnami/kubecfg/issues/259 )
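# (The trailing '::' below makes the field hidden, so it is not emitted or managed,
# while remaining addressable from other Jsonnet code. A minimal sketch of the
# mechanism, with hypothetical fields: { hidden:: { a: 1 }, visible: self.hidden.a }
# manifests as { visible: 1 }.)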
cephclusters:: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephCluster") {
spec+: {
additionalPrinterColumns: [
{ name: "DataDirHostPath", type: "string", description: "Directory used on the K8s nodes", JSONPath: ".spec.dataDirHostPath" },
{ name: "MonCount", type: "string", description: "Number of MONs", JSONPath: ".spec.mon.count" },
{ name: "Age", type: "date", JSONPath: ".metadata.creationTimestamp" },
{ name: "State", type: "string", description: "Current State", JSONPath: ".status.state" },
{ name: "Health", type: "string", description: "Ceaph Health", JSONPath: ".status.ceph.health" },
],
validation: {
# Converted from official operator YAML
"openAPIV3Schema": {
"properties": {
"spec": {
"properties": {
"cephVersion": {
"properties": {
"allowUnsupported": {
"type": "boolean"
},
"image": {
"type": "string"
},
"name": {
"pattern": "^(luminous|mimic|nautilus)$",
"type": "string"
}
}
},
"dashboard": {
"properties": {
"enabled": {
"type": "boolean"
},
"urlPrefix": {
"type": "string"
},
"port": {
"type": "integer"
}
}
},
"dataDirHostPath": {
"pattern": "^/(\\S+)",
"type": "string"
},
"mon": {
"properties": {
"allowMultiplePerNode": {
"type": "boolean"
},
"count": {
"maximum": 9,
"minimum": 1,
"type": "integer"
},
"preferredCount": {
"maximum": 9,
"minimum": 0,
"type": "integer"
}
},
"required": [
"count"
]
},
"network": {
"properties": {
"hostNetwork": {
"type": "boolean"
}
}
},
"storage": {
"properties": {
"nodes": {
"items": {},
"type": "array"
},
"useAllDevices": {},
"useAllNodes": {
"type": "boolean"
}
}
}
},
"required": [
"mon"
]
}
}
}
},
validation: oa.Validation(oa.Dict {
spec: oa.Dict {
annotations: oa.Any,
cephVersion: oa.Dict {
allowUnsupported: oa.Boolean,
image: oa.String,
},
dashboard: oa.Dict {
enabled: oa.Boolean,
urlPrefix: oa.String,
port: oa.Integer { minimum: 0, maximum: 65535 },
ssl: oa.Boolean,
},
dataDirHostPath: oa.String { pattern: "^/(\\S+)" },
skipUpgradeChecks: oa.Boolean,
mon: oa.Dict {
allowMultiplePerNode: oa.Boolean,
count: oa.Integer { minimum: 0, maximum: 9 },
preferredCount: oa.Integer { minimum: 0, maximum: 9 },
},
mgr: oa.Dict {
modules: oa.Array(oa.Dict {
name: oa.String,
enabled: oa.Boolean,
}),
},
network: oa.Dict {
hostNetwork: oa.Boolean,
},
storage: oa.Dict {
disruptionManagement: oa.Dict {
managePodBudgets: oa.Boolean,
osdMaintenanceTimeout: oa.Integer,
manageMachineDisruptionBudgets: oa.Boolean,
},
useAllNodes: oa.Boolean,
nodes: oa.Array(oa.Dict {
name: oa.String,
config: oa.Dict {
metadataDevice: oa.String,
storeType: oa.String { pattern: "^(filestore|bluestore)$" },
databaseSizeMB: oa.String,
walSizeMB: oa.String,
journalSizeMB: oa.String,
osdsPerDevice: oa.String,
encryptedDevice: oa.String { pattern: "^(true|false)$" },
},
"required": [
"mon"
]
}
}
}
}
useAllDevices: oa.Boolean,
deviceFilter: oa.Any,
directories: oa.Array(oa.Dict {
path: oa.String,
}),
devices: oa.Array(oa.Dict {
name: oa.String,
}),
location: oa.Any,
resources: oa.Any,
}),
useAllDevices: oa.Boolean,
deviceFilter: oa.Any,
location: oa.Any,
directories: oa.Array(oa.Dict {
path: oa.String,
}),
config: oa.Any,
topologyAware: oa.Boolean,
},
monitoring: oa.Dict {
enabled: oa.Boolean,
rulesNamespace: oa.String,
},
rbdMirroring: oa.Dict {
workers: oa.Integer,
},
placement: oa.Any,
resources: oa.Any,
},
}),
},
},
cephfilesystems: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephFilesystem") {
spec+: {
additionalPrinterColumns: [
{ name: "MdsCount", type: "string", description: "Number of MDs", JSONPath: ".spec.metadataServer.activeCount" },
{ name: "ActiveMDS", type: "string", description: "Number of desired active MDS daemons", JSONPath: ".spec.metadataServer.activeCount" },
{ name: "Age", type: "date", JSONPath: ".metadata.creationTimestamp" },
],
validation: oa.Validation(oa.Dict {
spec: oa.Dict {
metadataServer: oa.Dict {
activeCount: oa.Integer,
activeStandby: oa.Boolean,
annotations: oa.Any,
placement: oa.Any,
resources: oa.Any,
},
metadataPool: oa.Dict {
failureDomain: oa.String,
replicated: oa.Dict {
size: oa.Integer,
},
erasureCoded: oa.Dict {
dataChunks: oa.Integer,
codingChunks: oa.Integer,
},
},
dataPools: oa.Array(oa.Dict {
failureDomain: oa.String,
replicated: oa.Dict {
size: oa.Integer,
},
erasureCoded: oa.Dict {
dataChunks: oa.Integer,
codingChunks: oa.Integer,
},
}),
},
}),
},
},
cephnfses: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephNFS") {
@@ -133,9 +164,52 @@ local policies = import "../../../kube/policies.libsonnet";
plural: "cephnfses",
shortNames: ["nfs"],
},
validation: oa.Validation(oa.Dict {
spec: oa.Dict {
rados: oa.Dict {
pool: oa.String,
namespace: oa.String,
},
server: oa.Dict {
active: oa.Integer,
annotations: oa.Any,
placement: oa.Any,
resources: oa.Any,
},
},
}),
},
},
cephobjectstores: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephObjectStore") {
spec+: {
validation: oa.Validation(oa.Dict {
spec: oa.Dict {
gateway: oa.Dict {
type: oa.String,
sslCertificateRef: oa.Any,
port: oa.Integer,
securePort: oa.Any,
instances: oa.Integer,
annotations: oa.Any,
placement: oa.Any,
resources: oa.Any,
},
local poolDef = oa.Dict {
failureDomain: oa.String,
replicated: oa.Dict {
size: oa.Integer,
},
erasureCoded: oa.Dict {
dataChunks: oa.Integer,
codingChunks: oa.Integer,
},
},
metadataPool: poolDef,
dataPool: poolDef,
},
}),
},
},
cephobjectstores: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephObjectStore"),
cephobjectstoreusers: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephObjectStoreUser"),
cephblockpools: kube.CustomResourceDefinition("ceph.rook.io", "v1", "CephBlockPool"),
volumes: kube.CustomResourceDefinition("rook.io", "v1alpha2", "Volume") {
@@ -145,10 +219,41 @@ local policies = import "../../../kube/policies.libsonnet";
},
},
},
objectbuckets: kube.CustomResourceDefinition("objectbucket.io", "v1alpha1", "ObjectBucket") {
spec+: {
names+: {
shortNames: ["ob", "obs"],
},
scope: "Cluster",
subresources: { status: {} },
},
},
objectbucketclaims: kube.CustomResourceDefinition("objectbucket.io", "v1alpha1", "ObjectBucketClaim") {
spec+: {
names+: {
shortNames: ["obc", "obcs"],
},
subresources: { status: {} },
},
},
},
sa: kube.ServiceAccount("rook-ceph-system") {
metadata+: env.metadata,
sa: {
system: kube.ServiceAccount("rook-ceph-system") {
metadata+: env.metadata,
},
csiCephfsPlugin: kube.ServiceAccount("rook-csi-cephfs-plugin-sa") {
metadata+: env.metadata,
},
csiCephfsProvisioner: kube.ServiceAccount("rook-csi-cephfs-provisioner-sa") {
metadata+: env.metadata,
},
csiRbdPlugin: kube.ServiceAccount("rook-csi-rbd-plugin-sa") {
metadata+: env.metadata,
},
csiRbdProvisioner: kube.ServiceAccount("rook-csi-rbd-provisioner-sa") {
metadata+: env.metadata,
},
},
crs: {
@@ -183,7 +288,7 @@ local policies = import "../../../kube/policies.libsonnet";
{
apiGroups: ["storage.k8s.io"],
resources: ["storageclasses"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["batch"],
@@ -200,46 +305,356 @@ local policies = import "../../../kube/policies.libsonnet";
resources: ["*"],
verbs: ["*"],
},
{
apiGroups: ["policy", "apps"],
resources: ["poddisruptionbudgets", "deployments"],
verbs: ["*"],
},
],
},
// Upstream rook uses split ClusterRoles, with the 'main' role (eg rook-ceph-cluster-mgmt)
// using aggregationRules to point to a '-rules' role (eg rook-ceph-cluster-mgmt-rules) which
// contains the actual role rules. This was done to permit a better upgrade experience on
// systems that only allow recreation of ClusterRoles (see https://github.com/rook/rook/issues/2634
// for more background information).
// We do not use this split because our update mechanism is not broken. However, it seems
// that Rook has started to use these split rules for other reasons, too. For instance, the
// mgr-cluster role in upstream not only aggregates its equivalent -rules role, but also
// the rook-ceph-object-bucket role. As such, we split mgr-cluster as upstream does.
// In the future, we may split the rest of the roles in order to stay consistent with upstream.
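//
// For reference, a minimal sketch of how such aggregation behaves (names hypothetical,
// not resources from this file): the ClusterRole aggregation controller copies the
// rules of every ClusterRole matched by the selectors into the aggregated role, so
//   ClusterRole "parent": aggregationRule: { clusterRoleSelectors: [{ matchLabels: { agg: "true" } }] }
//   ClusterRole "child":  metadata.labels: { agg: "true" }, rules: [R]
// results in the API server serving "parent" with rules: [R].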
mgrCluster: kube.ClusterRole("rook-ceph-mgr-cluster") {
metadata+: env.metadata { namespace:: null },
aggregationRule: {
clusterRoleSelectors: [
{ matchLabels: { "rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster": "true" }},
],
},
},
mgrClusterRules: kube.ClusterRole("rook-ceph-mgr-cluster-rules") {
metadata+: env.metadata {
namespace:: null,
labels+: {
"rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster": "true",
},
},
rules: [
{
apiGroups: [""],
resources: ["configmaps", "nodes", "nodes/proxy"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["create", "patch", "list", "get", "watch"],
},
],
},
objectBucket: kube.ClusterRole("rook-ceph-object-bucket") {
metadata+: env.metadata {
namespace:: null,
labels+: {
"rbac.ceph.rook.io/aggregate-to-rook-ceph-mgr-cluster": "true",
},
},
rules: [
{
apiGroups: [""],
resources: ["secrets", "configmaps"],
verbs: ["*"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["storageclasses"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["objectbucket.io"],
resources: ["*"],
verbs: ["*"],
},
],
},
cephfsCSINodeplugin: kube.ClusterRole("cephfs-csi-nodeplugin") {
metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get", "list", "update"],
},
{
apiGroups: [""],
resources: ["namespaces"],
verbs: ["get", "list"],
},
{
apiGroups: [""],
resources: ["persistentvolumes"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["volumeattachments"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get", "list"],
},
],
},
cephfsExternalProvisionerRunner: kube.ClusterRole("cephfs-external-provisioner-runner") {
metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
resources: ["secrets"],
verbs: ["get", "list"],
},
{
apiGroups: [""],
resources: ["persistentvolumes"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
{
apiGroups: [""],
resources: ["persistentvolumeclaims"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["storageclasses"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["list", "watch", "create", "update", "patch"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["volumeattachments"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get", "list", "watch"],
},
],
},
rbdCSINodeplugin: kube.ClusterRole("rbd-csi-nodeplugin") {
metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
resources: ["secrets"],
verbs: ["get", "list"],
},
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get", "list", "update"],
},
{
apiGroups: [""],
resources: ["namespaces"],
verbs: ["get", "list"],
},
{
apiGroups: [""],
resources: ["persistentvolumes"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["volumeattachments"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get", "list"],
},
],
},
rbdExternalProvisionerRunner: kube.ClusterRole("rbd-external-provisioner-runner") {
metadata+: env.metadata { namespace:: null },
rules: [
{
apiGroups: [""],
resources: ["secrets"],
verbs: ["get", "list"],
},
{
apiGroups: [""],
resources: ["persistentvolumes"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
{
apiGroups: [""],
resources: ["persistentvolumeclaims"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["volumeattachments"],
verbs: ["get", "list", "watch", "update"],
},
{
apiGroups: [""],
resources: ["nodes"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["storage.k8s.io"],
resources: ["storageclasses"],
verbs: ["get", "list", "watch"]
},
{
apiGroups: [""],
resources: ["events"],
verbs: ["list", "watch", "create", "update", "patch"],
},
{
apiGroups: ["snapshot.storage.k8s.io"],
resources: ["volumesnapshotcontents"],
verbs: ["create", "get", "list", "watch", "update", "delete"],
},
{
apiGroups: ["snapshot.storage.k8s.io"],
resources: ["volumesnapshotclasses"],
verbs: ["get", "list", "watch"],
},
{
apiGroups: ["apiextensions.k8s.io"],
resources: ["customresourcedefinitions"],
verbs: ["create", "list", "watch", "delete", "get", "update"],
},
{
apiGroups: ["snapshot.storage.k8s.io"],
resources: ["volumesnapshots/status"],
verbs: ["update"],
},
],
},
},
crb: kube.ClusterRoleBinding("ceph-rook-global") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.global,
subjects_: [env.sa],
crbs: {
global: kube.ClusterRoleBinding("ceph-rook-global") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.global,
subjects_: [env.sa.system],
},
objectBucket: kube.ClusterRoleBinding("rook-ceph-object-bucket") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.objectBucket,
subjects_: [env.sa.system],
},
cephfsCSINodeplugin: kube.ClusterRoleBinding("cephfs-csi-nodeplugin") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.cephfsCSINodeplugin,
subjects_: [env.sa.csiCephfsPlugin],
},
cephfsCSIProvisioner: kube.ClusterRoleBinding("cephfs-csi-provisioner") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.cephfsExternalProvisionerRunner,
subjects_: [env.sa.csiCephfsProvisioner],
},
rbdCSINodeplugin: kube.ClusterRoleBinding("rbd-csi-nodeplugin") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.rbdCSINodeplugin,
subjects_: [env.sa.csiRbdPlugin],
},
rbdCSIProvisioner: kube.ClusterRoleBinding("rbd-csi-provisioner") {
metadata+: env.metadata { namespace:: null },
roleRef_: env.crs.rbdExternalProvisionerRunner,
subjects_: [env.sa.csiRbdProvisioner],
},
},
role: kube.Role("ceph-rook-system") {
metadata+: env.metadata,
rules: [
{
apiGroups: [""],
resources: ["pods", "configmaps"],
verbs: ["get", "list", "watch", "patch", "create", "update", "delete"],
},
{
apiGroups: ["apps"],
resources: ["daemonsets"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
],
roles: {
system: kube.Role("ceph-rook-system") {
metadata+: env.metadata,
rules: [
{
apiGroups: [""],
resources: ["pods", "configmaps", "services"],
verbs: ["get", "list", "watch", "patch", "create", "update", "delete"],
},
{
apiGroups: ["apps"],
resources: ["deployments", "statefulsets", "daemonsets"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
],
},
cephfsExternalProvisioner: kube.Role("cephfs-external-provisioner-cfg") {
metadata+: env.metadata,
rules: [
{
apiGroups: [""],
resources: ["endpoints"],
verbs: ["get", "watch", "list", "delete", "update", "create"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get", "list", "create", "delete"],
},
{
apiGroups: ["coordination.k8s.io"],
resources: ["leases"],
verbs: ["get" ,"watch", "list", "delete", "update", "create"],
},
],
},
rbdExternalProvisioner: kube.Role("rbd-external-provisioner-cfg") {
metadata+: env.metadata,
rules: [
{
apiGroups: [""],
resources: ["endpoints"],
verbs: ["get", "watch", "list", "delete", "update", "create"],
},
{
apiGroups: [""],
resources: ["configmaps"],
verbs: ["get", "list", "watch", "create", "delete"],
},
{
apiGroups: ["coordination.k8s.io"],
resources: ["leases"],
verbs: ["get" ,"watch", "list", "delete", "update", "create"],
},
],
},
},
rb: kube.RoleBinding("ceph-rook-system") {
metadata+: env.metadata,
roleRef_: env.role,
subjects_: [env.sa],
rbs: {
system: kube.RoleBinding("ceph-rook-system") {
metadata+: env.metadata,
roleRef_: env.roles.system,
subjects_: [env.sa.system],
},
cephfsCSIProvisioner: kube.RoleBinding("cephfs-csi-provisioner-role-cfg") {
metadata+: env.metadata,
roleRef_: env.roles.cephfsExternalProvisioner,
subjects_: [env.sa.csiCephfsProvisioner],
},
rbdCSIProvisioner: kube.RoleBinding("rbd-csi-provisioner-role-cfg") {
metadata+: env.metadata,
roleRef_: env.roles.rbdExternalProvisioner,
subjects_: [env.sa.csiRbdProvisioner],
},
},
operator: kube.Deployment("rook-ceph-operator") {
@@ -247,7 +662,7 @@ local policies = import "../../../kube/policies.libsonnet";
spec+: {
template+: {
spec+: {
serviceAccountName: env.sa.metadata.name,
serviceAccountName: env.sa.system.metadata.name,
containers_: {
operator: kube.Container("rook-ceph-operator") {
image: cfg.image,
@@ -269,6 +684,7 @@ local policies = import "../../../kube/policies.libsonnet";
NODE_NAME: kube.FieldRef("spec.nodeName"),
POD_NAME: kube.FieldRef("metadata.name"),
POD_NAMESPACE: kube.FieldRef("metadata.namespace"),
ROOK_CSI_KUBELET_DIR_PATH: "/var/lib/kubernetes",
},
},
},
@@ -304,6 +720,9 @@ local policies = import "../../../kube/policies.libsonnet";
mgr: kube.ServiceAccount("rook-ceph-mgr") {
metadata+: cluster.metadata,
},
cmdReporter: kube.ServiceAccount("rook-ceph-cmd-reporter") {
metadata+: cluster.metadata,
},
},
roles: {
@@ -337,6 +756,16 @@ local policies = import "../../../kube/policies.libsonnet";
},
],
},
cmdReporter: kube.Role(cluster.name("cmd-reporter")) {
metadata+: cluster.metadata,
rules: [
{
apiGroups: [""],
resources: ["pods", "configmaps"],
verbs: ["get", "list", "watch", "create", "update", "delete"],
},
],
},
mgrSystem: kube.ClusterRole(cluster.name("mgr-system")) {
metadata+: cluster.metadata { namespace:: null },
rules: [
@@ -357,9 +786,10 @@ local policies = import "../../../kube/policies.libsonnet";
},
for el in [
// Allow Operator SA to perform Cluster Mgmt in this namespace.
{ name: "cluster-mgmt", role: operator.crs.clusterMgmt, sa: operator.sa },
{ name: "cluster-mgmt", role: operator.crs.clusterMgmt, sa: operator.sa.system },
{ name: "osd", role: cluster.roles.osd, sa: cluster.sa.osd },
{ name: "mgr", role: cluster.roles.mgr, sa: cluster.sa.mgr },
{ name: "cmd-reporter", role: cluster.roles.cmdReporter, sa: cluster.sa.cmdReporter },
{ name: "mgr-cluster", role: operator.crs.mgrCluster, sa: cluster.sa.mgr },
]
],

View File

@@ -24,4 +24,76 @@ kube {
// secure way.
secret_name:: "rook-ceph-object-user-%s-%s" % [user.spec.store, user.spec.displayName],
},
// Make OpenAPI v3 schema specification less painful.
OpenAPI:: {
Validation(obj):: {
openAPIV3Schema: obj.render,
},
Dict:: {
local dict = self,
required:: false,
local requiredList = [
k for k in std.filter(function(k) dict[k].required, std.objectFields(dict))
],
render:: {
properties: {
[k]: dict[k].render
for k in std.objectFields(dict)
},
} + (if std.length(requiredList) > 0 then {
required: requiredList,
} else {}),
},
Array(items):: {
required:: false,
render:: {
type: "array",
items: items.render,
},
},
Integer:: {
local integer = self,
required:: false,
render:: {
type: "integer",
} + (if integer.minimum != null then {
minimum: integer.minimum,
} else {}) + (if integer.maximum != null then {
maximum: integer.maximum,
} else {}),
minimum:: null,
maximum:: null,
},
String:: {
local string = self,
required:: false,
render:: {
type: "string",
} + (if string.pattern != null then {
pattern: string.pattern,
} else {}),
pattern:: null,
},
Boolean:: {
required:: false,
render:: {
type: "boolean",
},
},
Any:: {
required:: false,
render:: {},
},
},
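// Usage sketch for the above (not part of this change; field names hypothetical):
//   local oa = kube.OpenAPI;
//   oa.Validation(oa.Dict {
//     count: oa.Integer { minimum: 1, required: true },
//     name: oa.String,
//   })
// evaluates to:
//   { openAPIV3Schema: { properties: {
//     count: { type: "integer", minimum: 1 },
//     name: { type: "string" },
//   }, required: ["count"] } }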
}