From 793ca1b3b2c568d2165fefa1680247a1c72047d5 Mon Sep 17 00:00:00 2001
From: Serge Bazanski
Date: Sun, 7 Mar 2021 00:07:19 +0000
Subject: [PATCH] cluster/kube: limit OSDs in ceph-waw3 to 8GB RAM

Each OSD is connected to a 6TB drive, and with the good ol' 1TB storage
-> 1GB RAM rule of thumb for OSDs, we end up with 6GB. Or, to round up,
8GB.

I'm doing this because over the past few weeks OSDs in ceph-waw3 have
been using a _ton_ of RAM. This will probably not prevent that (and
instead they will OOM more often :/), but it will at least prevent us
from wasting resources (k0 started migrating pods to other nodes, and
running nodes that full without an underlying request makes for a
terrible draining experience).

We need to get to the bottom of why this is happening in the first
place, though. Did this happen as we moved to containerd?

Followup: b.hswaw.net/29

Already deployed to production.

Change-Id: I98df63763c35017eb77595db7b9f2cce71756ed1
---
 cluster/kube/k0.libsonnet | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/cluster/kube/k0.libsonnet b/cluster/kube/k0.libsonnet
index 8d7d49fa..2ba60402 100644
--- a/cluster/kube/k0.libsonnet
+++ b/cluster/kube/k0.libsonnet
@@ -97,6 +97,19 @@ local rook = import "lib/rook.libsonnet";
                 count: 1,
                 allowMultiplePerNode: false,
             },
+            resources: {
+                osd: {
+                    requests: {
+                        cpu: "2",
+                        memory: "6G",
+                    },
+                    limits: {
+                        cpu: "2",
+                        memory: "8G",
+                    },
+                },
+
+            },
             storage: {
                 useAllNodes: false,
                 useAllDevices: false,
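
For reference, the sizing rule above written out as a Jsonnet expression. This is
a hypothetical helper for illustration only, not something defined in
k0.libsonnet: requests follow the 1GB RAM per 1TB of OSD storage rule of thumb,
and the limit is the rounded-up figure passed in separately.

// Hypothetical sketch, not part of this change.
// 1TB of OSD storage -> 1GB of RAM requested; the limit is rounded up by the caller.
local osdResources(driveTB, limitGB) = {
    requests: {
        cpu: "2",
        memory: "%dG" % driveTB,
    },
    limits: {
        cpu: "2",
        memory: "%dG" % limitGB,
    },
};

// osdResources(6, 8) evaluates to exactly the requests/limits added in this change.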