Skip to content

Commit

Permalink
Merge pull request #116 from gboutry/fix/microceph-replicas
Browse files Browse the repository at this point in the history
Compute number of OSDs to decide number of replicas
  • Loading branch information
hemanthnakkina authored Feb 9, 2024
2 parents 5f27f4a + 9cce59d commit b1f7843
Show file tree
Hide file tree
Showing 4 changed files with 46 additions and 24 deletions.
2 changes: 1 addition & 1 deletion cloud/etc/deploy-microceph/variables.tf
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ variable "charm_microceph_config" {

variable "microceph_channel" {
  # NOTE(review): description previously said "K8S channel", a copy-paste
  # from the k8s variable; this variable selects the microceph charm channel.
  description = "Microceph channel to deploy, not the operator channel"
  default     = "reef/stable"
}

variable "machine_ids" {
Expand Down
26 changes: 19 additions & 7 deletions sunbeam-python/sunbeam/commands/microceph.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,22 @@ def microceph_questions():
}


async def list_disks(jhelper: JujuHelper, model: str, unit: str) -> tuple[list, list]:
    """Call list-disks action on an unit.

    :param jhelper: Juju helper used to run the action.
    :param model: name of the model the unit lives in.
    :param unit: entity id of the unit to run the action on.
    :return: tuple of (osds, unpartitioned_disks); each is a list of dicts
        as reported by the charm (annotation fixed: literal_eval of the
        ``"[]"`` defaults yields lists, not dicts).
    """
    LOG.debug("Running list-disks on : %r", unit)
    action_result = await jhelper.run_action(unit, model, "list-disks")
    LOG.debug(
        "Result after running action list-disks on %r: %r",
        unit,
        action_result,
    )
    # Action results arrive as repr'd strings; literal_eval is safe here
    # because it only accepts Python literals (no arbitrary code).
    osds = ast.literal_eval(action_result.get("osds", "[]"))
    unpartitioned_disks = ast.literal_eval(
        action_result.get("unpartitioned-disks", "[]")
    )
    return osds, unpartitioned_disks


class DeployMicrocephApplicationStep(DeployMachineApplicationStep):
"""Deploy Microceph application using Terraform"""

Expand Down Expand Up @@ -165,14 +181,10 @@ def get_unpartitioned_disks(self) -> list:
unit = run_sync(
self.jhelper.get_unit_from_machine(APPLICATION, self.machine_id, MODEL)
)
LOG.debug(f"Running action list-disks on {unit.entity_id}")
action_result = run_sync(
self.jhelper.run_action(unit.entity_id, MODEL, "list-disks")
_, unpartitioned_disks = run_sync(
list_disks(self.jhelper, MODEL, unit.entity_id)
)
LOG.debug(f"Result after running action list-disks: {action_result}")

disks = ast.literal_eval(action_result.get("unpartitioned-disks", "[]"))
unpartitioned_disks = [disk.get("path") for disk in disks]
unpartitioned_disks = [disk.get("path") for disk in unpartitioned_disks]
# Remove duplicates if any
unpartitioned_disks = list(set(unpartitioned_disks))
if OSD_PATH_PREFIX in unpartitioned_disks:
Expand Down
26 changes: 17 additions & 9 deletions sunbeam-python/sunbeam/commands/openstack.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,10 +22,10 @@
from lightkube.resources.core_v1 import Service
from rich.status import Status

import sunbeam.commands.microceph as microceph
from sunbeam.clusterd.client import Client
from sunbeam.clusterd.service import ConfigItemNotFoundException
from sunbeam.commands.juju import JujuStepHelper
from sunbeam.commands.microceph import APPLICATION as MICROCEPH_APPLICATION
from sunbeam.commands.microk8s import (
CREDENTIAL_SUFFIX,
MICROK8S_CLOUD,
Expand Down Expand Up @@ -117,10 +117,15 @@ def compute_ingress_scale(topology: str, control_nodes: int) -> int:
return control_nodes


def compute_ceph_replica_scale(topology: str, storage_nodes: int) -> int:
if topology == "single" or storage_nodes < 2:
return 1
return min(storage_nodes, 3)
def compute_ceph_replica_scale(osds: int) -> int:
    """Return the Ceph replication count for the given number of OSDs.

    The replica count grows with the OSD count but never exceeds 3.
    """
    cap = 3
    return osds if osds < cap else cap


async def _get_number_of_osds(jhelper: JujuHelper) -> int:
    """Fetch the number of osds from the microceph application"""
    # Ask the leader unit for its disk report and count the OSD entries.
    leader_unit = await jhelper.get_leader_unit(microceph.APPLICATION, microceph.MODEL)
    disks, _unpartitioned = await microceph.list_disks(
        jhelper, microceph.MODEL, leader_unit
    )
    return len(disks)


class DeployControlPlaneStep(BaseStep, JujuStepHelper):
Expand Down Expand Up @@ -154,8 +159,11 @@ def get_storage_tfvars(self) -> dict:
tfvars = {}
storage_nodes = self.client.cluster.list_nodes_by_role("storage")
if storage_nodes:
tfvars["ceph-osd-replication-count"] = compute_ceph_replica_scale(
run_sync(_get_number_of_osds(self.jhelper))
)
tfvars["enable-ceph"] = True
tfvars["ceph-offer-url"] = f"{CONTROLLER_MODEL}.{MICROCEPH_APPLICATION}"
tfvars["ceph-offer-url"] = f"{CONTROLLER_MODEL}.{microceph.APPLICATION}"
else:
tfvars["enable-ceph"] = False

Expand Down Expand Up @@ -325,11 +333,11 @@ def run(self, status: Optional[Status] = None) -> Result:
"ha-scale": compute_ha_scale(topology, len(control_nodes)),
"os-api-scale": compute_os_api_scale(topology, len(control_nodes)),
"ingress-scale": compute_ingress_scale(topology, len(control_nodes)),
"enable-ceph": len(storage_nodes) > 0,
"ceph-offer-url": f"{CONTROLLER_MODEL}.{microceph.APPLICATION}",
"ceph-osd-replication-count": compute_ceph_replica_scale(
topology, len(storage_nodes)
run_sync(_get_number_of_osds(self.jhelper))
),
"enable-ceph": len(storage_nodes) > 0,
"ceph-offer-url": f"{CONTROLLER_MODEL}.{MICROCEPH_APPLICATION}",
}

self.update_status(status, "scaling services")
Expand Down
16 changes: 9 additions & 7 deletions sunbeam-python/tests/unit/sunbeam/commands/test_openstack.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ def __init__(self, methodName: str = "runTest") -> None:

def setUp(self):
self.jhelper = AsyncMock()
self.jhelper.run_action.return_value = {}
self.manifest = Mock()
self.client = Mock()

Expand Down Expand Up @@ -186,6 +187,7 @@ def setUp(self):
)
self.read_config.start()
self.jhelper = AsyncMock()
self.jhelper.run_action.return_value = {}
self.manifest = Mock()

def tearDown(self):
Expand Down Expand Up @@ -412,16 +414,16 @@ def test_compute_ingress_scale(topology, control_nodes, scale):


@pytest.mark.parametrize(
    "osds,scale",
    [
        # Fix: the case (1, 1) was listed twice (left over from collapsing
        # the old topology parameter); replace the duplicate with the zero
        # and cap-boundary cases for real coverage.
        (0, 0),
        (1, 1),
        (2, 2),
        (3, 3),
        (9, 3),
    ],
)
def test_compute_ceph_replica_scale(osds, scale):
    """Replica count tracks the OSD count and is capped at 3."""
    assert compute_ceph_replica_scale(osds) == scale


class TestReapplyOpenStackTerraformPlanStep(unittest.TestCase):
Expand Down

0 comments on commit b1f7843

Please sign in to comment.