def select_pool(self):
    query = {
        "cru": self.resources["cpu"],
        "mru": math.ceil(self.resources["memory"] / 1024),
        "sru": math.ceil(self.resources["disk_size"] / 1024),
    }
    cu, su = deployer.calculate_capacity_units(**query)
    self.pool_id = deployer.select_pool(self, cu=cu, su=su, **query)
def select_pool(self):
    query = {
        "cru": self.resources["cpu"],
        "mru": math.ceil(self.resources["memory"] / 1024),
        "sru": math.ceil(self.resources["disk_size"] / 1024),
    }
    # Account for the attached volume by adding its size to the storage (sru) query.
    if self.container_volume_attach:
        query["sru"] += math.ceil(self.vol_size / 1024)
    cu, su = deployer.calculate_capacity_units(**query)
    self.pool_id = deployer.select_pool(self, cu=cu, su=su, **query)
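# A minimal, self-contained sketch (not part of the original module) showing the
# capacity query the two select_pool variants build. The resource figures and the
# attached-volume size below are made-up example values; only the standard library
# math module is required.
import math

resources = {"cpu": 2, "memory": 4096, "disk_size": 10240}  # memory/disk in MB
vol_size = 5120  # MB, hypothetical attached volume
container_volume_attach = True

query = {
    "cru": resources["cpu"],                          # vCPUs
    "mru": math.ceil(resources["memory"] / 1024),     # 4 GB
    "sru": math.ceil(resources["disk_size"] / 1024),  # 10 GB
}
if container_volume_attach:
    query["sru"] += math.ceil(vol_size / 1024)        # + 5 GB for the attached volume
assert query == {"cru": 2, "mru": 4, "sru": 15}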
from collections import defaultdict


# j, deployer, GlobalScheduler, get_zos, wait_pool_reservation, DeploymentFailed,
# DiskType and ZDBMode are provided by the surrounding js-sdk module.
def extend_zdbs(name, pool_ids, solution_uuid, password, duration, size=10, wallet_name=None, nodes_ids=None, disk_type=DiskType.HDD):
    """
    1- create/extend pools with enough cloud units for the new zdbs
    2- deploy a zdb with the same size and password for each wid
    3- build the newly installed zdbs config
    4- return wids, password
    """
    description = j.data.serializers.json.dumps({"vdc_uuid": solution_uuid})
    wallet_name = wallet_name or j.core.config.get("S3_AUTO_TOPUP_WALLET")
    wallet = j.clients.stellar.get(wallet_name)
    zos = get_zos()
    reservations = []
    storage_query = {"hru": size}
    if disk_type == DiskType.SSD:
        storage_query = {"sru": size}

    # Aggregate the required storage units (su) per pool.
    pool_total_sus = defaultdict(int)
    for pool_id in pool_ids:
        cloud_units = deployer.calculate_capacity_units(**storage_query)
        su = cloud_units.su
        pool_total_sus[pool_id] += su

    # Extend each pool for the requested duration and pay the farmer from the top-up wallet.
    for pool_id, su in pool_total_sus.items():
        su = su * duration
        pool_info = zos.pools.extend(pool_id, 0, su, 0)
        j.logger.info(
            f"AUTO TOPUP: extending pool {pool_id} with sus: {su}, reservation_id: {pool_info.reservation_id}"
        )
        zos.billing.payout_farmers(wallet, pool_info)
        reservations.append({"pool_id": pool_id, "reservation_id": pool_info.reservation_id})

    # Wait for the pool extension reservations to be applied.
    for reservation in reservations:
        if not wait_pool_reservation(reservation["reservation_id"]):
            j.logger.warning(
                f"pool {reservation['pool_id']} extension timed out for reservation: {reservation['reservation_id']}"
            )
            continue

    gs = GlobalScheduler()
    wids = []
    for pool_id in pool_ids:
        nodes_generator = gs.nodes_by_capacity(pool_id=pool_id, ip_version="IPv6", **storage_query)
        if nodes_ids:
            # Restrict deployment to the requested nodes; fail if any of them is
            # not in the farm or lacks the required capacity.
            nodes_generator = list(nodes_generator)
            nodes_generator_ids = [node.node_id for node in nodes_generator]
            unavailable_nodes_ids = set(nodes_ids) - set(nodes_generator_ids)
            if unavailable_nodes_ids:
                raise j.exceptions.Validation(
                    f"Some nodes: {unavailable_nodes_ids} are not in the farm or don't have capacity"
                )
            nodes_generator = [node for node in nodes_generator if node.node_id in nodes_ids]

        # Deploy one ZDB per pool, moving to the next candidate node on failure.
        for node in nodes_generator:
            wid = deployer.deploy_zdb(
                pool_id=pool_id,
                node_id=node.node_id,
                size=size,
                disk_type=disk_type,
                mode=ZDBMode.Seq,
                password=password,
                form_info={"chatflow": "minio"},
                name=name,
                solution_uuid=solution_uuid,
                description=description,
            )
            try:
                success = deployer.wait_workload(wid, cancel_by_uuid=False)
                if not success:
                    raise DeploymentFailed()
                wids.append(wid)
                j.logger.info(f"AUTO TOPUP: ZDB workload {wid} deployed successfully")
                break
            except DeploymentFailed:
                j.logger.error(f"AUTO TOPUP: ZDB workload {wid} failed to deploy")
                continue
    return wids, password
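# A hedged usage sketch (not part of the original module), assuming a js-sdk
# environment where extend_zdbs is importable and funded pools already exist.
# The name, pool ids, solution uuid, password and size below are hypothetical
# placeholders, and the duration is assumed to be in seconds (it is multiplied
# into the computed su before the pools are extended).
wids, password = extend_zdbs(
    name="vdc-zdb",                                        # hypothetical solution name
    pool_ids=[42, 43],                                     # one ZDB is deployed per pool id
    solution_uuid="00000000-0000-0000-0000-000000000000",  # hypothetical
    password="zdb-secret",                                 # hypothetical
    duration=30 * 24 * 60 * 60,                            # 30 days, assumed seconds
    size=10,                                               # GB per ZDB
    disk_type=DiskType.HDD,                                # or DiskType.SSD for an sru query
    wallet_name=None,                                      # falls back to the S3_AUTO_TOPUP_WALLET config
)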