def test_to_replication(self) -> None:
    """Exercises convert.to_replication() on empty, None, invalid and valid input."""
    # An empty dict converts to an empty ReplicationProperty.
    self.assertEqual(ReplicationProperty(), convert.to_replication({}))
    # None passes through as None.
    self.assertEqual(None, convert.to_replication(None))
    # An unrecognized scope name raises KeyError.
    bad_input = {"invalid": 3, "Rack": 4}
    with self.assertRaises(KeyError):
        convert.to_replication(bad_input)
    # Both "node" and "Rack" are accepted and mapped to LocationScope keys.
    good_input = {"node": 3, "Rack": 4}
    expected = ReplicationProperty({LocationScope.RACK: 4, LocationScope.NODE: 3})
    self.assertEqual(expected, convert.to_replication(good_input))
async def check_impact(
    shards: Optional[List[str]] = None,
    node_indexes: Optional[List[int]] = None,
    node_names: Optional[List[str]] = None,
    target_state: str = "disabled",
    safety_margin: Optional[Mapping[str, int]] = None,
    timeout: int = 600,
    skip_metadata_logs: bool = False,
    skip_internal_logs: bool = False,
    logs: Optional[List[int]] = None,
    short: bool = False,
    max_unavailable_storage_capacity_pct: int = 25,
    max_unavailable_sequencing_capacity_pct: int = 25,
    skip_capacity_checks: bool = False,
):
    """
    Check whether moving the given shards/nodes to ``target_state`` would
    cause loss of read/write availability or data loss.

    Targets may be addressed three ways — shard strings, node names, or node
    indexes — and all three are combined and expanded against the current
    cluster view before the check.

    Prints a human-readable report and returns an exit code:
      0 — the operation is safe (no impact reported);
      1 — the operation is unsafe, or an error occurred (not connected to a
          cluster, admin-server operation error, or transport error).

    NOTE(review): ``timeout`` is not referenced in this code path — presumably
    consumed by the CLI layer; confirm before removing.
    """
    if shards is None:
        shards = []
    ctx = context.get_context()

    def _combine(
        cv: ClusterView,
        shards: Optional[List[str]] = None,
        node_names: Optional[List[str]] = None,
        node_indexes: Optional[List[int]] = None,
    ) -> Tuple[ShardID, ...]:
        # Merge the three addressing styles into one set of ShardIDs;
        # shard_index=-1 addresses every shard of the node.
        shard_ids = parse_shards(list(shards or []))
        for nn in list(node_names or []):
            shard_ids.add(ShardID(node=cv.get_node_id(node_name=nn), shard_index=-1))
        for ni in list(node_indexes or []):
            shard_ids.add(ShardID(node=NodeID(node_index=ni), shard_index=-1))
        return cv.expand_shards(shard_ids)

    if not ctx.is_connected():
        cprint("LDShell must be connected to a cluster!", "red")
        return 1
    cprint("Starting, this may take a while...", "yellow")
    # Bind the converted value to a new name instead of rebinding
    # `target_state`: keeping the str parameter and the ShardStorageState
    # value in separate variables removes the pyre-fixme[9] suppression.
    storage_state = convert.to_storage_state(target_state)
    if skip_capacity_checks:
        max_unavailable_sequencing_capacity_pct = 100
        max_unavailable_storage_capacity_pct = 100
    async with ctx.get_cluster_admin_client() as client:
        try:
            cv = await get_cluster_view(client)
            # Likewise, a fresh name (instead of rebinding `shards`) keeps
            # List[str] and Tuple[ShardID, ...] apart — no pyre-fixme needed.
            expanded_shards = _combine(cv, shards, node_names, node_indexes)
            req = CheckImpactRequest(
                shards=expanded_shards,
                target_storage_state=storage_state,
                log_ids_to_check=logs,
                abort_on_negative_impact=True,
                safety_margin=convert.to_replication(safety_margin),
                return_sample_size=20,
                check_metadata_logs=not skip_metadata_logs,
                check_internal_logs=not skip_internal_logs,
                max_unavailable_storage_capacity_pct=max_unavailable_storage_capacity_pct,
                max_unavailable_sequencing_capacity_pct=max_unavailable_sequencing_capacity_pct,
            )
            response = await client.checkImpact(req)
        except OperationError as e:
            cprint(
                f"There was error during check execution, Status {e}, "
                "result is not known",
                "red",
            )
            return 1
        except texceptions.TransportError as e:
            cprint(f"Couldn't connect to the Admin Server: {e}", "red")
            return 1

    delta = response.total_duration
    lines = []
    if not response.impact:
        lines.append(colored("ALL GOOD.\n", "green"))
    else:
        lines.append(
            colored(f"UNSAFE. Impact: {impacts_to_string(response.impact)}", "red")
        )
    # The totals line is common to both outcomes — emit it once.
    lines.append(f"Total logs checked ({response.total_logs_checked}) in {delta}s")
    print("\n".join(lines))
    if not short:
        print(check_impact_string(response, expanded_shards, storage_state))
    return 0 if not response.impact else 1