def analyze(config: Dict, job_id: str, wide: bool = False) -> None:
    """Replay the demand calculation and print the allocation results
    recorded for a single job.

    Args:
        config: autoscale configuration dict; it is re-preprocessed by the
            driver before the demand calculation runs.
        job_id: scheduler job id whose recorded results are printed.
        wide: when True, messages are printed untruncated; otherwise each
            message is clipped to the terminal width.
    """
    if not wide:
        # Clip output to the terminal width. shutil.get_terminal_size never
        # shells out (the old `os.popen("stty size")` spawned a process and
        # left the pipe to the GC, and failed off-tty / on Windows) and the
        # fallback keeps the original's 120-column default.
        import shutil

        columns = shutil.get_terminal_size(fallback=(120, 24)).columns
    else:
        columns = 120

    ctx = DefaultContextHandler("[demand-cli]")
    register_result_handler(ctx)

    ge_env = environment.from_qconf(config)
    ge_driver = autoscaler.new_driver(config, ge_env)
    config = ge_driver.preprocess_config(config)
    autoscaler.calculate_demand(config, ge_env, ge_driver, ctx)

    # Results for this job were recorded under a "[job <id>]" context key.
    key = "[job {}]".format(job_id)
    results = ctx.by_context[key]
    for result in results:
        # Skip results that are truthy matches / early bailouts — only the
        # interesting (failed / informative) results are printed.
        if isinstance(result, (EarlyBailoutResult, MatchResult)) and result:
            continue

        # Skip hostgroup constraints that evaluated falsy as well.
        if isinstance(result, HostgroupConstraint) and not result:
            continue
        if wide:
            print(result.message)
        else:
            print(result.message[:columns])
def common_cluster(
    qsub_commands: List[str],
    previous_dcalc: Optional[DemandCalculator] = None,
) -> DemandCalculator:
    """Build a mock GridEngine cluster, submit the given qsub commands and
    return the resulting DemandCalculator.

    When previous_dcalc is given, its cluster bindings are reused so state
    carries over between successive calculations.
    """
    ge_env = common_ge_env(previous_dcalc)

    qsub = mock_driver.MockQsub(ge_env)
    for command in qsub_commands:
        qsub.qsub(command)
    submitted_jobs = qsub.parse_jobs()

    def _bindings() -> MockClusterBinding:
        # Reuse the bindings from an earlier calculation when available.
        if previous_dcalc:
            return previous_dcalc.node_mgr.cluster_bindings

        # Otherwise build a fresh two-array mock cluster: a placement-grouped
        # "hpc" array and a capped "htc" array, both on Standard_F4.
        bindings = MockClusterBinding()
        bindings.add_nodearray("hpc", {}, max_placement_group_size=5)
        bindings.add_bucket("hpc", "Standard_F4", 100, 100)
        bindings.add_nodearray("htc", {}, max_count=10)
        bindings.add_bucket("htc", "Standard_F4", 10, 10)
        return bindings

    mdriver = mock_driver.MockGridEngineDriver(ge_env)
    ge_env.jobs.extend(submitted_jobs)

    config = mdriver.preprocess_config(mock_config(_bindings()))
    return autoscaler.calculate_demand(
        config,
        ge_env,
        mdriver,
        CONTEXT,
        node_history=NullNodeHistory(),
    )
def demand(
    config: Dict,
    jobs: Optional[str] = None,
    scheduler_nodes: Optional[str] = None,
    output_columns: Optional[List[str]] = None,
    output_format: Optional[str] = None,
) -> None:
    """Runs autoscale in dry run mode to see the demand for new nodes"""
    logging.debug("Begin demand")

    handler = DefaultContextHandler("[demand-cli]")
    register_result_handler(handler)

    env = environment.from_qconf(config)
    driver = autoscaler.new_driver(config, env)
    config = driver.preprocess_config(config)

    # Dry-run the demand calculation and render the finished result.
    calculator = autoscaler.calculate_demand(config, env, driver, handler)
    result = calculator.finish()
    autoscaler.print_demand(config, result, output_columns, output_format)

    logging.debug("End demand")
def _find_nodes(
    config: Dict, hostnames: List[str], node_names: List[str]
) -> Tuple[GridEngineDriver, DemandCalculator, List[Node]]:
    """Resolve hostnames and/or CycleCloud node names to Node objects.

    Unknown hostnames are wrapped in a placeholder SchedulerNode so they can
    still be removed from the cluster; unknown node names are an error.
    Returns the driver, the demand calculator, and the resolved nodes.
    """
    hostnames = hostnames or []
    node_names = node_names or []

    ge_env = environment.from_qconf(config)
    ge_driver = autoscaler.new_driver(config, ge_env)
    demand_calc = autoscaler.calculate_demand(config, ge_env, ge_driver)
    demand_result = demand_calc.finish()

    # Index the compute nodes both ways, case-insensitively.
    by_hostname = partition_single(
        demand_result.compute_nodes, lambda node: node.hostname_or_uuid.lower()
    )
    by_node_name = partition_single(
        demand_result.compute_nodes, lambda node: node.name.lower()
    )

    found_nodes = []

    for hostname in hostnames:
        if not hostname:
            error("Please specify a hostname")
        key = hostname.lower()
        if key not in by_hostname:
            # it doesn't exist in CC, but we still want to delete it
            # from the cluster
            by_hostname[key] = SchedulerNode(hostname, {})
        found_nodes.append(by_hostname[key])

    for node_name in node_names:
        if not node_name:
            error("Please specify a node_name")
        key = node_name.lower()
        if key not in by_node_name:
            error(
                "Could not find a CycleCloud node that has node_name %s."
                + " Run 'nodes' to see available nodes.",
                node_name,
            )
        found_nodes.append(by_node_name[key])

    return ge_driver, demand_calc, found_nodes