def run_simrd(g, heuristic, budgets, liveness):
    """Evaluate the simrd solver on graph ``g`` across a sweep of memory budgets.

    Dispatches one Ray remote ``solve_simrd`` task per budget (each reserving
    ``NUM_ILP_CORES`` CPUs) and gathers the results with ``get_futures``.

    Args:
        g: The computation graph to solve.
        heuristic: The simrd eviction heuristic instance; its class name is
            used only for logging / progress-bar labels.
        budgets: Iterable of memory budgets, one remote solve per entry.
        liveness: Whether liveness analysis is enabled for the solver.

    Returns:
        The list of solver results as returned by ``get_futures``.
    """
    # Hoist the heuristic's class name — it is used in two log messages.
    heuristic_name = type(heuristic).__name__
    logger.info('Evaluating simrd ({}), liveness {}...'.format(
        heuristic_name, 'enabled' if liveness else 'disabled'
    ))
    # Wrap the solver once; each call below schedules an independent Ray task.
    remote_simrd = ray.remote(num_cpus=NUM_ILP_CORES)(solve_simrd).remote
    # One remote solve per budget (comprehension replaces the append loop).
    futures = [
        remote_simrd(
            g, b, heuristic=heuristic, runtime=RuntimeV2EagerOptimized,
            thrash=2.0, liveness=liveness
        )
        for b in budgets
    ]
    return get_futures(futures, desc='simrd ({})'.format(heuristic_name))
solve_checkpoint_last_node(g) ] result_dict[SolveStrategy.CHEN_SQRTN_NOAP] = [solve_chen_sqrtn(g, False)] result_dict[SolveStrategy.CHEN_SQRTN] = [solve_chen_sqrtn(g, True)] # sweep chen's greedy baseline logger.info("Running Chen's greedy baseline (APs only)") chen_sqrtn_noap = result_dict[SolveStrategy.CHEN_SQRTN_NOAP][0] greedy_eval_points = chen_sqrtn_noap.schedule_aux_data.activation_ram * ( 1.0 + np.arange(-1, 2, 0.01)) remote_solve_chen_greedy = ray.remote(num_cpus=1)(solve_chen_greedy).remote futures = [ remote_solve_chen_greedy(g, float(b), False) for b in greedy_eval_points ] result_dict[SolveStrategy.CHEN_GREEDY] = get_futures( list(futures), desc="Greedy (APs only)") if model_name not in CHAIN_GRAPH_MODELS: logger.info( "Running Chen's greedy baseline (no AP) as model is non-linear") futures = [ remote_solve_chen_greedy(g, float(b), True) for b in greedy_eval_points ] result_dict[SolveStrategy.CHEN_SQRTN_NOAP] = get_futures( list(futures), desc="Greedy (No AP)") # sweep griewank baselines logger.error( "Skipping Griewank baselines as it was broken in parasj/checkmate#65") # if model_name in CHAIN_GRAPH_MODELS: # logger.info(f"Running Griewank baseline (APs only)")
remote_solve_chen_greedy(g, float(b), False) for b in greedy_eval_points ]) futures.extend([ remote_solve_chen_greedy(g, float(b), True) for b in greedy_eval_points ]) # # sweep griewank baselines # if model_name in CHAIN_GRAPH_MODELS: # solve_griewank(g, 1) # prefetch griewank solution from s3, otherwise ray will cause race condition # griewank_eval_points = range(1, g.size + 1) # remote_solve_griewank = ray.remote(num_cpus=1)(solve_griewank).remote # futures.extend([remote_solve_griewank(g, float(b)) for b in griewank_eval_points]) for result in get_futures(futures, desc="Batch size: {}".format(bs)): result_dict[bs][result.solve_strategy].append(result) ray.shutdown() max_batch_sizes = defaultdict(int) for bs, strategy_results in result_dict.items(): for strategy, results in strategy_results.items(): is_valid = ( lambda r: r.schedule_aux_data is not None and r. schedule_aux_data.peak_ram <= platform_ram - bs_param_ram_cost[ bs] and r.schedule_aux_data.cpu <= bs_fwd2xcost[bs]) if any(map(is_valid, results)): max_batch_sizes[strategy] = max(bs, max_batch_sizes[strategy]) logging.info( "SolveStrategy {} succeeded at batch size {}".format(
remote_solve_chen_greedy(g, float(b), False) for b in greedy_eval_points ]) futures.extend([ remote_solve_chen_greedy(g, float(b), True) for b in greedy_eval_points ]) # # sweep griewank baselines # if model_name in CHAIN_GRAPH_MODELS: # solve_griewank(g, 1) # prefetch griewank solution from s3, otherwise ray will cause race condition # griewank_eval_points = range(1, g.size + 1) # remote_solve_griewank = ray.remote(num_cpus=1)(solve_griewank).remote # futures.extend([remote_solve_griewank(g, float(b)) for b in griewank_eval_points]) for result in get_futures(futures, desc=f"Batch size: {bs}"): result_dict[bs][result.solve_strategy].append(result) ray.shutdown() max_batch_sizes = defaultdict(int) for bs, strategy_results in result_dict.items(): for strategy, results in strategy_results.items(): is_valid = lambda r: r.schedule_aux_data is not None \ and r.schedule_aux_data.peak_ram <= platform_ram - bs_param_ram_cost[bs] \ and r.schedule_aux_data.cpu <= bs_fwd2xcost[bs] if any(map(is_valid, results)): max_batch_sizes[strategy] = max(bs, max_batch_sizes[strategy]) logging.info( f"SolveStrategy {strategy} succeeded at batch size {bs}"
solve_checkpoint_last_node(g) ] result_dict[SolveStrategy.CHEN_SQRTN_NOAP] = [solve_chen_sqrtn(g, False)] result_dict[SolveStrategy.CHEN_SQRTN] = [solve_chen_sqrtn(g, True)] # sweep chen's greedy baseline logger.info(f"Running Chen's greedy baseline (No AP)") chen_sqrtn_noap = result_dict[SolveStrategy.CHEN_SQRTN_NOAP][0] greedy_eval_points = chen_sqrtn_noap.schedule_aux_data.activation_ram * ( 1. + np.arange(-1, 2, 0.01)) remote_solve_chen_greedy = ray.remote(num_cpus=1)(solve_chen_greedy).remote futures = [ remote_solve_chen_greedy(g, float(b), False) for b in greedy_eval_points ] result_dict[SolveStrategy.CHEN_GREEDY_NOAP] = get_futures( list(futures), desc="Greedy (No AP)") if model_name not in CHAIN_GRAPH_MODELS: logger.info( f"Running Chen's greedy baseline (AP) as model is non-linear") futures = [ remote_solve_chen_greedy(g, float(b), True) for b in greedy_eval_points ] result_dict[SolveStrategy.CHEN_GREEDY] = get_futures( list(futures), desc="Greedy (APs only)") # sweep griewank baselines if model_name in CHAIN_GRAPH_MODELS: logger.info(f"Running Griewank baseline (APs only)") clean_griewank_cache() solve_griewank(