marker=marker,
               s=markersize**2)
    ax.hlines(y=y, xmin=xlim_min, xmax=x, linestyles="dashed", color=color)
    ax.hlines(y=y, xmin=x, xmax=xlim_max, color=color, zorder=2)
    legend_elements.append(
        Line2D([0], [0],
               lw=2,
               label=label,
               color=color,
               marker=marker,
               markersize=markersize))
    ax.set_xlim([xlim_min, xlim_max])

    # Plot platform memory
    ylim_min, ylim_max = ax.get_ylim()
    mem_gb = platform_memory(args.platform) / 1e9
    if xlim_min <= mem_gb <= xlim_max:
        ax.vlines(x=mem_gb,
                  ymin=ylim_min,
                  ymax=ylim_max,
                  linestyles="dotted",
                  color="b")
        legend_elements.append(
            Line2D([0], [0],
                   lw=2,
                   label="{} memory".format(pretty_platform_name(
                       args.platform)),
                   color="b",
                   linestyle="dotted"))
        ax.set_ylim([ylim_min, ylim_max])
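# Usage sketch (hypothetical, self-contained): attaching the `legend_elements`
# proxy handles built above to an axes. Only standard Matplotlib APIs are used;
# the variable names here are illustrative, not from the original script.
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D

fig, ax = plt.subplots()
legend_handles = [
    Line2D([0], [0], lw=2, color="b", linestyle="dotted", label="platform memory")
]
# hlines/vlines do not create legend entries, so pass the proxy artists explicitly
ax.legend(handles=legend_handles, loc="best")
fig.savefig("legend_demo.png")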
Example #2
def get_solutions_to_evaluate(
        solve_strategy: SolveStrategy,
        model_name: str,
        batch_size: int,
        platform: str,
        input_shape=None,
        model_version="v1",
        buffer_mem: int = 0) -> List[Tuple[RSResult, str]]:
    """

    :param solve_strategy:
    :param model_name:
    :param batch_size:
    :param platform:
    :param input_shape:
    :param model_version:
    :return: Instance of RSResult, or None. Returns None if the solution is not available in cache
    or no solution is available under the budget
    """
    logger = setup_logger("test_execution_get_solution")

    # Load all results for this configuration, regardless of budget
    key_prefix = RedisCache.make_key(platform=platform,
                                     model_name=model_name,
                                     model_version=model_version,
                                     batch_size=batch_size,
                                     input_shape=input_shape)
    cache = RedisCache(key_prefix=key_prefix)
    cost_file = f"b{batch_size}_{platform}.npy"
    logger.info(
        f"Querying results for SS={solve_strategy}, model_name={model_name}, bs={batch_size}, "
        f"platform={platform}, cost_file={cost_file}, key prefix={key_prefix}")
    results, keys = cache.read_results(solver=solve_strategy,
                                       cost_file=cost_file,
                                       model_name=model_name)
    if not results:
        logger.error(
            f"No solutions found in cache for SS={solve_strategy}, model_name={model_name}, "
            f"bs={batch_size}, platform={platform}, cost_file={cost_file}, key prefix={key_prefix}"
        )
        return []

    # Filter results to those that abide by the budget
    platform_budget = platform_memory(platform)
    within_budget = []
    for result, key in zip(results, keys):
        if not result.peak_ram:
            logger.warn(f"Falsey peak ram? {result.peak_ram}")
            continue
        if result.peak_ram + buffer_mem <= platform_budget:
            within_budget.append((result, key))
    logger.info(
        f"Out of {len(results)} solver results, {len(within_budget)} had peak ram "
        f"within the {platform_budget} byte budget (after reserving a {buffer_mem} byte buffer)"
    )
    if not within_budget:
        logger.warning(
            f"While {len(results)} solutions were found in cache, none fit within the memory budget"
        )
        return []

    # Return solutions in increasing order of compute cost; the cheapest
    # feasible schedule comes first
    within_budget.sort(key=lambda r: r[0].cpu)
    logger.info(
        f"Best solution uses {within_budget[0][0].cpu} compute, {within_budget[0][0].peak_ram} peak ram"
    )
    return within_budget
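# Usage sketch: consuming the sorted candidate list. The strategy and the
# argument values below are illustrative assumptions, not from the original
# script; `SolveStrategy` is assumed to be in scope as in the signature above.
candidates = get_solutions_to_evaluate(
    solve_strategy=SolveStrategy.OPTIMAL_ILP_GC,
    model_name="VGG16",
    batch_size=256,
    platform="p32xlarge",
    buffer_mem=int(1e9),  # reserve ~1 GB of headroom beyond the schedule's peak RAM
)
if candidates:
    best_result, best_key = candidates[0]  # entries are sorted by ascending CPU cost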
    if args.platform == "flops":
        cost_model = None
    else:
        cost_model = CostModel(model_name,
                               args.platform,
                               log_base,
                               quantization=5)
        cost_model.fit()
        cost_model.plot_costs()

    model = get_keras_model(model_name, input_shape=args.input_shape)
    tf.keras.utils.plot_model(model,
                              to_file=log_base /
                              "plot_{}.png".format(model_name),
                              show_shapes=True,
                              show_layer_names=True)
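    # Note: plot_model needs the pydot package and the Graphviz binaries
    # installed; it raises ImportError when they are missing.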

    platform_ram = platform_memory("p32xlarge")
    bs_futures = defaultdict(list)  # type: Dict[int, List]
    bs_param_ram_cost = {}  # type: Dict[int, int]
    bs_fwd2xcost = {}  # type: Dict[int, int]
    rg = list(
        range(args.batch_size_min, args.batch_size_max,
              args.batch_size_increment))
    for bs in tqdm(rg, desc="Event dispatch"):
        while not ray.is_initialized():
            ray.init(
                temp_dir="/tmp/ray_checkpoint_" + str(uuid.uuid4())[:8],
                redis_password=str(uuid.uuid1()),
                num_cpus=os.cpu_count() - 2,
            )
        futures = []
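        # Dispatch sketch (hypothetical `solve_ilp` remote task; ray.remote,
        # `.remote()` and ray.get are the actual Ray primitives):
        #
        #     futures.append(solve_ilp.remote(g, budget))
        #     bs_futures[bs].extend(futures)
        #     results = ray.get(bs_futures[bs])  # block until the solves finish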
    if args.platform == "flops":
        cost_model = None
    else:
        cost_model = CostModel(model_name,
                               args.platform,
                               log_base,
                               quantization=5)
        cost_model.fit()
        cost_model.plot_costs()

    model = get_keras_model(model_name, input_shape=args.input_shape)
    tf.keras.utils.plot_model(model,
                              to_file=log_base /
                              "plot_{}.png".format(model_name),
                              show_shapes=True,
                              show_layer_names=True)

    platform_ram = platform_memory("p32xlarge")
    bs_futures = defaultdict(list)  # type: Dict[int, List]
    bs_fwd2xcost = {}  # type: Dict[int, int]
    # load model at batch size
    g = dfgraph_from_keras(model,
                           batch_size=1,
                           cost_model=cost_model,
                           loss_cpu_cost=0,
                           loss_ram_cost=4)
    plot_dfgraph(g, log_base, name=model_name)

    model_file = str(log_base / "max_bs_{}.mps".format(model_name))
    param_dict = {
        "LogToConsole": 1,
        "LogFile": str(log_base / "max_bs_{}.solve.log".format(model_name)),
        "Threads": os.cpu_count(),
    return data * 1e-9


if __name__ == "__main__":
    args = extract_params()

    if args.model_name == "linear16":
        N = 16
        B = 8
        scratch_dir = checkmate_data_dir() / "approxcomparison" / args.model_name / f"budget{B}"
        scratch_dir.mkdir(parents=True, exist_ok=True)

        g = gen_linear_graph(N)
    else:
        B = platform_memory(args.platform)
        scratch_dir = checkmate_data_dir() / "approxcomparison" / args.model_name / f"budget{B}"
        scratch_dir.mkdir(parents=True, exist_ok=True)

        # load costs, and plot optionally, if platform is not flops
        print("Loading costs")
        if args.platform == "flops":
            cost_model = None
        else:
            cost_model = CostModel(args.model_name,
                                   args.platform,
                                   scratch_dir,
                                   quantization=5)
            cost_model.fit()
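            # The fitted cost model is then typically threaded into graph
            # construction, mirroring the earlier fragment:
            #
            #     g = dfgraph_from_keras(model, batch_size=1, cost_model=cost_model,
            #                            loss_cpu_cost=0, loss_ram_cost=4)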