Example #1
def plot_heatmap(
    buffer: List[BufferItem],
    grid_indices_with_values: dict,
    grid_components: dict,
    tensor_shape: Tuple,
    env_values_repeated: Dict,
    env_values_dict: Dict,
    regression_probability: bool,
    param_names: List[str],
    approximated_tensor: np.ndarray,
    show_plot: bool,
    plot_only_approximated: bool,
    algo_name: str,
    env_name: str,
    interpolation_function: str,
    plot_file_path: Optional[str] = None,
    plot_nn: bool = False,
    model_suffix: Optional[str] = None,
    max_points_x: Optional[int] = None,
    skip_points_x: Optional[int] = None,
    max_points_y: Optional[int] = None,
    skip_points_y: Optional[int] = None,
    indices_frontier_not_adapted_appr: Optional[List[Tuple]] = None,
):
    logger = Log("plot_heatmap")

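    # collect the (param_1, param_2) value tuples of every buffer item: these
    # identify points sampled during the original search, as opposed to points
    # added later by resampling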
    env_values_not_resampling = []
    for buffer_item in buffer:
        buffer_env_values = buffer_item.get_env_values()
        env_values = [buffer_env_values[key] for key in buffer_env_values if key in param_names]
        env_values_not_resampling.append(tuple(env_values))

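    # flatten the evaluation grid into parallel lists of x values, y values and
    # pass probabilities; grid cells that were never evaluated stay NaN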
    first_param_values_step, second_param_values_step, probabilities_step = [], [], []
    first_param_components = grid_components[param_names[0]]
    second_param_components = grid_components[param_names[1]]
    values_set = 0
    for i in range(tensor_shape[0]):
        for j in range(tensor_shape[1]):
            mutated_pair = (first_param_components[i], second_param_components[j])
            pass_probability = np.nan
            if (i, j) in grid_indices_with_values:
                pass_probability = grid_indices_with_values[(i, j)][1]
                values_set += 1
            first_param_values_step.append(mutated_pair[0])
            second_param_values_step.append(mutated_pair[1])
            probabilities_step.append(pass_probability)
    assert values_set == len(grid_indices_with_values), "{} != {}".format(values_set, len(grid_indices_with_values))

    SMALL_SIZE = 8
    MEDIUM_SIZE = 10
    BIGGER_SIZE = 12

    if plot_only_approximated:
        SMALL_SIZE = 14
        MEDIUM_SIZE = 14
        BIGGER_SIZE = 14

    plt.rc("font", size=SMALL_SIZE)  # controls default text sizes
    plt.rc("axes", titlesize=SMALL_SIZE)  # fontsize of the axes title
    plt.rc("axes", labelsize=MEDIUM_SIZE)  # fontsize of the x and y labels
    plt.rc("xtick", labelsize=SMALL_SIZE)  # fontsize of the tick labels
    plt.rc("ytick", labelsize=SMALL_SIZE)  # fontsize of the tick labels
    plt.rc("legend", fontsize=SMALL_SIZE)  # legend fontsize
    plt.rc("figure", titlesize=BIGGER_SIZE)  # fontsize of the figure title

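    # the min/max of the repeated env values define the extent of the
    # approximated heatmap drawn with imshow below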
    lsts_values_repeated = list(env_values_repeated.values())
    first_param_values_repeated = lsts_values_repeated[0]
    second_param_values_repeated = lsts_values_repeated[1]

    min_first_param = np.min(np.array(first_param_values_repeated))
    max_first_param = np.max(np.array(first_param_values_repeated))

    min_second_param = np.min(np.array(second_param_values_repeated))
    max_second_param = np.max(np.array(second_param_values_repeated))

    if not plot_only_approximated:
        fig = plt.figure(figsize=(22, 15))
        _ = fig.add_subplot(211)
    else:
        fig = plt.figure(figsize=(9, 8))

    apprx_ax = plt.gca()

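    # high values are good for pass probability but bad for regression
    # probability, so the color ramp is reversed in the regression case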
    colors = ["red", "gold", "green"] if not regression_probability else ["green", "gold", "red"]
    cmap = LinearSegmentedColormap.from_list(name="test", colors=colors)

    dict_for_df = {
        param_names[0]: first_param_values_step,
        param_names[1]: second_param_values_step,
        "pass_probability": probabilities_step,
    }

    df = pd.DataFrame(dict_for_df)
    heatmap_data = pd.pivot_table(df, dropna=False, values="pass_probability", index=param_names[1], columns=param_names[0])

    first_param = Param(
        **load_env_params(
            algo_name=algo_name,
            env_name=standardize_env_name(env_name=env_name),
            param_name=param_names[0],
            model_suffix=model_suffix,
        ),
        id=0,
        name=param_names[0]
    )
    second_param = Param(
        **load_env_params(
            algo_name=algo_name,
            env_name=standardize_env_name(env_name=env_name),
            param_name=param_names[1],
            model_suffix=model_suffix,
        ),
        id=1,
        name=param_names[1]
    )

    # first_param = x, second_param = y
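    # "rtl" and "ttb" flip the corresponding axis when drawing (the "ttb"
    # cases are not implemented)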
    direction_x = "ltr" if first_param.get_starting_multiplier() > 1 and first_param.get_direction() == "positive" else "rtl"
    direction_y = "btt" if second_param.get_starting_multiplier() > 1 and second_param.get_direction() == "positive" else "ttb"

    if not plot_only_approximated:

        grid_ax = sns.heatmap(
            data=heatmap_data,
            linewidths=0.2,
            square=False,
            cmap=cmap,
            vmin=0,
            vmax=1,
            cbar_kws={"label": "Pass probability" if not regression_probability else "Regression probability"},
        )

        # round the tick labels to a consistent number of significant digits
        num_significant_digits = get_num_significant_digits(labels=grid_ax.get_xticklabels())
        xticks_rounded = []
        for tick_label in grid_ax.get_xticklabels():
            round_label = round(float(tick_label.get_text()), num_significant_digits)
            xticks_rounded.append(Text(x=tick_label._x, y=tick_label._y, text=str(round_label)))

        num_significant_digits = get_num_significant_digits(labels=grid_ax.get_yticklabels())
        yticks_rounded = []
        for tick_label in grid_ax.get_yticklabels():
            round_label = round(float(tick_label.get_text()), num_significant_digits)
            yticks_rounded.append(Text(x=tick_label._x, y=tick_label._y, text=str(round_label)))

        if direction_x == "ltr" and direction_y == "btt":
            grid_ax.invert_yaxis()
        elif direction_x == "rtl" and direction_y == "btt":
            grid_ax.invert_yaxis()
            grid_ax.invert_xaxis()
        elif direction_x == "ltr" and direction_y == "ttb":
            raise NotImplementedError()
        elif direction_x == "rtl" and direction_y == "ttb":
            raise NotImplementedError()

        grid_ax.set_xticklabels(xticks_rounded, rotation=90)
        grid_ax.set_yticklabels(yticks_rounded)

        apprx_ax = fig.add_subplot(212)

    if plot_nn:
        # supposed to plot the filled heatmap (not implemented)
        pass

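    # split the observed points into those inside the grid limits (kept) and
    # those beyond them (discarded with a warning); kept points are further
    # split into originally sampled points and resampled ones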
    lsts_values = list(env_values_dict.values())
    limits_first_param = [grid_components[param_names[0]][0], grid_components[param_names[0]][-1]]
    limits_second_param = [grid_components[param_names[1]][0], grid_components[param_names[1]][-1]]
    first_param_values = []
    second_param_values = []
    first_param_values_resampling = []
    second_param_values_resampling = []
    for i in range(len(lsts_values[0])):
        value_first_param = lsts_values[0][i]
        value_second_param = lsts_values[1][i]
        min_limit_first_param = round(limits_first_param[0], 5)
        max_limit_first_param = round(limits_first_param[1], 5)
        min_limit_second_param = round(limits_second_param[0], 5)
        max_limit_second_param = round(limits_second_param[1], 5)
        if (min_limit_first_param <= value_first_param <= max_limit_first_param) and (
            min_limit_second_param <= value_second_param <= max_limit_second_param
        ):

            if (value_first_param, value_second_param) not in env_values_not_resampling:
                first_param_values_resampling.append(value_first_param)
                second_param_values_resampling.append(value_second_param)
            else:
                first_param_values.append(value_first_param)
                second_param_values.append(value_second_param)
        else:
            logger.warn(
                "Discarding pair {} from scatterplot because beyond limits [{}. {}], [{}, {}]".format(
                    (value_first_param, value_second_param),
                    min_limit_first_param,
                    max_limit_first_param,
                    min_limit_second_param,
                    max_limit_second_param,
                )
            )

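    # frontier cells that could not be adapted are set to a sentinel below
    # vmin so that set_under renders them gray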
    if regression_probability and indices_frontier_not_adapted_appr:
        cmap.colorbar_extend = "min"
        cmap.set_under("gray")
        for index_frontier_not_adapted_appr in indices_frontier_not_adapted_appr:
            approximated_tensor[tuple(index_frontier_not_adapted_appr)] = -1.0

    extent = [min_first_param, max_first_param, min_second_param, max_second_param]
    hm = apprx_ax.imshow(
        approximated_tensor, interpolation="none", cmap=cmap, extent=extent, aspect="auto", origin="lower", vmin=0.0, vmax=1.0
    )

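    # downsample the tick labels: keep every num_points_to_skip-th grid value
    # (every 4th by default), rounded to a shared number of significant digits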
    max_num_points_x = 100 if not max_points_x else max_points_x
    # num_points_to_skip = (len(heatmap_data.axes[1].values) + max_num_points_x // 2) // max_num_points_x
    if skip_points_x is not None:
        num_points_to_skip = skip_points_x
    else:
        num_points_to_skip = 4
    xticks_rounded = []
    num_significant_digits = get_num_significant_digits(values=list(heatmap_data.axes[1].values))
    if num_points_to_skip > 0:
        for i in range(0, len(heatmap_data.axes[1].values), num_points_to_skip):
            xticks_rounded.append(round(heatmap_data.axes[1].values[i], num_significant_digits))
    else:
        for value in heatmap_data.axes[1].values:
            xticks_rounded.append(round(value, num_significant_digits))

    max_num_points_y = 30 if not max_points_y else max_points_y
    # num_points_to_skip = (len(heatmap_data.axes[0].values) + max_num_points_y // 2) // max_num_points_y
    if skip_points_y is not None:
        num_points_to_skip = skip_points_y
    else:
        num_points_to_skip = 4
    yticks_rounded = []
    num_significant_digits = get_num_significant_digits(values=list(heatmap_data.axes[0].values))
    if num_points_to_skip > 0:
        for i in range(0, len(heatmap_data.axes[0].values), num_points_to_skip):
            yticks_rounded.append(round(heatmap_data.axes[0].values[i], num_significant_digits))
    else:
        for value in heatmap_data.axes[0].values:
            yticks_rounded.append(round(value, num_significant_digits))

    apprx_ax.set_xticks(xticks_rounded)
    apprx_ax.set_yticks(yticks_rounded)

    for tick in apprx_ax.get_xticklabels():
        tick.set_rotation(90)

    if direction_x == "ltr" and direction_y == "btt":
        pass
    elif direction_x == "rtl" and direction_y == "btt":
        apprx_ax.invert_xaxis()
    elif direction_x == "ltr" and direction_y == "ttb":
        raise NotImplementedError()
    elif direction_x == "rtl" and direction_y == "ttb":
        raise NotImplementedError()

    apprx_ax.scatter(first_param_values, second_param_values, s=50, c="black")
    if len(first_param_values_resampling) > 0:
        apprx_ax.scatter(first_param_values_resampling, second_param_values_resampling, s=100, marker="*", c="black")

    # determine points on the adaptation frontier if regression (not implemented)
    if regression_probability:
        pass

    cbar = fig.colorbar(hm, ax=apprx_ax)
    cbar.ax.set_ylabel("Adaptation probability" if not regression_probability else "Regression probability")
    apprx_ax.set_xlabel(param_names[0])
    apprx_ax.set_ylabel(param_names[1])

    if show_plot:
        plot_title = "heatmap_" + interpolation_function + "_" + env_name + "_" + algo_name
        fig.canvas.manager.set_window_title(plot_title)
        plt.show()
    else:
        if plot_file_path:
            plt.savefig(plot_file_path + ".pdf", format="pdf")
        else:
            abs_prefix = os.path.abspath("../")
            file_index = 0
            file_stem = "heatmap_" + interpolation_function + "_" + env_name + "_" + algo_name + "_"
            file_name = file_stem + str(file_index) + ".pdf"
            while os.path.exists(os.path.join(abs_prefix, file_name)):
                file_index += 1
                file_name = file_stem + str(file_index) + ".pdf"

            plt.savefig(os.path.join(abs_prefix, file_name), format="pdf")
Example #2
    args, _ = parser.parse_known_args()
    abs_params_dir = os.path.abspath(HOME)

    if args.analysis_results:
        analysis_folder = os.path.join(abs_params_dir,
                                       "rl-experiments-artifacts")
        env_folder = os.path.join(analysis_folder, args.env_name)
        exp_time_folder = os.path.join(env_folder, args.experiment_type)
        param_names_folder = os.path.join(exp_time_folder,
                                          "_".join(args.param_names))
        list_of_folders = glob.glob(
            os.path.join(param_names_folder,
                         "{}_cluster".format(args.algo_name)))
        zip_name = "analysis_{}_{}_{}.zip".format(
            standardize_env_name(args.env_name), args.algo_name,
            "_".join(args.param_names))
    else:
        experiments_folder = os.path.join(abs_params_dir, args.experiment_type)
        env_folder = os.path.join(experiments_folder, args.env_name)
        algo_folder = os.path.join(env_folder, args.algo_name)

        if args.folders_suffix:
            list_of_folders = (glob.glob(
                os.path.join(
                    algo_folder, "n_iterations_{}_{}_*".format(
                        args.folders_suffix, args.num_iterations)))
                               if not args.model_suffix else glob.glob(
                                   os.path.join(
                                       algo_folder,
                                       "n_iterations_{}_{}_{}_*".format(
Example #3
    def __init__(
        self,
        agent: AbstractAgent,
        num_iterations: int,
        algo_name: str,
        env_name: str,
        tb_log_name: str,
        continue_learning_suffix: str,
        env_variables: EnvVariables,
        param_names: Optional[List[str]] = None,
        runs_for_probability_estimation: int = 1,
        buffer_file: Optional[str] = None,
        archive_file: Optional[str] = None,
        executions_skipped_file: Optional[str] = None,
        parallelize_search: bool = False,
        monitor_search_every: int = -1,
        binary_search_epsilon: float = 0.05,
        start_search_time: Optional[float] = None,
        starting_progress_report_number: int = 0,
        stop_at_first_iteration: bool = False,
        exp_suffix: Optional[str] = None,
    ):
        assert agent, "agent should have a value: {}".format(agent)
        assert algo_name, "algo_name should have a value: {}".format(algo_name)
        assert env_name, "env_name should have a value: {}".format(env_name)

        self.agent = agent
        self.num_iterations = num_iterations
        self.init_env_variables = env_variables
        self.previous_num_iterations = None
        self.start_time = time.time()
        self.logger = Log("Random")
        self.param_names = param_names
        self.all_params = env_variables.instantiate_env()
        self.runs_for_probability_estimation = runs_for_probability_estimation
        self.buffer_file = buffer_file
        self.archive_file = archive_file
        self.parallelize_search = parallelize_search
        self.stop_at_first_iteration = stop_at_first_iteration
        self.exp_suffix = exp_suffix

        if param_names:
            self.param_names_string = "_".join(param_names)

        # TODO: refactor buffer restoring in abstract class extended by search algo
        #  (for now only random search and alphatest)
        if buffer_file:
            previously_saved_buffer = read_saved_buffer(
                buffer_file=buffer_file)
            index_last_slash = buffer_file.rindex("/")

            self.algo_save_dir = buffer_file[:index_last_slash]
            self.logger.debug(
                "Algo save dir from restored execution: {}".format(
                    self.algo_save_dir))
            self.buffer_env_predicate_pairs = BufferEnvPredicatePairs(
                save_dir=self.algo_save_dir)
            self.archive = Archive(save_dir=self.algo_save_dir,
                                   epsilon=binary_search_epsilon)

            # restore buffer
            for buffer_item in previously_saved_buffer:
                previous_env_variables = instantiate_env_variables(
                    algo_name=algo_name,
                    discrete_action_space=self.all_params["discrete_action_space"],
                    env_name=env_name,
                    param_names=param_names,
                    env_values=buffer_item.get_env_values(),
                )
                self.buffer_env_predicate_pairs.append(
                    EnvPredicatePair(
                        env_variables=previous_env_variables,
                        pass_probability=buffer_item.get_pass_probability(),
                        predicate=buffer_item.is_predicate(),
                        regression_probability=buffer_item.get_regression_probability(),
                        probability_estimation_runs=buffer_item.get_probability_estimation_runs(),
                        regression_estimation_runs=buffer_item.get_regression_estimation_runs(),
                        model_dirs=buffer_item.get_model_dirs(),
                    ))
            assert archive_file, (
                "when the buffer file is available, the archive file is also "
                "required to restore a previous execution")
            previous_num_iterations_buffer = get_result_file_iteration_number(
                filename=buffer_file)
            previous_num_iterations_archive = get_result_file_iteration_number(
                filename=archive_file)
            assert (previous_num_iterations_buffer ==
                    previous_num_iterations_archive
                    ), "The two iteration numbers must coincide: {}, {}".format(
                        previous_num_iterations_buffer,
                        previous_num_iterations_archive)
            previous_num_iterations = previous_num_iterations_buffer + 1

            self.previous_num_iterations = previous_num_iterations
            self.logger.info(
                "Restore previous execution of {} iterations.".format(
                    previous_num_iterations))

            # restore archive
            previously_saved_archive = read_saved_archive(
                archive_file=archive_file)
            t_env_variables = None
            f_env_variables = None
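            # pair each passing (predicate True) entry with a failing one and
            # append the pair to the archive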
            for env_values, predicate in previously_saved_archive:
                all_params = env_variables.instantiate_env()
                previous_env_variables = instantiate_env_variables(
                    algo_name=algo_name,
                    discrete_action_space=all_params["discrete_action_space"],
                    env_name=env_name,
                    param_names=param_names,
                    env_values=env_values,
                )
                if predicate:
                    t_env_variables = previous_env_variables
                else:
                    f_env_variables = previous_env_variables

                if t_env_variables and f_env_variables:
                    self.archive.append(t_env_variables=t_env_variables,
                                        f_env_variables=f_env_variables)
                    t_env_variables = None
                    f_env_variables = None

            # restore executions skipped; this runs once, after the archive loop,
            # and the buffer is initialized here as in the fresh-run branch below
            self.buffer_executions_skipped = BufferExecutionsSkipped(
                save_dir=self.algo_save_dir)
            previously_saved_executions_skipped = read_saved_buffer_executions_skipped(
                buffer_executions_skipped_file=executions_skipped_file)
            for buffer_executions_skipped_item in previously_saved_executions_skipped:
                previous_env_variables_skipped = instantiate_env_variables(
                    algo_name=algo_name,
                    discrete_action_space=self.all_params["discrete_action_space"],
                    env_name=env_name,
                    param_names=param_names,
                    env_values=buffer_executions_skipped_item.env_values_skipped,
                )
                env_predicate_pair_skipped = EnvPredicatePair(
                    env_variables=previous_env_variables_skipped,
                    predicate=buffer_executions_skipped_item.predicate)
                previous_env_variables_executed = instantiate_env_variables(
                    algo_name=algo_name,
                    discrete_action_space=self.all_params["discrete_action_space"],
                    env_name=env_name,
                    param_names=param_names,
                    env_values=buffer_executions_skipped_item.env_values_executed,
                )
                env_predicate_pair_executed = EnvPredicatePair(
                    env_variables=previous_env_variables_executed,
                    predicate=buffer_executions_skipped_item.predicate)
                self.buffer_executions_skipped.append(
                    ExecutionSkipped(
                        env_predicate_pair_skipped=env_predicate_pair_skipped,
                        env_predicate_pair_executed=env_predicate_pair_executed,
                        search_component=buffer_executions_skipped_item.search_component,
                    ))
        else:
            attempt = 0

            suffix = "n_iterations_"
            if self.param_names:
                suffix += self.param_names_string + "_"
            if self.exp_suffix:
                suffix += self.exp_suffix + "_"
            suffix += str(num_iterations)

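            # append an attempt counter and bump it until the directory does not exist yet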
            algo_save_dir = os.path.abspath(HOME + "/random/" + env_name +
                                            "/" + algo_name + "/" + suffix +
                                            "_" + str(attempt))
            _algo_save_dir = algo_save_dir
            while os.path.exists(_algo_save_dir):
                attempt += 1
                _algo_save_dir = algo_save_dir[:-1] + str(attempt)
            self.algo_save_dir = _algo_save_dir
            os.makedirs(self.algo_save_dir)
            self.buffer_env_predicate_pairs = BufferEnvPredicatePairs(
                save_dir=self.algo_save_dir)
            # assuming initial env_variables satisfies the predicate of adequate performance
            if self.runs_for_probability_estimation:
                env_predicate_pair = EnvPredicatePair(
                    env_variables=self.init_env_variables,
                    predicate=True,
                    probability_estimation_runs=[True] *
                    self.runs_for_probability_estimation,
                )
            else:
                env_predicate_pair = EnvPredicatePair(
                    env_variables=self.init_env_variables, predicate=True)
            self.buffer_env_predicate_pairs.append(env_predicate_pair)
            self.buffer_executions_skipped = BufferExecutionsSkipped(
                save_dir=self.algo_save_dir)
            self.archive = Archive(save_dir=self.algo_save_dir,
                                   epsilon=binary_search_epsilon)

        self.env_name = env_name
        self.algo_name = algo_name
        self.tb_log_name = tb_log_name
        self.continue_learning_suffix = continue_learning_suffix
        self.binary_search_epsilon = binary_search_epsilon

        self.runner = Runner(
            agent=self.agent,
            runs_for_probability_estimation=self.runs_for_probability_estimation,
        )

        self.monitor_search_every = monitor_search_every
        self.monitor_progress = None
        if self.monitor_search_every > 0:
            self.monitor_progress = MonitorProgress(
                algo_name=self.algo_name,
                env_name=standardize_env_name(env_name=self.env_name),
                results_dir=self.algo_save_dir,
                param_names_string=self.param_names_string,
                search_type="random",
                start_search_time=start_search_time,
                starting_progress_report_number=starting_progress_report_number,
            )
    filename = os.path.join(
        args.dir,
        ("analyze_volume_results_tsne_" if args.only_tsne else "analyze_volume_results_")
        + "adapt_regress_probability"
        + ("_" + str(args.num_iterations_to_consider) if args.num_iterations_to_consider else "")
        + "_g_" + str(args.grid_granularity_percentage_of_range) + ".txt",
    )

    logger = Log("analyze_volume_results")
    logging.basicConfig(filename=filename, filemode="w", level=logging.DEBUG)
    logger.info("args: {}".format(args))

    limits_dict = dict()
    env_params_dir = os.path.abspath("{}/env_params/{}".format(
        HOME, standardize_env_name(args.env_name)))
    list_of_config_files = (glob.glob(os.path.join(
        env_params_dir, "*.yml")) if args.normalize_limits else glob.glob(
            os.path.join(env_params_dir, "{}.yml").format(args.algo_name)))
    low_limits_dict = dict()
    high_limits_dict = dict()
    for config_file in list_of_config_files:
        algo_name = config_file[config_file.rindex("/") +
                                1:config_file.rindex(".")]
        if not args.no_filter_model_architecture:
            if "_big" in algo_name or "_small" in algo_name:
                continue
        with open(config_file, "r") as f:
            env_params = yaml.safe_load(f)
            for param_name, values_dict in env_params.items():
                if param_name in args.param_names:
    parser.add_argument("--full_training_time", action="store_true")
    parser.add_argument("--binary_search_epsilon", type=float, default=0.05)
    parser.add_argument("--stop_at_first_iteration", type=bool, default=False)
    parser.add_argument("--model_suffix", type=str, default=None)
    parser.add_argument("--resample", action="store_true", default=False)
    parser.add_argument("--dir_experiments",
                        type=check_file_existence,
                        default=None)
    parser.add_argument("--exp_suffix", type=str, default=None)
    parser.add_argument("--max_runtime_h", type=int, default=48)

    args, _ = parser.parse_known_args()

    param_names = args.param_names

    args.tb_log_name = standardize_env_name(env_name=args.env_name)
    args.n_eval_episodes = 20
    args.continue_learning = True
    args.only_exp_search = False

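    # exp/binary search guidance default to on and are disabled together when
    # --search_guidance is not given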
    args.exp_search_guidance = True
    args.binary_search_guidance = True
    if not args.search_guidance:
        args.exp_search_guidance = False
        args.binary_search_guidance = False
    args.eval_callback = False
    args.show_progress_bar = False
    args.model_to_load = "best_model_eval"
    args.num_envs = 1
    args.render = False
    args.train_total_timesteps = None