def __init__(self, default: Configuration, incumbent: Configuration, rh: RunHistory,
             train: List[str], test: Union[List[str], None], run_obj: str,
             cutoff, output_dir: str):
    """
    Create scatterplots comparing the costs of two configurations on the
    given instance sets and save the plot(s) to file.

    Parameters
    ----------
    default, incumbent: Configuration
        configurations to be compared
    rh: RunHistory
        runhistory to use for cost-estimations
    train: List[str]
        training-instance names
    test: Union[List[str], None]
        test-instance names; may be None or [None] when no test set exists
    run_obj: str
        run-objective (used for the axis labels / metric)
    cutoff: float
        maximum runtime of the target algorithm (upper bound of the plot)
    output_dir: str
        output directory

    Attributes
    ----------
    output_fns: List[str]
        list with paths to generated plots
    """
    self.logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__)
    out_fn_base = os.path.join(output_dir, 'scatter_')
    self.logger.info("... plotting scatter")
    self.logger.debug("Plot scatter to %s[train|test].png", out_fn_base)

    metric = run_obj
    timeout = cutoff
    labels = ["default {}".format(run_obj), "incumbent {}".format(run_obj)]

    # Estimate per-instance costs once; filtered per instance-set below.
    def_costs = get_cost_dict_for_config(rh, default).items()
    inc_costs = get_cost_dict_for_config(rh, incumbent).items()

    out_fns = []
    for insts, name in [(train, 'train'), (test, 'test')]:
        # Guard against a missing instance set: scenarios without test
        # instances may pass None, [] or [None] — all must be skipped
        # (a plain `insts == [None]` check lets None through and would
        # crash on `k in insts` below).
        if not insts or insts == [None]:
            self.logger.debug("No %s instances, skipping scatter", name)
            continue
        # Fresh local names instead of rebinding the Configuration
        # parameters `default`/`incumbent` to numpy arrays.
        def_vals = np.array([v for k, v in def_costs if k in insts])
        inc_vals = np.array([v for k, v in inc_costs if k in insts])
        min_val = min(min(def_vals), min(inc_vals))
        out_fn = out_fn_base + name + '.png'
        out_fns.append(plot_scatter_plot((def_vals, ), (inc_vals, ), labels,
                                         metric=metric, min_val=min_val,
                                         max_val=timeout, out_fn=out_fn))
    self.output_fns = out_fns
def plot_scatter(self, default: Configuration, incumbent: Configuration, rh: RunHistory):
    """
    Create one scatterplot per instance set (train/test) comparing the
    estimated cost of the default against the incumbent configuration,
    saving each figure to file.

    Parameters
    ----------
    default, incumbent: Configuration
        configurations to be compared
    rh: RunHistory
        runhistory to use for cost-estimations

    Returns
    -------
    output_fns: List[str]
        list with paths to generated plots
    """
    out_fn_base = os.path.join(self.output_dir, 'scatter_')
    self.logger.info("... plotting scatter")
    self.logger.debug("Plot scatter to %s[train|test].png", out_fn_base)

    metric = self.scenario.run_obj
    timeout = self.scenario.cutoff
    labels = ["default {}".format(self.scenario.run_obj),
              "incumbent {}".format(self.scenario.run_obj)]

    # Per-instance cost estimates for both configurations.
    def_costs = get_cost_dict_for_config(rh, default).items()
    inc_costs = get_cost_dict_for_config(rh, incumbent).items()

    instance_sets = ((self.scenario.train_insts, 'train'),
                     (self.scenario.test_insts, 'test'))
    out_fns = []
    for insts, name in instance_sets:
        # Scenarios without test instances carry [None] as a placeholder.
        if insts == [None]:
            self.logger.debug("No %s instances, skipping scatter", name)
            continue
        x_vals = np.array([cost for inst, cost in def_costs if inst in insts])
        y_vals = np.array([cost for inst, cost in inc_costs if inst in insts])
        lower_bound = min(min(x_vals), min(y_vals))
        out_fn = out_fn_base + name + '.png'
        out_fns.append(plot_scatter_plot((x_vals, ), (y_vals, ), labels,
                                         metric=metric,
                                         min_val=lower_bound,
                                         max_val=timeout,
                                         out_fn=out_fn))
    return out_fns
def _plot_scatter(self, default: Configuration, incumbent: Configuration, rh: RunHistory,
                  train: List[str], test: Union[List[str], None], run_obj: str,
                  cutoff, output_dir):
    """
    Create scatterplots of default- vs. incumbent-cost per instance set
    and save them to file.

    Parameters
    ----------
    default, incumbent: Configuration
        configurations to be compared
    rh: RunHistory
        runhistory to use for cost-estimations
    train[, test]: list(str)
        instance-names
    run_obj: str
        run-objective (time or quality)
    cutoff: float
        maximum runtime of ta
    output_dir: str
        output directory

    Returns
    -------
    dict
        {'figure': list of paths to generated plots, or None if no plot
        was created}

    Raises
    ------
    NotApplicable
        if neither train nor test contains more than one instance
    """
    out_fn_base = os.path.join(output_dir, 'scatter_')
    self.logger.info("... plotting scatter")
    metric = run_obj
    timeout = cutoff
    labels = ["default {}".format(run_obj), "incumbent {}".format(run_obj)]

    # Per-instance cost estimates, computed once and filtered below.
    def_costs = get_cost_dict_for_config(rh, default).items()
    inc_costs = get_cost_dict_for_config(rh, incumbent).items()

    # The signature allows test=None; treat that like an empty list so the
    # len()-checks below don't raise a TypeError.
    test = test if test is not None else []

    out_fns = []
    if len(train) <= 1 and len(test) <= 1:
        raise NotApplicable("No instances, so no scatter-plot.")
    for insts, name in [(train, 'train'), (test, 'test')]:
        # A single (or placeholder) instance yields no meaningful scatter.
        if len(insts) <= 1:
            self.logger.debug("No %s instances, skipping scatter", name)
            continue
        def_vals = np.array([v for k, v in def_costs if k in insts])
        inc_vals = np.array([v for k, v in inc_costs if k in insts])
        min_val = min(min(def_vals), min(inc_vals))
        out_fn = out_fn_base + name + '.png'
        out_fns.append(plot_scatter_plot((def_vals,), (inc_vals,), labels,
                                         metric=metric, min_val=min_val,
                                         max_val=timeout, out_fn=out_fn))
        self.logger.debug("Plotted scatter to %s", out_fn)
    return {'figure' : out_fns if len(out_fns) > 0 else None}
def plot_scatter(self, output_fn_base='scatter'):
    """
    Create one scatterplot per instance set (train/test) comparing the
    cost of the default against the incumbent configuration, saving each
    figure to disk.

    Parameters:
    -----------
    output_fn_base: string
        base-path to save plot to

    Returns
    -------
    list of str
        paths of the saved train- and test-plots
    """
    self.logger.debug("Plot scatter to %s[train|test].png", output_fn_base)

    metric = self.scenario.run_obj
    timeout = self.scenario.cutoff
    labels = ["default cost", "incumbent cost"]

    default_data = (self.data["default"]["train"], self.data["default"]["test"])
    incumbent_data = (self.data["incumbent"]["train"], self.data["incumbent"]["test"])

    # Shared lower bound over all four cost-vectors so both plots use the
    # same axis scale.
    min_val = min(min(min(x) for x in default_data),
                  min(min(y) for y in incumbent_data))

    paths = [output_fn_base + 'train.png', output_fn_base + 'test.png']
    for path, def_vals, inc_vals in zip(paths, default_data, incumbent_data):
        fig = plot_scatter_plot((def_vals, ), (inc_vals, ), labels,
                                metric=metric,
                                user_fontsize=mpl.rcParams['font.size'],
                                min_val=min_val,
                                max_val=timeout,
                                jitter_timeout=True)
        fig.savefig(path)
        plt.close(fig)
    return paths