def _plot(self):
    """Redraw the figure: one (optionally smoothed) curve per selected metric,
    plotted against the iteration column 'i'."""
    columns = get_column_dict(self.collection, 'i', *self.metric_selector)
    iterations = columns.pop('i')

    self.figure.clf()
    axes = self.figure.add_subplot(111)

    # Smooth the x axis with the same width so both axes stay aligned.
    if self.smooth > 0:
        iterations, = gaussConv(self.smooth, iterations)

    for label, values in columns.items():
        if self.smooth > 0:
            values, = gaussConv(self.smooth, values)
        axes.plot(iterations, values, label=label)

    axes.legend(loc='best')
    axes.set_xlabel('iteration')
    self.figure.canvas.draw()
def _plot(self):
    """Clear the current figure and plot every selected metric column
    versus the iteration index "i", applying Gaussian smoothing when
    self.smooth is positive."""
    col_dict = get_column_dict(self.collection, "i", *self.metric_selector)
    x = col_dict.pop("i")

    self.figure.clf()
    axes = self.figure.add_subplot(111)

    smoothing = self.smooth > 0
    if smoothing:
        x, = gaussConv(self.smooth, x)

    for label, y in col_dict.items():
        # Each metric gets the same smoothing treatment as the x axis.
        if smoothing:
            y, = gaussConv(self.smooth, y)
        axes.plot(x, y, label=label)

    axes.legend(loc="best")
    axes.set_xlabel("iteration")
    self.figure.canvas.draw()
def plot_curve(collection, x_key, *y_key_list):
    """Plot raw samples plus a smoothed trend line for each y column.

    Parameters
    ----------
    collection : record collection accepted by get_column_dict
    x_key : key of the column used as the x axis
    *y_key_list : keys of the columns to plot against x

    Each series is drawn twice in a shared color: the raw points as small
    dots and the gaussConv(1, ...) smoothed curve as a solid line.
    """
    col_dict = get_column_dict(collection, *((x_key, ) + y_key_list))
    x = col_dict.get(x_key)
    x_, = gaussConv(1, x)
    # NOTE(review): _get_lines.color_cycle is a private matplotlib attribute
    # that was later replaced by prop_cycler -- confirm the pinned matplotlib
    # version still exposes it.
    color_cycle = pp.gca()._get_lines.color_cycle
    for y_key in y_key_list:
        # next() builtin instead of .next(): identical on Python 2.6+ and
        # also valid on Python 3, where iterator.next() no longer exists.
        color = next(color_cycle)
        y = col_dict.get(y_key)
        y_, = gaussConv(1, y)
        pp.plot(x, y, '.', color=color, markersize=2)
        pp.plot(x_, y_, '-', label=y_key, color=color)
    pp.xlabel(x_key)
    pp.legend(loc='best')
def plot_curve(collection, x_key, *y_key_list):
    """Draw each y column against x: raw dots overlaid with a smoothed line.

    collection -- record collection passed through to get_column_dict
    x_key      -- column key for the x axis
    y_key_list -- column keys plotted (and labeled) on the y axis
    """
    keys = (x_key, ) + y_key_list
    col_dict = get_column_dict(collection, *keys)

    x = col_dict.get(x_key)
    x_, = gaussConv(1, x)  # width-1 Gaussian smoothing of the x values

    # NOTE(review): relies on matplotlib's private _get_lines.color_cycle
    # (renamed/removed in later releases) -- verify against the installed
    # matplotlib version.
    color_cycle = pp.gca()._get_lines.color_cycle

    for y_key in y_key_list:
        # Use the next() builtin rather than the Python-2-only .next()
        # method so the loop also runs under Python 3.
        color = next(color_cycle)
        y = col_dict.get(y_key)
        y_, = gaussConv(1, y)
        # Raw samples as tiny dots, smoothed curve as a line, same color.
        pp.plot(x, y, '.', color=color, markersize=2)
        pp.plot(x_, y_, '-', label=y_key, color=color)

    pp.xlabel(x_key)
    pp.legend(loc='best')
def plot_eval_info(plot, hp_info, y_keys, perm=None):
    """Gather evaluation traces and forward them to `plot` for display.

    Collects the requested metric columns keyed by hyperparameter id,
    appends the latest agnostic Bayes probability distribution, restores
    the GP chooser state, and hands everything (optionally with permuted
    hyperparameter axes) to plot.set_info.
    """
    y_dict = get_column_dict(hp_info.trace.db.eval_info, 'hp_id', *y_keys)
    idx = hp_info.map_hp_id_list(y_dict.pop('hp_id'))

    # Add the agnostic Bayes distribution to the list of traces.
    idx_list, distr_list = get_column_list(hp_info.trace.db.predict, 'i', 'prob')
    distr = distr_list[np.argmax(idx_list)]  # the last computed distribution
    y_dict['AB probability'] = unpack_prob(distr, hp_info, len(idx))

    if len(idx) == 0:
        print('no results yet')
        return

    gp = MyGP(mcmc_iters=0, noiseless=False)
    gp.set_hypers(hp_info.chooser_state)

    for key in y_keys:
        y_dict[key] = np.array(y_dict[key])

    X = hp_info.unit_grid[idx, :]
    hp_keys = hp_info.hp_keys
    print(hp_keys)
    if perm is not None:
        # Reorder the hyperparameter axes according to the permutation.
        X = X[:, perm]
        hp_keys = [hp_keys[i] for i in perm]
    hp_keys = [clean_hp_name(hp_key) for hp_key in hp_keys]
    print(hp_keys)

    plot.set_info(X, y_dict, 'val.risk', hp_keys, hp_info.hp_space.var_list, gp)
def plot_eval_info(plot, hp_info, y_keys, perm=None):
    """Collect metric traces plus the latest AB distribution and show them.

    Pulls the y_keys columns from the evaluation database, attaches the
    most recent agnostic Bayes probability vector, rebuilds the GP from
    the saved chooser state, and passes it all to plot.set_info (with the
    hyperparameter columns optionally permuted by `perm`).
    """
    db = hp_info.trace.db

    y_dict = get_column_dict(db.eval_info, 'hp_id', *y_keys)
    idx = hp_info.map_hp_id_list(y_dict.pop('hp_id'))

    # Append the agnostic Bayes distribution as one more trace.
    iter_list, prob_list = get_column_list(db.predict, 'i', 'prob')
    latest = prob_list[np.argmax(iter_list)]  # highest iteration = most recent
    y_dict['AB probability'] = unpack_prob(latest, hp_info, len(idx))

    if len(idx) == 0:
        print('no results yet')
        return

    gp = MyGP(mcmc_iters=0, noiseless=False)
    gp.set_hypers(hp_info.chooser_state)

    for key in y_keys:
        y_dict[key] = np.array(y_dict[key])

    X = hp_info.unit_grid[idx, :]
    keys = hp_info.hp_keys
    print(keys)
    if perm is not None:
        # Apply the requested column permutation to data and labels alike.
        X = X[:, perm]
        keys = [keys[j] for j in perm]
    keys = [clean_hp_name(k) for k in keys]
    print(keys)

    plot.set_info(X, y_dict, 'val.risk', keys, hp_info.hp_space.var_list, gp)