def search(self, im_file, query):
    perf = Performance(False)
    im_bytes = base64.b64decode(im_file)

    # Page defaults to 0 when the query does not specify one
    search_page = int(query.get("page", 0))

    res = cmd.search(perf, self.es, self.evaluate, im_bytes, search_page)
    if res:
        hits, palettes, w_dists, w_scores, palette, query_tags, rating = res
        zipped = list(zip(hits, palettes, w_dists, w_scores))
        processed = data_process(zipped)

        # Plot unless the query explicitly disables it
        should_plot = bool(query.get("plot", True))

        plt_bytes = None
        if should_plot:
            perf.begin_section("plotting")
            plt_bytes = plotting.plot(im_bytes, res, True)
            perf.end_section("plotting")

        out_dict = {
            "palette": palette.tolist(),
            "query_tags": query_tags,
            "query_rating": rating[0],
            "results": processed,
            "performance": perf.gen_report(),
        }
        send_img = base64.b64encode(plt_bytes).decode("utf-8") if should_plot else "*"
        return {"data": out_dict, "plot": send_img, "success": True}
    return {"success": False}
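# Hypothetical usage sketch (not from the original source): assumes an
# instance `handler` exposing the `search` method above and a JPEG on disk.
# It illustrates the base64-encoded payload and the optional query keys.
import base64

with open("query.jpg", "rb") as f:
    im_b64 = base64.b64encode(f.read()).decode("utf-8")

response = handler.search(im_b64, {"page": 0, "plot": False})
if response["success"]:
    print(response["data"]["query_tags"], response["data"]["query_rating"])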
def create_plots(self, dataframe_samples, dataframe_samples_woburin, keys, lag,
                 all_on_one=True, save_data=False, burn_in=False, ac=False):
    """Generate density and trace plots for the given sample keys.

    :param dataframe_samples: samples including burn-in
    :param dataframe_samples_woburin: samples with burn-in removed
    :param keys: parameter names to plot
    :param lag: maximum lag for the autocorrelation plot
    :param all_on_one: draw all keys on a single figure
    :param save_data: not used in this method
    :param burn_in: whether burn-in samples are flagged in the plots
    :param ac: bool, also generate autocorrelation plots
    :return: None; generates plots
    """
    plot_object = plot(dataframe_samples=dataframe_samples,
                       dataframe_samples_woburin=dataframe_samples_woburin,
                       keys=keys, lag=lag, burn_in=burn_in)
    plot_object.plot_density(all_on_one)
    plot_object.plot_trace(all_on_one)
    if ac:
        plot_object.auto_corr()
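# Hypothetical usage sketch (names are assumptions, not from the original
# source): `sampler` is an instance of the class defining create_plots, and
# the two DataFrames hold MCMC samples with and without burn-in.
sampler.create_plots(
    dataframe_samples=df_with_burnin,
    dataframe_samples_woburin=df_without_burnin,
    keys=["mu", "sigma"],
    lag=50,
    all_on_one=True,
    burn_in=True,
    ac=True,  # also draw autocorrelation plots
)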
identifierList += ["-Stay-" + str(weight)]

for i in range(len(ratioList)):
    run_for_ratio(ratioList[i], identifierList[i])

''' plot '''
# Plot the number of actions
plt.figure()
ax = plt.gca()
for Learner in LEARNERS:
    for identifier in identifierList:
        name = Learner.__name__ + identifier
        # datab = collectorb.getStats(name)
        datas = collectors.getStats(name)
        # dataf = collectorf.getStats(name)
        # plot(ax, datab, label="back", color="red")
        plot(ax, datas, label=name + " do nothing")
        # plot(ax, dataf, label="forward", color='green')

plt.xlabel("episode")
plt.ylabel("# of actions x (1/100)")
plt.legend()
plt.show()

# Plot the reward
plt.figure()
ax = plt.gca()
for Learner in LEARNERS:
    for identifier in identifierList:
        name = Learner.__name__ + identifier
        data = collectorreward.getStats(name)
        plot(ax, data, label=name)
def main():
    # Parse flags
    config = forge.config()

    # Restore flags of pretrained model
    flag_path = osp.join(config.model_dir, 'flags.json')
    fprint(f"Restoring flags from {flag_path}")
    pretrained_flags = AttrDict(fet.json_load(flag_path))
    pretrained_flags.batch_size = 1
    pretrained_flags.gpu = False
    pretrained_flags.debug = True
    fet.print_flags()

    # Fix seeds. Always first thing to be done after parsing the config!
    torch.manual_seed(0)
    np.random.seed(0)
    random.seed(0)
    # Make CUDA operations deterministic
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Load model
    model = fet.load(config.model_config, pretrained_flags)
    model_path = osp.join(config.model_dir, config.model_file)
    fprint(f"Restoring model from {model_path}")
    checkpoint = torch.load(model_path, map_location='cpu')
    model_state_dict = checkpoint['model_state_dict']
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_1', None)
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_2', None)
    model.load_state_dict(model_state_dict)
    fprint(model)

    # Visualise
    model.eval()
    for _ in range(100):
        y, stats = model.sample(1, pretrained_flags.K_steps)

        fig, axes = plt.subplots(nrows=4, ncols=1 + pretrained_flags.K_steps)

        # Generated scene
        plot(axes, 0, 0, y, title='Generated scene', fontsize=12)
        # Empty plots
        plot(axes, 1, 0, fontsize=12)
        plot(axes, 2, 0, fontsize=12)
        plot(axes, 3, 0, fontsize=12)

        # Put K generation steps in separate subfigures
        for step in range(pretrained_flags.K_steps):
            x_step = stats['x_k'][step]
            m_step = stats['log_m_k'][step].exp()
            mx_step = stats['mx_k'][step]
            if 'log_s_k' in stats:
                s_step = stats['log_s_k'][step].exp()
            pre = 'Mask x RGB ' if step == 0 else ''
            plot(axes, 0, 1 + step, mx_step, pre + f'k={step+1}', fontsize=12)
            pre = 'RGB ' if step == 0 else ''
            plot(axes, 1, 1 + step, x_step, pre + f'k={step+1}', fontsize=12)
            pre = 'Mask ' if step == 0 else ''
            plot(axes, 2, 1 + step, m_step, pre + f'k={step+1}', True, fontsize=12)
            if 'log_s_k' in stats:
                pre = 'Scope ' if step == 0 else ''
                plot(axes, 3, 1 + step, s_step, pre + f'k={step+1}', True,
                     axis=step == 0, fontsize=12)

        # Beautify and show figure
        plt.subplots_adjust(wspace=0.05, hspace=0.05)
        manager = plt.get_current_fig_manager()
        manager.resize(*manager.window.maxsize())
        plt.show()
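# Minimal sketch of the grid `plot` helper assumed by the call sites above
# (an assumption inferred from its arguments, not the project's actual
# utility): place a CHW image tensor into one cell of the subplot grid.
def plot(axes, row, col, img=None, title='', grayscale=False, axis=False,
         fontsize=12):
    # `axis` is accepted for call-site compatibility; its exact effect in the
    # original helper is assumed
    ax = axes[row, col]
    ax.set_xticks([])
    ax.set_yticks([])
    if img is not None:
        # Take the first batch element and move channels last for imshow
        arr = img.detach().cpu().numpy()[0].transpose(1, 2, 0).squeeze()
        ax.imshow(arr, cmap='gray' if grayscale else None)
    if title:
        ax.set_title(title, fontsize=fontsize)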
fn_png = os.path.join(dir_results, '%s.png' % fn_base)
fn_pdf = os.path.join(dir_results, '%s.pdf' % fn_base)
if not os.path.exists(dir_results):
    os.mkdir(dir_results)

epsilon_range = np.array([0.2, 0.4, 0.6, 0.8, 1.0])

if args.run:
    print('Run Experiments')
    run(args.dataset, fn_results, epsilon_range,
        test_size=float(args.test_size), ARD=args.ARD,
        n_repetitions=args.n_reps, nInd=args.nInd, loss=args.loss)

if args.plot:
    matplotlib.rc('font', **{'size': 14})
    D = pd.read_pickle(fn_results)
    plotting.plot(D, models, x="epsilon",
                  xticks=[0.2, 0.4, 0.6, 0.8, 1.0], ylim=(0, 0.85))
    plt.savefig(fn_png)
    plt.savefig(fn_pdf)
    plt.close()
drift, metric_name, plot_name)

if not os.path.exists("results/experiment3a/plots/gen/%s/%s/" % (drift, metric_name)):
    os.makedirs("results/experiment3a/plots/gen/%s/%s/" % (drift, metric_name))

clf_indexes = []
for clf_id, clf_name in reversed(list(enumerate(clf_names))):
    try:
        # Load data from file
        filename = "results/experiment3a/metrics/gen/%s/%s/%s/%s.csv" % (
            drift, s_name, metric_name, clf_name)
        plot_data = np.genfromtxt(filename, delimiter=',', dtype=np.float32)
        # Plot metrics of each stream
        plot_object = plot(plot_data, clf_name, clf_id, sigma)
        # Save average of scores into mean_scores (1 stream = 1 average)
        scores = plot_data.copy()
        mean_score = np.mean(scores)
        mean_scores[metric_id, stream_id, clf_id] = mean_score
        clf_indexes.append(clf_id)
    except IOError:
        # print("File", filename, "not found")
        print("File not found")
        # continue to the next classifier if the file is missing

# Save plots of metrics of each stream
save_plot(plot_object, drift, metric_name, metric_a,
          np.array(clf_names)[list(reversed(clf_indexes))],
agent = RlGlueCompatWrapper(learner, gamma=0.99)
glue = RlGlue(agent, env)
glue.start()

for episode in range(EPISODES):
    glue.num_steps = 0
    glue.total_reward = 0
    glue.runEpisode(max_steps=1000)
    print(Learner.__name__, run, episode, glue.num_steps)
    collector.collect(Learner.__name__, glue.total_reward)
collector.reset()

import matplotlib.pyplot as plt
from utils.plotting import plot

ax = plt.gca()
for Learner in LEARNERS:
    name = Learner.__name__
    data = collector.getStats(name)
    plot(ax, data, label=name, color=COLORS[name])

plt.legend()
plt.show()
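# Minimal sketch of what `utils.plotting.plot` is assumed to do here (an
# assumption based on the call sites, not the actual helper): draw the mean
# learning curve with a shaded standard-error band on the given axes.
import numpy as np

def plot(ax, data, label=None, color=None):
    mean, stderr = data  # assumed (mean, standard-error) arrays per learner
    mean = np.asarray(mean)
    stderr = np.asarray(stderr)
    x = np.arange(len(mean))
    line, = ax.plot(x, mean, label=label, color=color)
    ax.fill_between(x, mean - stderr, mean + stderr,
                    color=line.get_color(), alpha=0.25)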
def run_case():
    x = np.linspace(0, 1, 100)
    y = toy_fun(x)

    print('--> Create training set')
    x_t = np.array([+0.05, +0.05, +0.17, +0.17, +0.22, +0.30, +0.35,
                    +0.37, +0.52, +0.53, +0.69, +0.70, +0.82, +0.90])
    psi_t = np.array([+0.56, +0.65, +0.90, +1.18, +2.39, +3.40, +2.89,
                      +2.64, -2.69, -3.20, -3.40, -2.77, +0.41, +0.35])
    x_v = x
    psi_v = y
    n_data = len(x_t)

    print('--> Create prediction set')
    grid_pts = 100
    x_p = np.linspace(0, 1, grid_pts)
    n_pred = len(x_p)
    n_totp = n_data + n_pred

    print('--> Learn GP')
    # Set kernel parameters
    psi_t = psi_t.reshape(n_data, 1)
    y_t = np.hstack((np.cos(psi_t), np.sin(psi_t)))
    x_t = x_t.reshape(n_data, 1)
    x_p = x_p.reshape(n_pred, 1)

    config = {
        'xi': x_t,
        'y': y_t,
    }
    ell2 = 1.15 ** 2
    s2 = 700.
    noise = 2.E-3

    hyp = np.zeros([3, 1])
    hyp[0] = ell2
    hyp[1] = s2
    hyp[2] = noise
    hyp = np.log(hyp.flatten())

    ans = gp.learning_se_iso(hyp, config)
    print(ans.message)
    hyp = np.exp(ans.x)
    ell2 = hyp[0]
    s2 = hyp[1]
    noise = hyp[2]
    params = {'ell2': ell2, 's2': s2}

    print('--> Initialising model variables')
    t0 = time()
    yp, var_yp = gp.predict_se_iso(y_t, x_t, x_p, params, noise)
    tf = time()
    print('Total elapsed time in prediction: ' + str(tf - t0) + ' s')

    # Keep all values between -pi and pi
    z_p = yp[:, 0] + 1.j * yp[:, 1]
    s2_p = np.diag(var_yp)
    new_psi_p = np.angle(z_p)

    n_grid = 1000
    p, th = gp.get_predictive_for_wrapped(new_psi_p, var_yp, res=n_grid)

    # Predictions
    print('--> Saving and displaying results')
    # First the heatmap in the background
    fig, scaling_x, scaling_y, offset_y = plot.circular_error_bars(th, p, True)
    # Then scale the predicted and training sets to match the heatmap dimensions
    scaled_x_t = x_t * scaling_x
    scaled_y_t = uc.cfix(psi_t) * scaling_y + offset_y
    scaled_x_p = x_p * scaling_x
    scaled_y_p = uc.cfix(new_psi_p) * scaling_y + offset_y
    scaled_x_v = x_v * scaling_x
    scaled_y_v = uc.cfix(psi_v) * scaling_y + offset_y

    # Now plot the optimised psi's and datapoints
    plot.plot(scaled_x_p, scaled_y_p, 'c.')                    # optimised prediction
    plot.plot(scaled_x_t, scaled_y_t, 'xk', mew=2.0)           # training set
    plot.plot(scaled_x_v, scaled_y_v, 'ob', fillstyle='none')  # held-out set
    plot.ylabel(r'Regressed variable $(\psi)$')
    plot.xlabel(r'Input variable $(x)$')
    plot.tight_layout()
    plot.grid(True)

    holl_score = 0.
    for ii in range(0, n_pred):
        holl_score += uc.loglik_gp2circle(psi_v[ii], yp[ii], s2_p[ii])
    print('HOLL score: ' + str(holl_score))
    print('Finished running case!')
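# `toy_fun` is used above but not defined in this snippet. A hypothetical
# stand-in for illustration only (the original definition is unknown): a
# smooth trend wrapped onto the circle to mimic angular data in (-pi, pi].
import numpy as np

def toy_fun(x):
    return np.angle(np.exp(1j * (4. * np.pi * x - np.pi / 2)))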
def main():
    # Parse flags
    config = forge.config()
    fet.print_flags()

    # Restore flags of pretrained model
    flag_path = osp.join(config.model_dir, 'flags.json')
    fprint(f"Restoring flags from {flag_path}")
    pretrained_flags = AttrDict(fet.json_load(flag_path))
    pretrained_flags.debug = True

    # Fix seeds. Always first thing to be done after parsing the config!
    torch.manual_seed(0)
    np.random.seed(0)
    random.seed(0)
    # Make CUDA operations deterministic
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Load data
    config.batch_size = 1
    _, _, test_loader = fet.load(config.data_config, config)

    # Load model
    model = fet.load(config.model_config, pretrained_flags)
    model_path = osp.join(config.model_dir, config.model_file)
    fprint(f"Restoring model from {model_path}")
    checkpoint = torch.load(model_path, map_location='cpu')
    model_state_dict = checkpoint['model_state_dict']
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_1', None)
    model_state_dict.pop('comp_vae.decoder_module.seq.0.pixel_coords.g_2', None)
    model.load_state_dict(model_state_dict)
    fprint(model)

    # Visualise
    model.eval()
    for count, batch in enumerate(test_loader):
        if count >= config.num_images:
            break

        # Forward pass
        output, _, stats, _, _ = model(batch['input'])

        # Set up figure
        fig, axes = plt.subplots(nrows=4, ncols=1 + pretrained_flags.K_steps)

        # Input and reconstruction
        plot(axes, 0, 0, batch['input'], title='Input image', fontsize=12)
        plot(axes, 1, 0, output, title='Reconstruction', fontsize=12)
        # Empty plots
        plot(axes, 2, 0, fontsize=12)
        plot(axes, 3, 0, fontsize=12)

        # Put K reconstruction steps into separate subfigures
        x_k = stats['x_r_k']
        log_m_k = stats['log_m_k']
        mx_k = [x * m.exp() for x, m in zip(x_k, log_m_k)]
        log_s_k = stats['log_s_k'] if 'log_s_k' in stats else None
        for step in range(pretrained_flags.K_steps):
            mx_step = mx_k[step]
            x_step = x_k[step]
            m_step = log_m_k[step].exp()
            if log_s_k:
                s_step = log_s_k[step].exp()
            pre = 'Mask x RGB ' if step == 0 else ''
            plot(axes, 0, 1 + step, mx_step, pre + f'k={step+1}', fontsize=12)
            pre = 'RGB ' if step == 0 else ''
            plot(axes, 1, 1 + step, x_step, pre + f'k={step+1}', fontsize=12)
            pre = 'Mask ' if step == 0 else ''
            plot(axes, 2, 1 + step, m_step, pre + f'k={step+1}', True, fontsize=12)
            if log_s_k:
                pre = 'Scope ' if step == 0 else ''
                plot(axes, 3, 1 + step, s_step, pre + f'k={step+1}', True,
                     axis=step == 0, fontsize=12)

        # Beautify and show figure
        plt.subplots_adjust(wspace=0.05, hspace=0.15)
        manager = plt.get_current_fig_manager()
        manager.resize(*manager.window.maxsize())
        plt.show()
agent.resetDict()

df = pandas.DataFrame(action_dict)
print(action_dict)
print(df.mean())

''' plot '''
plt.figure()
ax = plt.gca()
for Learner in LEARNERS:
    name = Learner.__name__
    datab = collectorloss.getStats_list(name)
    plot(ax, datab, label="Loss", color="red")

plt.xlabel("Update Steps")
plt.ylabel("TD Loss")
plt.legend()
plt.show()

# plt.figure()
# ax = plt.gca()
# for Learner in LEARNERS:
#     name = Learner.__name__
#     datab = collectorb.getStats(name)
#     plot(ax, datab, label="back", color="red")
#     datas = collectors.getStats(name)
#     plot(ax, datas, label="do nothing", color='blue')
def run_case():
    print('--> Create training set')
    x = np.linspace(0, 1, 100)
    y = toy_fun(x)

    x_t = np.array([+0.05, +0.05, +0.17, +0.17, +0.22, +0.30, +0.35,
                    +0.37, +0.52, +0.53, +0.69, +0.70, +0.82, +0.90])
    y_t = np.array([+0.56, +0.65, +0.90, +1.18, +2.39, +3.40, +2.89,
                    +2.64, -2.69, -3.20, -3.40, -2.77, +0.41, +0.35])
    x_v = x
    y_v = y
    n_data = len(x_t)

    print('--> Create prediction set')
    grid_pts = 100
    x_p = np.linspace(0, 1, grid_pts)
    n_pred = len(x_p)
    n_totp = n_data + n_pred

    print('--> Calculate kernel')
    # Set kernel parameters
    noise = 1.E-2
    params = {
        's2': 250.00,
        'ell2': 5.50E-2 ** 2,
    }
    k1 = np.array([[10.]])
    k2 = np.array([[0.]])

    y_t = y_t.reshape(n_data, 1)
    x_t = x_t.reshape(n_data, 1)
    x_p = x_p.reshape(grid_pts, 1)
    x = np.vstack((x_p, x_t))

    # Calculate kernels
    mat_k_cc = kernels.se_iso(x, x, params)
    mat_k_ss = kernels.se_iso(x, x, params)
    mat_k = np.bmat([[mat_k_cc, np.zeros_like(mat_k_cc)],
                     [np.zeros_like(mat_k_ss), mat_k_ss]])
    mat_k = np.asarray(mat_k)
    mat_k += noise * np.eye(mat_k.shape[0])

    # Find inverse via Cholesky factorisation
    mat_ell = la.cholesky(mat_k, lower=True)
    mat_kin = la.solve(mat_ell.T, la.solve(mat_ell, np.eye(mat_ell.shape[0])))

    print('--> Initialising model variables')
    psi_p = 2. * np.random.rand(n_pred, 1) - 1
    mf_k1 = np.log(np.random.rand(n_totp, 1) * 10)
    mf_m1 = 2. * np.random.rand(n_totp, 1) - 1

    n_var = psi_p.shape[0] + mf_k1.shape[0] + mf_m1.shape[0]
    idx = np.arange(0, n_var)

    config = {
        'N_data': n_data,
        'N_pred': n_pred,
        'c_data': np.cos(y_t),
        's_data': np.sin(y_t),
        'c_2data': np.cos(2 * y_t),
        's_2data': np.sin(2 * y_t),
        'Kinv': mat_kin,
        'idx_psi_p': idx[0:psi_p.shape[0]],
        'idx_mf_k1': idx[psi_p.shape[0]:psi_p.shape[0] + mf_k1.shape[0]],
        'idx_mf_m1': idx[psi_p.shape[0] + mf_k1.shape[0]:
                         psi_p.shape[0] + mf_k1.shape[0] + mf_m1.shape[0]],
        'k1': k1,
        'k2': k2,
    }

    xin = np.vstack((psi_p, mf_k1, mf_m1))

    print('--> Starting optimisation')
    t0 = time()
    results = mgvm.vi.inference_model_opt(xin, config)
    tf = time()
    print('Total elapsed time: ' + str(tf - t0) + ' s')
    print(results.message)

    # Keep all values between -pi and pi
    new_psi_p = uc.cfix(results.x[config['idx_psi_p']])
    new_mf_k1 = results.x[config['idx_mf_k1']]
    new_mf_m1 = results.x[config['idx_mf_m1']]

    # Predictions
    print('--> Saving and displaying results')
    # First the heatmap in the background
    p, th = mgvm.vi.predictive_dist(n_pred, new_mf_k1, new_mf_m1, k1, 0., pi_shift=True)
    fig, scaling_x, scaling_y, offset_y = plot.circular_error_bars(th, p, True)

    # Then scale the predicted and training sets to match the heatmap dimensions
    scaled_x_t = x_t * scaling_x
    scaled_y_t = uc.cfix(y_t) * scaling_y + offset_y
    scaled_x_v = x_v * scaling_x
    scaled_y_v = uc.cfix(y_v) * scaling_y + offset_y
    scaled_x_p = x_p * scaling_x
    scaled_y_p = uc.cfix(new_psi_p) * scaling_y + offset_y

    # Now plot the optimised psi's and datapoints
    plot.plot(scaled_x_p, scaled_y_p, 'c.')                    # optimised prediction
    plot.plot(scaled_x_t, scaled_y_t, 'xk', mew=2.)            # training set
    plot.plot(scaled_x_v, scaled_y_v, 'ob', fillstyle='none')  # held-out set
    plot.xticks([0, 20, 40, 60, 80, 100],
                ['0.0', '0.2', '0.4', '0.6', '0.8', '1.0'])
    plot.ylabel(r'Regressed variable $(\psi)$')
    plot.xlabel(r'Input variable $(x)$')
    plot.tight_layout()

    holl_score = 0.
    for ii in range(0, y_v.shape[0]):
        holl_score += uc.holl(y_v[ii], new_psi_p[ii], 0, k1, 0)
    print('HOLL Score: ' + str(holl_score))
    print('Finished running case!')
def run_case():
    print('--> Create training set')
    x = np.linspace(0, 1, 100)
    y = toy_fun(x)

    x_t = np.array([+0.05, +0.05, +0.17, +0.17, +0.22, +0.30, +0.35,
                    +0.37, +0.52, +0.53, +0.69, +0.70, +0.82, +0.90])
    y_t = np.array([+0.56, +0.65, +0.90, +1.18, +2.39, +3.40, +2.89,
                    +2.64, -2.69, -3.20, -3.40, -2.77, +0.41, +0.35])
    x_v = x
    y_v = y
    y_t = uc.cfix(y_t)
    y_v = uc.cfix(y_v)
    n_data = len(x_t)

    print('--> Create prediction set')
    grid_pts = 100
    x_p = np.linspace(0, 1, grid_pts)
    n_pred = len(x_p)
    n_totp = n_data + n_pred

    print('--> Learn GP')
    # Set kernel parameters
    y_t = y_t.reshape(n_data, 1)
    x_t = x_t.reshape(n_data, 1)
    x_p = x_p.reshape(n_pred, 1)

    config = {
        'xi': x_t,
        'y': y_t,
    }
    ell2 = 0.5 ** 2
    s2 = 200.
    noise = 1.E-4

    hyp = np.zeros([3, 1])
    hyp[0] = ell2
    hyp[1] = s2
    hyp[2] = noise
    hyp = np.log(hyp.flatten())

    ans = gp.learning_se_iso(hyp, config)
    print(ans.message)
    hyp = np.exp(ans.x)
    ell2 = hyp[0]
    s2 = hyp[1]
    noise = hyp[2]
    params = {'ell2': ell2, 's2': s2}

    print('--> Initialising model variables')
    t0 = time()
    yp, var_yp = gp.predict_se_iso(y_t, x_t, x_p, params, noise)
    tf = time()
    print('Total elapsed time in prediction: ' + str(tf - t0) + ' s')

    new_psi_p = yp
    s2_p = np.diag(var_yp)
    p, th = gp.get_predictive_for_plots_1d(yp, var_yp, res=1000)

    # Predictions
    print('--> Saving and displaying results')
    # First the heatmap in the background
    fig, scaling_x, scaling_y, offset_y = plot.circular_error_bars(th, p, True)
    # Then scale the predicted and training sets to match the heatmap dimensions
    scaled_x_t = x_t * scaling_x
    scaled_y_t = uc.cfix(y_t) * scaling_y + offset_y
    scaled_x_v = x_v * scaling_x
    scaled_y_v = uc.cfix(y_v) * scaling_y + offset_y
    scaled_x_p = x_p * scaling_x
    scaled_y_p = new_psi_p * scaling_y + offset_y
    # scaled_mode = (mode + np.pi) * 1000 / (2 * np.pi)

    # Now plot the optimised psi's and datapoints
    plot.plot(scaled_x_p, scaled_y_p, 'c.')                    # optimised prediction
    plot.plot(scaled_x_t, scaled_y_t, 'xk', mew=2.0)           # training set
    plot.plot(scaled_x_v, scaled_y_v, 'ob', fillstyle='none')  # held-out set
    plot.ylabel(r'Regressed variable $(\psi)$')
    plot.xlabel(r'Input variable $(x)$')
    plot.tight_layout()

    holl_score = 0.
    for ii in range(0, y_v.shape[0]):
        # Gaussian held-out log-likelihood:
        #   log N(y | yp, s2) = -0.5 * (y - yp)^2 / s2 - 0.5 * log(2 * pi * s2)
        holl_score += -np.sum(0.5 * (yp[ii] - y_v[ii]) ** 2 / s2_p[ii]
                              + 0.5 * np.log(s2_p[ii] * 2. * np.pi))
    print('HOLL Score: ' + str(holl_score))
    print('Finished running case!')