def sld_profile(self, save_path):
    """Plots the SLD profile of the sample.

    Args:
        save_path (str): path to directory to save SLD profile to.

    """
    q = np.geomspace(0.005, 0.3, 500)
    scale, bkg, dq = 1, 1e-6, 2
    experiment = refl1d_experiment(self.structure, q, scale, bkg, dq, 0)
    z, slds, _, slds_mag, _ = experiment.magnetic_smooth_profile()

    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)

    # Plot the SLD profile.
    ax.plot(z, slds, label='SLD', color='black')
    ax.plot(z, slds_mag, label='Magnetic SLD', color='red')

    ax.set_xlabel(r'$\mathregular{Distance\ (\AA)}$',
                  fontsize=11, weight='bold')
    ax.set_ylabel(r'$\mathregular{SLD\ (10^{-6} \AA^{-2})}$',
                  fontsize=11, weight='bold')
    ax.legend()

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'sld_profile')
def saveConfusionMatrix(cm, class_names, classifier_name='ns'):
    '''
    This function saves the confusion matrix of a particular classification
    task as a CSV file and as a plot.

    ARGUMENTS:
        cm:              a 2-D numpy array of the confusion matrix
                         (cm[i,j] is the number of times a sample from
                         class i was classified in class j)
        class_names:     a list that contains the names of the classes
        classifier_name: name used in the output file names
    '''
    if cm.shape[0] != len(class_names):
        print("saveConfusionMatrix: Wrong argument sizes\n")
        return

    # Header of the matrix.
    header = ["/"]
    for c in class_names:
        header.append(c)

    # Normalise each cell to a percentage of all samples.
    cmCsv = numpy.empty([len(class_names), len(class_names)])
    for i in range(len(class_names)):
        for j in range(len(class_names)):
            val = 100.0 * cm[i][j] / numpy.sum(cm)
            cmCsv[i][j] = format(val, '.2f')

    # Save as CSV.
    out = numpy.column_stack([class_names, cmCsv])
    out1 = numpy.row_stack([header, out])
    numpy.savetxt('confusion_matrix_{0}.csv'.format(classifier_name), out1,
                  delimiter=',', fmt="%s")
    utils.save_plot(utils.get_confusion_matrix(cmCsv, class_names,
                                               class_names),
                    "confusion_matrix_{0}".format(classifier_name))
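# Example usage (a minimal sketch, not from the original source): build a toy
# 2x2 confusion matrix and save it. Assumes the `utils` module providing
# `save_plot` and `get_confusion_matrix` used above is importable.
#
#     cm = numpy.array([[40, 10],
#                       [5, 45]])
#     saveConfusionMatrix(cm, ['speech', 'music'], classifier_name='svm')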
def sld_profile(self, save_path, filename='sld_profile', ylim=None,
                legend=True):
    """Plots the SLD profile of the lipid sample.

    Args:
        save_path (str): path to directory to save SLD profile to.
        filename (str): file name to use when saving the SLD profile.
        ylim (tuple): limits to place on the SLD profile y-axis.
        legend (bool): whether to include a legend in the SLD profile.

    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot the SLD profile for each measured contrast.
    for structure in self.structures:
        ax.plot(*structure.sld_profile(self.distances))

    x_label = r'$\mathregular{Distance\ (\AA)}$'
    y_label = r'$\mathregular{SLD\ (10^{-6} \AA^{-2})}$'
    ax.set_xlabel(x_label, fontsize=11, weight='bold')
    ax.set_ylabel(y_label, fontsize=11, weight='bold')

    # Limit the y-axis if specified.
    if ylim:
        ax.set_ylim(*ylim)

    # Add a legend if specified.
    if legend:
        ax.legend(self.labels)

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, filename)
def evaluate_model(train, test, n_input, save_model_path, save_plot_path):
    # Fit the model.
    model = build_model(train, n_input)

    # History is a list of yearly data.
    history = [x for x in train]

    # Walk-forward validation over the period.
    predictions = list()
    for i in range(len(test)):
        # Predict the period.
        yhat_sequence = forecast_multichannel(model, history, n_input)
        # Store the predictions.
        predictions.append(yhat_sequence)
        # Get the real observation and add it to the history for
        # predicting the next period.
        history.append(test[i, :])

    # Get the array of predictions.
    prediction = np.array(predictions)
    prediction = np.ravel(prediction)

    # Get the array of actual values from the test set.
    actual = test[:, :, 0]
    actual_plot = np.ravel(actual)
    actual[actual == 0] = np.nanmean(actual)
    actual = np.ravel(actual)

    # Calculate and print scores.
    rmse, mape = calculate_scores(actual, prediction)
    print('RMSE: %.3f' % rmse)
    print('MAPE: %.3f' % mape)

    # Save the plot under the given path as a PNG file.
    save_plot(actual_plot, prediction, save_plot_path)
    plot_prediction(actual_plot, prediction)

    # Save the model.
    model.save(save_model_path)
def plot_horizontal_histogram(u, v, w, data):
    from settings import DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS, DEFAULT_STEP

    step = DEFAULT_STEP
    module_matrix = create_module_matrix(u, v)[::step, ::step]
    module_vector = module_matrix.ravel()

    fig = plt.figure(figsize=(20, 10), facecolor='w', edgecolor='r')
    # `normed` was removed in matplotlib 3.x; `density` is the equivalent.
    plt.hist(module_vector,
             bins=DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS.get('bins', 100),
             density=True, facecolor='green', alpha=0.75)
    plt.xlim([
        DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS.get('speed_down', 0),
        DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS.get('speed_up', 50)
    ])
    plt.ylim([
        DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS.get('probability_down', 0),
        DEFAULT_HORIZONTAL_HISTOGRAM_SETTINGS.get('probability_up', 0.5)
    ])

    if 'show' in sys.argv:
        plt.show()
        return None

    plt.ylabel('Probability')
    plt.grid(True)
    save_plot(data=data, img_type='horizontal-histogram')
    plt.close(fig)
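# `create_module_matrix` is defined elsewhere in this project; a minimal
# sketch of what it presumably computes (the speed modulus of the horizontal
# wind components u and v), offered as an assumption rather than the
# original implementation:
#
#     import numpy as np
#
#     def create_module_matrix(u, v):
#         # Element-wise magnitude sqrt(u**2 + v**2) of the wind field.
#         return np.hypot(u, v)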
def experiment(env, step_type, training_rounds, gamma, lr, train_ep, oneshot,
               test_ep, save_folder, device, config):
    """
    Run an experiment where agents are trained and tested (including
    cross-play). training_rounds is the number of pairs of policies trained.
    """
    step_func = step_funcs[step_type]

    print("---- Starting ----")

    ### Train policies
    p1s, p2s = train_policies(env, training_rounds, step_func, train_ep,
                              gamma, lr, device)

    qs = QuickSaver(subfolder=save_folder)
    qs.save_json(config, name='config')

    ### Save policies
    list_p1s = list(map(lambda p: p.tolist(), p1s))
    list_p2s = list(map(lambda p: p.tolist(), p2s))
    save_results(qs, 'Pols', list_p1s, list_p2s)

    for name, prior in env.generate_test_priors():
        print("Testing prior", name, "...")

        ### Test policies
        r1s, r2s = several_test(env, prior, p1s, p2s)
        xr1s, xr2s = several_cross_test(env, prior, p1s, p2s,
                                        n_crosses=training_rounds)

        ### Plot results
        plot_results(env, prior, r1s, r2s, color='orange')
        plot_results(env, prior, xr1s, xr2s, color='blue')
        save_plot(qs, name + '_results')

        ### Save results
        save_results(qs, name + '_Pfs', r1s, r2s)
        save_results(qs, name + '_XPfs', xr1s, xr2s)
def nested_sampling(self, angle_times, save_path, filename, dynamic=False):
    """Runs nested sampling on simulated data of the sample.

    Args:
        angle_times (list): points and times for each angle to simulate.
        save_path (str): path to directory to save corner plot to.
        filename (str): file name to use when saving corner plot.
        dynamic (bool): whether to use static or dynamic nested sampling.

    """
    # Simulate data for the sample.
    model, data = simulate(self.structure, angle_times)

    # The structure was defined in refnx.
    if isinstance(self.structure, refnx.reflect.Structure):
        dataset = refnx.reflect.ReflectDataset(
            [data[:, 0], data[:, 1], data[:, 2]])
        objective = refnx.analysis.Objective(model, dataset)

    # The structure was defined in Refl1D.
    elif isinstance(self.structure, refl1d.model.Stack):
        objective = bumps.fitproblem.FitProblem(model)

    # Otherwise, the structure is invalid.
    else:
        raise RuntimeError('invalid structure given')

    # Sample the objective using nested sampling.
    sampler = Sampler(objective)
    fig = sampler.sample(dynamic=dynamic)

    # Save the sampling corner plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, filename + '_nested_sampling')
def plot_data_2d(data, plot_file):
    fig = new_fig('Reduced data')

    x = data.iloc[:, 0]
    y = data.iloc[:, 1]
    plt.scatter(x, y)

    save_plot(plot_file, fig)
    print('Saved plot in %s' % plot_file)
def reflectivity_profile(self, save_path, q_min=0.005, q_max=0.4,
                         points=500, scale=1, bkg=1e-7, dq=2):
    """Plots the reflectivity profile of the sample.

    Args:
        save_path (str): path to directory to save reflectivity profile to.
        q_min (float): minimum Q value to plot.
        q_max (float): maximum Q value to plot.
        points (int): number of points to plot.
        scale (float): experimental scale factor.
        bkg (float): level of instrument background noise.
        dq (float): instrument resolution.

    """
    # Geometrically-space Q points over the specified range.
    q = np.geomspace(q_min, q_max, points)

    # The structure was defined in refnx.
    if isinstance(self.structure, refnx.reflect.Structure):
        model = refnx.reflect.ReflectModel(self.structure, scale=scale,
                                           bkg=bkg, dq=dq)

    # The structure was defined in Refl1D.
    elif isinstance(self.structure, refl1d.model.Stack):
        model = refl1d_experiment(self.structure, q, scale, bkg, dq)

    # Otherwise, the structure is invalid.
    else:
        raise RuntimeError('invalid structure given')

    # Calculate the model reflectivity.
    r = reflectivity(q, model)

    # Plot Q versus the model reflectivity.
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(q, r, color='black')

    x_label = r'$\mathregular{Q\ (Å^{-1})}$'
    y_label = 'Reflectivity (arb.)'
    ax.set_xlabel(x_label, fontsize=11, weight='bold')
    ax.set_ylabel(y_label, fontsize=11, weight='bold')
    ax.set_yscale('log')

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'reflectivity_profile')
def finish_epoch(self, epoch, loss_type, avg_loss, total_loss):
    # Note that we explicitly don't reset self.iters here, because it would
    # mess up how often we plot.
    if self.loss_type != loss_type:
        return

    self.plot_losses.append(avg_loss)
    save_plot(self.plot_losses, self.loss_file, 1)

    if self.perplexity_file:
        self.plot_perplexities.append(perplexity(avg_loss))
        save_plot(self.plot_perplexities, self.perplexity_file, 1)

    return "continue"
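# `perplexity` is a helper defined elsewhere; a minimal sketch under the
# usual definition (exponential of the average cross-entropy loss), offered
# as an assumption rather than the original implementation:
#
#     import math
#
#     def perplexity(avg_loss):
#         # Perplexity of a language model from its mean cross-entropy loss.
#         return math.exp(avg_loss)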
def groove(model_name: str, interpolate_sequence: NoteSequence,
           num_steps_per_sample: int, num_output: int,
           total_bars: int) -> NoteSequence:
    """
    Adds groove to the given sequence by splitting it in manageable
    sequences and using the given model to humanize it.
    """
    model = get_model(model_name)

    # Split the sequences in chunks of 4 seconds (which is 2 bars at 120 qpm),
    # which is necessary since the model is trained for 2 bars.
    split_interpolate_sequences = mm.sequences_lib.split_note_sequence(
        interpolate_sequence, 4)

    if len(split_interpolate_sequences) != num_output:
        raise Exception(f"Wrong number of interpolate sequences, "
                        f"expected: {num_output}, "
                        f"actual: {len(split_interpolate_sequences)}")

    # Uses the model to encode the list of sequences, returning the encoding
    # (also called z or latent vector) which will then be used in the
    # decoding. The other values mu and sigma are not used, but kept in the
    # code for clarity.
    #
    # The resulting array shape is (a, b), where a is the number of
    # split sequences (should correspond to num_output), and b is the
    # encoding size.
    #
    # This might throw a NoExtractedExamplesError exception if the
    # sequences are not properly formed (for example if the sequences
    # are not quantized, a sequence is empty or not of the proper length).
    encoding, mu, sigma = model.encode(
        note_sequences=split_interpolate_sequences)

    # Uses the model to decode the encoding (also called z or latent vector),
    # returning a list of humanized sequences with one element per encoded
    # sequence (each of length num_steps_per_sample).
    groove_sequences = model.decode(z=encoding, length=num_steps_per_sample)

    # Concatenates the resulting sequences (of length num_output) into one
    # single sequence.
    groove_sequence = mm.sequences_lib.concatenate_sequences(
        groove_sequences, [4] * num_output)

    # Saves the midi and the plot in the groove folder,
    # with the plot having total_bars size.
    save_midi(groove_sequence, "groove", model_name)
    save_plot(groove_sequence, "groove", model_name,
              plot_max_length_bar=total_bars, show_velocity=True,
              bar_fill_alphas=[0.50, 0.50, 0.05, 0.05])

    return groove_sequence
def main(args_dict):
    # Extract configuration from command line arguments.
    MK = np.array(args_dict['MK'])
    M = 100
    K = MK // M
    print('M = %d; K = %d' % (M, K))
    x_type = args_dict['x_type']
    deltas = args_dict['deltas']
    do_confidence = args_dict['confidence']

    # Load data from JSON files generated by (non-public) Matlab code.
    jsons = [
        json_load('data/bandits_normal_delta%s_MK%d.json' % (delta, MK))
        for delta in deltas
    ]
    lnZs = np.array([json['lnZ'] for json in jsons])
    MAPs = np.array([json['MAPs_ttest'] for json in jsons])

    # Estimate estimator MSEs for the various tricks (as specified by alphas).
    alphas = np.linspace(-0.2, 1.5, 100)
    MSEs, MSEs_stdev = MAPs_to_estimator_MSE_vs_alpha(1, MAPs, lnZs, alphas, K)

    # Set up plot.
    matplotlib_configure_as_notebook()
    fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(4.25, 3.25))
    ax.set_xlabel('trick parameter $\\alpha$')
    ax.set_ylabel('MSE of estimator of $\\ln Z$')

    # Plot the MSEs.
    labels = ['$\\delta = %g$' % (delta) for delta in deltas]
    colors = [
        plt.cm.plasma((np.log10(delta) - (-3)) / (0 - (-3)))
        for delta in deltas
    ]
    plot_MSEs_to_axis(ax, alphas, MSEs, MSEs_stdev, do_confidence, labels,
                      colors)

    # Finalize plot.
    for vertical in [0.0, 1.0]:
        ax.axvline(vertical, color='black', linestyle='dashed', alpha=.7)
    ax.annotate('Gumbel trick', xy=(0.0, 0.0052), rotation=90,
                horizontalalignment='right', verticalalignment='bottom')
    ax.annotate('Exponential trick', xy=(1.0, 0.0052), rotation=90,
                horizontalalignment='right', verticalalignment='bottom')
    lgd = ax.legend(loc='upper center')
    ax.set_ylim((5 * 1e-3, 5 * 1e-2))
    save_plot(fig, 'figures/fig3b', bbox_extra_artists=(lgd, ))
def interpolate(model_name: str, sample_sequences: List[NoteSequence],
                num_steps_per_sample: int, num_output: int,
                total_bars: int) -> NoteSequence:
    """
    Interpolates between 2 sequences using the given model.
    """
    if len(sample_sequences) != 2:
        raise Exception(f"Wrong number of sequences, "
                        f"expected: 2, actual: {len(sample_sequences)}")
    if not sample_sequences[0].notes or not sample_sequences[1].notes:
        raise Exception(
            f"Empty note sequences, "
            f"sequence 1 length: {len(sample_sequences[0].notes)}, "
            f"sequence 2 length: {len(sample_sequences[1].notes)}")

    model = get_model(model_name)

    # Use the model to interpolate between the 2 input sequences,
    # with the number of outputs (counting the start and end sequences),
    # the number of steps per sample and the default temperature.
    #
    # This might throw a NoExtractedExamplesError exception if the
    # sequences are not properly formed (for example if the sequences
    # are not quantized, a sequence is empty or not of the proper length).
    interpolate_sequences = model.interpolate(
        start_sequence=sample_sequences[0],
        end_sequence=sample_sequences[1],
        num_steps=num_output,
        length=num_steps_per_sample)

    # Saves the midi and the plot in the interpolate folder.
    save_midi(interpolate_sequences, "interpolate", model_name)
    save_plot(interpolate_sequences, "interpolate", model_name)

    # Concatenates the resulting sequences (of length num_output) into one
    # single sequence.
    # The second parameter is a list containing the number of seconds
    # for each input sequence. This is useful if some of the input
    # sequences do not have notes at the end (for example the last
    # note ends at 3.5 seconds instead of 4).
    interpolate_sequence = mm.sequences_lib.concatenate_sequences(
        interpolate_sequences, [4] * num_output)

    # Saves the midi and the plot in the merge folder,
    # with the plot having total_bars size.
    save_midi(interpolate_sequence, "merge", model_name)
    save_plot(interpolate_sequence, "merge", model_name,
              plot_max_length_bar=total_bars,
              bar_fill_alphas=[0.50, 0.50, 0.05, 0.05])

    return interpolate_sequence
def main(args_dict):
    # Extract configuration from command line arguments.
    MK = args_dict['MK']
    Kmin = args_dict['Kmin']

    # Load data.
    data = json_load('data/astar_rbr_MK%d.json' % (MK))
    lnZ = data['lnZ']
    MAPs = np.array(data['MAPs'])
    print('Loaded %d MAP samples from A* sampling' % (len(MAPs)))

    # Estimate MSE of lnZ estimators from Gumbel and Exponential tricks.
    MSEs_Gumb = []
    MSEs_Expo = []
    Ms = range(1, MK // Kmin)
    for M in Ms:
        # Computation with M samples, repeated K >= Kmin times with a new
        # set every time.
        K = MK // M
        myMAPs = np.reshape(MAPs[:(K * M)], (K, M))

        # Compute unbiased estimators of ln(Z).
        lnZ_Gumb = np.mean(myMAPs, axis=1)
        lnZ_Expo = EULER - np.log(np.mean(np.exp(-myMAPs), axis=1)) \
                   - (np.log(M) - digamma(M))

        # Save MSE estimates.
        MSEs_Gumb.append(np.mean((lnZ_Gumb - lnZ)**2))
        MSEs_Expo.append(np.mean((lnZ_Expo - lnZ)**2))

    # Set up plot.
    matplotlib_configure_as_notebook()
    fig, ax = plt.subplots(1, 1, facecolor='w', figsize=(4.25, 3.25))
    ax.set_xscale('log')
    ax.set_xlabel('desired MSE (lower to the right)')
    ax.set_ylabel('required number of samples $M$')
    ax.grid(b=True, which='both', linestyle='dotted', lw=0.5, color='black',
            alpha=0.3)

    # Plot MSEs.
    ax.plot(MSEs_Gumb, Ms, color=tableau20(0), label='Gumbel')
    ax.plot(MSEs_Expo, Ms, color=tableau20(2), label='Exponential')

    # Finalize plot.
    ax.set_xlim((1e-2, 2))
    ax.invert_xaxis()
    lgd = ax.legend(loc='upper left')
    save_plot(fig, 'figures/fig3a', (lgd, ))
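# A self-contained sketch of the two estimators above, using synthetic MAP
# samples in place of the A* sampling output. It assumes (as in the
# Gumbel-trick setting) that each MAP value is Gumbel-distributed with mean
# lnZ; the names here are illustrative, not from the original source.
#
#     import numpy as np
#     from scipy.special import digamma
#
#     EULER = 0.5772156649015329  # Euler-Mascheroni constant
#     lnZ_true, M = 2.0, 50
#
#     # Gumbel(loc, 1) has mean loc + EULER, so centre the samples on lnZ.
#     MAPs = np.random.gumbel(loc=lnZ_true - EULER, scale=1.0, size=M)
#
#     lnZ_Gumb = np.mean(MAPs)  # Gumbel-trick estimator
#     lnZ_Expo = EULER - np.log(np.mean(np.exp(-MAPs))) \
#                - (np.log(M) - digamma(M))  # debiased Exponential trick
#     print(lnZ_Gumb, lnZ_Expo)  # both should be close to 2.0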
def main(args_dict):
    # Extract configuration from command line arguments.
    Ms = np.array(args_dict['Ms'])
    alphas = np.linspace(args_dict['alpha_min'], args_dict['alpha_max'],
                         args_dict['alpha_num'])
    K = args_dict['K']
    do_confidence = args_dict['confidence']

    # Estimate MSEs by sampling.
    print('Estimating MSE of estimators of Z...')
    MSEs_Z, MSE_stdevs_Z = estimate_MSE_vs_alpha(lambda x: x, Ms, alphas, K)
    print('Estimating MSE of estimators of ln(Z)...')
    MSEs_lnZ, MSE_stdevs_lnZ = estimate_MSE_vs_alpha(np.log, Ms, alphas, K)

    # Set up plot.
    matplotlib_configure_as_notebook()
    fig = plt.figure(facecolor='w', figsize=(8.25, 3.25))
    gs = gridspec.GridSpec(1, 3, width_ratios=[1.0, 1.0, 0.5])
    ax = [plt.subplot(gs[0]), plt.subplot(gs[2]), plt.subplot(gs[1])]
    ax[0].set_xlabel('$\\alpha$')
    ax[2].set_xlabel('$\\alpha$')
    ax[0].set_ylabel('MSE of estimators of $Z$, in units of $Z^2$')
    ax[2].set_ylabel('MSE of estimators of $\\ln Z$, in units of $1$')
    colors = [plt.cm.plasma(0.8 - 1.0 * i / len(Ms)) for i in range(len(Ms))]

    # The Gumbel (alpha=0) and Exponential (alpha=1) tricks can be handled
    # analytically.
    legend_Gumbel = 'Gumbel trick\n($\\alpha=0$, theoretical)'
    legend_Exponential = 'Exponential trick\n($\\alpha=1$, theoretical)'
    ax[0].scatter(np.zeros(len(Ms)), Z_Gumbel_MSE(Ms), marker='o',
                  color=colors, label=legend_Gumbel)
    ax[0].scatter(np.ones(len(Ms)), Z_Exponential_MSE(Ms), marker='^',
                  color=colors, label=legend_Exponential)
    ax[2].scatter(np.zeros(len(Ms)), lnZ_Gumbel_MSE(Ms), marker='o',
                  color=colors, label=legend_Gumbel)
    ax[2].scatter(np.ones(len(Ms)), lnZ_Exponential_MSE(Ms), marker='^',
                  color=colors, label=legend_Exponential)

    # The remaining tricks' MSEs were estimated by sampling.
    labels = ['$M=%d$' % (M) for M in Ms]
    plot_MSEs_to_axis(ax[0], alphas, MSEs_Z, MSE_stdevs_Z, do_confidence,
                      labels, colors)
    plot_MSEs_to_axis(ax[2], alphas, MSEs_lnZ, MSE_stdevs_lnZ, do_confidence,
                      labels, colors)

    # Finalize plot.
    ax[0].set_ylim((5 * 1e-3, 10))
    ax[2].set_ylim((5 * 1e-3, 10))
    handles, labels = ax[0].get_legend_handles_labels()
    remove_chartjunk(ax[1])
    ax[1].spines["bottom"].set_visible(False)
    ax[1].tick_params(axis="both", which="both", bottom="off", top="off",
                      labelbottom="off", left="off", right="off",
                      labelleft="off")
    ax[1].legend(handles, labels, frameon=False, loc='upper center',
                 bbox_to_anchor=[0.44, 1.05])
    plt.tight_layout()
    save_plot(fig, 'figures/fig2_K%d' % (K))
def sample(model_name: str, num_steps_per_sample: int) -> List[NoteSequence]:
    """
    Samples 2 sequences using the given model.
    """
    model = get_model(model_name)

    # Uses the model to sample 2 sequences,
    # with the number of steps and default temperature.
    sample_sequences = model.sample(n=2, length=num_steps_per_sample)

    # Saves the midi and the plot in the sample folder.
    save_midi(sample_sequences, "sample", model_name)
    save_plot(sample_sequences, "sample", model_name)

    return sample_sequences
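# A possible end-to-end usage of the three helpers above (sample, then
# interpolate, then groove). The model names and step counts are
# illustrative assumptions, not values taken from the original source:
#
#     num_steps_per_sample = 32   # 2 bars at 16 steps per bar
#     num_output = 6              # number of interpolated sequences
#     total_bars = num_output * 2
#
#     samples = sample("cat-drums_2bar_small", num_steps_per_sample)
#     merged = interpolate("cat-drums_2bar_small", samples,
#                          num_steps_per_sample, num_output, total_bars)
#     grooved = groove("groovae_2bar_humanize", merged,
#                      num_steps_per_sample, num_output, total_bars)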
def plot_bayes(save_path='./results/YIG_sample'):
    data = np.loadtxt(os.path.join(save_path, 'bayes.csv'), delimiter=',')
    times, factors = data[:, 0], data[:, 1]

    fig = plt.figure(figsize=[9, 7])
    ax = fig.add_subplot(111)

    ax.plot(1.5 * times, factors)
    ax.set_xlabel('Counting Time (min)', fontsize=11)
    ax.set_ylabel(r'$\mathregular{Bayes \ Factor \ (2 \ln B_{xy})}$',
                  fontsize=11)

    # Save the plot.
    save_plot(fig, save_path, 'bayes')
def reflectivity_profile(self, save_path, filename='reflectivity_profile'):
    """Plots the reflectivity profile of the lipid sample.

    Args:
        save_path (str): path to directory to save profile to.
        filename (str): file name to use when saving the profile.

    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Iterate over each measured contrast.
    colours = plt.rcParams['axes.prop_cycle'].by_key()['color']
    for i, objective in enumerate(self.objectives):
        # Get the measured data and calculate the model reflectivity.
        q, r, dr = objective.data.x, objective.data.y, objective.data.y_err
        r_model = objective.model(q)

        # Offset the data, for clarity.
        offset = 10**(-2*i)
        r *= offset
        dr *= offset
        r_model *= offset

        # Add the offset in the label.
        label = self.labels[i]
        if offset != 1:
            label += r' $\mathregular{(x10^{-' + str(2*i) + r'})}$'

        # Plot the measured data and the model reflectivity.
        ax.errorbar(q, r, dr, marker='o', ms=3, lw=0, elinewidth=1,
                    capsize=1.5, color=colours[i], label=label)
        ax.plot(q, r_model, color=colours[i], zorder=20)

    x_label = r'$\mathregular{Q\ (Å^{-1})}$'
    y_label = 'Reflectivity (arb.)'
    ax.set_xlabel(x_label, fontsize=11, weight='bold')
    ax.set_ylabel(y_label, fontsize=11, weight='bold')
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_ylim(1e-10, 3)
    ax.legend()

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, filename)
def plot_lines(data_file, plot_file):
    data = pd.read_csv(data_file)
    fig = new_fig()

    xlab = 'Use time of Head'
    ylab = 'Temp. THG'
    plt.xlabel(xlab)
    plt.ylabel(ylab)

    # Plot one line per head.
    for hd in data['S/N of Head'].unique():
        print('Generating lines for head %s' % hd)
        data_hd = data.loc[data['S/N of Head'] == hd]
        x = data_hd[xlab]
        y = data_hd[ylab]
        plt.plot(x, y, label=hd)
        plt.scatter(x, y)

    plt.legend(loc='upper right')
    save_plot(plot_file, fig)
def magnetism(yig_thick_range, pt_thick_range, angle_times, save_path,
              save_views=False):
    sample = SampleYIG()
    sample.Pt_mag.value = 0.01638

    x, y, infos = [], [], []
    for i, yig_thick in enumerate(yig_thick_range):
        # Display progress.
        if i % 5 == 0:
            print('>>> {0}/{1}'.format(
                i * len(pt_thick_range),
                len(pt_thick_range) * len(yig_thick_range)))

        for pt_thick in pt_thick_range:
            g = sample.underlayer_info(angle_times, yig_thick, pt_thick)
            infos.append(g[0, 0])
            x.append(yig_thick)
            y.append(pt_thick)

    fig = plt.figure(figsize=[10, 8])
    ax = fig.add_subplot(111, projection='3d')

    surface = ax.plot_trisurf(x, y, infos, cmap='plasma')
    fig.colorbar(surface, fraction=0.046, pad=0.04)

    ax.set_xlabel(r'$\mathregular{YIG\ Thickness\ (\AA)}$', fontsize=11,
                  weight='bold')
    ax.set_ylabel(r'$\mathregular{Pt\ Thickness\ (\AA)}$', fontsize=11,
                  weight='bold')
    ax.set_zlabel('Fisher Information', fontsize=11, weight='bold')
    ax.ticklabel_format(axis='z', style='sci', scilimits=(0, 0))

    # Save the plot.
    save_path = os.path.join(save_path, sample.name)
    save_plot(fig, save_path, 'underlayer_choice')

    # Optionally save the plot from multiple viewing angles.
    if save_views:
        save_path = os.path.join(save_path, 'underlayer_choice')
        for i in range(0, 360, 10):
            ax.view_init(elev=40, azim=i)
            save_plot(fig, save_path, 'underlayer_choice_{}'.format(i))
def lasso_vae(hparams, xs_dict, images_nums, is_save):
    """Images for Lasso and VAE"""
    hparams.measurement_type = 'gaussian'
    hparams.model_types = ['Lasso', 'VAE']
    for num_measurements in [10, 25, 50, 100, 200, 300, 400, 500, 750]:
        pattern1 = './estimated/mnist/full-input/gaussian/0.1/' + str(
            num_measurements) + '/lasso/0.1/{0}.png'
        pattern2 = './estimated/mnist/full-input/gaussian/0.1/' + str(
            num_measurements
        ) + '/vae/0.0_1.0_0.1_adam_0.01_0.9_False_1000_10/{0}.png'
        patterns = [pattern1, pattern2]
        view(xs_dict, patterns, images_nums, hparams, alg_labels=True)

        base_path = './results/mnist_reconstr_{}_orig_lasso_vae.pdf'
        save_path = base_path.format(num_measurements)
        utils.save_plot(is_save, save_path)
def finish_iter(self, loss_type, loss):
    if self.loss_type != loss_type:
        return

    if self.plot_every > 0:
        self.plot_loss_total += loss
        self.iters += 1

        if self.iters % self.plot_every == 0:
            plot_loss_avg = self.plot_loss_total / self.plot_every
            self.plot_losses.append(plot_loss_avg)
            self.plot_loss_total = 0

            if self.perplexity_file:
                plot_perplexity_avg = perplexity(plot_loss_avg)
                self.plot_perplexities.append(plot_perplexity_avg)

        if self.iters % self.save_every == 0:
            save_plot(self.plot_losses, self.loss_file, self.plot_scale)
            if self.plot_perplexities:
                save_plot(self.plot_perplexities, self.perplexity_file,
                          self.plot_scale)
def end_to_end(hparams, xs_dict, images_nums, is_save):
    """Image for End to end models"""
    hparams.measurement_type = 'fixed'
    is_save = True
    hparams.model_types = []
    patterns = []
    base_pattern = ('./estimated/mnist/full-input/{0}/0.1/{1}/learned/'
                    '50-200/{2}.png')
    for measurement_type in ['fixed', 'learned']:
        for num_measurements in [10, 20, 30]:
            hparams.model_types.append('{}{}'.format(
                measurement_type.title(), num_measurements))
            patterns.append(
                base_pattern.format(measurement_type, num_measurements,
                                    '{0}'))
    view(xs_dict, patterns, images_nums, hparams, alg_labels=True)
    save_path = './results/mnist_e2e_orig_fixed_learned.pdf'
    utils.save_plot(is_save, save_path)
def nested_sampling(self, angle_times, save_path, filename, dynamic=False):
    """Runs nested sampling on simulated data of the sample.

    Args:
        angle_times (list): points and counting times for each measurement
                            angle to simulate.
        save_path (str): path to directory to save corner plot to.
        filename (str): name of file to save corner plot to.
        dynamic (bool): whether to use static or dynamic nested sampling.

    """
    objective = bumps.fitproblem.FitProblem(self.experiment)

    # Sample the objective using nested sampling.
    sampler = Sampler(objective)
    fig = sampler.sample(dynamic=dynamic)

    # Save the sampling corner plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'nested_sampling_' + filename)
def main():
    """Make and save image matrices"""
    hparams = Hparams()
    xs_dict = celebA_input.model_input(hparams)
    start, stop = 20, 30
    images_nums = get_image_nums(start, stop, hparams)
    is_save = True

    for num_measurements in [50, 100, 200, 500, 1000, 2500, 5000, 7500,
                             10000]:
        pattern1 = './estimated/celebA/full-input/gaussian/0.01/' + str(
            num_measurements) + '/lasso-dct/0.1/{0}.png'
        pattern2 = './estimated/celebA/full-input/gaussian/0.01/' + str(
            num_measurements) + '/lasso-wavelet/1e-05/{0}.png'
        pattern3 = './estimated/celebA/full-input/gaussian/0.01/' + str(
            num_measurements
        ) + '/dcgan/0.0_1.0_0.001_0.0_0.0_adam_0.1_0.9_False_500_10/{0}.png'
        patterns = [pattern1, pattern2, pattern3]
        view(xs_dict, patterns, images_nums, hparams)

        base_path = ('./results/celebA_reconstr_{}_orig_lasso-dct_'
                     'lasso-wavelet_dcgan.pdf')
        save_path = base_path.format(num_measurements)
        utils.save_plot(is_save, save_path)
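# `utils.save_plot(is_save, save_path)` is defined elsewhere in this
# project; a minimal sketch of the assumed behaviour (save the current
# matplotlib figure only when the flag is set), not the original
# implementation:
#
#     import matplotlib.pyplot as plt
#
#     def save_plot(is_save, save_path):
#         if is_save:
#             plt.savefig(save_path, bbox_inches='tight')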
def nested_sampling(self, contrasts, angle_times, save_path, filename,
                    underlayers=None, dynamic=False):
    """Runs nested sampling on simulated data of the lipid sample.

    Args:
        contrasts (list): SLDs of contrasts to simulate.
        angle_times (list): points and times for each angle to simulate.
        save_path (str): path to directory to save corner plot to.
        filename (str): file name to use when saving corner plot.
        underlayers (list): thickness and SLD of each underlayer to add.
        dynamic (bool): whether to use static or dynamic nested sampling.

    """
    # Create objectives for each contrast to sample with.
    objectives = []
    for contrast in contrasts:
        # Simulate an experiment using the given contrast.
        sample = self._using_conditions(contrast, underlayers)
        model, data = simulate(sample, angle_times, scale=1, bkg=5e-6, dq=2)
        dataset = refnx.dataset.ReflectDataset(
            [data[:, 0], data[:, 1], data[:, 2]])
        objectives.append(refnx.analysis.Objective(model, dataset))

    # Combine objectives into a single global objective.
    global_objective = refnx.analysis.GlobalObjective(objectives)

    # Exclude certain parameters if underlayers are being used.
    if underlayers is None:
        global_objective.varying_parameters = lambda: self.params
    else:
        global_objective.varying_parameters = lambda: self.underlayer_params

    # Sample the objective using nested sampling.
    sampler = Sampler(global_objective)
    fig = sampler.sample(dynamic=dynamic)

    # Save the sampling corner plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'nested_sampling_' + filename)
def regression(model, X_train, X_test, Y_train, Y_test, type='linear'):
    model.fit(X_train, Y_train)

    if type == 'linear':
        for row in zip(model.coef_, attributes):
            print("[%0.3f, %s]" % row)
        print("%0.3f" % model.intercept_)

    in_sample_errors = calculate_linear_errors(model,
                                               model.predict(X_train),
                                               Y_train)
    plot_errors(in_sample_errors, 'Absolute error (in-sample)')
    utils.save_plot(pyplot, name="build/%s_in_sample.png" % type)
    print("In-sample variance: %f" % numpy.var(in_sample_errors))
    print("In-sample mean: %f" % numpy.mean(in_sample_errors))

    out_sample_errors = calculate_linear_errors(model,
                                                model.predict(X_test),
                                                Y_test)
    plot_errors(out_sample_errors, 'Absolute error (out-sample)')
    utils.save_plot(pyplot, name="build/%s_out_sample.png" % type)
    print("Out-of-sample variance: %0.3f" % numpy.var(out_sample_errors))
    print("Out-of-sample mean: %0.3f" % numpy.mean(out_sample_errors))

    return (numpy.mean(out_sample_errors) + numpy.mean(in_sample_errors)) / 2
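# A possible usage sketch (illustrative, not from the original source):
# fitting a scikit-learn linear model on a train/test split of a feature
# matrix X and targets Y. Assumes the module-level `attributes` list
# matches the feature columns.
#
#     from sklearn.linear_model import LinearRegression
#     from sklearn.model_selection import train_test_split
#
#     X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
#                                                         test_size=0.3)
#     avg_error = regression(LinearRegression(), X_train, X_test,
#                            Y_train, Y_test, type='linear')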
def sld_profile(self, save_path):
    """Plots the SLD profile of the sample.

    Args:
        save_path (str): path to directory to save SLD profile to.

    """
    # The structure was defined in refnx.
    if isinstance(self.structure, refnx.reflect.Structure):
        z, slds = self.structure.sld_profile()

    # The structure was defined in Refl1D.
    elif isinstance(self.structure, refl1d.model.Stack):
        # The Q values and instrument parameters are only needed to
        # construct the experiment; they do not affect the SLD profile.
        q = np.geomspace(0.005, 0.3, 500)
        scale, bkg, dq = 1, 1e-6, 2
        experiment = refl1d_experiment(self.structure, q, scale, bkg, dq)
        z, slds, _ = experiment.smooth_profile()

    # Otherwise, the structure is invalid.
    else:
        raise RuntimeError('invalid structure given')

    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot the SLD profile.
    ax.plot(z, slds, color='black', label=self.name)

    x_label = r'$\mathregular{Distance\ (\AA)}$'
    y_label = r'$\mathregular{SLD\ (10^{-6} \AA^{-2})}$'
    ax.set_xlabel(x_label, fontsize=11, weight='bold')
    ax.set_ylabel(y_label, fontsize=11, weight='bold')

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'sld_profile')
def reflectivity_profile(self, save_path):
    """Plots the reflectivity profile of the sample.

    Args:
        save_path (str): path to directory to save reflectivity profile to.

    """
    fig = plt.figure()
    ax = fig.add_subplot(111)

    # Plot the data and fit for each measured spin state.
    colours = ['b', 'g']
    count = 0
    for probe, qr in zip(self.experiment.probe.xs,
                         self.experiment.reflectivity()):
        if qr is not None:
            ax.errorbar(probe.Q, probe.R, probe.dR, marker='o', ms=2, lw=0,
                        elinewidth=0.5, capsize=0.5,
                        label=self.labels[count] + ' Data',
                        color=colours[count])
            ax.plot(probe.Q, qr[1], color=colours[count], zorder=20,
                    label=self.labels[count] + ' Fitted')
            count += 1

    ax.set_xlabel(r'$\mathregular{Q\ (Å^{-1})}$', fontsize=11, weight='bold')
    ax.set_ylabel('Reflectivity (arb.)', fontsize=11, weight='bold')
    ax.set_yscale('log')
    ax.legend(loc='lower left')

    # Save the plot.
    save_path = os.path.join(save_path, self.name)
    save_plot(fig, save_path, 'reflectivity_profile')
def record(self, fold):
    # Save plots.
    save_plot(self.val_record, 'loss', self.args.n_eval, 'tmp/val_loss.png')
    save_plot(self.val_record, 'f1', self.args.n_eval, 'tmp/val_f1.png')
    save_plot(self.norm_record, 'grad_norm', self.args.n_eval,
              'tmp/grad_norm.png')
    if self.args.test:
        save_plots([self.val_record, self.test_record], ['loss', 'f1'],
                   ['val', 'test'], self.args.n_eval)

    # Create a subdir for this experiment.
    os.makedirs(self.record_dir, exist_ok=True)
    subdir = os.path.join(self.models_dir, str_date_time())
    if self.args.mode == 'test':
        subdir += '_test'
    os.mkdir(subdir)

    # Write model params and results to csv.
    csvlog = os.path.join(subdir, 'info.csv')
    param_dict = {}
    for arg in vars(self.args):
        param_dict[arg] = str(getattr(self.args, arg))
    info = torch.load(self.best_info_path)
    hash = get_hash() if self.args.machine == 'dt' else 'no_hash'
    passed_args = ' '.join(sys.argv[1:])
    param_dict = {
        'hash': hash,
        'subdir': subdir,
        **param_dict,
        **info,
        'args': passed_args
    }
    dict_to_csv(param_dict, csvlog, 'w', 'index', reverse=False)
    header = fold == 0
    dict_to_csv(param_dict, self.record_path, 'a', 'columns', reverse=True,
                header=header)

    # Copy all records to the subdir.
    png_files = (['val_loss.png', 'val_f1.png'] if not self.args.test
                 else ['loss.png', 'f1.png'])
    csv_files = [
        'val_probs*.csv', 'train_steps.csv', 'submission.csv',
        'test_probs.csv'
    ]
    copy_files([*png_files, 'models/*.info', *csv_files], 'tmp', subdir)
    return subdir