Example #1
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 20
    sequence_len = 50
    num_workers = 2
    # training parameters
    max_epochs = 200
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # get dataloaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len,
                                             num_workers, gpu)

    # create network and optimizer
    net = SingleFrame('VGGNet19')
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     sequence_len, criterion,
                                                     optimizer, max_epochs,
                                                     gpu)
    print('Best Validation Acc:', val_acc)
    # plot
    plot_data(losses, accuracies, 'outputs/online/SingleFramePlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/online/SingleFrameParams.pkl')
Example #2
def evaluate((data, dq, shifts, psf_model, parms, core)):
    """
    Compute the scaled squared error and regularization under the current 
    model.
    """
    if core:
        patch_shape = parms.core_shape
    else:
        patch_shape = parms.patch_shape
    min_pixels = np.ceil(parms.min_frac * patch_shape[0] * patch_shape[1])

    psfs = render_psfs(psf_model, shifts, patch_shape, parms.psf_grid, parms.k)

    if parms.return_parms:
        if parms.background == 'constant':            
            fit_parms = np.zeros((data.shape[0], 2))
        else:
            fit_parms = np.zeros((data.shape[0], 4))
        masks = np.zeros_like(data, dtype=bool)

    nll = np.zeros_like(data)
    for i in range(data.shape[0]):
        fitparms, bkg, ind = fit_single_patch((data[i], psfs[i],
                                               dq[i], parms))
        model = fitparms[0] * psfs[i] + bkg
        scaled = fitparms[0] * psfs[i]

        # chi-squared like term
        if (model[ind].size >= min_pixels):
            nll[i, ind] = eval_nll(data[i][ind], model[ind], parms)
        else:
            nll[i] = parms.max_nll

        if parms.plot_data:
            # get pre-clip nll
            ind = parms.flags.ravel() != 1
            old_nll = np.zeros(patch_shape[0] * patch_shape[1])
            old_nll[ind] = eval_nll(data[i][ind], parms.old_model[ind], parms)
            # plot the data
            plot_data(i, data[i], model, bkg, nll[i], old_nll, parms)

        if parms.return_parms:
            fit_parms[i] = fitparms
            masks[i] = ind

    if parms.return_parms:
        return nll, fit_parms, masks
    else:
        return nll
Example #3
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 2
    sequence_len = 50
    window_size = 5
    flow = False
    num_workers = 2
    # network parameters
    model = 'VGGNet19'
    rnn_hidden = 512
    rnn_layers = 1
    # training parameters
    max_epochs = 1
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # get loaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len,
                                             window_size, flow, num_workers,
                                             gpu)

    # create network and optimizer
    net = SingleStream(model, rnn_hidden, rnn_layers, pretrained=True)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     sequence_len, window_size,
                                                     criterion, optimizer,
                                                     max_epochs, gpu)
    # select output paths based on input type
    if flow:
        s_plots = 'outputs/online/SingleStreamFlowPlots.png'
        s_params = 'outputs/online/SingleStreamFlowParams.pkl'
    else:
        s_plots = 'outputs/online/SingleStreamAppPlots.png'
        s_params = 'outputs/online/SingleStreamAppParams.pkl'

    # plot
    plot_data(losses, accuracies, s_plots)
    # save network
    torch.save(net.state_dict(), s_params)
Example #4
def look_at_and_pre_process_data(data, rawdata, variables):
    # Now plot the input data.
    show_data = raw_input("Show the raw-data? (1 = YES): ")
    if show_data == '1':
        pl.plot_data(data, variables)

    # Preprocess the data (mean-centering, normalization).
    text_1 = "Pre-process the data (ENTER = normalization AND mean centering, "
    text_2 = "1 = JUST mean centering, 0 = None): "
    these_processes = raw_input(text_1 + text_2)
    data, rawdata = dp.preprocess_data(these_processes, data, rawdata)

    # "Enhance" certain variables to put all their influence into one component.
    text = "Enhance variables? As integers: Variable_1, Variable_2, ... ; ENTER = none): "
    enhance_these = raw_input(text)
    data, rawdata = dp.boost_variables(enhance_these, data, rawdata)
Example #5
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    batch_size = 2
    sequence_len = 10
    flow = False
    num_workers = 2
    # network parameters
    spat_model = 'VGGNet11'
    temp_model = 'VGGNet11'
    rnn_hidden = 32
    rnn_layers = 1
    # training parameters
    max_epochs = 2
    learning_rate = 1e-4
    window_size = 5
    criterion = nn.CrossEntropyLoss()

    # get loaders
    dataloaders, dataset_sizes = get_loaders(train_path, valid_path,
                                             batch_size, sequence_len, flow,
                                             num_workers, gpu)

    # create network and optimizer
    net = TwoStreamFusion(spat_model,
                          temp_model,
                          rnn_hidden,
                          rnn_layers,
                          pretrained=False)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(
        net, dataloaders, dataset_sizes, batch_size, sequence_len - 1,
        window_size, criterion, optimizer, max_epochs, gpu)
    # plot
    plot_data(losses, accuracies, 'outputs/online/TwoStreamPlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/online/TwoStreamParams.pkl')
Example #6
def main():
    """Main Function."""
    # dataloader parameters
    gpu = torch.cuda.is_available()
    train_path = 'data/train_data.txt'
    valid_path = 'data/valid_data.txt'
    test_path = 'data/test_data.txt'
    batch_size = 32
    num_workers = 2
    # network parameters
    model = 'VGGNet19'
    rnn_hidden = 512
    rnn_layers = 2
    # training parameters
    max_epochs = 100
    learning_rate = 1e-4
    criterion = nn.CrossEntropyLoss()

    # create dataloaders
    dataloaders, dataset_sizes = get_loaders(train_path,
                                             valid_path,
                                             batch_size,
                                             num_workers,
                                             gpu=gpu)
    print('Dataset Sizes:')
    print(dataset_sizes)
    # create network object and optimizer
    net = SingleStream(model, rnn_hidden, rnn_layers, pretrained=True)
    print(net)
    optimizer = torch.optim.Adam(net.parameters(), learning_rate)
    # train the network
    net, val_acc, losses, accuracies = train_network(net, dataloaders,
                                                     dataset_sizes, batch_size,
                                                     criterion, optimizer,
                                                     max_epochs, gpu)
    # plot
    plot_data(losses, accuracies, 'outputs/offline/SingleStreamPlots.png')
    # save network
    torch.save(net.state_dict(), 'outputs/offline/SingleStreamParams.pkl')
Example #7
                      str("files/" +
                          str(SET_OF_PARAMETERS).replace('parameters_', '') +
                          '_histos.dat'))
                save_histo(
                    [e.time for t, e in histories_for_scan[key].items()],
                    [e.TOT for t, e in histories_for_scan[key].items()],
                    h_name, "files/" +
                    str(SET_OF_PARAMETERS).replace('parameters_', '') +
                    '_histos.dat')

    if parameters_store["simulation_parameters"]["DISPLAY_DATA"] != "FALSE":
        from utils import read_data, save_histo
        dataset = read_data(
            parameters_store["simulation_parameters"]["DISPLAY_DATA"])
        from plotting import plot_data, plot_data_vs_model
        plot_data(dataset)
        plot_data_vs_model(dataset, history, parameters_store,
                           SET_OF_PARAMETERS)

        if parameters_store["simulation_parameters"][
                "SAVE_HISTOGRAMS"] != "FALSE":
            print('..storing h_data ' +
                  str("files/" +
                      str(SET_OF_PARAMETERS).replace('parameters_', '') +
                      '_histos.dat'))
            save_histo(
                dataset["t"], dataset["tot"], 'h_data',
                "files/" + str(SET_OF_PARAMETERS).replace('parameters_', '') +
                '_histos.dat')

    if parameters_store["simulation_parameters"]["IMPROVED_MODEL"] != "FALSE":
Example #8
                NUM_WORDS_AFTER_VERB, ACTIVATIONS, PRINT_PLOTTING_INFO)

            modified_correct_plotting_dict, modified_wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment(
                training_filepath_2, modified_testing_filepaths,
                MODIFIED_RESULTS_PATHS, None, MODE, CONTEXT_SIZE_LIST,
                NUM_WORDS_BEFORE_SUBJECT, NUM_WORDS_AFTER_VERB, ACTIVATIONS,
                PRINT_PLOTTING_INFO)

            if MODIFICATION:
                plotting.plot_modified_data(correct_plotting_dict,
                                            wrong_plotting_dict,
                                            modified_correct_plotting_dict,
                                            modified_wrong_plotting_dict,
                                            plot_savefile, MODE)
            else:
                plotting.plot_data(correct_plotting_dict, wrong_plotting_dict,
                                   plot_savefile, MODE)

        elif LABELS_TYPE == 'training_normal':
            correct_plotting_dict, wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment_timestep_training(
                training_filepath_2, testing_filepaths, RESULTS_PATHS, None,
                MODE, CONTEXT_SIZE_LIST, NUM_WORDS_BEFORE_SUBJECT,
                NUM_WORDS_AFTER_VERB, ACTIVATIONS, PRINT_PLOTTING_INFO)

            modified_correct_plotting_dict, modified_wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment_timestep_training(
                training_filepath_2, modified_testing_filepaths,
                MODIFIED_RESULTS_PATHS, None, MODE, CONTEXT_SIZE_LIST,
                NUM_WORDS_BEFORE_SUBJECT, NUM_WORDS_AFTER_VERB, ACTIVATIONS,
                PRINT_PLOTTING_INFO)

            if MODIFICATION:
                plotting.plot_modified_data(correct_plotting_dict,
Example #9
         modified_correct_plotting_dict, modified_wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment(training_filepath_2,
                                                                                                    modified_testing_filepaths,
                                                                                                    MODIFIED_RESULTS_PATHS,
                                                                                                    None,
                                                                                                    MODE,
                                                                                                    CONTEXT_SIZE_LIST,
                                                                                                    NUM_WORDS_BEFORE_SUBJECT,
                                                                                                    NUM_WORDS_AFTER_VERB,
                                                                                                    ACTIVATIONS,
                                                                                                    PRINT_PLOTTING_INFO)
         
         
         if MODIFICATION:
             plotting.plot_modified_data(correct_plotting_dict, wrong_plotting_dict, modified_correct_plotting_dict, modified_wrong_plotting_dict, plot_savefile, MODE)
         else:
             plotting.plot_data(correct_plotting_dict, wrong_plotting_dict, plot_savefile, MODE)
         
     elif LABELS_TYPE == 'training_normal':
         correct_plotting_dict, wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment_timestep_training(training_filepath_2,
                                                                                                    testing_filepaths,
                                                                                                    RESULTS_PATHS,
                                                                                                    None,
                                                                                                    MODE,
                                                                                                    CONTEXT_SIZE_LIST,
                                                                                                    NUM_WORDS_BEFORE_SUBJECT,
                                                                                                    NUM_WORDS_AFTER_VERB,
                                                                                                    ACTIVATIONS,
                                                                                                    PRINT_PLOTTING_INFO)
 
         modified_correct_plotting_dict, modified_wrong_plotting_dict = diagnostic_experiment.run_diagnostic_experiment_timestep_training(training_filepath_2,
                                                                                                    modified_testing_filepaths,
Example #10
            ct0_samples = samples['ct0']
            ct1_samples = samples['ct1']
            ct2_samples = samples['ct2']
            ct3_samples = samples['ct3']
            ct4_samples = samples['ct4']
            ct5_samples = samples['ct5']
            v_samples = samples['voltage']

            # Save samples to disk
            with open('data/samples/last-debug.pkl', 'wb') as f:
                pickle.dump(samples, f)

            if not title:
                title = input("Enter the title for this chart: ")

            plot_data(samples, title)
            ip = get_ip()
            if ip:
                logger.info(
                    f"Chart created! Visit http://{ip}/{title}.html to view the chart. Or, simply visit http://{ip} to view all the charts created using 'debug' and/or 'phase' mode."
                )
            else:
                logger.info(
                    "Chart created! I could not determine the IP address of this machine. Visit your device's IP address in a webrowser to view the list of charts you've created using 'debug' and/or 'phase' mode."
                )

        if MODE.lower() == 'phase':
            # This mode is intended to be used for correcting the phase error in your CT sensors. Instead of reading the CT sensors, it will open the 'last-debug.pkl' file and read the contents, which
            # contain the samples from the last time the program was run in "debug" mode. This is to save electricity so you don't need to keep your resistive load device running while you calibrate.
            # The function then continues to build 5 different variations of the raw AC voltage wave based on the ct#_phasecal variable.
            # Finally, a single chart is constructed that shows all of the raw CT data points, the "as measured" voltage wave, and the phase corrected voltage wave. The chart is written to an HTML file
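            # A hedged sketch of the correction described above (the exact code is not
            # shown in this snippet; prev_v and cur_v are illustrative names): each
            # phase-corrected voltage point can be built by linearly interpolating
            # between two adjacent raw voltage readings using the fractional offset in
            # the corresponding ct#_phasecal variable:
            #   corrected_v = prev_v + ct_phasecal * (cur_v - prev_v)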
Example #11
                    num_correct_clean_d2[s] = num_correct_clean_d2[s] + 1
                if class_adv_defended == yB:
                    num_correct_rob_a2d2[s] = num_correct_rob_a2d2[s] + 1

        num_imgs += 1
        print(num_imgs)

from plotting import plot_data

plot_data(sigmas=sigmas,
          num_imgs=num_imgs,
          num_adv_success_a1d0=num_adv_success_a1d0,
          num_correct_clean_d0=num_correct_clean_d0,
          num_correct_rob_a1d0=num_correct_rob_a1d0,
          num_adv_success_a1d1=num_adv_success_a1d1,
          num_correct_clean_d1=num_correct_clean_d1,
          num_correct_rob_a1d1=num_correct_rob_a1d1,
          num_adv_success_a2d1=num_adv_success_a2d1,
          num_correct_rob_a2d1=num_correct_rob_a2d1,
          num_adv_success_a2d2=num_adv_success_a2d2,
          num_correct_clean_d2=num_correct_clean_d2,
          num_correct_rob_a2d2=num_correct_rob_a2d2)

#
# print(sigmas)
# print(num_correct_clean)
# print(num_correct_rob)
# print(num_correct_rob2)
# print(num_adv_success)
# print(num_better_adv_success)
import pdb
Example #12
File: test.py  Project: hgrov52/ML-Models
# Z = X**q
# Z = data.add_bias(Z)
# W,stats = pocket_perceptron.perceptron(Z,y)
# print(W)
# plotting.plot_mesh(W,X,q=q)
# plotting.plot_data(X,y=y,title=title)
# plotting.plot_transform(W,q=q)
# plotting.show()

# =======================================
# graphing testing
X,f = data.qth_order_data(lower=-8000,upper=10,q=5,n=100000)
z_ = data.zoom_limits(X)
print(z_)
X = data.increase_resolution(f,z_)
plotting.plot_data(X)
plotting.set_limits(z_)
plotting.show()

# ========================================
# linear fit testing
# X,f = data.qth_order_data(q=q,n=50,noise=noise)
# plotting.plot_data(X)
# # returns function

# p = linear_regression.linear_regression(X,q=q_fit)
# plotting.polynomial(p,X)
# print(p.error())

# z_ = data.zoom_limits(X)
# print(z_)
Example #13
File: mps.py  Project: necoleman/pymps
def compute_square_eigenvalues(write_out=False):
	"""Compute Laplace Dirichlet eigenvalues of the square"""
	a = 2
	N = 25
	num_pts = 2*N
	k = np.arange(1,N).reshape(N-1,1)

	# expand about origin
	x1 = np.ones(num_pts)
	y1 = np.linspace(0,1,num_pts)
	x2 = np.linspace(0,1,num_pts)
	y2 = np.ones(num_pts)

	xint = np.random.random(2*num_pts)
	yint = np.random.random(2*num_pts)

	x = np.concatenate( (x1, x2, xint) )
	y = np.concatenate( (y1, y2, yint) )
	
	plt.scatter(x,y)
	plt.gca().set_aspect('equal')
	plt.show()
	
	r = np.sqrt(x**2 + y**2)
	t = np.arctan2(y,x)

	lams = np.arange(0.5, 100, 0.5)
	S = []
	Alist = []
	for lam in lams:
		A = evaluate_basis_func_bessel(a, lam, r, t, k)
		Alist.append(A)
		S.append(find_sing_val(A,4*N))#2*num_pts))

	if write_out:
		with open('shape.dat', 'w+') as f:
			for n in range(4*num_pts):
				f.write( str(r[n]*np.cos(t[n]))+' '+str(r[n]*np.sin(t[n])) + '\n')
	
		with open('out.dat', 'w+') as f:
			for n in range(len(lams)):
				f.write( str(lams[n]) + ' ' + str(S[n]) + '\n')

	mins = []
	for i in range(len(lams))[1:-1]:
		if S[i-1] > S[i] and S[i+1] > S[i]:
			mins.append(lams[i])

	print(mins)

	eigs = []
	for m in range(1,5):
		for n in range(1,5):
			eigs.append( np.pi**2*(m**2 + n**2) )
	print(sorted(eigs))

	return lams, S, Alist

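	# NOTE: the block below is unreachable because it follows the return statement above.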
	if write_out:
		plot_data(lams, [S], ['minimum singular value'], 'minimum singular value',
	            'frequency', 'sing value', 'sing_vals.html')
Example #14
def fit_patches(data, dq, shifts, psf_model, parms, old_fit_parms=None):
    """
    Fit the patches and return model components and nll.
    """
    # initialize
    nll = np.zeros_like(data)
    masks = np.zeros_like(data, dtype=bool)
    if parms.background is None:
        fit_parms = np.zeros(data.shape[0])
    elif parms.background == 'constant':
        fit_parms = np.zeros((data.shape[0], 2))
    else:
        assert 0, 'no linear bkgs, need to implement uncertainties'
        fit_parms = np.zeros((data.shape[0], 4))
    fit_vars = np.zeros_like(fit_parms)

    psfs = render_psfs(psf_model, shifts, parms.patch_shape, parms.psf_grid,
                       parms.k)

    for i in range(data.shape[0]):
        dlt_nll = np.inf
        if old_fit_parms is None:
            fp, fv, bkg, ind = fit_single_patch(data[i], psfs[i], dq[i], parms)
        else:
            bkg = make_background(data[i], old_fit_parms[i], parms.background)
            model = old_fit_parms[i][0] * psfs[i] + bkg
            nm = parms.floor + parms.gain * np.abs(model)
            fp, fv, bkg, ind = fit_single_patch(data[i], psfs[i], dq[i], parms,
                                                var=nm)

        model = fp[0] * psfs[i] + bkg
        nm = parms.floor + parms.gain * np.abs(model)
        cur_nll = np.sum(patch_nll(data[i][ind], model[ind], parms))
        cur_fp = fp
        cur_fv = fv
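        # iteratively refit, each pass using the noise model implied by the previous
        # model, until the patch nll stops improving by more than parms.nll_tol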
        while dlt_nll > parms.nll_tol:
            fp, fv, bkg, idx = fit_single_patch(data[i], psfs[i], dq[i], parms,
                                           var=nm)
            model = fp[0] * psfs[i] + bkg
            nm = parms.floor + parms.gain * np.abs(model)
            new_nll = np.sum(patch_nll(data[i][idx], model[idx], parms))
            dlt_nll = cur_nll - new_nll
            if dlt_nll > 0:
                ind = idx
                cur_fp = fp
                cur_fv = fv
                cur_nll = new_nll

        assert cur_nll < parms.max_nll
        nll[i, ind] = cur_nll
        masks[i] = ind
        fit_parms[i] = cur_fp
        fit_vars[i] = cur_fv

        if parms.plot_data:
            # get pre-clip nll
            if parms.clip_parms is None:
                parms.old_model = model
                parms.old_bkg = bkg
                flags = dq[i].reshape(parms.patch_shape)
                flags[flags > 1] = 1
                parms.flags = flags
            else:
                ind = parms.flags.ravel() != 1
            old_nll = np.zeros(data.shape[1])
            old_nll[ind] = patch_nll(data[i][ind], parms.old_model[ind], parms)
            
            # plot the data
            t = time.time()
            plot_data(i, data[i], model, bkg, nll[i], old_nll, parms)

    return fit_parms, fit_vars, nll, masks
Example #15
def compute_square_eigenvalues(write_out=False):
    """Compute Laplace Dirichlet eigenvalues of the square"""
    a = 2
    N = 25
    num_pts = 2 * N
    k = np.arange(1, N).reshape(N - 1, 1)

    # expand about origin
    x1 = np.ones(num_pts)
    y1 = np.linspace(0, 1, num_pts)
    x2 = np.linspace(0, 1, num_pts)
    y2 = np.ones(num_pts)

    xint = np.random.random(2 * num_pts)
    yint = np.random.random(2 * num_pts)

    x = np.concatenate((x1, x2, xint))
    y = np.concatenate((y1, y2, yint))

    plt.scatter(x, y)
    plt.gca().set_aspect('equal')
    plt.show()

    r = np.sqrt(x**2 + y**2)
    t = np.arctan2(y, x)

    lams = np.arange(0.5, 100, 0.5)
    S = []
    Alist = []
    for lam in lams:
        A = evaluate_basis_func_bessel(a, lam, r, t, k)
        Alist.append(A)
        S.append(find_sing_val(A, 4 * N))  #2*num_pts))

    if write_out:
        with open('shape.dat', 'w+') as f:
            for n in range(4 * num_pts):
                f.write(
                    str(r[n] * np.cos(t[n])) + ' ' + str(r[n] * np.sin(t[n])) +
                    '\n')

        with open('out.dat', 'w+') as f:
            for n in range(len(lams)):
                f.write(str(lams[n]) + ' ' + str(S[n]) + '\n')

    mins = []
    for i in range(len(lams))[1:-1]:
        if S[i - 1] > S[i] and S[i + 1] > S[i]:
            mins.append(lams[i])

    print(mins)

    eigs = []
    for m in range(1, 5):
        for n in range(1, 5):
            eigs.append(np.pi**2 * (m**2 + n**2))
    print(sorted(eigs))

    return lams, S, Alist

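    # NOTE: the block below is unreachable because it follows the return statement above.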
    if write_out:
        plot_data(lams, [S], ['minimum singular value'],
                  'minimum singular value', 'frequency', 'sing value',
                  'sing_vals.html')
Example #16
	plt.plot(N,i_iterations,label='iterative')
	plt.xlabel("Number of data points")
	plt.ylabel("Number of iterations")
	plt.ylim(-.0001,min(np.max(v_iterations),np.max(i_iterations)))
	plt.legend()
	plt.title("Comparison of Vectorized versus Iterative number of iterations")
	plt.show()


if __name__ == '__main__':
	d=2
	n=100
	f = np.random.rand(d+1)
	f[1]*=-1

	#vector_vs_iterative(f,n,d)

	X,y = data.data(f,n,d,classify)

	W,stats = perceptron(X,y)
	print("W:",W)

	plotting.plot_mesh(W,X)
	plotting.plot_implicit(W)
	plotting.plot_data(X,y=y)
	a,b,c=W
	plt.title("Ordinary Perceptron\nSlope: {:.2f} | Intercept: {:.2f}".format(-a/b,-c/b))
	plt.show()


Example #17
def baseline(args):
    args = parse_arguments(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    file_helper = FileHelper()
    train_helper = TrainHelper(device)
    train_helper.seed_torch(seed=args.seed)

    model_name = train_helper.get_filename_from_baseline_params(args)
    run_folder = file_helper.get_run_folder(args.folder, model_name)

    metrics_helper = MetricsHelper(run_folder, args.seed)

    # get sender and receiver models and save them
    sender, receiver, diagnostic_receiver = get_sender_receiver(device, args)

    sender_file = file_helper.get_sender_path(run_folder)
    receiver_file = file_helper.get_receiver_path(run_folder)
    # torch.save(sender, sender_file)

    if receiver:
        torch.save(receiver, receiver_file)

    model = get_trainer(
        sender,
        device,
        args.dataset_type,
        receiver=receiver,
        diagnostic_receiver=diagnostic_receiver,
        vqvae=args.vqvae,
        rl=args.rl,
        entropy_coefficient=args.entropy_coefficient,
        myopic=args.myopic,
        myopic_coefficient=args.myopic_coefficient,
    )

    model_path = file_helper.create_unique_model_path(model_name)

    best_accuracy = -1.0
    epoch = 0
    iteration = 0

    if args.resume_training or args.test_mode:
        epoch, iteration, best_accuracy = load_model_state(model, model_path)
        print(
            f"Loaded model. Resuming from - epoch: {epoch} | iteration: {iteration} | best accuracy: {best_accuracy}"
        )

    if not os.path.exists(file_helper.model_checkpoint_path):
        print("No checkpoint exists. Saving model...\r")
        torch.save(model.visual_module, file_helper.model_checkpoint_path)
        print("No checkpoint exists. Saving model...Done")

    train_data, valid_data, test_data, valid_meta_data, _ = get_training_data(
        device=device,
        batch_size=args.batch_size,
        k=args.k,
        debugging=args.debugging,
        dataset_type=args.dataset_type,
    )

    train_meta_data, valid_meta_data, test_meta_data = get_meta_data()

    # dump arguments
    pickle.dump(args, open(f"{run_folder}/experiment_params.p", "wb"))

    pytorch_total_params = sum(p.numel() for p in model.parameters())

    if not args.disable_print:
        # Print info
        print("----------------------------------------")
        print(
            "Model name: {} \n|V|: {}\nL: {}".format(
                model_name, args.vocab_size, args.max_length
            )
        )
        print(sender)
        if receiver:
            print(receiver)

        if diagnostic_receiver:
            print(diagnostic_receiver)

        print("Total number of parameters: {}".format(pytorch_total_params))

    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)

    # Train
    current_patience = args.patience
    best_accuracy = -1.0
    converged = False

    start_time = time.time()

    if args.test_mode:
        test_loss_meter, test_acc_meter, _ = train_helper.evaluate(
            model, test_data, test_meta_data, device, args.rl
        )

        average_test_accuracy = test_acc_meter.avg
        average_test_loss = test_loss_meter.avg

        print(
            f"TEST results: loss: {average_test_loss} | accuracy: {average_test_accuracy}"
        )
        return

    iterations = []
    losses = []
    hinge_losses = []
    rl_losses = []
    entropies = []
    accuracies = []

    while iteration < args.iterations:
        for train_batch in train_data:
            print(f"{iteration}/{args.iterations}       \r", end="")

            ### !!! This is the complete training procedure. Rest is only logging!
            _, _ = train_helper.train_one_batch(
                model, train_batch, optimizer, train_meta_data, device
            )

            if iteration % args.log_interval == 0:

                if not args.rl:
                    valid_loss_meter, valid_acc_meter, _, = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl
                    )
                else:
                    valid_loss_meter, hinge_loss_meter, rl_loss_meter, entropy_meter, valid_acc_meter, _ = train_helper.evaluate(
                        model, valid_data, valid_meta_data, device, args.rl
                    )

                new_best = False

                average_valid_accuracy = valid_acc_meter.avg

                if (
                    average_valid_accuracy < best_accuracy
                ):  # No new best found. May lead to early stopping
                    current_patience -= 1

                    if current_patience <= 0:
                        print("Model has converged. Stopping training...")
                        converged = True
                        break
                else:  # new best found. Is saved.
                    new_best = True
                    best_accuracy = average_valid_accuracy
                    current_patience = args.patience
                    save_model_state(model, model_path, epoch, iteration, best_accuracy)

                # Skip for now  <--- What does this comment mean? printing is not disabled, so this will be shown, right?
                if not args.disable_print:

                    if not args.rl:
                        print(
                            "{}/{} Iterations: val loss: {}, val accuracy: {}".format(
                                iteration,
                                args.iterations,
                                valid_loss_meter.avg,
                                valid_acc_meter.avg,
                            )
                        )
                    else:
                        print(
                            "{}/{} Iterations: val loss: {}, val hinge loss: {}, val rl loss: {}, val entropy: {}, val accuracy: {}".format(
                                iteration,
                                args.iterations,
                                valid_loss_meter.avg,
                                hinge_loss_meter.avg,
                                rl_loss_meter.avg,
                                entropy_meter.avg,
                                valid_acc_meter.avg,
                            )
                        )

                iterations.append(iteration)
                losses.append(valid_loss_meter.avg)
                if args.rl:
                    hinge_losses.append(hinge_loss_meter.avg)
                    rl_losses.append(rl_loss_meter.avg)
                    entropies.append(entropy_meter.avg)
                accuracies.append(valid_acc_meter.avg)

            iteration += 1
            if iteration >= args.iterations:
                break

        epoch += 1

        if converged:
            break

    # prepare writing of data
    dir_path = os.path.dirname(os.path.realpath(__file__))
    dir_path = dir_path.replace("/baseline", "")
    timestamp = str(datetime.datetime.now())
    filename = "output_data/vqvae_{}_rl_{}_dc_{}_gs_{}_dln_{}_dld_{}_beta_{}_entropy_coefficient_{}_myopic_{}_mc_{}_seed_{}_{}.csv".format(
        args.vqvae,
        args.rl,
        args.discrete_communication,
        args.gumbel_softmax,
        args.discrete_latent_number,
        args.discrete_latent_dimension,
        args.beta,
        args.entropy_coefficient,
        args.myopic,
        args.myopic_coefficient,
        args.seed,
        timestamp,
    )
    full_filename = os.path.join(dir_path, filename)

    # write data
    d = [iterations, losses, hinge_losses, rl_losses, entropies, accuracies]
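    # zip_longest pads the shorter columns (e.g. hinge_losses when args.rl is False)
    # with '' so every CSV row has a value for each field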
    export_data = zip_longest(*d, fillvalue="")
    with open(full_filename, "w", encoding="ISO-8859-1", newline="") as myfile:
        wr = csv.writer(myfile)
        wr.writerow(
            ("iteration", "loss", "hinge loss", "rl loss", "entropy", "accuracy")
        )
        wr.writerows(export_data)
    myfile.close()

    # plotting
    print(filename)
    plot_data(filename, args)

    return run_folder
Example #18
            ct2_samples = samples['ct2']
            ct3_samples = samples['ct3']
            ct4_samples = samples['ct4']
            ct5_samples = samples['ct5']
            v_samples = samples['voltage']

            # Save samples to disk
            with open('data/samples/last-debug.pkl', 'wb') as f:
                pickle.dump(samples, f)

            if not title:
                title = input("Enter the title for this chart: ")

            title = title.replace(" ", "_")
            logger.debug("Building plot.")
            plot_data(samples, title)
            ip = get_ip()
            if ip:
                logger.info(
                    f"Chart created! Visit http://{ip}/{title}.html to view the chart. Or, simply visit http://{ip} to view all the charts created using 'debug' and/or 'phase' mode."
                )
            else:
                logger.info(
                    "Chart created! I could not determine the IP address of this machine. Visit your device's IP address in a webrowser to view the list of charts you've created using 'debug' and/or 'phase' mode."
                )

        if MODE.lower() == 'phase':
            # This mode is intended to be used for correcting the phase error in your CT sensors. Please ensure that you have a purely resistive load running through your CT sensors - that means no electric fans and no digital circuitry!

            PF_ROUNDING_DIGITS = 3  # This variable controls how many decimal places the PF will be rounded to
Example #19
def calculate_metric_terms(cand=None,
                           cluster_function=None,
                           plot=False,
                           debug=False,
                           **kwargs):
    """
    Calculate metric values

    :param cand: Candidates from an observation with the four features, snr, and base label (RFI or FRB).
    :param cluster_function: Function to use for clustering
    :param plot: To plot the results
    :param debug: More logging for debugging
    :param kwargs: Arguments for the clustering function
    :return:
        Number of candidates
        FRB found: True if FRB candidates were recovered
        homogenity_frbs: homogeneity of the FRBs
        completenes_frbs: completeness of the FRBs
        v_measure: V measure value
    """
    if isinstance(cand, dict):
        d = cand['cands']
        l = cand['labels']
        s = cand['snrs']
        data = d
    else:
        f = np.load(cand)
        if debug:
            print(cand)
        d = f['cands']
        l = f['labels']
        s = f['snrs']
        data = d

    assert cluster_function

    # cluster
    clusterer = cluster_function(**kwargs).fit(data)
    tl = l
    cl = clusterer.labels_

    # for each cluster containing an FRB candidate, calculate its homogeneity,
    # defined as the number of FRB candidates in that cluster divided by the total
    # number of candidates in that cluster; this favors clusters that contain mostly
    # FRB candidates and little RFI
    # the total homogeneity is the mean of the per-cluster homogeneities, weighted by
    # the number of candidates in each cluster and excluding unclustered candidates
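    # Worked example (hypothetical numbers): two FRB clusters of sizes 4 and 6 that
    # contain 3 and 5 FRB candidates give homogeneity = (3 + 5) / (4 + 6) = 0.8.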
    cluster_labels_frb = np.array(list(set(cl[tl == 1])))
    nfrb_cands = (tl == 1).sum()

    nfrb_c = 0  # total number of frbs in frb clusters
    ntot_c = 0  # total number of candidates in frb clusters
    for cluster in cluster_labels_frb:
        if cluster == -1:
            continue
        indexes = np.where(cl == cluster)[0]
        groundtruth_labels = np.take(tl, indexes)
        ntot = len(groundtruth_labels)
        nfrb = (groundtruth_labels == 1).sum()
        ntot_c += ntot
        nfrb_c += nfrb
    if ntot_c == 0:
        assert len(cluster_labels_frb) == 1
        assert cluster_labels_frb[0] == -1
        homogenity_frbs = 0
    else:
        homogenity_frbs = nfrb_c / ntot_c

    # for each cluster containing an FRB candidate, calculate its completeness,
    # defined as the number of FRBs in that cluster divided by the total number of FRBs
    # the total completeness is the mean of the per-cluster completeness values,
    # weighted by the number of candidates in each cluster and including unclustered
    # candidates; this rewards recovering the FRBs in as few clusters as possible
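    # Worked example (same hypothetical numbers, 8 FRB candidates in total):
    # c = 3*4 + 5*6 = 42 and ntot_c = 4 + 6 = 10, so completeness = 42 / (8 * 10) = 0.525.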

    c = 0  # completeness for FRB clusters
    ntot_c = 0
    for cluster in cluster_labels_frb:
        indexes = np.where(cl == cluster)[0]
        groundtruth_labels = np.take(tl, indexes)
        ntot = len(groundtruth_labels)
        nfrb = (groundtruth_labels == 1).sum()
        ntot_c += ntot
        c += nfrb * ntot

    completenes_frbs = c / (nfrb_cands * ntot_c)

    # the v measure is the harmonic mean of homogeneity and completeness, so it is
    # high only when both are high, and both are weighted equally
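    # Worked example (hypothetical values from above): with homogeneity 0.8 and
    # completeness 0.525, v_measure = 2 * 0.8 * 0.525 / (0.8 + 0.525) ≈ 0.63.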

    v_measure = 2 * homogenity_frbs * completenes_frbs / (completenes_frbs +
                                                          homogenity_frbs)

    if debug:
        print(f'Homogenity of FRBs is {homogenity_frbs}')
        print(f'Completeness of FRBs is {completenes_frbs}')
        print(f'Harmonic mean of these two is {v_measure}')

    if plot:
        plot_data(data, cl, s)

    true_labels = tl
    cluster_labels = cl

    # check if the FRB candidate is recovered or not (after taking the max snr element of each cluster)
    clusters = cluster_labels
    cl_rank = np.zeros(len(clusters), dtype=int)
    cl_count = np.zeros(len(clusters), dtype=int)

    for cluster in np.unique(clusters):
        clusterinds = np.where(clusters == cluster)[0]
        snrs = s[clusterinds]
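        # the double argsort converts SNRs to descending ranks, so the highest-SNR
        # candidate in each cluster gets rank 1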
        cl_rank[clusterinds] = np.argsort(np.argsort(snrs)[::-1]) + 1
        cl_count[clusterinds] = len(clusterinds)

    # rank one is the highest snr candidate of the cluster
    calcinds = np.unique(np.where(cl_rank == 1)[0])

    # If the true labels at the rank-1 indexes (calcinds) contain a 1, i.e. an FRB
    # candidate, the FRB was not missed: it survives as the highest-SNR member of one
    # or more clusters, and some FRB plots will be generated.
    if (np.take(true_labels, calcinds) == 1).sum() > 0:
        frb_found = True
    else:
        frb_found = False

    return len(
        true_labels), frb_found, homogenity_frbs, completenes_frbs, v_measure