Example no. 1
def main():
    if param['disable_calibration'] == 0:
        print('\nCCD CALIBRATION:\n')
        calibration.calibrate_data()
        print('\nCalibration successfully completed.')

    if param['disable_analysis'] == 0:
        print('\n----------------------------------------')
        print('\nDATA ANALYSIS:\n')
        analysis.analyze_data()
        print('Data analysis successfully completed.')

    if param['disable_plots'] == 0:
        print('\n----------------------------------------')
        print('\nPLOTTING:\n')
        plots.make_plots()
        print('Plotting successfully completed.')
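Example no. 1 assumes a module-level `param` dictionary holding integer flags. A minimal, hypothetical sketch of how such a dictionary might be loaded; the file name, helper name, and keys are assumptions for illustration, not part of the original project:

import json

def load_params(path='params.json'):
    # Hypothetical helper: reads the flag dictionary consulted by main() above.
    with open(path) as fh:
        return json.load(fh)

# param = load_params()
# Expected keys (assumed): 'disable_calibration', 'disable_analysis', 'disable_plots'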
Example no. 2
def run_one(parameters_with_uncertainties, household_well_as, group_name, data,
            numbins):
    """Run the two mass balance models for one set of input parameters. Outputs the data with predicted
    values appended, the results of the two regressions, and the parameters for the two models.
    """
    # get the correct subset of the data
    data_subset = make_subset(data, group_name)
    # run regressions
    distributed_results, household_results, data_subset = run_regressions(
        data_subset, group_name, household_well_as)
    # calculate parameter values
    distributed_params, household_params = calculate_parameters(
        distributed_results, household_results, parameters_with_uncertainties,
        group_name, household_well_as)
    # plot results
    make_plots(distributed_results, distributed_params, household_results,
               household_params, data_subset, group_name, numbins,
               household_well_as)
    # compare subsets
    compare_subsets(distributed_results, data)
    return data_subset, distributed_results, distributed_params, household_results, household_params
Example no. 3
        whitening_factor=np.sqrt(float(ndata))  # whitening scale factor
    )
    return params


params = get_params()

# load in y normscale
hf = h5py.File('plotting_data_%s/y_normscale_value.h5' % params['run_label'],
               'r')
y_normscale = np.array(hf['y_normscale'])
hf.close()

# Make directory for plots
#plots.make_dirs(params,params['plot_dir'][0])

# Declare plot class variables
plotter = plots.make_plots(params, None, None, None)

# plot losses
#plotter.make_loss_plot(None,None,params['report_interval'],fwd=False)

# Make KL plot
#plotter.gen_kl_plots(VICI_inverse_model,None,None,None)

# Make pp plot
plotter.plot_pp(VICI_inverse_model, None, None, 0, None, None, None)

# Make corner plots
#plotter.make_corner_plot(None,None,sampler='dynesty1')
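A small aside on the HDF5 read above: `h5py.File` also works as a context manager, which closes the file even if the read fails. A minimal equivalent sketch, reusing the example's `params` and path pattern:

import h5py
import numpy as np

with h5py.File('plotting_data_%s/y_normscale_value.h5' % params['run_label'], 'r') as hf:
    y_normscale = np.array(hf['y_normscale'])  # file is closed automatically on exit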
Example no. 4
                                      params['sigma'], params['usepars'],
                                      params['n_burnin'])

    # save samples for later
    f = h5py.File("data/generated_samples_%s" % run_label, "w")
    f.create_dataset("x_data_train_h", data=x_data_train_h)
    f.create_dataset("y_data_train_lh", data=y_data_train_lh)
    f.create_dataset("y_data_test_h", data=y_data_test_h)
    f.create_dataset("pos_test", data=pos_test)
    f.create_dataset("samples", data=samples)
    f.close()

# Make directory for plots
#plots.make_dirs(params['plot_dir'][0])
# Declare plot class variables
plotter = plots.make_plots(params, samples, None, pos_test)

# First, we learn a multi-fidelity model that learns to infer high-fidelity (accurate) observations from target images/objects and low-fidelity simulated observations. For this we use the portion of the training set for which we do have real/high-fidelity observations.
#_, _ = VICI_forward_model.train(params, x_data_train_h, y_data_train_h, y_data_train_lh, "forward_model_dir/forward_model.ckpt", plotter) # This trains the forward model and saves the weights in forward_model_dir/forward_model.ckpt

# We then train the inference model using all training images and their associated low-fidelity (inaccurate) observations, using the previously trained forward model to draw from the observation likelihood.
#_, _ = VICI_inverse_model.train(params, x_data_train, y_data_train_l, np.shape(y_data_train_h)[1], "forward_model_dir/forward_model.ckpt", "inverse_model_dir/inverse_model.ckpt", plotter, y_data_test_h) # This trains the inverse model to recover posteriors using the forward model weights stored in forward_model_dir/forward_model.ckpt and saves the inverse model weights in inverse_model_dir/inverse_model.ckpt
_, _ = VICI_inverse_model.resume_training(
    params, x_data_train, y_data_train_l,
    np.shape(y_data_train_h)[1], "forward_model_dir/forward_model.ckpt",
    "inverse_model_dir/inverse_model.ckpt")

# The trained inverse model weights can then be used to infer a probability density of solutions given new measurements
xm, xsx, XS, pmax = VICI_inverse_model.run(
    params, y_data_test_h,
    np.shape(x_data_train)[1], "inverse_model_dir/inverse_model.ckpt"
Example no. 5
File: main.py Project: jsbaan/NLP2
    print('Training IBM 1')
    trained_lexicon = IBM1_EM(e, f, lexicon, nr_it=nr_it)

    print('calculating final scores...')
    output_naacl(viterbi(e_test, f_test, trained_lexicon),
                 'AER/naacl_IBM1.txt')
    os.system('perl data/testing/eval/wa_check_align.pl AER/naacl_IBM1.txt')
    os.system(
        'perl data/testing/eval/wa_eval_align.pl data/testing/answers/test.wa.nonullalign AER/naacl_IBM1.txt'
    )

    if savemodel:
        dill.dump(trained_lexicon,
                  open("trained_models/" + model + ".dill", "wb"))

    if plots: make_plots('perplexity_IBM1.p', 'AER_IBM1.p')

elif model == 'IBM1_VB':
    print('Training Bayesian IBM 1')
    trained_lexicon, elbo_values = IBM1_VB(e, f, lexicon, nr_it=nr_it)
    pickle.dump(file=open('elbo_values.pkl', 'wb'), obj=elbo_values)
    print('finished training')

    # Save model
    if savemodel:
        dill.dump(trained_lexicon,
                  open("trained_models/IBM1VB_theta.dill", "wb"))

    print('calculating final scores...')
    output_naacl(viterbi(e_test, f_test, trained_lexicon),
                 'AER/naacl_IBM1VB.txt')
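The evaluation calls in this example shell out via `os.system` with hard-coded paths. A minimal alternative sketch using `subprocess.run` (same script and output paths as in the example), which raises an error on a non-zero exit code instead of silently continuing:

import subprocess

subprocess.run(
    ['perl', 'data/testing/eval/wa_check_align.pl', 'AER/naacl_IBM1.txt'],
    check=True)
subprocess.run(
    ['perl', 'data/testing/eval/wa_eval_align.pl',
     'data/testing/answers/test.wa.nonullalign', 'AER/naacl_IBM1.txt'],
    check=True)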
Example no. 6
def run(prefix, regions, taxa, iters, procs, program_path, profile,
        threading_configurations, flamegraph_cmd):
    os.makedirs(prefix, exist_ok=True)

    exp_program = [
        program.lagrange(binary_path=os.path.abspath(program_path),
                         profile=profile)
    ]

    exp = []

    exp_name_format = "{taxa}taxa_{regions}regions_{workers}workers_{tpw}tpw"

    with rich.progress.Progress() as progress_bar:

        total_datasets = len(regions) * len(taxa) *\
                len(threading_configurations)
        total_work = total_datasets * iters
        extra_work = 0
        if profile:
            extra_work += 1
        make_task = progress_bar.add_task("Making datasets...",
                                          total=total_datasets)

        with open(os.path.join(prefix, 'parameters.yaml'), 'w') as yamlfile:
            yamlfile.write(
                yaml.dump(
                    {
                        'prefix': prefix,
                        'regions': regions,
                        'taxa': taxa,
                        'iters': iters,
                        'procs': procs,
                        'program_path': program_path,
                        'program_sha256': compute_hash_with_path(program_path),
                        'profile': profile,
                        'threading_configurations': threading_configurations,
                    },
                    explicit_start=True,
                    explicit_end=True))

        with open(os.path.join(prefix, 'notes.md'), 'a') as notesfile:
            notesfile.write("- Started on: {}\n".format(
                datetime.datetime.now().isoformat()))

        for r, t, tc in itertools.product(regions, taxa,
                                          threading_configurations):
            exp_path = os.path.join(
                prefix,
                exp_name_format.format(regions=r,
                                       taxa=t,
                                       workers=tc[0],
                                       tpw=tc[1]))
            exp.append(
                experiment.experiment(exp_path,
                                      make_datasets(t, r, iters, tc[0], tc[1]),
                                      exp_program))
            progress_bar.update(make_task, advance=1.0)

        rich.print("Running {} experiments".format(len(exp)))

        overall_task = progress_bar.add_task("Running...",
                                             total=total_datasets + extra_work)

        for e in exp:
            e.run(procs)
            progress_bar.update(overall_task, advance=1.0)

        if not profile:
            results = []
            for e in exp:
                results.extend(e.collect_results())

            with open(os.path.join(prefix, 'results.csv'), 'w') as csv_file:
                writer = csv.DictWriter(csv_file,
                                        fieldnames=results[0].header())
                writer.writeheader()
                for result in results:
                    writer.writerow(result.write_row())

            dataframe = pandas.read_csv(os.path.join(prefix, 'results.csv'))
            plots.make_plots(dataframe, prefix)

        else:
            fg_work = len(exp) * len(exp[0].datasets)
            fg_task = progress_bar.add_task("Making Flamegraphs...",
                                            total=fg_work)

            for e in exp:
                for d in e.datasets:
                    flamegraph.build(d)
                    progress_bar.update(fg_task, advance=1.0)

            progress_bar.update(overall_task, advance=1.0)

        with open(os.path.join(prefix, 'notes.md'), 'a') as notesfile:
            notesfile.write("- Finshed on: {}\n".format(
                datetime.datetime.now().isoformat()))
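One caveat in the CSV-writing branch of this example: `results[0].header()` assumes at least one result was collected. A hedged sketch of a guard; the choice to skip writing entirely when nothing was collected is an assumption, not the author's behaviour:

# Sketch: only write results.csv when at least one result exists.
if results:
    with open(os.path.join(prefix, 'results.csv'), 'w') as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=results[0].header())
        writer.writeheader()
        for result in results:
            writer.writerow(result.write_row())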
Example no. 7
                      loc=(0.86, 0.22),
                      fontsize=20)
        plt.savefig(
            '%s/latest_%s/corner_plot_%s_%d.png' %
            (params['plot_dir'], params['run_label'], params['run_label'], i))
        plt.close()
        del figure
        print('Made corner plot: %s' % str(i + 1))

        # Store ML predictions for later plotting use
        VI_pred_all.append(VI_pred)

    VI_pred_all = np.array(VI_pred_all)

    # Define pp and KL plotting class
    plotter = plots.make_plots(params, XS_all, VI_pred_all, x_data_test,
                               model_loc)

    if params['make_kl_plot'] == True:
        # Make KL plots
        plotter.gen_kl_plots(VICI_inverse_model, y_data_test, x_data_test,
                             y_normscale, bounds, snrs_test)

    if params['make_pp_plot'] == True:
        # Make pp plot
        plotter.plot_pp(VICI_inverse_model, y_data_test, x_data_test, 0,
                        y_normscale, x_data_test, bounds)

    if params['make_loss_plot'] == True:
        plotter.plot_loss()
Example no. 8
def train(params, x_data, y_data, siz_high_res, save_dir, plotter, y_data_test,
          train_files, normscales, y_data_train_noisefree, samples, pos_test,
          y_normscale):

    x_data = x_data
    y_data_train_l = y_data

    # USEFUL SIZES
    xsh = np.shape(x_data)
    yshl1 = np.shape(y_data)[1]
    ysh1 = np.shape(y_data)[1]    
    print(xsh,yshl1,ysh1)
 
    #z_dimension_fm = params['z_dimensions_fw']
    #n_weights_fm = params['n_weights_fw']
    
    z_dimension = params['z_dimension']
    bs = params['batch_size']
    n_weights = params['n_weights']
    lam = 1
    
    graph = tf.Graph()
    session = tf.Session(graph=graph)
    with graph.as_default():
        tf.set_random_seed(np.random.randint(0,10))
        SMALL_CONSTANT = 1e-6
        
        # PLACE HOLDERS
        x_ph = tf.placeholder(dtype=tf.float32, shape=[None, xsh[1]], name="x_ph")
        bs_ph = tf.placeholder(dtype=tf.int64, name="bs_ph") # batch size placeholder
        yt_ph = tf.placeholder(dtype=tf.float32, shape=[None, ysh1], name="yt_ph")
        
        # LOAD FORWARD MODEL NEURAL NETWORKS
        #DEC_XYlZtoYh = OELBO_decoder_difference.VariationalAutoencoder("OELBO_decoder", ysh1, z_dimension_fm+yshl1+xsh[1], n_weights_fm) # p(Yh|X,Yl,Z)
        #ENC_XYltoZ = OELBO_encoder.VariationalAutoencoder("OELBO_encoder", yshl1+xsh[1], z_dimension_fm, n_weights_fm) # p(Z|X,Yl)
        #ENC_XYhYltoZ = VAE_encoder.VariationalAutoencoder("vae_encoder", xsh[1]+ysh1+yshl1, z_dimension_fm, n_weights_fm) # q(Z|X,Yl,Yh)
        
        # LOAD VICI NEURAL NETWORKS
        autoencoder = VICI_decoder.VariationalAutoencoder("VICI_decoder", xsh[1], z_dimension+ysh1, n_weights) # r(x|z,y)
        autoencoder_ENC = VICI_encoder.VariationalAutoencoder("VICI_encoder", ysh1, z_dimension, n_weights) # generates params for r(z|y)
        autoencoder_VAE = VICI_VAE_encoder.VariationalAutoencoder("VICI_VAE_encoder", xsh[1]+ysh1, z_dimension, n_weights) # used to sample from r(z|y)?
        
        # DEFINE MULTI-FIDELITY FORWARD MODEL
        #####################################################################################################################
        SMALL_CONSTANT = 1e-6
        
        # NORMALISE INPUTS
        yl_ph_n = tf_normalise_dataset(yt_ph) # placeholder for normalised low-res y data
        x_ph_n = tf_normalise_dataset(x_ph)   # placeholder for normalised x data
        #yl_ph_n = yt_ph
        #x_ph_n = x_ph
        #yl_ph_n = tf.Print(yl_ph_n, [yl_ph_n], first_n=1, summarize=10, message="Thss is yl_ph_n: ")
        #x_ph_n = tf.Print(x_ph_n, [x_ph_n], first_n=1, summarize=10, message="This is x_ph_n: ")

        # GET p(Z|X,Yl) - takes in x data and low res y data and returns mean and logvar of Gaussian z distribution
        #zxyl_mean,zxyl_log_sig_sq = ENC_XYltoZ._calc_z_mean_and_sigma(tf.concat([x_ph_n,yl_ph_n],1))
        #zxyl_mean = tf.Print(zxyl_mean, [zxyl_mean], first_n=1, summarize=10, message="Thss is zxyl_mean: ")
        #zxyl_log_sig_sq = tf.Print(zxyl_log_sig_sq, [zxyl_log_sig_sq], first_n=1, summarize=10, message="Thss is zxyl_log_sig_sq: ")
        # then samples z from that distribution
        #rxyl_samp = ENC_XYhYltoZ._sample_from_gaussian_dist(tf.shape(x_ph_n)[0], z_dimension_fm, zxyl_mean, tf.log(tf.exp(zxyl_log_sig_sq)+SMALL_CONSTANT))
        #rxyl_samp = tf.Print(rxyl_samp, [rxyl_samp], first_n=1, summarize=10, message="Thss is rxyl_samp: ")        

        # GET p(Yh|X,Yl,Z) FROM SAMPLES Z ~ p(Z|X,Yl)
        # then decodes back to high res y data 
        #reconstruction_yh = DEC_XYlZtoYh.calc_reconstruction(tf.concat([x_ph_n,yl_ph_n,rxyl_samp],1))
        # = tf.Print(, [], first_n=1, summarize=10, message="Thss is : ")
        #reconstruction_yh = tf.Print(reconstruction_yh, [reconstruction_yh], first_n=1, summarize=10, message="Thss is reconstruction_yh: ")
        #yh_diff = reconstruction_yh[0] # looks like the difference between the low res y data and the mean reconstructed high res y data
        #yh_diff = tf.Print(yh_diff, [yh_diff], first_n=1, summarize=10, message="Thss is yh_diff: ") 
        #yh_mean = yl_ph_n+yh_diff      # the mean reconstructed high res data
        #yh_mean = tf.Print(yh_mean, [yh_mean], first_n=1, summarize=10, message="Thss is yh_mean: ")
        #yh_log_sig_sq = reconstruction_yh[1]  # the reconstructed high res y data logvar
        #yh_log_sig_sq = tf.Print(yh_log_sig_sq, [yh_log_sig_sq], first_n=1, summarize=10, message="Thss is yh_log_sig_sq: ")
        # then sample something? y doesn't seem to be used anywhere after this 
        # this ends up being the reconstruction of the exact y data corresponding to each x data input
        #y = ENC_XYhYltoZ._sample_from_gaussian_dist(tf.shape(yt_ph)[0], tf.shape(yt_ph)[1], yh_mean, yh_log_sig_sq)
        #y = tf.Print(y, [y], first_n=1, summarize=10, message="Thss is y: ")        

        # DRAW SYNTHETIC Ys TRAINING DATA
#        _, y = OM.forward_model(x_ph_amp,x_ph_ph)
        
        ##########################################################################################################################################
        
        # GET r(z|y)
        #y_ph = tf.placeholder(dtype=tf.float32, shape=[None, ysh1], name="y_ph")  # placeholder for y data
        #y_ph_n = tf_normalise_dataset(y_ph)                                       # placeholder for normalised y data
        #y_ph = tf.Print(y_ph, [y_ph], first_n=1, summarize=10, message="Thss is y_ph: ")
        #y_ph_n = y_ph
        # run inverse autoencoder to generate mean and logvar of z given y data - these are the parameters for r(z|y)
        #zy_mean,zy_log_sig_sq = autoencoder_ENC._calc_z_mean_and_sigma(y_ph_n) 
        zy_mean,zy_log_sig_sq = autoencoder_ENC._calc_z_mean_and_sigma(yl_ph_n)        

        # DRAW FROM r(z|y) - given the Gaussian parameters generate z samples
        rzy_samp = autoencoder_VAE._sample_from_gaussian_dist(bs_ph, z_dimension, zy_mean, zy_log_sig_sq)
        
        # GET r(x|z,y) from r(z|y) samples
        #rzy_samp_y = tf.concat([rzy_samp,y_ph_n],1)
        rzy_samp_y = tf.concat([rzy_samp,yl_ph_n],1)
        reconstruction_xzy = autoencoder.calc_reconstruction(rzy_samp_y)
        x_mean = reconstruction_xzy[0]
        x_log_sig_sq = reconstruction_xzy[1]
        
        # KL(r(z|y)||p(z))
        #latent_loss = -0.5 * tf.reduce_sum(1 + zy_log_sig_sq - tf.square(zy_mean) - tf.exp(zy_log_sig_sq), 1)
        #KL = tf.reduce_mean(latent_loss)
       
        # GET q(z|x,y)
        #xy_ph = tf.concat([x_ph_n,y_ph_n],1)
        xy_ph = tf.concat([x_ph_n,yl_ph_n],1)
        zx_mean,zx_log_sig_sq = autoencoder_VAE._calc_z_mean_and_sigma(xy_ph)
 
        # DRAW FROM q(z|x,y)
        qzx_samp = autoencoder_VAE._sample_from_gaussian_dist(bs_ph, z_dimension, zx_mean, zx_log_sig_sq)
        
        # GET r(x|z,y)
        #qzx_samp_y = tf.concat([qzx_samp,y_ph_n],1)
        qzx_samp_y = tf.concat([qzx_samp,yl_ph_n],1)
        reconstruction_xzx = autoencoder.calc_reconstruction(qzx_samp_y)
        x_mean_vae = reconstruction_xzx[0]
        x_log_sig_sq_vae = reconstruction_xzx[1]
        
        # COST FROM RECONSTRUCTION
        normalising_factor_x_vae = - 0.5 * tf.log(SMALL_CONSTANT+tf.exp(x_log_sig_sq_vae)) - 0.5 * np.log(2 * np.pi)
        square_diff_between_mu_and_x_vae = tf.square(x_mean_vae - x_ph_n)
        inside_exp_x_vae = -0.5 * tf.div(square_diff_between_mu_and_x_vae,SMALL_CONSTANT+tf.exp(x_log_sig_sq_vae))
        reconstr_loss_x_vae = -tf.reduce_sum(normalising_factor_x_vae + inside_exp_x_vae, 1)
        cost_R_vae = tf.reduce_mean(reconstr_loss_x_vae)
        
        # KL(q(z|x,y)||r(z|y))
        v_mean = zy_mean #2
        aux_mean = zx_mean #1
        v_log_sig_sq = tf.log(tf.exp(zy_log_sig_sq)+SMALL_CONSTANT) #2
        aux_log_sig_sq = tf.log(tf.exp(zx_log_sig_sq)+SMALL_CONSTANT) #1
        v_log_sig = tf.log(tf.sqrt(tf.exp(v_log_sig_sq))) #2
        aux_log_sig = tf.log(tf.sqrt(tf.exp(aux_log_sig_sq))) #1
        cost_VAE_a = v_log_sig-aux_log_sig+tf.divide(tf.exp(aux_log_sig_sq)+tf.square(aux_mean-v_mean),2*tf.exp(v_log_sig_sq))-0.5
        cost_VAE_b = tf.reduce_sum(cost_VAE_a,1)                           
        KL_vae = tf.reduce_mean(cost_VAE_b)                               # computes the mean over all tensor elements
        
        # THE VICI COST FUNCTION
        lam_ph = tf.placeholder(dtype=tf.float32, name="lam_ph")
        COST_VAE = KL_vae+cost_R_vae
        COST = COST_VAE
        
        # VARIABLES LISTS
        var_list_VICI = [var for var in tf.trainable_variables() if var.name.startswith("VICI")]
        var_list_ELBO = [var for var in tf.trainable_variables() if var.name.startswith("ELBO")]
        
        # DEFINE OPTIMISER (using ADAM here)
        optimizer = tf.train.AdamOptimizer(params['initial_training_rate']) 
        minimize = optimizer.minimize(COST,var_list = var_list_VICI)
        
        # DRAW FROM q(x|y)
        qx_samp = autoencoder_ENC._sample_from_gaussian_dist(bs_ph, xsh[1], x_mean, SMALL_CONSTANT + tf.log(tf.exp(x_log_sig_sq)))
        
        # INITIALISE AND RUN SESSION
#        init = tf.variables_initializer(var_list_VICI)
        #init = tf.initialize_all_variables()
        init = tf.global_variables_initializer()
        session.run(init)
        #saver_ELBO = tf.train.Saver(var_list_ELBO)
        #saver_ELBO.restore(session,load_dir)
        saver = tf.train.Saver()
    
    KL_PLOT = np.zeros(np.int(np.round(params['num_iterations']/params['report_interval'])+1)) # vector to store test OELBO values
    COST_PLOT = np.zeros(np.int(np.round(params['num_iterations']/params['report_interval'])+1)) # vector to store test VAE ELBO values

    print('Training Inference Model...')    
    # START OPTIMISATION OF OELBO
    indices_generator = batch_manager.SequentialIndexer(params['batch_size'], xsh[0])
    ni = -1
    test_n = 100
    olvec = []
    for i in range(params['num_iterations']):

        next_indices = indices_generator.next_indices()
        
        # run the session - input batchsize the x-data training batch and the y-data training batch 
        #yn = session.run(y, feed_dict={bs_ph:bs, x_ph:x_data[next_indices, :], yt_ph:y_data_train_l[next_indices, :]})
        #session.run(minimize, feed_dict={bs_ph:bs, x_ph:x_data[next_indices, :],  y_ph:yn, lam_ph:lam, yt_ph:y_data_train_l[next_indices, :]}) # minimising cost function

        # Make 25 noise realizations
        if params['do_extra_noise']:
            x_data_train_l = x_data[next_indices,:]
            y_data_train_l = y_data_train_noisefree[next_indices,:] + np.random.normal(0,1,size=(params['batch_size'],params['ndata']))
            if params['do_normscale']:
                y_data_train_l /= y_normscale[0]
            #print('generated {} elements of new training data noise'.format(params['batch_size']))

            session.run(minimize, feed_dict={bs_ph:bs, x_ph:x_data_train_l, lam_ph:lam, yt_ph:y_data_train_l}) # minimising cost function
        else:
            session.run(minimize, feed_dict={bs_ph:bs, x_ph:x_data[next_indices, :], lam_ph:lam, yt_ph:y_data_train_l[next_indices, :]}) # minimising cost function

        if i % params['report_interval'] == 0 and i > 0:
            ni = ni+1
                
            #ynt = session.run(y, feed_dict={bs_ph:test_n, x_ph:x_data[0:test_n,:], yt_ph:y_data_train_l[0:test_n,:]})
            #cost_value_vae, KL_VAE = session.run([COST_VAE, KL_vae], feed_dict={bs_ph:test_n, x_ph:x_data[0:test_n,:], y_ph:ynt, lam_ph:lam, yt_ph:y_data_train_l[0:test_n,:]})
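            # NOTE: x_data_train_l is only assigned in the do_extra_noise branch above,
            # so this evaluation step assumes params['do_extra_noise'] is enabled.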
            cost_value_vae, KL_VAE = session.run([cost_R_vae, KL_vae], feed_dict={bs_ph:test_n, x_ph:x_data_train_l[0:test_n,:], lam_ph:lam, yt_ph:y_data_train_l[0:test_n,:]})
            KL_PLOT[ni] = KL_VAE
            COST_PLOT[ni] = cost_value_vae

            # plot losses
            plotter.make_loss_plot(COST_PLOT[:ni+1],KL_PLOT[:ni+1],params['report_interval'],fwd=False)

            if params['print_values']==True:
                print('--------------------------------------------------------------')
                print('Iteration:',i)
                print('Training Set -ELBO:',cost_value_vae)
                print('KL Divergence:',KL_VAE)

        if i % params['plot_interval'] == 0 and i>0:
            # The trained inverse model weights can then be used to infer a probability density of solutions given new measurements
            _, _, XS, _, _  = VICI_inverse_model.run(params, y_data_test, np.shape(x_data)[1], "inverse_model_dir_%s/inverse_model.ckpt" % params['run_label'])

            # Convert XS back to unnormalized version
            if params['do_normscale']:
                for m in range(params['ndim_x']):
                    XS[:,m,:] = XS[:,m,:]*normscales[m]

            # Generate final results plots
            plotter = plots.make_plots(params,samples,XS,pos_test)

            # Make corner plots
            plotter.make_corner_plot(sampler='dynesty1')

            # Make KL plot
#            plotter.gen_kl_plots(VICI_inverse_model,y_data_test,x_data,normscales)

            # Make pp plot
#            plotter.plot_pp(VICI_inverse_model,y_data_train_l,x_data_train,0,normscales)
       
        if i % params['save_interval'] == 0 and i > 0:

            # Save model 
            save_path = saver.save(session,save_dir)
                
                
    return COST_PLOT, KL_PLOT, train_files 
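This example is written against the TensorFlow 1.x graph/session API (`tf.placeholder`, `tf.Session`, `tf.train.AdamOptimizer`). Under TensorFlow 2 those symbols live in the compatibility module; a minimal sketch of the usual shim, an assumption about how one might run this code today rather than part of the original:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore graph mode so the placeholders and sessions above work unchanged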
Example no. 9
def main(N=30,
         Q=20,
         T=10,
         actions_per_timestep_per_node=15,
         interactions_per_timestep=90,
         num_generations=8,
         num_experiment_per_generation=3,
         fixed_sigma=0.1,
         num_epochs=2,
         alpha_clip_0=0.4,
         threshold=5E-4):
    for experiment_run_id in range(num_generations):
        for generative_id, generative_setting in tqdm(
                hyperparams_settings.items()):
            generative_seed = hash(f"{experiment_run_id}-{generative_id}") & (
                2**32 - 1)
            u_v_t_w, v_a_t_w, X_original, w_original = generate(
                N=N,
                Q=Q,
                T=T,
                actions_per_timestep_per_node=actions_per_timestep_per_node,
                fixed_sigma=fixed_sigma,
                interactions_per_timestep=interactions_per_timestep,
                seed=generative_seed,
                **generative_setting)

            for _ in range(num_experiment_per_generation):
                for estimation_id, estimation_setting in hyperparams_settings.items(
                ):
                    print(
                        '==========================================================================='
                    )
                    print('generative_setting:', generative_id,
                          '\testimation_setting:', estimation_id,
                          '\texperiment_run_id:', experiment_run_id)
                    print(
                        '==========================================================================='
                    )

                    mlflow.start_run()
                    mlflow.log_param("generative_seed", generative_seed)
                    mlflow.log_param("generative_setting", generative_id)
                    mlflow.log_param("estimation_setting", estimation_id)
                    mlflow.log_param("true_hyperparameters",
                                     generative_id == estimation_id)
                    mlflow.log_param("generation_id",
                                     f"{generative_id}_{experiment_run_id}")
                    mlflow.log_param("N", N)
                    mlflow.log_param("Q", Q)
                    mlflow.log_param("T", T)
                    mlflow.log_param("interactions_per_timestep",
                                     interactions_per_timestep)
                    mlflow.log_param("actions_per_timestep_per_node",
                                     actions_per_timestep_per_node)
                    mlflow.log_param("num_epochs", num_epochs)
                    mlflow.log_param("alpha_clip_0", alpha_clip_0)
                    for k, v in generative_setting.items():
                        mlflow.log_param('generation_' + k, v)
                    for k, v in estimation_setting.items():
                        mlflow.log_param('estimation_' + k, v)

                    try:

                        X_estimated, w_estimated, _sigma, t2signs_estimated, alphas, evals = model.learn_opinion_dynamics(
                            N=N,
                            Q=Q,
                            T=T,
                            verbose=False,
                            u_v_t_weights=u_v_t_w,
                            v_a_t_weights=v_a_t_w,
                            num_epochs=num_epochs,
                            threshold=threshold,
                            alpha_clip_0=alpha_clip_0,
                            **estimation_setting)

                        comparison, avg_diffs = compare(
                            X_original, X_estimated, w_original, w_estimated)
                        comparison.update(
                            compare_signs(u_v_t_w, X_original,
                                          t2signs_estimated,
                                          **generative_setting))

                        for key, val in list(evals.items()) + list(
                                comparison.items()):
                            mlflow.log_metric(key, val)
                        plots.make_plots(X_original, X_estimated, w_original,
                                         w_estimated)

                        np.savetxt('estimated_alphas.txt', alphas)
                        mlflow.log_artifact('estimated_alphas.txt')
                        np.savetxt('avg_diffs.txt', avg_diffs)
                        mlflow.log_artifact('avg_diffs.txt')

                    except Exception as e:  # pylint: disable=broad-except
                        mlflow.set_tag('crashed', True)
                        mlflow.set_tag('exception', e)
                        with open("exception.txt", 'w') as f:
                            traceback.print_exc(file=f)
                        mlflow.log_artifact("exception.txt")

                    mlflow.end_run()
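Runs are opened and closed explicitly in this example, with the broad `except` keeping the loop alive so `mlflow.end_run()` is still reached after a crash. A minimal sketch of the equivalent context-manager form; the parameter and metric names are illustrative, not the author's:

import mlflow

with mlflow.start_run():
    mlflow.log_param("N", 30)
    mlflow.log_metric("example_metric", 0.0)
# The run is ended automatically when the block exits, even if an exception is raised.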