Example #1
def re_processing(url_load, url_re):
    # url_load gives the url for loading the optimal weights of the NN.
    # url_re gives the url for saving the computed relative errors.
    global inputs, targets
    data = np.load(url_load)
    opt_weights = data['optweights'].item()
    x_scaler = data['x_scaler'].item()
    y_scaler = data['y_scaler'].item()

    RelativeError = []
    for datafile in range(8000):
        x_traj_test, y_traj_test = sample_multitraj(datafile, datafile+1)

        inputs = x_scaler.transform(x_traj_test)
        targets = y_scaler.transform(y_traj_test)
        outputs = nn_encode_foward_decode(opt_weights, inputs)

        outputs = y_scaler.inverse_transform(outputs)
        targets = y_scaler.inverse_transform(targets)

        re = np.mean([np.linalg.norm(targets[i] - outputs[i]) / np.linalg.norm(targets[i]) for i in range(len(targets))])

        print('sample {:d} relative training norm error {:+1.4e}'.format(datafile, re))
        RelativeError.append(re)
        np.savez(url_re, re = RelativeError, url_load = url_load)
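For reference, a minimal sketch of reading that file back. The key names re and url_load come from the np.savez call above; 'relative_errors.npz' is only a placeholder for whatever url_re pointed at (np.savez appends '.npz' when the extension is missing).

import numpy as np

data = np.load('relative_errors.npz')       # placeholder for the url_re target
errors = data['re']                         # per-trajectory relative errors
weights_file = data['url_load']             # weight file that produced them
print('mean relative error {:+1.4e} over {:d} trajectories'.format(errors.mean(), errors.size))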
Example #2
def sample_with_progress(repeats, n_samps, n_steps, epsilon, path=None):
    if path is not None:
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
    last = np.random.randn(D)
    timestamps = []
    all_samples = []
    nfevals = []
    for i in range(repeats):
        timestamps.append(time())
        samples = hmc(lnpdf,
                      x0=last,
                      n_samples=int(n_samps),
                      n_steps=n_steps,
                      epsilon=epsilon)
        last = samples[-1]
        nfevals.append(lnpdf.counter)
        all_samples.append(samples)
        if path is not None:
            np.savez(path + '_iter' + str(i),
                     samples=last,
                     timestamps=timestamps,
                     nfevals=nfevals)

    timestamps.append(time())
    if path is not None:
        np.savez(path,
                 samples=all_samples,
                 timestamps=timestamps,
                 nfevals=nfevals)
Example #3
    def save(self, desc=""):

        fn = self.default_file_name()
        if len(desc) != 0:
            fn += "_%s" % desc
        np.savez(fn, gen=self.model.ps)
        return fn
Example #4
    def generate_dataset(self, cachefile='data/decoy-mnist.npz'):
        if cachefile and os.path.exists(cachefile):
            cache = np.load(cachefile)
            data = tuple([cache[f] for f in sorted(cache.files)])
        else:
            data = self._generate_dataset(os.path.dirname(cachefile))
            if cachefile:
                np.savez(cachefile, *data)
        self.Xr, self.X, self.y, self.E, self.Xtr, self.Xt, self.yt, self.Et = data
        self.status.initialized = True
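A note on the positional save above: np.savez stores unnamed arguments under the automatic names arr_0, arr_1, ..., so sorted(cache.files) recovers the eight arrays in their original order only because lexicographic and numeric order coincide below ten entries. A tiny standalone sketch of that naming:

import numpy as np

a, b = np.arange(3), np.ones((2, 2))
np.savez('demo.npz', a, b)                  # positional arrays are named automatically
cache = np.load('demo.npz')
print(cache.files)                          # ['arr_0', 'arr_1']
restored = tuple(cache[f] for f in sorted(cache.files))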
Example #5
def run():
    # train and save the neural network
    global inputs, targets, training_error

    training_error = []

    # max number of iterations in optimization
    num_iters = 100

    N = 100 # Number of uniformly sampled trajectories in training data set.

    # sample training data
    # x_traj, y_traj, index = randomsample(N)
    x_traj, y_traj, index = shufflesample(N*10, sampling_rate = 0.1)

    # normalize the training data
    x_scaler = MinMaxScaler((-1,1))
    x_scaler.fit(x_traj)
    y_scaler = MinMaxScaler((-1,1))
    y_scaler.fit(y_traj)

    x_traj_scale = x_scaler.transform(x_traj)
    y_traj_scale = y_scaler.transform(y_traj)

    inputs = x_traj_scale
    targets = y_traj_scale

    # Decide NN architecture
    D = x_traj.shape[1]
    G = 20

    init_weights = initialize_weights(G, D)

    print('----------  Optimizing KOOPMAN NEURAL NET for {} iterations ..... \n'.format(num_iters))
    # use adam to optimize
    opt_weights = adam(grad(objective), init_weights, step_size=0.01, num_iters = num_iters, callback=callback)

    # use sgd to optimize
    # opt_weights = sgd(grad(objective), init_weights, step_size=0.1, num_iters = num_iters, callback=callback)

    print('done')

    # save the optimal weights and related parameters
    np.savez('data/sample_1/optweights_tanh_minmax_random1000shuffle_G20_layer2_sgd_2.npz', optweights = opt_weights, x_scaler = x_scaler, y_scaler = y_scaler, index = index, training_error = training_error)

    # Pick a trajectory and check the prediction of the nn on this trajectory

    x_traj_test, y_traj_test = sample_multitraj(6350, 6351)
    inputs = x_scaler.transform(x_traj_test)
    targets = y_scaler.transform(y_traj_test)
    outputs = nn_encode_foward_decode(opt_weights, inputs)
    re = np.mean([np.linalg.norm(targets[i] - outputs[i]) / np.linalg.norm(targets[i]) for i in range(len(targets))])
    print('Relative training norm error {:+1.4e}'.format(re))

    figplot(outputs, url = None )
Example #6
    def save_model(self):
        """Saves a checkpoint of the current model."""
        np.savez(
            self.checkpoint_path,
            epoch=self.epoch,
            θ=self.θ,
            α=self.α,  # policy parameters
            η=self.η,
            Σ=self.Σ,  # dual parameters
            fourier_features=(self.fourier_freq,
                              self.fourier_offset))  # feature parameters
Example #7
def init_condition(url_init, num = 8000):
    # Extract and save the initial conditions from num trajectories.
    x = np.zeros((1,4))
    print('Initial conditions collection started.')
    for j in range(num):
        x_traj_test, y_traj_test = sample_multitraj(j, j+1)
        x0 = x_traj_test[0,:].reshape((1,4))
        x = np.concatenate((x, x0), axis=0)

    print('Initial conditions collection finished.')
    x = x[1:,:]
    np.savez(url_init, x0 = x)
Example #8
def get_eigval_dict(point_df, trajs, npz_path, network):
    if not os.path.exists(npz_path):
        eigval_dict = {}

        for ii, point_row in point_df.iterrows():
            traj_idx = int(point_row["traj_idx"])
            step_idx = int(point_row["step_idx"])
            theta = trajs[traj_idx]["theta"][step_idx]
            hessian = np.squeeze(network.hess(theta))
            eigvals = np.linalg.eigvalsh(hessian)
            eigval_dict[str(ii) + "_" + str(step_idx)] = eigvals

        np.savez(npz_path, **eigval_dict)
    else:
        eigval_dict = dict(np.load(npz_path))

    return eigval_dict
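Two details the caching above relies on: np.savez(npz_path, **eigval_dict) needs string keys (hence the str(ii) + "_" + str(step_idx) names), and dict(np.load(npz_path)) eagerly copies every entry out of the lazy NpzFile into an ordinary dict of arrays. A minimal sketch using nothing beyond numpy:

import numpy as np

eigval_dict = {'0_10': np.array([0.1, 2.3]), '1_5': np.array([-0.4, 1.7])}
np.savez('eigvals.npz', **eigval_dict)      # dict keys become the array names in the archive
loaded = dict(np.load('eigvals.npz'))       # materialize the NpzFile into a plain dict
assert set(loaded) == set(eigval_dict)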
Example #9
def save_folded(file, folded_val, pattern, **argk):
    """
    Save a folded value to a file with its pattern.

    Flatten a folded value and save it with its pattern to a file using
    ``numpy.savez``.  Additional keyword arguments will also be saved to the
    file.

    Parameters
    ---------------
    file: String or file
        Follows the conventions of ``numpy.savez``.  Note that the ``npz``
        extension will be added if it is not present.
    folded_val:
        The folded value of a parameter.
    pattern:
        A ``paragami`` pattern for the folded value.
    """
    flat_val = pattern.flatten(folded_val, free=False)
    pattern_json = pattern.to_json()
    np.savez(file, flat_val=flat_val, pattern_json=pattern_json, **argk)
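A hedged sketch of reading such a file back; 'my_param.npz' is a placeholder, and the commented fold call assumes the same paragami pattern (or one rebuilt from the stored JSON) is available as pattern:

import numpy as np

data = np.load('my_param.npz')
flat_val = data['flat_val']
pattern_json = str(data['pattern_json'])
# folded_val = pattern.fold(flat_val, free=False)   # inverse of pattern.flatten above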
Example #10
    # result = minimize(loss, theta0,
    #               jac=grad(loss),
    #               method='L-BFGS-B',
    #               bounds=bounds,
    #               options={'disp': True,
    #                        'maxiter': 5})

    # theta1 = result.x
    # np.save(minimized_theta_fname, theta1)
    # print('theta after initial minimization', theta1)
    # print('gradient norm after initial minimization = {}'.format(np.linalg.norm(grad_log_prob(theta1))))

    # 5. Run MALA

    stepsize = 1e-8
    n_steps = 2000
    traj, log_probs, grads, acceptance_probabilities, stepsizes = MALA(
        theta0,
        log_prob,
        grad(log_prob),
        n_steps=n_steps,
        stepsize=stepsize,
        adapt_stepsize=True)

    np.savez(os.path.join(data_path,
                          'elemental_types_mala_freesolv_{}.npz'.format(name)),
             traj=traj,
             grads=grads,
             acceptance_probabilities=acceptance_probabilities,
             stepsizes=stepsizes)
Example #11
                if b.units[pID][u]['inCortex']:
                    ccfRegions.append(b.probeCCF[pID]['ISIRegion'])
                else:
                    ccfRegions.append(b.units[pID][u]['ccfRegion'])

                uid.append(pID + '_' + u)
            else:
                print('fit failed for ' + pID + ' ' + u)
#        except:
#            continue

saveDir = r"C:\Users\svc_ccg\Desktop\Data\analysis"
np.savez(os.path.join(saveDir, expName + '_glmfits.npz'),
         modelParams=modelParams,
         ccfRegions=ccfRegions,
         fittest=test_corr,
         uid=uid)
model.save(saveDir, expName + '_glm_model.pkl')  # save the last model just to have the structure for recreating
elapsedTime = time.time() - startTime
print('\nelapsed time: ' + str(elapsedTime))

spikes = b.units['A']['292']['times']
binned_spikes = binVariable(spikes[spikes < b.lastBehaviorTime],
                            binwidth)[changeBins]
model = licking_model.Model(dt=binwidth, licks=binned_spikes)

for fname, ffilter in filterList:
    model.add_filter(fname, ffilter)
Example #12
y_L = y_end[np.where(u_sum<=-25)]
d_init = (np.mean([y[:5] for y in ys],axis=(0,1)) / bin_size).reshape((1,N))
C_init = np.hstack((np.mean(y_U,axis=(0,1))[:,None],np.mean(y_L,axis=(0,1))[:,None])) / bin_size - d_init.T
test_acc.emissions.ds[0] = d_init
test_acc.emissions.Cs[0] = C_init
init_params = copy.deepcopy(test_acc.params)
test_acc_vlem = copy.deepcopy(test_acc)

# fit model with particle em
logjoints, all_particles, all_weights = test_acc._fit_particle_em(ys, inputs=us,
                                                                   N_particles=10, num_iters=2)
final_params = test_acc.params

ssmdm_dir = os.path.expanduser('/tigress/dz5/ssmdm')
save_name = "dual_accumulator_particle_em.npz"
np.savez(os.path.join(ssmdm_dir, save_name), logjoints=logjoints,
         all_particles=all_particles, all_weights=all_weights,
         final_params=final_params, init_params=init_params)
# q_params=q_params, all_ys=all_ys, all_xs=all_xs, all_us=all_us, all_zs=all_zs)

d = np.load("dual_accumulator_particle_em_100.npz", allow_pickle=True)
ys = d["ys"]
us = d["us"]
xs = d["xs"]
zs = d["zs"]
ys = [np.array(y) for y in ys]
us = [np.array(u) for u in us]
init_params = d["init_params"]
final_params = d["final_params"]
all_particles = d["all_particles"]
all_weights = d["all_weights"]
logjoints = d["logjoints"]
Example #13
    def run(self, theta, niter=10, tol=.0001, verbose=False, path=""):
        """ runs NPV for ... iterations 
            mimics npv_run.m from Sam Gershman's original matlab code

            USAGE: [F mu s2] = npv_run(nlogpdf,theta,[nIter])

            INPUTS:
             theta - [N x D+1] initial parameter settings, where
                        N is the number of components,
                        D is the number of latent variables in the model,
                      and the last column contains the log bandwidths (variances)
              nIter (optional) - maximum number of iterations (default: 10)
              tol (optional) - change in the evidence lower bound (ELBO) for
              convergence (default: 0.0001)

            OUTPUTS:
              F - [nIter x 1] approximate ELBO value at each iteration
              mu - [N x D] component means
              s2 - [N x 1] component bandwidths
        """
        N, Dpp = theta.shape
        D = Dpp - 1

        # set LBFGS optim arguments
        disp = 10 if verbose else None
        opts = {
            'disp': disp,
            'maxiter': 5000,
            'gtol': 1e-7,
            'ftol': 1e-7
        }  #, 'factr':1e2}
        elbo_vals = np.zeros(niter)

        timestamps = []
        timestamps.append(time())
        for ii in xrange(niter):
            elbo_vals[ii] = self.mc_elbo(theta)
            print "iteration %d (elbo = %2.4f)" % (ii, elbo_vals[ii])

            # first-order approximation (L1): optimize mu, one component at a time
            print " ... optimizing mus "
            for n in xrange(N):
                print " ... %d / %d " % (n, N)
                fun, gfun = self.make_elbo1_funs(theta, n)
                res = minimize(fun,
                               x0=theta[n, :D],
                               jac=gfun,
                               method='L-BFGS-B',
                               options=opts)
                theta[n, :D] = res.x

            #print theta[:,:D]
            #print " ... elbo: ", self.mc_elbo(theta)

            # second-order approximation (L2): optimize s2
            print " ... optimizing sigmas"
            mu = theta[:, :D]
            h = np.zeros(N)
            for n in xrange(N):
                # compute Hessian trace using finite differencing or autograd
                h[n] = np.sum(np.diag(hessian(self.lnpdf)(mu[n])))

            fun, gfun = self.make_elbo2_funs(theta, h)
            res = minimize(fun,
                           x0=theta[:, -1],
                           jac=gfun,
                           method='L-BFGS-B',
                           options=opts)
            theta = np.column_stack([mu, res.x])

            #  mmd_samples = mogsamples(2000, theta)
            if (ii % 5 == 0):
                timestamps.append(time())
                np.savez(path + '/iter' + str(ii) + "of" + str(niter) + ".npz",
                         timestamps=timestamps,
                         mu=mu,
                         sigma=np.exp(theta[:, -1]) + self.s2min,
                         n_feval=self.lnpdf.counter)

            # calculate the approximate ELBO (L2)
            #if (ii > 1) and (np.abs(elbo_vals[ii] - elbo_vals[ii-1] < tol))
            # TODO check for convergence
            #if (ii > 1) and (np.abs(F[ii]-F[ii-1]) < tol)
            #    break # end % check for convergence

        # unpack params and return
        mu = theta[:, :D]
        s2 = np.exp(theta[:, -1]) + self.s2min
        return mu, s2, elbo_vals, theta
Example #14
for img in np.unique(image_id):
    eventsToInclude.append(
        (img, [flash_times[image_id == img], 8, 0.8, 0.1, -0.2]))

np.save(os.path.join(saveDir, expName + '_events.npy'), eventsToInclude)

###RUN for entire experiment###
spikeRateThresh = 0.5
unit_data = []
for pID in b.probes_to_analyze:
    for u in probeSync.getOrderedUnits(b.units[pID]):
        spikes = b.units[pID][u]['times']
        binned_spikes = binVariable(spikes[spikes < b.lastBehaviorTime],
                                    binwidth)[changeBins]
        if np.sum(spikes < b.lastBehaviorTime) > spikeRateThresh * b.lastBehaviorTime:
            uid = pID + '_' + u
            if b.units[pID][u]['inCortex']:
                ccfRegion = b.probeCCF[pID]['ISIRegion']
            else:
                ccfRegion = b.units[pID][u]['ccfRegion']

            unit_data.append((uid, ccfRegion, binned_spikes))

np.save(os.path.join(saveDir, expName + '_unitdata.npy'), unit_data)

np.savez(os.path.join(saveDir, expName + '_params.npz'),
         lastBehaviorTime=b.lastBehaviorTime,
         binwidth=binwidth,
         spikeRateThresh=spikeRateThresh)
Example #15
                          'maxiter': 5
                      })

    theta1 = result.x
    np.save(minimized_theta_fname, theta1)
    print('theta after initial minimization', theta1)
    print('gradient norm after initial minimization = {}'.format(
        np.linalg.norm(grad_log_prob(theta1))))

    # 5. Run MALA

    stepsize = 1e-7
    collision_rate = 1e-1
    n_steps = 1000
    traj, log_probs = langevin(x0=theta1,
                               v0=np.random.randn(len(theta1)),
                               log_prob_fun=log_prob,
                               grad_log_prob_fun=grad_log_prob,
                               n_steps=n_steps,
                               stepsize=stepsize,
                               collision_rate=collision_rate)

    np.savez(os.path.join(
        data_path, 'hydrogen_or_not_langevin_freesolv_{}.npz'.format(name)),
             traj=traj,
             stepsize=stepsize,
             log_probs=log_probs,
             collision_rate=collision_rate,
             n_steps=n_steps,
             gaussian_ll=gaussian_ll)
Example #16
                          'maxiter': 5
                      })

    theta1 = result.x
    np.save(minimized_theta_fname, theta1)
    print('theta after initial minimization', theta1)
    print('gradient norm after initial minimization = {}'.format(
        np.linalg.norm(grad_log_prob(theta1))))

    # 5. Run MALA

    #stepsize = 1e-3
    #collision_rate = np.inf
    n_steps = 10000

    traj, log_probs, grads, acceptance_probabilities, stepsizes = MALA(
        x0=theta1,
        stepsize=1e-4,
        log_prob_fun=log_prob,
        grad_log_prob_fun=grad_log_prob,
        n_steps=n_steps,
        adapt_stepsize=True)

    np.savez(os.path.join(
        data_path, 'all_types_independent_mala_freesolv_{}.npz'.format(name)),
             traj=traj,
             grads=grads,
             log_probs=log_probs,
             acceptance_probabilities=acceptance_probabilities,
             stepsizes=stepsizes)
Example #17
def run():
    global inputs, targets, hyper
    num_iters = 2500
    x, y, x_traj_val, y_traj_val, u_val, T_val, Ts_val = build_dataset(
        'cartpoledata/autosysdata_train2.h5', ver=1)

    # u = np.asmatrix(u).T
    # trajectory = np.concatenate((x,u), axis = 1)

    DoTraining = 1
    DoValidation = 1

    if DoTraining == 1:

        inputs = x
        targets = y

        Dx = x.shape[1]
        G = 20

        init_weights = initialize_weights(G, Dx)

        print(
            '----------  Optimizing KOOPMAN NEURAL NET for {} iterations ..... \n'
            .format(num_iters))
        opt_weights = adam(grad(objective),
                           init_weights,
                           step_size=0.01,
                           num_iters=num_iters,
                           callback=callback)

        print('done')

        np.savez('cartpoledata/train_results_data2_iter2500.npz',
                 optweights=opt_weights,
                 initweights=init_weights)

        decoded = nn_encode_decode(opt_weights, inputs)
        outputs = nn_encode_foward_decode(opt_weights, inputs)

        plt.figure()
        _ = plt.scatter(targets, outputs, marker='D', c='g', alpha=0.1)
        plt.xlabel('targets')
        plt.ylabel('outputs')
        plt.title('Dynamic Scatter')
        plt.grid()

        plt.figure()
        _ = plt.scatter(inputs, decoded, marker='D', c='b', alpha=0.1)
        plt.xlabel('inputs')
        plt.ylabel('decoded')
        plt.title('Encoding-decoding Scatter')
        plt.grid()

        plt.figure()
        _ = plt.plot(outputs[:, 2])
        _ = plt.plot(targets[:, 2])

        plt.show()

        re = np.mean([
            np.linalg.norm(targets[i] - outputs[i]) /
            np.linalg.norm(targets[i]) for i in range(len(targets))
        ])

        print('Relative norm error {:+1.4e}'.format(re))

    if DoValidation == 1:
        if DoTraining != 1:
            optdata = np.load('cartpoledata/train_results_data2.npz')
            opt_weights = optdata['optweights'].item()

        inputs = x_traj_val
        targets = y_traj_val

        decoded = nn_encode_decode(opt_weights, inputs)
        outputs = nn_encode_foward_decode(opt_weights, inputs)

        re_val = np.mean([
            np.linalg.norm(targets[i] - outputs[i]) /
            np.linalg.norm(targets[i]) for i in range(len(targets))
        ])
        print('Relative validation norm error {:+1.4e}'.format(re_val))

        plt.figure()
        _ = plt.scatter(targets, outputs, marker='D', c='g', alpha=0.1)
        plt.xlabel('targets')
        plt.ylabel('outputs')
        plt.title('Dynamic Scatter')
        plt.grid()

        plt.figure()
        _ = plt.scatter(inputs, decoded, marker='D', c='b', alpha=0.1)
        plt.xlabel('inputs')
        plt.ylabel('decoded')
        plt.title('Encoding-decoding Scatter')
        plt.grid()

        plt.figure()
        _ = plt.plot(outputs[:, 0])
        _ = plt.plot(targets[:, 0])
        plt.title('Trajectory prediction of x1')

        plt.figure()
        _ = plt.plot(outputs[:, 1])
        _ = plt.plot(targets[:, 1])
        plt.title('Trajectory prediction of x2')
        plt.figure()
        _ = plt.plot(outputs[:, 2])
        _ = plt.plot(targets[:, 2])
        plt.title('Trajectory prediction of x3')
        plt.figure()
        _ = plt.plot(outputs[:, 3])
        _ = plt.plot(targets[:, 3])
        plt.title('Trajectory prediction of x4')

        plt.show()

    print('--- Finish ---')
Example #18
    def save_results(self, filename):
        results_dict = self.runs[-1]
        np.savez(filename, **results_dict)
Example #19
        # optimize weights
        print("Initializing weights...")
        init_weights = adam(init_gradient,
                            pre_init_weights,
                            step_size=0.1,
                            num_iters=INIT_ITERS,
                            callback=init_callback)

        # pickle processed data in /cache (if it doesn't already exist)
        if not os.path.exists('cache'):
            print('creating cache folder')
            os.makedirs('cache')
        if not os.path.isfile(picklefilename):
            print('saving pickled regression initialization data')
            np.savez(picklefilename, init_weights=init_weights)
    ###############################################################

    ###############################################################
    # OPTIMIZE NOISE-AWARE LIKELIHOOD #
    ###############################################################
    # Build noise-aware logistic regression objective.
    accuracies, objective, gradient, unpack_params = build_noise_objective(
        L2_VAR_2, NUM_TRAIN, train_images, train_labels, C, D, N, L)

    # Build callback for ADAM optimizer
    liklog = np.zeros(NUM_ITERATIONS)
    param_log = np.zeros((NUM_ITERATIONS, N + L))

    def callback(params, t, g):
        lik = -objective(params, t)
Example #20
    NODAE_train_mean = np.array(stats_NODAE['train_loss'])[:, 0]
    NODAE_train_std = np.array(stats_NODAE['train_loss'])[:, 1]
    hnn_test_mean = np.array(stats_hnn['test_loss'])[:, 0]
    hnn_test_std = np.array(stats_hnn['test_loss'])[:, 1]
    NODAE_test_mean = np.array(stats_NODAE['test_loss'])[:, 0]
    NODAE_test_std = np.array(stats_NODAE['test_loss'])[:, 1]
    stats_hnn = [hnn_train_mean, hnn_train_std, hnn_test_mean, hnn_test_std]
    stats_NODAE = [NODAE_train_mean, NODAE_train_std, NODAE_test_mean, NODAE_test_std]
    return stats_hnn, stats_NODAE


if __name__ == "__main__":
    args = get_args()
    plt.rcParams.update({'figure.autolayout': True})
    plt.rc('font', size=14)
    if args.retrain:
        stats_hnn, stats_NODAE = train(args)
        np.savez('results_' + str(args.hidden_dim) + '_' + str(args.total_steps) + '.npz',
                 stats_hnn=stats_hnn, stats_NODAE=stats_NODAE)
    else:
        try:
            results = np.load('results_' + str(args.hidden_dim) + '_' + str(args.total_steps) + '.npz',
                              allow_pickle=True)
            stats_hnn = results['stats_hnn']
            stats_NODAE = results['stats_NODAE']
        except:
            stats_hnn, stats_NODAE = train(args)
            np.savez('results_' + str(args.hidden_dim) + '_' + str(args.total_steps) + '.npz',
                     stats_hnn=stats_hnn, stats_NODAE=stats_NODAE)
    plot_results(args, stats_hnn, stats_NODAE)
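On the allow_pickle=True in the load branch above: if the stats end up as lists of unequal-length arrays, np.savez falls back to storing them as pickled object arrays, which np.load refuses by default. A minimal illustration, independent of the training code:

import numpy as np

ragged = np.asarray([np.zeros(3), np.zeros(5)], dtype=object)   # unequal lengths -> object array
np.savez('stats.npz', stats=ragged)
stats = np.load('stats.npz', allow_pickle=True)['stats']        # raises without allow_pickle=True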
Example #21
plt.ylabel("inferred state switch time")
plt.legend()
plt.axis("equal")
plt.tight_layout()

# print(np.sum(np.array(true_z_dir) == np.array(z_dir)))
# print(np.sum(np.array(true_z_dir) == np.array(z_dir_bbvi)))

# save simulated experiment
# comp_2D_vlem_bbvi
params_vlem = test_acc.params
params_lds = test_acc_lds.params
q_params_vlem = q_lem.params
q_params_lds = q_lds.params
np.savez("simexp_comp_vlem_bbvi_2Dacc.npz",
         params_true=latent_acc.params,
         params_vlem=params_vlem,
         params_lds=params_lds,
         q_params_vlem=q_params_vlem,
         q_params_lds=q_params_lds,
         elbos_vlem=q_elbos_lem_total,
         elbos_lds=q_elbos_lds,
         ys=ys,
         xs=xs,
         us=us,
         zs=zs,
         bin_size=bin_size,
         transition_Rs=latent_acc.transitions.Rs,
         init_params=init_params,
         init_var=init_var)
Example #22
    def grad_loss(x):
        grad_f = grad(loss)(x)
        print('gradient evaluated at {}: {}'.format(x, np.linalg.norm(grad_f)))
        traj.append(x)
        grad_traj.append(grad_f)
        return grad_f

    result = minimize(
        loss,
        theta0,
        jac=grad_loss,
        method='L-BFGS-B',
        #method='Newton-CG',
        #hessp=hessian_vector_product(loss),
        #callback=callback,
        bounds=bounds,
        options={
            'disp': True,
            'maxiter': 100
        })

    theta1 = result.x
    np.save(minimized_theta_fname, theta1)
    traj_fname = os.path.join(
        data_path, 'elemental_types_l-bfgs_freesolv_{}_traj.npy'.format(name))
    np.savez(traj_fname, traj=np.array(traj), grad_traj=np.array(grad_traj))
    print('theta after initial minimization', theta1)
    print('gradient norm after minimization = {}'.format(
        np.linalg.norm(grad_log_prob(theta1))))
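One quirk of the traj_fname above: np.savez appends '.npz' whenever the target does not already end in it, so a path ending in '_traj.npy' is actually written as '_traj.npy.npz' on disk, and np.load must use that full name. A short illustration:

import numpy as np

np.savez('run_traj.npy', traj=np.zeros(3))   # actually written to 'run_traj.npy.npz'
data = np.load('run_traj.npy.npz')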
Example #23
def make_model(model_name):
    if model_name == "baseball":
        # baseball model and data
        lnpdf_named = baseball.lnpdf
        lnpdf_flat = baseball.lnpdf_flat
        lnpdft = lambda z, t: np.squeeze(lnpdf_flat(z, t))
        lnpdf = lambda z: np.squeeze(lnpdf_flat(z, 0))
        D = baseball.D
        return lnpdf, D, None
    elif model_name == "frisk":
        lnpdf_tmp, unpack, D, sdf, pnames = frisk.make_model_funs(
            precinct_type=1)

        def lnpdf(th):
            lnpdf.counter += len(np.atleast_2d(th))
            return lnpdf_tmp(th)

        lnpdf.counter = 0
        return lnpdf, D, pnames
    elif model_name == "planarRobot_2":
        from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_autograd
        lnpdf = build_target_likelihood_planar_autograd(2)[0]
        return lnpdf, 2, None
    elif model_name == "planarRobot_3":
        from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_autograd
        lnpdf = build_target_likelihood_planar_autograd(3)[0]
        return lnpdf, 3, None
    elif model_name == "planarRobot_10":
        from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_autograd
        lnpdf = build_target_likelihood_planar_autograd(10)[0]
        return lnpdf, 10, None
    elif model_name == "planarRobot4p_10":
        from experiments.lnpdfs.create_target_lnpfs import build_target_likelihood_planar_4p_autograd
        lnpdf = build_target_likelihood_planar_4p_autograd(10)[0]
        return lnpdf, 10, None
    elif model_name == "GMM_20":
        from experiments.lnpdfs.create_target_lnpfs import build_GMM_lnpdf_autograd
        [lnpdf, true_means, true_covs] = build_GMM_lnpdf_autograd(20, 10)
        np.savez(args.output + 'target_gmm.npz',
                 true_means=true_means,
                 true_covs=true_covs)
        return lnpdf, 20, None
    elif model_name == "GMM_2":
        from experiments.lnpdfs.create_target_lnpfs import build_GMM_lnpdf_autograd
        [lnpdf, true_means, true_covs] = build_GMM_lnpdf_autograd(2, 10)
        np.savez(args.output + 'target_gmm.npz',
                 true_means=true_means,
                 true_covs=true_covs)
        return lnpdf, 2, None
    elif model_name == "german_credit":
        from experiments.lnpdfs.create_target_lnpfs import build_german_credit_lnpdf
        lnpdf = build_german_credit_lnpdf(with_autograd=True)
        return lnpdf, 25, None
    elif model_name == "breast_cancer":
        from experiments.lnpdfs.create_target_lnpfs import build_breast_cancer_lnpdf
        lnpdf = build_breast_cancer_lnpdf(with_autograd=True)
        return lnpdf, 31, None
    elif model_name == "iono":
        from experiments.lnpdfs.create_target_lnpfs import build_GPR_iono_with_grad_lnpdf
        lnpdf_grad = build_GPR_iono_with_grad_lnpdf(remove_autograd=True)

        def lnpdf(theta):
            theta = np.atleast_2d(theta)
            lnpdf.counter += len(theta)
            output = []
            for t in theta:
                output.append(lnpdf_grad(t)[0])
            return np.array(output)

        lnpdf.counter = 0
        return lnpdf, 34, None
Example #24
            break_condition='percent')
        lnpdf.counter = true_counter + lnpdf.counter / 2  # we don't count the evaluations by the progress callback

        # after all components are added, tune the weights of each comp
        comp_list = mog_bbvi.fit_mixture_weights(vbobj.comp_list,
                                                 vbobj.lnpdf,
                                                 num_iters=1000,
                                                 step_size=.25,
                                                 num_samps_per_component=10 * D,
                                                 ax=None)
        vbobj.comp_list = comp_list

        timestamps.append(time())
        samples = vbobj.sample(2000)
        np.savez(os.path.join(args.output, "vboost_comp_%d.npz" % k), samples,
                 timestamps, lnpdf.counter)
        # save output here
        vb_outfile = os.path.join(args.output, "vboost_comp_%d.pkl" % k)
        lam_list = [(p, c.lam) for p, c in vbobj.comp_list]
        with open(vb_outfile, 'wb') as f:
            pickle.dump(lam_list, f)

    ## save output here
    #vb_outfile = os.path.join(args.output, "vboost.pkl")
    #lam_list = [(p, c.lam) for p, c in vbobj.comp_list]
    #with open(vb_outfile, 'wb') as f:
    #    pickle.dump(lam_list, f)

#############################################
# Nonparametric variational inference code  #
#  --- save posterior parameters            #
Example #25
    # OptRes = minimize(fun=objective, x0=controls_init, args=(args,), method="trust-constr", jac=objective_grad,
    #                   hessp=BFGS(), constraints=nonlinear_constraint, bounds=bounds,
    #                   options={'maxiter':100, 'gtol':1e-2, 'xtol':1e-2, 'initial_barrier_parameter':1e-1,
    #                            'disp': True, 'verbose':3})

    # OptRes = minimize(fun=objective, x0=controls_init, method="trust-constr",
    #                   jac=objective_grad, hessp=objective_hess, bounds=bounds,
    #                   options={'maxiter':20, 'gtol':1e-2, 'disp':True, 'verbose':3})

    print("optimization result = ", OptRes)

    print("state name = ", state_name)

    # compute the optimal solution
    controls_opt = unflatten(OptRes["x"])
    solution_opt = RK2(model.seir, y0, t_total, parameters, controls_opt)

    # compute the effective reproduction number
    Rt_opt = model.reproduction(t_total, parameters, controls_opt, solution_opt)

    # save data
    today = simulation_first_confirmed + len(data_confirmed)
    np.savez(savefilename, configurations=configurations, parameters=parameters,
             simulation_first_confirmed=simulation_first_confirmed, today=today,
             solution=solution, controls=controls, Rt=Rt,
             solution_opt=solution_opt, controls_opt=controls_opt, Rt_opt=Rt_opt)

    # print("# total infected = ", N_total - np.sum(solution_opt[ten_death_day+len(deaths), :number_group]),
    #       "# total death = ", np.sum(solution_opt[ten_death_day+len(deaths), 7 * number_group:8 * number_group]),
    #       "# hospitalized = ", np.sum(solution_opt[ten_death_day+len(deaths), 5 * number_group:6 * number_group]))
Example #26
                      })

    print("OptRes = ", OptRes)

    # compute the optimal solution
    controls_opt = misfit.unflatten(OptRes["x"])
    solution_opt = RK2(misfit.seir, misfit.y0, misfit.t_total,
                       misfit.parameters, controls_opt)
    parameters_opt = misfit.parameters
    # save data
    today = misfit.simulation_first_confirmed + len(misfit.data_confirmed)
    np.savez(savefilename,
             configurations=configurations,
             parameters=misfit.parameters,
             simulation_first_confirmed=misfit.simulation_first_confirmed,
             today=today,
             solution=solution,
             controls=misfit.controls,
             solution_opt=solution_opt,
             controls_opt=controls_opt,
             parameters_opt=parameters_opt)

    # import matplotlib.pyplot as plt
    # plt.figure()
    # alpha = OptRes["x"]
    # # alpha = (np.tanh(alpha)+1)/2
    # plt.plot(alpha, '.-')
    # plt.show()

    # dimension = 101
    # prior = BrownianMotion(dimension)
    # noise = np.random.normal(0, 1, dimension)