Example #1
def prepare(self):
    dataset = data()
    self.graph = dataset.loadGraph(self.params['graph'],
                                   string.atoi(self.params['dim']))
    self.drawer = sampler(self.graph, self.Z)
    self.model = BPR_model(self.Z, self.drawer.user_set,
                           self.drawer.item_set)
Example #2
def prepare(self):
    dataset = data()
    self.userX, uf_num, self.uIDx = dataset.loadFeature(self.params['userX'])
    self.itemX, vf_num, self.vIDx = dataset.loadFeature(self.params['itemX'])
    self.graph = dataset.loadGraph(self.params['graph'],
                                   string.atoi(self.params['dim']))
    self.drawer = sampler(self.graph, self.vIDx, vf_num, self.uIDx,
                          float(self.params['sigma']), self.userX, self.itemX)
    self.model = RankPairFM_Model(string.atoi(self.params['k']), uf_num, vf_num,
                                  self.drawer.user_set, self.drawer.item_set)
    self.len_ux = len(self.userX)
    self.len_vx = len(self.itemX)
Example #3
def run():
    df_pre = prep()
    df_sampled = sampler(df_pre)
    # df_reduced_dims = reduction(df_sampled)
    # quick_grapher.graph(df_reduced_dims)

    # Model fitting with SVM
    #df_B = svc(df_sampled)
    df_C = KNN(df_sampled)
Example #4
def prepare(self):
    dataset = data()
    self.graph = dataset.loadGraph(self.params['graph'],
                                   string.atoi(self.params['dim']))
    self.drawer = sampler(self.graph, self.Z)
    itemX, vf_num, self.vIDx = dataset.loadFeature(self.params['itemX'])
    self.itemX = dataset.constructSparseMat(itemX, vf_num)
    self.model = MAP_BPR_Model(string.atoi(self.params['k']),
                               self.drawer.user_set, self.drawer.item_set,
                               vf_num)
Example #5
def tttest(learner, args, train_envs, test_envs, log_dir):
    batch_sampler = sampler(args.batch_size)
    for i in range(args.num_updates):
        loss_outer = []
        rew_rem = []
        for j in range(1):
            s, a, r = batch_sampler.sample(train_envs[j], learner)
            inner_loss = learner.cal_loss(s, a, r)
            grads = torch.autograd.grad(inner_loss, learner.parameters())
            grads = parameters_to_vector(grads)
            old_params = parameters_to_vector(learner.parameters())
            vector_to_parameters(old_params - args.outer_lr * grads,
                                 learner.parameters())
            mean_rew = torch.mean(r).data.numpy()
            rew_rem.append(mean_rew)

        print(np.mean(rew_rem))
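
A note on the inner update above: it takes one explicit gradient step by flattening gradients and parameters into single vectors. A minimal, self-contained sketch of that pattern on a toy model (the model and the step size `lr` here are placeholders, unrelated to the `learner` and `sampler` classes in the example):

import torch
from torch.nn.utils import parameters_to_vector, vector_to_parameters

model = torch.nn.Linear(4, 1)
x, y = torch.randn(8, 4), torch.randn(8, 1)
loss = torch.nn.functional.mse_loss(model(x), y)

# Flatten the gradients and parameters, take one SGD-style step,
# and write the result back into the model in place.
grads = torch.autograd.grad(loss, model.parameters())
flat_grads = parameters_to_vector(grads)
old_params = parameters_to_vector(model.parameters())
lr = 0.01  # placeholder step size
vector_to_parameters(old_params - lr * flat_grads, model.parameters())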
Example #6
def get_true_sample(get_point, batch_size, d_input_dim=2):
    return sampler(get_point, batch_size)
Example #7
    pandas.DataFrame(groups).to_csv('groups_test.csv', header=False)
    #X = pandas.read_csv('/homedirec/user/X.csv', index_col=0)
    #y = pandas.read_csv('/homedirec/user/y.csv', index_col=0, header=None)
    #feat_names = list(X.columns)#if not list, can't pickle
    #groups = pandas.read_csv('/homedirec/user/groups.csv', index_col=0, header=None)
    #groups = range(len(y))
#    feat_names = []
    X = pandas.DataFrame(X)
    y = pandas.DataFrame(y)
    feat_names = list(X.columns)#if not list, can't pickle
    #X, y = datasets.make_classification(n_samples=100, n_features=10, weights=[0.93,0.07])
    fname = args.op_dir + '/data_rs_' + str(args.random_state)+'.pickle'    
samp = args.samp
if samp:
    print "Sampling--generating iid sample!"
    X, y, groups = sampler(X, y, groups, random_state=args.random_state)
    groups.columns = [1]
#    pdb.set_trace()


sample_weights = [1 for el in groups[1]]
weight = args.weight
if weight==1:
    print "weighting samples"
    nhpid = get_nhosp_per_id(groups)
    sample_weights = [1/float(nhpid[el]) for el in groups[1]]
    print "sample weights head", sample_weights[0:10]

sample_weights = np.array(sample_weights)

X, y, groups = np.array(X), np.ravel(np.array(y)), np.array(groups)
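
The 1/n weighting above needs, for each group ID, the number of rows that carry it. `get_nhosp_per_id` is not shown in this excerpt; a plausible stand-in, inferred only from how it is used here:

from collections import Counter

def get_nhosp_per_id(groups):
    # Hypothetical helper: count how many rows share each group
    # (hospital) ID so each row can be weighted by 1/count.
    return Counter(groups[1])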
Example #8
    ]
    print("{:.4f}".format(fillList[len(fillList) - 1]))

if varyL:
    print("\nA = {:4.2f}. Is that ok?".format(loc))
else:
    print("\nL = {:3d} and A = {:4.2f}. Is that ok?".format(L, loc))

print("Filename:", saveFilename)

variedL, L, loc = sampler.everythingOK(variedL, L, loc)

print("A = ", loc)
print("L = ", L)
time.sleep(1)
sampler = sampler(Q, loc)

print(variedL)

# Check if all files are present
for E, T, H, L, n, sizeInFilename, fillInFilename, fill in zip(
        Ex, temp, Hz, variedL, nList, Lname, fillName,
        fillList):  # Check if all files are present before starting analysis
    for i in range(n):
        LName = "_L_" + str(L) if sizeInFilename else ""
        FName = "_fill_%.4f" % (fill) if fillInFilename else ""
        filename = dataLocation + str(
            i
        ) + LName + FName + filestruct + "{0:.5f}_Ex_{1:.5f}_T_{2:.5f}_run_0.dat".format(
            H, E, T)
        if not isfile(filename):
Example #9
    if not os.path.exists(RESULTS_FOLDER+"noise_%f/results_p%d" %(NOISE_VALUE,p)):
        os.makedirs(RESULTS_FOLDER+"noise_%f/results_p%d" %(NOISE_VALUE,p))
    #os.chdir("./results_p%d" %p)
    save_to_dir = RESULTS_FOLDER+"noise_%f/results_p%d/" %(NOISE_VALUE,p)

    #summary_array = np.zeros((REPEATS_PER_PARAMETER_SET, 10))  
    
    ## do not use this because some repeats may be missing!
    #for r in range(REPEATS_PER_PARAMETER_SET):
    
    file_list = glob.glob(RESULTS_FOLDER+'noise_%f/param_%d/*.dynamics' %(NOISE_VALUE,p))
    
    results_array = np.zeros((len(file_list), 17 * NUMBER_OF_BINS))  
    
    f_id = 0
    for file in file_list:
        
        S = sampler(NUMBER_OF_SAMPLES, file)
        S.sample()

        calc = timme_calculator(S, NUMBER_OF_BINS)
        results_array[f_id,:] = calc.calculate()
        f_id += 1

    np.savetxt(save_to_dir + "param%d_sample%d_bin%d.timme_fit" %(p,NUMBER_OF_SAMPLES,NUMBER_OF_BINS), results_array, delimiter=',')
        
    #os.chdir("../")
    


Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Train Unsupervised Blending GAN')
    parser.add_argument('--nz',
                        type=int,
                        default=100,
                        help='Size of the latent z vector')
    parser.add_argument('--ngf',
                        type=int,
                        default=64,
                        help='# of base filters in G')
    parser.add_argument('--ndf',
                        type=int,
                        default=64,
                        help='# of base filters in D')
    parser.add_argument('--nc',
                        type=int,
                        default=3,
                        help='# of output channels in G')
    parser.add_argument('--load_size',
                        type=int,
                        default=64,
                        help='Scale image to load_size')
    parser.add_argument(
        '--image_size',
        type=int,
        default=64,
        help='The height / width of the input image to network')

    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--lr_d',
                        type=float,
                        default=0.00005,
                        help='Learning rate for Critic, default=0.00005')
    parser.add_argument('--lr_g',
                        type=float,
                        default=0.00005,
                        help='Learning rate for Generator, default=0.00005')
    parser.add_argument('--d_iters',
                        type=int,
                        default=5,
                        help='# of D iters per each G iter')
    parser.add_argument('--n_epoch',
                        type=int,
                        default=25,
                        help='# of epochs to train for')
    parser.add_argument('--clamp_lower',
                        type=float,
                        default=-0.01,
                        help='Lower bound for clipping')
    parser.add_argument('--clamp_upper',
                        type=float,
                        default=0.01,
                        help='Upper bound for clipping')

    parser.add_argument('--data_root', help='Path to dataset')
    parser.add_argument('--experiment',
                        default='Wasserstein_GAN_result',
                        help='Where to store samples and models')
    parser.add_argument('--workers',
                        type=int,
                        default=10,
                        help='# of data loading workers')
    parser.add_argument('--batch_size',
                        type=int,
                        default=128,
                        help='input batch size')
    parser.add_argument('--test_size',
                        type=int,
                        default=64,
                        help='Batch size for testing')

    parser.add_argument('--manual_seed',
                        type=int,
                        default=5,
                        help='Manual seed')

    parser.add_argument('--resume',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1,
                        help='Interval of snapshot (epoch)')
    parser.add_argument('--print_interval',
                        type=int,
                        default=1,
                        help='Interval of printing log to console (iteration)')
    parser.add_argument('--plot_interval',
                        type=int,
                        default=10,
                        help='Interval of plot (iteration)')
    args = parser.parse_args()

    random.seed(args.manual_seed)

    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')

    # Set up G & D
    print('Create & Init models ...')
    print('\tInit G network ...')
    G = DCGAN_G(args.image_size, args.nc, args.ngf, init_conv, init_bn)
    print('\tInit D network ...')
    D = DCGAN_D(args.image_size, args.ndf, 1, init_conv, init_bn)
    if args.gpu >= 0:
        print('\tCopy models to gpu {} ...'.format(args.gpu))
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        G.to_gpu()  # Copy the model to the GPU
        D.to_gpu()
    print('Init models done ...\n')
    # Setup an optimizer
    optimizer_d = make_optimizer(D, args.lr_d)
    optimizer_g = make_optimizer(G, args.lr_g)

    ########################################################################################################################
    # Setup dataset & iterator
    print('Load images from {} ...'.format(args.data_root))
    trainset = H5pyDataset(args.data_root,
                           load_size=args.load_size,
                           crop_size=args.image_size)
    print('\tTrainset contains {} image files'.format(len(trainset)))
    print('')
    train_iter = chainer.iterators.MultiprocessIterator(
        trainset,
        args.batch_size,
        n_processes=args.workers,
        n_prefetch=args.workers)
    ########################################################################################################################

    # Set up a trainer
    updater = WassersteinUpdater(models=(G, D),
                                 args=args,
                                 iterator=train_iter,
                                 optimizer={
                                     'main': optimizer_g,
                                     'D': optimizer_d
                                 },
                                 device=args.gpu)
    trainer = training.Trainer(updater, (args.n_epoch, 'epoch'),
                               out=args.experiment)

    # Snapshot
    snapshot_interval = (args.snapshot_interval, 'epoch')
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(G,
                                              'g_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(D,
                                              'd_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)

    # Display
    print_interval = (args.print_interval, 'iteration')
    trainer.extend(extensions.LogReport(trigger=print_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'main/loss', 'D/loss', 'D/loss_real', 'D/loss_fake']),
                   trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=args.print_interval))

    trainer.extend(extensions.dump_graph('D/loss', out_name='TrainGraph.dot'))

    # Plot
    plot_interval = (args.plot_interval, 'iteration')

    trainer.extend(extensions.PlotReport(['main/loss'],
                                         'iteration',
                                         file_name='loss.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)
    trainer.extend(extensions.PlotReport(['D/loss'],
                                         'iteration',
                                         file_name='d_loss.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)
    trainer.extend(extensions.PlotReport(['D/loss_real'],
                                         'iteration',
                                         file_name='loss_real.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)
    trainer.extend(extensions.PlotReport(['D/loss_fake'],
                                         'iteration',
                                         file_name='loss_fake.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)

    # Eval
    path = os.path.join(args.experiment, 'samples')
    if not os.path.isdir(path):
        os.makedirs(path)
    print('Saving samples to {} ...\n'.format(path))

    noisev = Variable(
        np.asarray(np.random.normal(size=(args.test_size, args.nz, 1, 1)),
                   dtype=np.float32))
    noisev.to_gpu(args.gpu)
    trainer.extend(sampler(G, path, noisev, 'fake_samples_{}.png'),
                   trigger=plot_interval)

    if args.resume:
        # Resume from a snapshot
        print('Resume from {} ... \n'.format(args.resume))
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    print('Training start ...\n')
    trainer.run()
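
`make_optimizer` is called above but not shown in this excerpt. A plausible sketch, assuming plain RMSprop as in the original WGAN training recipe (the project's actual helper may differ, e.g. by adding hooks):

import chainer

def make_optimizer(model, lr):
    # Assumed helper: RMSprop with the given learning rate.
    optimizer = chainer.optimizers.RMSprop(lr=lr)
    optimizer.setup(model)
    return optimizer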
Example #11
def prepare(self):
    self.dataset = data()
    self.test_set = self.dataset.loadGraph(self.params['test_set'],
                                           string.atoi(self.params['dim']), False)
    self.test_pairs = self.dataset.loadGraph(self.params['test_pairs'],
                                             string.atoi(self.params['dim']), False)
    self.drawer = sampler(self.test_pairs, string.atoi(self.params['k']))
Example #12
def main():
    parser = argparse.ArgumentParser(description='Train Blending GAN')
    parser.add_argument('--nef',
                        type=int,
                        default=64,
                        help='# of base filters in encoder')
    parser.add_argument('--ngf',
                        type=int,
                        default=64,
                        help='# of base filters in decoder')
    parser.add_argument('--nc',
                        type=int,
                        default=3,
                        help='# of output channels in decoder')
    parser.add_argument('--nBottleneck',
                        type=int,
                        default=4000,
                        help='# of output channels in encoder')
    parser.add_argument('--ndf',
                        type=int,
                        default=64,
                        help='# of base filters in D')

    parser.add_argument('--lr_d',
                        type=float,
                        default=0.0002,
                        help='Learning rate for Critic, default=0.0002')
    parser.add_argument('--lr_g',
                        type=float,
                        default=0.002,
                        help='Learning rate for Generator, default=0.002')
    parser.add_argument('--beta1',
                        type=float,
                        default=0.5,
                        help='Beta for Adam, default=0.5')
    parser.add_argument('--l2_weight',
                        type=float,
                        default=0.999,
                        help='Weight for l2 loss, default=0.999')

    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--n_epoch',
                        type=int,
                        default=25,
                        help='# of epochs to train for')

    parser.add_argument('--data_root', help='Path to dataset')
    parser.add_argument('--load_size',
                        type=int,
                        default=64,
                        help='Scale image to load_size')
    parser.add_argument(
        '--image_size',
        type=int,
        default=64,
        help='The height / width of the input image to network')
    parser.add_argument('--ratio',
                        type=float,
                        default=0.5,
                        help='Ratio for center square size v.s. image_size')
    parser.add_argument('--val_ratio',
                        type=float,
                        default=0.05,
                        help='Ratio for validation set v.s. data set')

    parser.add_argument('--d_iters',
                        type=int,
                        default=5,
                        help='# of D iters per each G iter')
    parser.add_argument('--clamp_lower',
                        type=float,
                        default=-0.01,
                        help='Lower bound for clipping')
    parser.add_argument('--clamp_upper',
                        type=float,
                        default=0.01,
                        help='Upper bound for clipping')

    parser.add_argument('--experiment',
                        default='encoder_decoder_blending_result',
                        help='Where to store samples and models')
    parser.add_argument('--test_folder',
                        default='samples',
                        help='Where to store test results')
    parser.add_argument('--workers',
                        type=int,
                        default=10,
                        help='# of data loading workers')
    parser.add_argument('--batch_size',
                        type=int,
                        default=64,
                        help='Input batch size')
    parser.add_argument('--test_size',
                        type=int,
                        default=64,
                        help='Batch size for testing')

    parser.add_argument('--train_samples',
                        type=int,
                        default=150000,
                        help='# of training examples')
    parser.add_argument('--test_samples',
                        type=int,
                        default=256,
                        help='# of testing examples')

    parser.add_argument('--manual_seed',
                        type=int,
                        default=5,
                        help='Manual seed')

    parser.add_argument('--resume',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--snapshot_interval',
                        type=int,
                        default=1,
                        help='Interval of snapshot (epochs)')
    parser.add_argument('--print_interval',
                        type=int,
                        default=1,
                        help='Interval of printing log to console (iteration)')
    parser.add_argument('--plot_interval',
                        type=int,
                        default=10,
                        help='Interval of plot (iteration)')
    args = parser.parse_args()

    random.seed(args.manual_seed)

    print('Input arguments:')
    for key, value in vars(args).items():
        print('\t{}: {}'.format(key, value))
    print('')

    # Set up G & D
    print('Create & Init models ...')
    print('\tInit G network ...')
    G = EncoderDecoder(args.nef,
                       args.ngf,
                       args.nc,
                       args.nBottleneck,
                       image_size=args.image_size,
                       conv_init=init_conv,
                       bn_init=init_bn)
    print('\tInit D network ...')
    D = DCGAN_D(args.image_size,
                args.ndf,
                conv_init=init_conv,
                bn_init=init_bn)
    if args.gpu >= 0:
        print('\tCopy models to gpu {} ...'.format(args.gpu))
        chainer.cuda.get_device(args.gpu).use()  # Make a specified GPU current
        G.to_gpu()  # Copy the model to the GPU
        D.to_gpu()
    print('Init models done ...\n')
    # Setup an optimizer
    optimizer_d = make_optimizer(D, args.lr_d, args.beta1)
    optimizer_g = make_optimizer(G, args.lr_g, args.beta1)

    ########################################################################################################################
    # Setup dataset & iterator
    print('Load images from {} ...'.format(args.data_root))
    folders = sorted([
        folder for folder in os.listdir(args.data_root)
        if os.path.isdir(os.path.join(args.data_root, folder))
    ])
    val_end = int(args.val_ratio * len(folders))
    print('\t{} folders in total, {} val folders ...'.format(
        len(folders), val_end))
    trainset = BlendingDataset(args.train_samples, folders[val_end:],
                               args.data_root, args.ratio, args.load_size,
                               args.image_size)
    valset = BlendingDataset(args.test_samples, folders[:val_end],
                             args.data_root, args.ratio, args.load_size,
                             args.image_size)
    print('\tTrainset contains {} image files'.format(len(trainset)))
    print('\tValset contains {} image files'.format(len(valset)))
    print('')
    train_iter = chainer.iterators.MultiprocessIterator(
        trainset,
        args.batch_size,
        n_processes=args.workers,
        n_prefetch=args.workers)
    ########################################################################################################################

    # Set up a trainer
    updater = EncoderDecoderBlendingUpdater(models=(G, D),
                                            args=args,
                                            iterator=train_iter,
                                            optimizer={
                                                'main': optimizer_g,
                                                'D': optimizer_d
                                            },
                                            device=args.gpu)
    trainer = training.Trainer(updater, (args.n_epoch, 'epoch'),
                               out=args.experiment)

    # Snapshot
    snapshot_interval = (args.snapshot_interval, 'epoch')
    trainer.extend(
        extensions.snapshot(filename='snapshot_epoch_{.updater.epoch}.npz'),
        trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(G,
                                              'g_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)
    trainer.extend(extensions.snapshot_object(D,
                                              'd_epoch_{.updater.epoch}.npz'),
                   trigger=snapshot_interval)

    # Display
    print_interval = (args.print_interval, 'iteration')
    trainer.extend(extensions.LogReport(trigger=print_interval))
    trainer.extend(extensions.PrintReport(
        ['iteration', 'main/loss', 'D/loss', 'main/l2_loss']),
                   trigger=print_interval)
    trainer.extend(extensions.ProgressBar(update_interval=args.print_interval))

    trainer.extend(extensions.dump_graph('D/loss', out_name='TrainGraph.dot'))

    # Plot
    plot_interval = (args.plot_interval, 'iteration')

    trainer.extend(extensions.PlotReport(['main/loss'],
                                         'iteration',
                                         file_name='loss.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)
    trainer.extend(extensions.PlotReport(['D/loss'],
                                         'iteration',
                                         file_name='d_loss.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)
    trainer.extend(extensions.PlotReport(['main/l2_loss'],
                                         'iteration',
                                         file_name='l2_loss.png',
                                         trigger=plot_interval),
                   trigger=plot_interval)

    # Eval
    path = os.path.join(args.experiment, args.test_folder)
    if not os.path.isdir(path):
        os.makedirs(path)
    print('Saving samples to {} ...\n'.format(path))

    train_batch = [trainset[idx][0] for idx in range(args.test_size)]
    train_v = Variable(chainer.dataset.concat_examples(train_batch, args.gpu),
                       volatile='on')
    trainer.extend(sampler(G, path, train_v, 'fake_samples_train_{}.png'),
                   trigger=plot_interval)

    val_batch = [valset[idx][0] for idx in range(args.test_size)]
    val_v = Variable(chainer.dataset.concat_examples(val_batch, args.gpu),
                     volatile='on')
    trainer.extend(sampler(G, path, val_v, 'fake_samples_val_{}.png'),
                   trigger=plot_interval)

    if args.resume:
        # Resume from a snapshot
        print('Resume from {} ... \n'.format(args.resume))
        chainer.serializers.load_npz(args.resume, trainer)

    # Run the training
    print('Training start ...\n')
    trainer.run()
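
In this script `make_optimizer` also receives `beta1`, which suggests Adam. Again a sketch under that assumption, not the project's actual code:

import chainer

def make_optimizer(model, lr, beta1):
    # Assumed helper: Adam with the learning rate and beta1 used above.
    optimizer = chainer.optimizers.Adam(alpha=lr, beta1=beta1)
    optimizer.setup(model)
    return optimizer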
Example #13
#samples = range(500,50500,500)
#samples = range(50,5050,50)
#samples = np.logspace(2,15,num=14,base=2)
#samples = samples.astype(int)
samples = range(3, 20)

a = parameters[p, 1]
b = parameters[p, 2]
c = parameters[p, 3]

x00 = parameters[p, 13]
x10 = parameters[p, 14]
T2P = parameters[p, 12]

print "b = ", -b

for si in samples:

    ls = linear_simulator(a, b, c, x00, x10, DT, T2P, noise, False)
    ls.run()
    D = ls.get_dynamics()

    S = sampler(si, D)
    S.sample()
    calc = timme_calculator(S, NUMBER_OF_BINS)
    results, err = calc.calculate()
    results = results[0:6]

    print(results[2])
#print(S.sampled_dynamics)
Example #14
def test(learner, args, train_envs, test_envs, log_dir):
    learner_test = network(args.num_layers, args.num_hidden, args.num_bandits)
    batch_sampler = sampler(args.batch_size, args.num_bandits)
    max_kl = args.max_kl
    cg_iters = args.cg_iters
    cg_damping = args.cg_damping
    ls_max_steps = args.ls_max_steps
    ls_backtrack_ratio = args.ls_backtrack_ratio
    train_rew = []
    for i in range(args.num_updates):
        #print(i)
        adapt_params = []
        inner_losses = []
        adapt_episodes = []
        rew_rem = []
        for j in range(args.num_tasks_train):
            e = batch_sampler.sample(train_envs[j], learner)
            inner_loss = learner.cal_loss(e.s, e.a, e.r)
            params = learner.update_params(inner_loss, args.inner_lr,
                                           args.first_order)
            a_e = batch_sampler.sample(train_envs[j], learner, params)
            adapt_params.append(params)
            adapt_episodes.append(a_e)
            inner_losses.append(inner_loss)
            mean_rew = torch.mean(a_e.r).data.numpy()
            rew_rem.append(mean_rew)

        print(np.mean(rew_rem))
        train_rew.append(np.mean(rew_rem))
        old_loss, _, old_pis = learner.surrogate_loss(adapt_episodes,
                                                      inner_losses)
        grads = torch.autograd.grad(old_loss,
                                    learner.parameters(),
                                    retain_graph=True)
        grads = parameters_to_vector(grads)

        # Compute the step direction with Conjugate Gradient
        hessian_vector_product = learner.hessian_vector_product(
            adapt_episodes, inner_losses, damping=cg_damping)
        stepdir = conjugate_gradient(hessian_vector_product,
                                     grads,
                                     cg_iters=cg_iters)

        # Compute the Lagrange multiplier
        shs = 0.5 * torch.dot(stepdir, hessian_vector_product(stepdir))
        lagrange_multiplier = torch.sqrt(shs / max_kl)

        step = stepdir / lagrange_multiplier

        # Save the old parameters
        old_params = parameters_to_vector(learner.parameters())

        # Line search
        step_size = 1.0
        for _ in range(ls_max_steps):
            vector_to_parameters(old_params - step_size * step,
                                 learner.parameters())
            loss, kl, _ = learner.surrogate_loss(adapt_episodes,
                                                 inner_losses,
                                                 old_pis=old_pis)
            improve = loss - old_loss
            if (improve.item() < 0.0) and (kl.item() < max_kl):
                break
            step_size *= ls_backtrack_ratio
        else:
            vector_to_parameters(old_params, learner.parameters())

        if (i + 1) % 10 == 0:
            test_input = torch.FloatTensor([[1]])
            test_output = learner.forward(test_input).data.numpy()[0]
            plt.figure()
            plt.bar(np.arange(len(test_output)), test_output)
            plt.savefig(log_dir + 'figures/before%i.png' % i)
            plt.close()
            for j in range(args.num_tasks_train):
                test_output = learner.forward(test_input,
                                              adapt_params[j]).data.numpy()[0]
                plt.figure()
                plt.bar(np.arange(len(test_output)), test_output)
                plt.savefig(log_dir + 'figures/after%i_%i.png' % (j, i))
                plt.close()

    np.save(log_dir + 'train_rew' + str(args.inner_lr) + '.npy', train_rew)
    plt.figure()
    plt.plot(train_rew)
    plt.show()
    plt.figure()
    plt.plot(train_rew)
    plt.savefig(log_dir + 'train_rew.png')

    return
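
`conjugate_gradient` above solves H x = g for the TRPO step direction using only Hessian-vector products, but its definition is not part of this excerpt. A minimal sketch of a standard CG solver with that interface (an assumption inferred from the call site):

import torch

def conjugate_gradient(hvp, g, cg_iters=10, residual_tol=1e-10):
    # Solve H x = g where H is available only through hvp(v) = H @ v.
    x = torch.zeros_like(g)
    r = g.clone()
    p = g.clone()
    rdotr = torch.dot(r, r)
    for _ in range(cg_iters):
        Ap = hvp(p)
        alpha = rdotr / torch.dot(p, Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        new_rdotr = torch.dot(r, r)
        if new_rdotr < residual_tol:
            break
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
    return x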
Example #15
		c = parameters[pi,3]
		d = parameters[pi,4]
		
		x00 = parameters[pi,14]
		x10 = parameters[pi,15]
		T2P = parameters[pi,13]
	       
		ls = hollingII_simulator(a, b, c,d, x00, x10, DT, T2P, noise, plot_dynamics)
		ls.run()
		D = ls.get_dynamics()
		
		E_prey = np.asarray(ls.ext_prey)
		E_pred = np.asarray(ls.ext_pred)

		## now do inference:
		S = sampler(si, D)
		S.sample()

		calc = timme_calculator(S, NUMBER_OF_BINS)
		results, err = calc.calculate()
		results = results[0:6]
		results = np.append(results, err[0])
		results = np.append(results, err[1])

		x0 = D[1,:]  # prey time series
		x1 = D[2,:]  # predator time series
		L = np.ones(len(x0))

		a00 = np.mean(-a*L + b*x1/((x0+d)**2))   ## equal to \alpha_{00}
		a01 = np.mean(-b/(x0+d))                 ## equal to \alpha_{01}
		a10 = np.mean(c*d/((x0+d)**2))           ## equal to \alpha_{10}
Example #16
#def train(args):

sp = spm.SentencePieceProcessor()
sp.Load("m3k.model")
sp_vocab = {sp.IdToPiece(i): i for i in range(sp.GetPieceSize())}
inv_sp_vocab = {value: key for key, value in sp_vocab.items()}
vocab = collections.defaultdict(lambda: len(vocab))

batch_size = 10
sequence_len = 200  #250#1601
#n_docs = 1063180
input_len = 35

#train_vectorizer=infinite_vectorizer(sp_vocab, DATA_PATH, batch_size, sequence_len, sp_model=sp)
train_vectorizer = sampler(sp,
                           vocab,
                           input_len=input_len,
                           output_len=sequence_len)

examples = [next(train_vectorizer) for i in range(10)]
example_input = [
    np.concatenate([inp[0] for inp, outp in examples]),
    np.concatenate([inp[1] for inp, outp in examples])
]
example_output = np.concatenate([outp for inp, outp in examples])

generation_model = GenerationModel(len(sp_vocab), input_len, sequence_len,
                                   args)

#from keras.utils import multi_gpu_model
#para_model=multi_gpu_model(generation_model.model, gpus=4)
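
A quick, self-contained illustration of the SentencePieceProcessor calls this snippet builds on (same model file as above; EncodeAsIds/DecodeIds are part of the standard sentencepiece Python API):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("m3k.model")

ids = sp.EncodeAsIds("a short test sentence")
print(ids)                    # integer piece IDs
print(sp.DecodeIds(ids))      # round-trips back to text
print(sp.IdToPiece(ids[0]))   # subword piece for the first ID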
Example #17
		b = parameters[pi,2]
		c = parameters[pi,3]
		
		x00 = parameters[pi,13]
		x10 = parameters[pi,14]
		T2P = parameters[pi,12]

		ls = linear_simulator(a, b, c, x00, x10, DT, T2P, ni, plot_dynamics)
		ls.run()
		D = ls.get_dynamics()
		
		E_prey = np.asarray(ls.ext_prey)
		E_pred = np.asarray(ls.ext_pred)

		## now do inference:
		S = sampler(NUMBER_OF_SAMPLES, D)
		S.sample()

		calc = timme_calculator(S, NUMBER_OF_BINS)
		results, err = calc.calculate()
		results = results[0:6]
		results = np.append(results, err[0])
		results = np.append(results, err[1])
		## calculate relative error:
		re = []
		re.append(np.abs((results[0]-a)/a))
		re.append(np.abs((results[1]+a)/a))
		re.append(np.abs((results[2]+b)/b))
		re.append(np.abs((results[3]+1)/1.0))
		re.append(np.abs((results[4]-c)/c))
		re.append(np.abs(results[5]))
Example #18
        G0[1, :] = binned_data[1, :] * binned_data[1, :]
        G0[2, :] = binned_data[1, :] * binned_data[2, :]

        X1 = binned_data[4, :]
        G1 = np.zeros((3, M))
        G1[0, :] = binned_data[2, :]
        G1[1, :] = binned_data[2, :] * binned_data[1, :]
        G1[2, :] = binned_data[2, :] * binned_data[2, :]

        J0 = np.dot(np.dot(X0, np.transpose(G0)), np.linalg.inv(np.dot(G0, np.transpose(G0))))
        J1 = np.dot(np.dot(X1, np.transpose(G1)), np.linalg.inv(np.dot(G1, np.transpose(G1))))

        J = np.asarray([J0, J1])
        # print("shape of J = ")
        # print(np.shape(J))
        return J

        # print(J0)
        # print(J1)


if __name__ == "__main__":

    S = sampler(100, "example_run.dynamics", "example_prey.extinctions", "example_pred.extinctions")
    S.sample()

    calc = timme_calculator(S, 2)
    calc.calculate()
    # calc.binning(np.asarray([[1,2,3,4,5,6,7,8, 9],[1, 7, 5, 3, 4, 6, 2, 8, 9], [1, 7, 5, 3, 4, 6, 2, 8, 9]]))  # testing
    # calc.bin_stats()
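
The J0/J1 expressions above are the normal-equation form of a least-squares fit of each series onto the rows of G0/G1. An equivalent formulation that avoids the explicit matrix inverse, shown only as an illustration with placeholder data:

import numpy as np

G0 = np.random.rand(3, 50)   # placeholder basis: 3 regressors x M bins
X0 = np.random.rand(50)      # placeholder target series
# Solves min_J ||X0 - J @ G0||, same solution as X0 G0^T (G0 G0^T)^{-1}.
J0 = np.linalg.lstsq(G0.T, X0, rcond=None)[0]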
Example #19
        J0 = np.dot(np.dot(X0, np.transpose(G0)),
                    np.linalg.inv(np.dot(G0, np.transpose(G0))))
        J1 = np.dot(np.dot(X1, np.transpose(G1)),
                    np.linalg.inv(np.dot(G1, np.transpose(G1))))

        J = np.asarray([J0, J1])

        # evaluate error function:
        err = (np.sum(np.abs(X0 - np.dot(J0, G0))),
               np.sum(np.abs(X1 - np.dot(J1, G1))))

        #print("shape of J = ")
        #print(np.shape(J))
        return (J, err)

        #print(J0)
        #print(J1)


if __name__ == '__main__':

    S = sampler(100, 'example_run.dynamics', 'example_prey.extinctions',
                'example_pred.extinctions')
    S.sample()

    calc = timme_calculator(S, 1)
    result = calc.calculate()
    print(result)
    #calc.binning(np.asarray([[1,2,3,4,5,6,7,8, 9],[1, 7, 5, 3, 4, 6, 2, 8, 9], [1, 7, 5, 3, 4, 6, 2, 8, 9]]))  # testing
    #calc.bin_stats()
Example #20
        J1 = np.dot(np.dot(X1, np.transpose(G1)),
                    np.linalg.inv(np.dot(G1, np.transpose(G1))))

        J = np.asarray([J0, J1])

        # evaluate error function:
        err = (np.sum(np.abs(X0 - np.dot(J0, G0))),
               np.sum(np.abs(X1 - np.dot(J1, G1))))

        #print("shape of J = ")
        #print(np.shape(J))
        return (J, err)

        #print(J0)
        #print(J1)


if __name__ == '__main__':

    S = sampler(100, 'example_run.dynamics', 'example_prey.extinctions',
                'example_pred.extinctions')
    S.sample()

    calc = timme_calculator(S, 1)
    result = calc.calculate()
    print(result)
    #calc.binning(np.asarray([[1,2,3,4,5,6,7,8, 9],[1, 7, 5, 3, 4, 6, 2, 8, 9], [1, 7, 5, 3, 4, 6, 2, 8, 9]]))  # testing
    #calc.bin_stats()