def main(args):

    # load model
    if 'full' in args.model:
        model = LFADS_full.load(args.model)
    else:
        model = LFADS.load(args.model)
    if args.gpu is not None:
        cuda.get_device(args.gpu).use()
        model.to_gpu(args.gpu)
        xp = cuda.cupy
    else:
        xp = np

    # generate completely new samples
    print("generate completely new samples from prior distribution...")

    # sample the initial condition g0 ~ N(0, I); note F.gaussian takes the
    # *log* variance as its second argument, so zeros give unit variance
    mus = xp.zeros((args.batch_size, model.generator.g_dims), dtype=xp.float32)
    ln_vars = xp.zeros((args.batch_size, model.generator.g_dims),
                       dtype=xp.float32)
    g0_bxd = F.gaussian(Variable(mus), Variable(ln_vars))

    # inferred inputs are sampled from a Gaussian autoregressive prior
    xs = []
    for i in range(args.nsample):
        print("generating sample %d of %d" % (i + 1, args.nsample))
        if i == 0:
            u_i_bxd = model.generator.sample_u_1(
                None,  # encoder output; unused when prior_sample=True
                batch_size=args.batch_size,
                prior_sample=True)
            g_i_bxd = model.generator(u_i_bxd, hx=g0_bxd)
        else:
            u_i_bxd = model.generator.sample_u_i(
                None,  # encoder output; unused when prior_sample=True
                u_i_bxd,
                batch_size=args.batch_size,
                prior_sample=True)
            g_i_bxd = model.generator(u_i_bxd, hx=g_i_bxd)
        f_i = model.generator.l_f(g_i_bxd)
        x_i = model.generator.sample_x_hat(f_i, calc_rec_loss=False)
        xs.append(cuda.to_cpu(x_i.data))

    # save
    data_dict = {'xs': xs}
    data_fname = args.model + '_prior_sampling.h5'
    with h5py.File(data_fname, 'w') as hf:
        for k, v in data_dict.items():
            hf.create_dataset(k, data=v, compression=False)
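For reference, the Gaussian autoregressive prior that sample_u_1/sample_u_i draw from can be sketched in plain NumPy as below. This is a minimal sketch assuming the standard LFADS AR(1) parameterization; tau and noise_variance mirror the ar_tau/ar_noise_variance arguments used in Example #5 but are otherwise illustrative.

import numpy as np

def sample_ar1_prior(n_steps, dims, tau=10.0, noise_variance=0.1,
                     batch_size=1):
    # AR(1): u_t = phi * u_{t-1} + eps_t, with phi = exp(-1 / tau) and
    # eps_t ~ N(0, noise_variance); u_1 is drawn from the stationary
    # distribution N(0, noise_variance / (1 - phi^2)).
    phi = np.exp(-1.0 / tau)
    stationary_var = noise_variance / (1.0 - phi ** 2)
    us = np.empty((n_steps, batch_size, dims), dtype=np.float32)
    us[0] = np.sqrt(stationary_var) * np.random.randn(batch_size, dims)
    for t in range(1, n_steps):
        eps = np.sqrt(noise_variance) * np.random.randn(batch_size, dims)
        us[t] = phi * us[t - 1] + eps
    return us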
Example #2

# the number of steps used to generate the initial condition cannot exceed
# the sequence length
hps.num_steps_for_gen_ic = min(hps.num_steps_for_gen_ic, hps.num_steps)



config = tf.ConfigProto(allow_soft_placement=True,
                        log_device_placement=False)

with tf.Session(config=config) as session:

	#####################################################################################
	# train
	#####################################################################################
	#####################################################################################
	# build_model(hps, kind='train', datasets = datasets)
	#####################################################################################
	with tf.variable_scope("LFADS", reuse=None):
		model = LFADS(hps, kind='train', datasets=datasets) 
		
	tf.global_variables_initializer().run()
	session.run(model.learning_rate.initializer)
	
	#####################################################################################
	# model.train_model(datasets)
	#####################################################################################
	
	lr = session.run(model.learning_rate)
	lr_stop = hps.learning_rate_stop
	
	train_costs = []
	valid_costs = []
	learning_rates = []        
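	# The fragment stops just before the epoch loop. A hedged sketch of how it
	# continues: run epochs until the decayed learning rate falls below
	# hps.learning_rate_stop, decaying on validation plateaus. run_epoch is a
	# hypothetical stand-in for the model's per-epoch train/eval calls;
	# learning_rate_decay_op is assumed to be the decay op the LFADS graph
	# defines alongside learning_rate.
	while lr > lr_stop:
		train_cost, valid_cost = run_epoch(session, model, datasets)  # hypothetical
		train_costs.append(train_cost)
		valid_costs.append(valid_cost)
		learning_rates.append(lr)
		# decay when validation cost is worse than the best seen so far
		if len(valid_costs) > 1 and valid_cost > min(valid_costs[:-1]):
			session.run(model.learning_rate_decay_op)
		lr = session.run(model.learning_rate)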
	
Example #3
def main(args):

    # load data
    print('Loading data...', file=sys.stderr)
    dataset = util.read_datasets(args.data_path, args.data_fname_stem)
    train_data = dataset["train_data"]
    test_data = dataset["valid_data"]

    # load model
    if 'full' in args.model:
        model = LFADS_full.load(args.model)
    else:
        model = LFADS.load(args.model)
    if args.gpu is not None:
        cuda.get_device(args.gpu).use()
        model.to_gpu(args.gpu)

    # posterior sampling
    # encoder
    ndata = train_data.shape[0]
    x_hat_all = []
    f_all = []
    for i in range(ndata):
        print("now %d'th data among %d" % (i, ndata))
        x_data = train_data[i, :, :].astype(np.float32)
        x_data = x_data[np.newaxis, :, :]
        x_data = np.tile(x_data, (args.nsample, 1, 1))

        # copy data to GPU
        if args.gpu is not None:
            x_data = cuda.to_gpu(x_data)

        # wrap each trial as a Variable
        xs = [Variable(x.astype(np.float32)) for x in x_data]

        # encoder
        _, h_bxtxd = model.encoder(xs)
        h_bxtxd = F.stack(h_bxtxd, 0)
        d_dims = h_bxtxd.data.shape[2]

        # generator initial condition: concatenate the backward encoder state
        # at the first step with the forward state at the last step
        # (use integer division so the slice bounds are valid indices)
        g0_bxd, _ = model.generator.sample_g0(
            F.concat([h_bxtxd[:, 0, -d_dims // 2:],
                      h_bxtxd[:, -1, :d_dims // 2]], axis=1))
        f0_bxd = model.generator.l_f(g0_bxd)

        # sampling
        x_hat = []
        f_trial = []

        for j in range(h_bxtxd.data.shape[1]):
            # encoder features at step j (forward and backward halves)
            h_j = F.concat([h_bxtxd[:, j, :d_dims // 2],
                            h_bxtxd[:, j, d_dims // 2:]], axis=1)
            if j == 0:
                if 'full' in args.model:
                    con_i = model.controller(F.concat([f0_bxd, h_j], axis=1))
                    u_i_bxd, _ = model.generator.sample_u_1(con_i)
                    g_i_bxd = model.generator(u_i_bxd, hx=g0_bxd)
                else:
                    g_i_bxd = model.generator(h_j, hx=g0_bxd)
            else:
                if 'full' in args.model:
                    con_i = model.controller(F.concat([f_i, h_j], axis=1),
                                             hx=con_i)
                    u_i_bxd, _ = model.generator.sample_u_i(con_i, u_i_bxd)
                    g_i_bxd = model.generator(u_i_bxd, hx=g_i_bxd)
                else:
                    g_i_bxd = model.generator(h_j, hx=g_i_bxd)

            f_i = model.generator.l_f(g_i_bxd)
            x_hat_i = model.generator.sample_x_hat(
                f_i, xs=Variable(x_data[:, j, :]), nrep=1,
                calc_rec_loss=False)
            f_i = F.mean(f_i, axis=0)
            x_hat.append(cuda.to_cpu(x_hat_i.data))
            f_trial.append(cuda.to_cpu(f_i.data))
        x_hat_all.append(x_hat)
        f_all.append(f_trial)

    # save
    data_dict = {'x_hat_all': x_hat_all, 'f_all': f_all}
    data_fname = args.model + '_posterior_sampling.h5'
    with h5py.File(data_fname, 'w') as hf:
        for k, v in data_dict.items():
            hf.create_dataset(k, data=v, compression=False)
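The g0 construction in this example concatenates the two informative ends of a bidirectional encoder: the backward half of the state at the first timestep and the forward half at the last. A minimal NumPy sketch of that slicing, assuming (as the indexing above implies) that the first d_dims // 2 channels hold the forward RNN state and the rest the backward RNN state:

import numpy as np

def g0_encoder_summary(h_btd):
    # h_btd: encoder states of shape (batch, time, d_dims)
    d_dims = h_btd.shape[2]
    backward_at_start = h_btd[:, 0, -d_dims // 2:]  # backward RNN, seen t=T..1
    forward_at_end = h_btd[:, -1, :d_dims // 2]     # forward RNN, seen t=1..T
    return np.concatenate([backward_at_start, forward_at_end], axis=1)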
Example #4
def build_model(hps, kind="train", datasets=None):
    """Builds a model from either random initialization or saved parameters.

    Args:
      hps: The hyperparameters for the model.
      kind: (optional) The kind of model to build. Training vs. inference
        require different graphs.
      datasets: The datasets structure (see top of lfads.py).

    Returns:
      An LFADS model.
    """

    build_kind = kind
    if build_kind == "write_model_params":
        build_kind = "train"
    with tf.variable_scope("LFADS", reuse=None):
        model = LFADS(hps, kind=build_kind, datasets=datasets)

    if not os.path.exists(hps.lfads_save_dir):
        print("Save directory %s does not exist, creating it." %
              hps.lfads_save_dir)
        os.makedirs(hps.lfads_save_dir)

    cp_pb_ln = hps.checkpoint_pb_load_name
    cp_pb_ln = 'checkpoint' if cp_pb_ln == "" else cp_pb_ln
    if cp_pb_ln == 'checkpoint':
        print("Loading latest training checkpoint in: ", hps.lfads_save_dir)
        saver = model.seso_saver
    elif cp_pb_ln == 'checkpoint_lve':
        print("Loading lowest validation checkpoint in: ", hps.lfads_save_dir)
        saver = model.lve_saver
    else:
        print("Loading checkpoint: ", cp_pb_ln, ", in: ", hps.lfads_save_dir)
        saver = model.seso_saver

    ckpt = tf.train.get_checkpoint_state(hps.lfads_save_dir,
                                         latest_filename=cp_pb_ln)

    session = tf.get_default_session()
    print("ckpt: ", ckpt)
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        if kind in ["posterior_sample_and_average", "prior_sample",
                    "write_model_params"]:
            print("Possible error!!! You are running ", kind,
                  " on a newly initialized model!")
            # avoid dereferencing ckpt here, since it may be None
            print("Are you sure a checkpoint exists in ",
                  hps.lfads_save_dir, "?")

        tf.global_variables_initializer().run()

    if ckpt:
        train_step_str = re.search('-[0-9]+$',
                                   ckpt.model_checkpoint_path).group()
    else:
        train_step_str = '-0'

    fname = 'hyperparameters' + train_step_str + '.txt'
    hp_fname = os.path.join(hps.lfads_save_dir, fname)
    hps_for_saving = jsonify_dict(hps)
    utils.write_data(hp_fname, hps_for_saving, use_json=True)

    return model
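build_model reads tf.get_default_session(), so it must be called inside an active session. A minimal usage sketch (hps and datasets are assumed to come from the surrounding setup code, as in the other examples):

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as session:
    # the session context manager installs itself as the default session,
    # which build_model then uses to restore checkpointed parameters
    model = build_model(hps, kind="posterior_sample_and_average",
                        datasets=datasets)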
Example #5
def main(args):
    if args.gpu is not None:
        cuda.get_device(args.gpu).use()
        xp = cuda.cupy
    else:
        xp = np

    # create the output directory if it does not already exist
    os.makedirs(args.model, exist_ok=True)

    # set up logger
    logger = logging.getLogger()
    logging.basicConfig(level=logging.INFO)
    log_path = os.path.join(args.model, 'log')
    file_handler = logging.FileHandler(log_path)
    fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(fmt)
    logger.addHandler(file_handler)

    # load data
    logger.info('Loading data...')
    dataset = util.read_datasets(args.data_path, args.data_fname_stem)

    # save hyperparameters
    with open(os.path.join(args.model, 'params'), 'w') as f:
        for k, v in vars(args).items():
            print('{}\t{}'.format(k, v), file=f)

    # input dimensionality (number of observed channels)
    input_dims = dataset["train_data"].shape[2]

    # set NN
    # encoder
    encoder = GaussianEncoder(args.enc_n_layer, input_dims, args.enc_h_dims,
                              args.enc_dropout)

    # controller (set only if used)
    if args.con_h_dims > 0:
        con_input_dims = args.gen_f_dims + (args.enc_h_dims * 2)
        controller = GaussianController(con_input_dims, args.con_h_dims)

    # generator
    if args.con_h_dims > 0:
        gen_u_dims = args.con_h_dims
    else:
        gen_u_dims = args.enc_h_dims * 2
    generator = GaussianGenerator(gen_u_dims, args.gen_h_dims, args.gen_f_dims,
                                  args.gen_g_dims, input_dims, args.ar_tau,
                                  args.ar_noise_variance, args.batch_size, xp)

    if args.con_h_dims > 0:
        lfads = LFADS_full(encoder, controller, generator)
    else:
        lfads = LFADS(encoder, generator)

    lfads.save_model_def(args.model)

    train_lfads.train(lfads,
                      dataset,
                      args.optim,
                      dest_dir=args.model,
                      batch_size=args.batch_size,
                      max_epoch=args.epoch,
                      gpu=args.gpu,
                      save_every=args.save_every,
                      test_every=args.test_every,
                      alpha_init=args.alpha_init,
                      alpha_delta=args.alpha_delta,
                      l2_weight_con=args.l2_weight_con,
                      l2_weight_gen=args.l2_weight_gen)
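For completeness, a sketch of the argument parsing that would drive this entry point. The flag names match the attributes main() reads above; the defaults are illustrative only, and the script's real argparse setup (not shown here) may differ.

import argparse

if __name__ == '__main__':
    p = argparse.ArgumentParser(description='Train an LFADS model (sketch).')
    p.add_argument('--data_path', required=True)
    p.add_argument('--data_fname_stem', required=True)
    p.add_argument('--model', required=True, help='output directory')
    p.add_argument('--gpu', type=int, default=None)
    p.add_argument('--enc_n_layer', type=int, default=1)
    p.add_argument('--enc_h_dims', type=int, default=64)
    p.add_argument('--enc_dropout', type=float, default=0.0)
    p.add_argument('--con_h_dims', type=int, default=0,
                   help='0 disables the controller (plain LFADS)')
    p.add_argument('--gen_h_dims', type=int, default=64)
    p.add_argument('--gen_f_dims', type=int, default=3)
    p.add_argument('--gen_g_dims', type=int, default=64)
    p.add_argument('--ar_tau', type=float, default=10.0)
    p.add_argument('--ar_noise_variance', type=float, default=0.1)
    p.add_argument('--batch_size', type=int, default=16)
    p.add_argument('--optim', default='adam')
    p.add_argument('--epoch', type=int, default=500)
    p.add_argument('--save_every', type=int, default=50)
    p.add_argument('--test_every', type=int, default=10)
    p.add_argument('--alpha_init', type=float, default=0.0)
    p.add_argument('--alpha_delta', type=float, default=0.001)
    p.add_argument('--l2_weight_con', type=float, default=0.0)
    p.add_argument('--l2_weight_gen', type=float, default=0.0)
    main(p.parse_args())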