def run_train():  # 2019-12-11 2044
    """Train a delayed-DDPG agent on a gym environment.

    Reads all hyperparameters from Arguments(), runs an
    explore -> update -> evaluate loop until the evaluation reward
    reaches the environment's target reward (or max_epoch is hit, or
    the user presses Ctrl-C), then saves the model, replay memories and
    reward/loss recorders into mod_dir and draws the training curves.
    """
    args = Arguments()
    """initialization"""
    gpu_id = args.gpu_id
    mod_dir = args.mod_dir  # directory for checkpoints, memories and records
    actor_dim = args.actor_dim
    critic_dim = args.critic_dim
    memories_size = args.memories_size
    random_seed = args.random_seed
    is_remove = args.is_remove  # whether to wipe previous history in mod_dir
    env_name = args.env_name

    print('  GPUid: %s' % gpu_id)
    print('  Model: %s' % mod_dir)
    whether_remove_history(remove=is_remove, mod_dir=mod_dir)
    '''init env'''
    env = gym.make(env_name)
    state_dim, action_dim, action_max, target_reward = get_env_info(env)
    eva_size = 100  # number of parallel envs used for batched evaluation
    '''init mod'''
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    agent = AgentDelayDDPG(state_dim, action_dim, actor_dim, critic_dim)
    agent.load_model(mod_dir)  # resume from a previous checkpoint if present

    memories = Memories(memories_size, state_dim, action_dim)
    memories.load(mod_dir)

    torch.set_num_threads(8)
    torch.set_default_dtype(torch.float32)
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    """train loop"""
    max_epoch = args.max_epoch
    max_step = args.max_step
    explore_noise = args.explore_noise  # action noise used while exploring
    policy_noise = args.policy_noise  # target-policy smoothing noise
    batch_size = args.batch_size
    update_gap = args.update_gap
    update_c = args.update_c
    gamma = args.gamma  # reward discount factor
    '''show and plot'''
    show_gap = 2**5  # print the Reward, actor_loss, critic_loss
    smooth_kernel = 2**3  # smooth the reward/loss curves
    recorders = list(
    )  # recorders.append((eva_reward, epoch_reward, actor_loss, critic_loss))
    recorders1 = list()  # recorders1.append((eva_reward, eva_std, iter_num))
    env_list = [gym.make(env_name) for _ in range(eva_size)]
    global_iter_num = 0

    start_time = show_time = timer()
    try:
        for epoch in range(max_epoch):
            # explore: act in the env and push transitions into memories
            epoch_reward, iter_num = agent.inactive_in_env(
                env,
                memories,
                max_step,
                explore_noise,
                action_max,
            )

            # learn from replayed transitions
            actor_loss, critic_loss = agent.update_parameter(
                memories,
                iter_num,
                batch_size,
                policy_noise,
                update_gap,
                update_c,
                gamma,
            )

            # single-env evaluation of the current policy
            eva_reward = get_eva_reward(
                agent,
                env,
                max_step,
                action_max,
                target_reward,
            )

            recorders.append(
                (eva_reward, epoch_reward, actor_loss, critic_loss))
            global_iter_num += iter_num

            # periodic progress report, gated by wall-clock time (seconds)
            if timer() - show_time > show_gap:
                rewards = np.array(recorders[-smooth_kernel:])[:, 0:2]
                smooth_eva_r, smooth_epoch_r = np.average(rewards, axis=0)
                print(
                    "{:4}    |EvaR {:7.2f}    |EpoR {:7.2f}    |A {:6.2f}    C {:6.2f}"
                    .format(epoch, smooth_eva_r, smooth_epoch_r, actor_loss,
                            critic_loss))

                # batched evaluation over eva_size envs (mean and std).
                # NOTE: this overwrites eva_reward used by the stop check below.
                eva_reward, eva_std = get_eva_reward_batch(
                    agent, env_list, max_step, action_max)

                recorders1.append(
                    (eva_reward, eva_std, global_iter_num, epoch))
                show_time = timer(
                )  # reset show_time after get_eva_reward_batch !

            # stop once the (possibly batch-averaged) reward hits the target
            if eva_reward >= target_reward:
                print("########## Solved! ###########")
                print(
                    "{:4}    |EvaR {:7.2f}    |EpoR {:7.2f}    |A {:6.2f}    C {:6.2f}"
                    .format(epoch, eva_reward, epoch_reward, actor_loss,
                            critic_loss))
                break

    except KeyboardInterrupt:
        print("KeyboardInterrupt")
    print('TimeUsed:', int(timer() - start_time))
    agent.save_model(mod_dir)
    memories.save(mod_dir)
    np.save('%s/recorders.npy' % mod_dir, recorders)
    np.save('%s/recorders1.npy' % mod_dir, recorders1)
    print("Saved:", mod_dir)

    draw_plot(recorders, mod_dir, smooth_kernel)
Exemplo n.º 2
0
give a significant performance boost.

The ``cuda`` backends are only available for computers with a GPU.

"""

## Imports

# Numpy Backend
import numpy  # numpy has to be present

# Torch Backends (and flags)
try:
    import torch

    torch.set_default_dtype(torch.float64)  # we need more precision for FDTD
    torch._C.set_grad_enabled(False)  # we don't need gradients (for now)
    TORCH_AVAILABLE = True
    TORCH_CUDA_AVAILABLE = torch.cuda.is_available()
except ImportError:
    # torch is optional: without it only the numpy backend is available
    TORCH_AVAILABLE = False
    TORCH_CUDA_AVAILABLE = False


# Base Class
class Backend:
    """Base class for FDTD numerical backends.

    Holds constants shared by all backends; concrete backends
    (numpy / torch, CPU / CUDA per the module docstring) build on it.
    """

    # constants
    pi = numpy.pi  # mathematical constant pi, identical for every backend
Exemplo n.º 3
0
    def __call__(self, trial):
        """Optuna objective: train one HypHC model with trial-suggested
        hyperparameters and return the best Dasgupta cost reached.

        Suggests general / model / dataset hyperparameters on a deep copy
        of ``self.orig_args``, optionally sets up a per-trial save
        directory and log file, trains with periodic evaluation, optuna
        pruning and temperature / learning-rate annealing.

        Args:
            trial: ``optuna.Trial`` used for hyperparameter suggestion,
                intermediate reporting and pruning.

        Returns:
            float: best (lowest) Dasgupta cost, or ``None`` when a model
            with the same configuration already exists on disk.
        """
        optim_args = copy.deepcopy(self.orig_args)

        # --- trial-suggested hyperparameters ---
        # general
        optim_args.epochs = trial.suggest_int("epochs", 1, 60, step=3)
        batch_size_power = trial.suggest_int("batch_size_power", 6, 9)
        optim_args.batch_size = 2**batch_size_power  # 64-512
        optim_args.learning_rate = trial.suggest_float(
            "learning_rate", 1e-5, 1e-2, log=True)  # log scale

        # model
        optim_args.temperature = trial.suggest_float("temperature", 0.001, 0.2)
        optim_args.init_size = trial.suggest_float("init_size", 0.01, 0.1)
        optim_args.anneal_every = trial.suggest_int("anneal_every", 10, 100)
        optim_args.anneal_factor = trial.suggest_float("anneal_factor", 0.7, 1.0)

        # dataset
        optim_args.similarity_metric = metric_glob  # module-level global
        optim_args.feature_dim = trial.suggest_int("feature_dim", 10, 200)

        # --- logging / saving setup ---
        logger = logging.getLogger()
        logger.setLevel(logging.INFO)

        # get saving directory
        # TODO: consider not saving.
        if optim_args.save:
            path_list = list(os.path.split(get_savedir(optim_args)))
            path_list[1] = f"{metric_glob}_trial{trial.number}_{path_list[1]}"
            save_dir = os.path.join(*path_list)
            logging.info("Save directory: " + save_dir)
            save_path = os.path.join(save_dir,
                                     "model_{}.pkl".format(optim_args.seed))
            if os.path.exists(save_dir):
                if os.path.exists(save_path):
                    # identical configuration already trained: skip this trial
                    logging.info("Model with the same configuration parameters already exists.")
                    logging.info("Exiting")
                    return
            else:
                os.makedirs(save_dir)
                with open(os.path.join(save_dir, "config.json"), 'w') as fp:
                    json.dump(optim_args.__dict__, fp)
            log_path = os.path.join(save_dir,
                                    "train_{}.log".format(optim_args.seed))
            hdlr = logging.FileHandler(log_path)
            formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
            hdlr.setFormatter(formatter)
            logger.addHandler(hdlr)

        # set seed
        logging.info("Using seed {}.".format(optim_args.seed))
        np.random.seed(optim_args.seed)
        torch.manual_seed(optim_args.seed)
        torch.cuda.manual_seed(optim_args.seed)

        # set precision
        logging.info("Using {} precision.".format(optim_args.dtype))
        if optim_args.dtype == "double":
            torch.set_default_dtype(torch.float64)

        # create dataset
        # TODO: check how to optimize loading all the data each time
        if optim_args.dataset == 'breast_cancer':
            # BUG FIX: these kwargs previously read the global `args` instead
            # of `optim_args`, so the trial-suggested feature_dim (and the
            # per-trial similarity metric) were silently ignored.
            x_all, y_true_all, similarities_all, label_dict = load_hypbc_multi_group(
                num_groups=1,
                num_data_samples=optim_args.num_data_samples,
                feature_dim=optim_args.feature_dim,
                method=optim_args.similarity_metric,
                feature_correlation_thresh=optim_args.feature_correlation_thresh,
                visualize=False)
            x = x_all[0]
            y_true = y_true_all[0]
            similarities = similarities_all[0]
        else:
            assert False  # unsupported dataset

        print(similarities.shape)
        print(similarities)

        # num_samples < 2 means "use all C(n, 2) anchor pairs"
        actual_num_samples = comb(len(y_true), 2) if optim_args.num_samples < 2 else optim_args.num_samples
        dataset = HCDataset(x, y_true, similarities, num_samples=actual_num_samples)
        dataloader = data.DataLoader(dataset,
                                     batch_size=optim_args.batch_size,
                                     shuffle=True,
                                     num_workers=0,
                                     pin_memory=True)

        # Generate the model.
        model = HypHC(dataset.n_nodes, optim_args.rank, optim_args.temperature,
                      optim_args.init_size, optim_args.max_scale)
        model.to("cuda")

        # create optimizer
        Optimizer = getattr(optim, optim_args.optimizer)
        optimizer = Optimizer(model.parameters(), optim_args.learning_rate)

        # train model
        best_cost = np.inf
        best_model = None
        counter = 0  # early-stopping patience counter
        logging.info("Start training")
        for epoch in range(optim_args.epochs):
            model.train()
            total_loss = 0.0
            with tqdm(total=len(dataloader), unit='ex') as bar:
                for step, (triple_ids, triple_similarities) in enumerate(dataloader):
                    triple_ids = triple_ids.cuda()
                    triple_similarities = triple_similarities.cuda()
                    loss = model.loss(triple_ids, triple_similarities)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                    bar.update(1)
                    bar.set_postfix(loss=f'{loss.item():.6f}')
                    # accumulate a python float; accumulating the loss tensor
                    # would retain every step's autograd graph in memory
                    total_loss += loss.item()
            total_loss = total_loss / (step + 1.0)
            logging.info("\t Epoch {} | average train loss: {:.6f}".format(epoch, total_loss))

            # keep best embeddings
            if (epoch + 1) % optim_args.eval_every == 0:
                model.eval()
                tree = model.decode_tree(fast_decoding=optim_args.fast_decoding)

                if optim_args.save:
                    # BUG FIX: save_dir only exists when saving is enabled;
                    # previously this raised NameError with save disabled.
                    model_path = os.path.join(save_dir, f"model_sd{optim_args.seed}_epch{epoch}.pkl")
                    torch.save(model.state_dict(), model_path)
                    img_path = os.path.join(save_dir, f"embedding_sd{optim_args.seed}_epch{epoch}.png")
                    visualize_tree(model, tree, y_true, img_path, label_dict)

                cost = dasgupta_cost(tree, similarities)

                logging.info("{}:\t{:.4f}".format("Dasgupta's cost", cost))
                if cost < best_cost:
                    counter = 0
                    best_cost = cost
                    best_model = model.state_dict()
                else:
                    counter += 1
                    if counter == optim_args.patience:
                        logging.info("Early stopping.")
                        break

                trial.report(cost, epoch)  # report the values to optuna .
                if trial.should_prune():
                    raise optuna.exceptions.TrialPruned()

            # anneal temperature (learning rate is annealed by the same factor)
            if (epoch + 1) % optim_args.anneal_every == 0:
                model.anneal_temperature(optim_args.anneal_factor)
                logging.info("Annealing temperature to: {}".format(model.temperature))
                for param_group in optimizer.param_groups:
                    param_group['lr'] *= optim_args.anneal_factor
                    lr = param_group['lr']
                logging.info("Annealing learning rate to: {}".format(lr))

        logging.info("Optimization finished.")
        if best_model is not None:
            # load best model
            model.load_state_dict(best_model)

        if optim_args.save:
            # save best embeddings
            logging.info("Saving best model at {}".format(save_path))
            torch.save(best_model, save_path)

        # final evaluation on the best model
        model.eval()
        logging.info("Decoding embeddings.")
        tree = model.decode_tree(fast_decoding=optim_args.fast_decoding)
        cost = dasgupta_cost(tree, similarities)
        logging.info("{}:\t{:.4f}".format("Dasgupta's cost", cost))

        if optim_args.save:
            logger.removeHandler(hdlr)

        return best_cost
Exemplo n.º 4
0
d.item()  # tensor.item() works on any one-element tensor, regardless of shape

# a[0].item()  ->
# raise ValueError: only one element tensors can be converted to Python scalars

x = t.arange(0, 27).view(3,3,3)
x

x[[1,2], [1,2], [2, 0]]

x[[2, 1, 0], [0], [1]]

x[[0, 2], ...]

# Set the default tensor type; note the argument is a string.
# BUG FIX: torch.set_default_dtype expects a torch.dtype object, not a
# string -- the string form belongs to torch.set_default_tensor_type.
t.set_default_tensor_type('torch.DoubleTensor')

a = t.Tensor(2, 3)
a.dtype  # a is now a DoubleTensor, so dtype is float64

# restore the previous default
t.set_default_tensor_type('torch.FloatTensor')

c = a.type_as(b)
c

a.new(2, 3)  # equivalent to torch.DoubleTensor(2, 3); prefer a.new_tensor

t.zeros_like(a, dtype=t.int)  # some attributes can be overridden here

t.rand_like(a)
Exemplo n.º 5
0
# dump the run configuration header into the results log
# NOTE(review): `rl` is presumably a log file handle opened earlier -- confirm
rl.write('# H1:          {}\n'.format(MODEL_H1))
rl.write('# H2:          {}\n'.format(MODEL_H2))
rl.write('# H1 (critic): {}\n'.format(MODEL_C_H1))
rl.write('# H2 (critic): {}\n'.format(MODEL_C_H2))
rl.write('# batch_norm:  {}\n'.format(BATCH_NORM))
rl.write('# load_file:   {}\n'.format(load_file))
rl.write('# save_file:   {}\n'.format(save_file))
rl.flush()  # make the header visible even if the run crashes later

# * ---------------- *
#   torch:
#    local computer was a laptop with no CUDA available
#    => feel free to change this, if you have a machine (with GPU)
# * ---------------- *
dtype = torch.float64
torch.set_default_dtype(dtype)
device = torch.device("cpu")
# device = torch.device("cuda:0") # Uncomment this to run on GPU


class MSScaling(torch.nn.Module):
    """Multiply every input by a fixed scalar factor.

    A parameter-free module: the scale is fixed at construction time and
    is not learned.
    """

    def __init__(self, f=1.0):
        """Store the scaling factor *f*, coerced to float."""
        super().__init__()
        self.factor = float(f)

    def forward(self, input):
        """Return *input* scaled by the stored factor."""
        scaled = self.factor * input
        return scaled


class MSA(torch.nn.Module):
    def __init__(self,
Exemplo n.º 6
0
[1] https://gscontras.github.io/probLang/chapters/07-generics.html
"""

import torch

import argparse
import numbers
import collections

import pyro
import pyro.distributions as dist
import pyro.poutine as poutine

from search_inference import factor, HashingMarginal, memoize, Search

torch.set_default_dtype(
    torch.float64)  # double precision for numerical stability


def Marginal(fn):
    """Wrap *fn* so that each distinct argument tuple is searched
    exhaustively exactly once and the resulting marginal is cached."""
    def _run_search(*args):
        return HashingMarginal(Search(fn).run(*args))

    return memoize(_run_search)


#######################
# models
#######################

# hashable params
# (a namedtuple rather than a dict so parameter settings can serve as
# cache keys for the memoized marginals above)
Params = collections.namedtuple("Params", ["theta", "gamma", "delta"])


def discretize_beta_pdf(bins, gamma, delta):
Exemplo n.º 7
0
def _build_conv3d_lfads(hyperparams, num_steps, width, height, device):
    """Construct a Conv3d_LFADS_Net from the 'model' hyperparameter dict.

    Shared by training setup and post-training plotting (previously the
    18-argument constructor call was duplicated verbatim).
    """
    mh = hyperparams['model']
    return Conv3d_LFADS_Net(
        input_dims=(num_steps, width, height),
        conv_dense_size=mh['conv_dense_size'],
        channel_dims=mh['channel_dims'],
        factor_size=mh['factor_size'],
        g_encoder_size=mh['g_encoder_size'],
        c_encoder_size=mh['c_encoder_size'],
        g_latent_size=mh['g_latent_size'],
        u_latent_size=mh['u_latent_size'],
        controller_size=mh['controller_size'],
        generator_size=mh['generator_size'],
        prior=mh['prior'],
        clip_val=mh['clip_val'],
        conv_dropout=mh['conv_dropout'],
        lfads_dropout=mh['lfads_dropout'],
        do_normalize_factors=mh['normalize_factors'],
        max_norm=mh['max_norm'],
        device=device)


def main():
    """Train a 3D-conv LFADS model on synthetic calcium-imaging videos.

    Parses CLI args, loads hyperparameters, builds train/valid
    DataLoaders, model, objective, optimizer and scheduler, runs
    training via RunManager, then reloads the best checkpoint and
    writes summary figures.
    """
    args = parser.parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = 'cpu'

    hyperparams = load_parameters(args.hyperparameter_path)

    # optional learning-rate override, propagated to the scheduler floor
    orion_hp_string = ''
    if args.lr:
        lr = args.lr
        hyperparams['optimizer']['lr_init'] = lr
        hyperparams['scheduler']['lr_min'] = lr * 1e-3
        orion_hp_string += 'lr= %.4f\n' % lr

    data_name = args.data_path.split('/')[-1]

    # BUG FIX: was `args.hyperparameter_path.split['/'][-1]`, which
    # subscripts the bound method and raises TypeError; split must be called.
    model_name = args.hyperparameter_path.split('/')[-1]

    # compact run name built from the '*size*' model hyperparameters
    mhp_list = [
        key.replace('size', '').replace('deep', 'd').replace(
            'obs', 'o').replace('_', '')[:4] + str(val)
        for key, val in hyperparams['model'].items() if 'size' in key
    ]

    mhp_list.sort()
    hyperparams['run_name'] = '_'.join(mhp_list)
    orion_hp_string = orion_hp_string.replace('\n', '-').replace(' ',
                                                                 '').replace(
                                                                     '=', '')
    orion_hp_string = '_orion-' + orion_hp_string
    hyperparams['run_name'] += orion_hp_string
    save_loc = '%s/%s/%s/%s/' % (args.output_dir, data_name, model_name,
                                 hyperparams['run_name'])

    if not os.path.exists(save_loc):
        os.makedirs(save_loc)

    # Setup DataLoader goes here
    data_dict = read_data(args.data_path)
    train_dl = torch.utils.data.DataLoader(SyntheticCalciumVideoDataset(
        traces=data_dict['train_fluor'],
        cells=data_dict['cells'],
        device=device),
                                           batch_size=args.batch_size,
                                           drop_last=True)
    valid_dl = torch.utils.data.DataLoader(SyntheticCalciumVideoDataset(
        traces=data_dict['valid_fluor'],
        cells=data_dict['cells'],
        device=device),
                                           batch_size=args.batch_size,
                                           drop_last=True)

    num_trials, num_steps, num_cells = data_dict['train_fluor'].shape
    num_cells, width, height = data_dict['cells'].shape

    model = _build_conv3d_lfads(hyperparams, num_steps, width, height, device)

    model = _CustomDataParallel(model).to(device)
    model.to(dtype=train_dl.dataset.dtype)
    torch.set_default_dtype(train_dl.dataset.dtype)

    transforms = trf.Compose([])

    loglikelihood = LogLikelihoodGaussian()
    objective = Conv_LFADS_Loss(
        loglikelihood=loglikelihood,
        loss_weight_dict={
            'kl': hyperparams['objective']['kl'],
            'l2': hyperparams['objective']['l2']
        },
        l2_con_scale=hyperparams['objective']['l2_con_scale'],
        l2_gen_scale=hyperparams['objective']['l2_gen_scale']).to(device)

    # report the trainable-parameter count
    total_params = 0
    for ix, (name, param) in enumerate(model.named_parameters()):
        print(ix, name, list(param.shape), param.numel(), param.requires_grad)
        total_params += param.numel()

    print('Total parameters: %i' % total_params)

    optimizer = opt.Adam([p for p in model.parameters() if p.requires_grad],
                         lr=hyperparams['optimizer']['lr_init'],
                         betas=hyperparams['optimizer']['betas'],
                         eps=hyperparams['optimizer']['eps'])

    scheduler = LFADS_Scheduler(
        optimizer=optimizer,
        mode='min',
        factor=hyperparams['scheduler']['scheduler_factor'],
        patience=hyperparams['scheduler']['scheduler_patience'],
        verbose=True,
        threshold=1e-4,
        threshold_mode='abs',
        cooldown=hyperparams['scheduler']['scheduler_cooldown'],
        min_lr=hyperparams['scheduler']['lr_min'])

    # NOTE(review): torch._np is a private alias; numpy.arange would be the
    # conventional call -- kept as-is pending confirmation that numpy is
    # imported in this module.
    TIME = torch._np.arange(0, num_steps * data_dict['dt'], data_dict['dt'])

    train_truth = {}
    if 'train_latent' in data_dict.keys():
        train_truth['latent'] = data_dict['train_latent']

    valid_truth = {}
    if 'valid_latent' in data_dict.keys():
        valid_truth['latent'] = data_dict['valid_latent']

    plotter = {
        'train': Plotter(time=TIME, truth=train_truth),
        'valid': Plotter(time=TIME, truth=valid_truth)
    }

    # optional tensorboardX logging (skipped when the package is absent)
    if args.use_tensorboard:
        import importlib
        if importlib.util.find_spec('tensorboardX'):
            tb_folder = save_loc + 'tensorboard/'
            if not os.path.exists(tb_folder):
                os.mkdir(tb_folder)
            elif os.path.exists(tb_folder) and args.restart:
                os.system('rm -rf %s' % tb_folder)
                os.mkdir(tb_folder)

            from tensorboardX import SummaryWriter
            writer = SummaryWriter(tb_folder)
            rm_plotter = plotter
        else:
            writer = None
            rm_plotter = None
    else:
        writer = None
        rm_plotter = None

    run_manager = RunManager(model=model,
                             objective=objective,
                             optimizer=optimizer,
                             scheduler=scheduler,
                             train_dl=train_dl,
                             valid_dl=valid_dl,
                             transforms=transforms,
                             writer=writer,
                             plotter=rm_plotter,
                             max_epochs=args.max_epochs,
                             save_loc=save_loc,
                             do_health_check=args.do_health_check)

    run_manager.run()

    fig_folder = save_loc + 'figs/'

    if os.path.exists(fig_folder):
        os.system('rm -rf %s' % fig_folder)
    os.mkdir(fig_folder)

    # rebuild a clean (non-DataParallel) model and load the best checkpoint
    model_to_plot = _build_conv3d_lfads(hyperparams, num_steps, width, height,
                                        'cuda:0')
    state_dict = torch.load(save_loc + 'checkpoints/' + 'best.pth')
    model_to_plot.load_state_dict(state_dict['net'])
    model_to_plot = model_to_plot.to('cuda:0')
    import matplotlib
    matplotlib.use('Agg')  # headless backend so figures save without a display

    fig_dict = plotter['valid'].plot_summary(model=model_to_plot,
                                             dl=run_manager.valid_dl,
                                             mode='video',
                                             num_average=4,
                                             save_dir=fig_folder)
    for k, v in fig_dict.items():
        if isinstance(v, matplotlib.figure.Figure):
            v.savefig(fig_folder + k + '.svg')
Exemplo n.º 8
0
    pass
try:
    os.makedirs(outf)
except OSError as e:
    # only tolerate "already exists"; re-raise any other OS error
    if e.errno != errno.EEXIST:
        raise

try:
    os.makedirs(checkpointdir)
except OSError:
    pass  # best effort: directory may already exist

# CUDA everything
cudnn.benchmark = True  # autotune conv kernels (fastest for fixed input sizes)
gpu = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.set_default_dtype(torch.float32)
# NOTE(review): set_default_tensor_type is deprecated in newer torch releases
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
else:
    torch.set_default_tensor_type('torch.FloatTensor')
print(gpu)

# load datasets
if opt.dataset == 'mnist':
    out_dir = 'dataset/MNIST'
    dataset = datasets.MNIST(root=out_dir,
                             train=True,
                             download=True,
                             transform=transforms.Compose([
                                 transforms.Resize(opt.imageSize),
                                 transforms.ToTensor(),
Exemplo n.º 9
0
 def tearDown(self):
     """Restore torch's default dtype from self.prev_type (presumably
     captured in setUp -- confirm) so tests don't leak dtype state."""
     torch.set_default_dtype(self.prev_type)
Exemplo n.º 10
0
def main():
    # Parameters

    # Paths are written in UNIX-like notation!
    # So write `C:\Users\user\GANerator` as `C:/Users/user/GANerator` or `~/GANerator`.

    # All parameters that take classes also accept strings of the class.

    # Only parameters in the 'Data and Models' section will be saved and loaded!

    parser = argparse.ArgumentParser()
    # Experiment specific
    # ===================
    parser.add_argument('--num_imgs',
                        help='How many images to generate.',
                        default=30000)
    parser.add_argument(
        '--exp_name',
        nargs='+',
        help=
        "File names for this experiment. If `None` or `''`, `append_time` is always `True`.",
        default=None)
    parser.add_argument(
        '--append_time',
        nargs='+',
        help=
        "Append the current time to the file names (to prevent overwriting).",
        default=True)
    parser.add_argument(
        '--load_dir',
        nargs='+',
        help=
        "Directory to load saved files from. If `save_dir` is `None`, this also acts as `save_dir`.",
        default='.')
    parser.add_argument(
        '--save_dir',
        nargs='+',
        help="Directory to save to. If `None`, use the value of `load_dir`.",
        default='.')

    parser.add_argument(
        '--load_exp',
        nargs='+',
        help=
        "Load the models and parameters from this experiment (previous `exp_name`). Also insert the optionally appended time (WIP: if this value is otherwise ambiguous). Set the parameters `models_file` or `params_file` below to use file names. If set to `True`, use `exp_name`. If `False` or `None`, do not load.",
        default=False)
    parser.add_argument(
        '--params_file',
        nargs='+',
        help=
        "Load parameters from this path. Set to `False` to not load. Priority over `load_exp`. Set to `True` to ignore this so it does not override `load_exp`.",
        default=True)
    parser.add_argument(
        '--models_file',
        nargs='+',
        help=
        "Load models from this path. Set to `False` to not load. Priority over `load_exp`. Set to `True` to ignore this so it does not override `load_exp`.",
        default=True)
    parser.add_argument(
        '--load_weights_only',
        nargs='+',
        help=
        "Load only the models' weights. To continue training, set this to `False`.",
        default=True)

    parser.add_argument(
        '--save_params',
        nargs='+',
        help="Save the parameters in the 'Data and Models' section to a file.",
        default=False)
    parser.add_argument(
        '--save_weights_only',
        nargs='+',
        help=
        "Save only the models' weights. To continue training later, set this to `False`.",
        default=False)
    parser.add_argument(
        '--checkpoint_period',
        nargs='+',
        help=
        "After how many steps to save a model checkpoint. Set to `0` to only save when finished.",
        default=100)

    parser.add_argument(
        '--num_eval_imgs',
        nargs='+',
        help="How many images to generate for (temporal) evaluation.",
        default=64)

    # Hardware and Multiprocessing
    # ============================
    parser.add_argument(
        '--num_workers',
        nargs='+',
        help=
        "Amount of worker threads to create on the CPU. Set to `0` to use CPU count.",
        default=0)
    parser.add_argument(
        '--num_gpus',
        nargs='+',
        help=
        "Amount of GPUs to use. `None` to use all available ones. Set to `0` to run on CPU only.",
        default=None)
    parser.add_argument(
        '--cuda_device_id',
        nargs='+',
        help="ID of CUDA device. In most cases, this should be left at `0`.",
        default=0)

    # Reproducibility
    # ===============
    parser.add_argument(
        '--seed',
        nargs='+',
        help=
        "Random seed if `None`. The used seed will always be saved in `saved_seed`.",
        default=0)
    parser.add_argument(
        '--ensure_reproducibility',
        nargs='+',
        help=
        "If using cuDNN: Set to `True` to ensure reproducibility in favor of performance.",
        default=False)
    parser.add_argument(
        '--flush_denormals',
        nargs='+',
        help=
        "Whether to set denormals to zero. Some architectures do not support this.",
        default=True)

    # Data and Models
    # ===============
    # Only parameters in this section will be saved and updated when loading.

    parser.add_argument(
        '--dataset_root',
        nargs='+',
        help=
        "Path to the root folder of the data set. This value is only loaded if set to `None`!",
        default='~/datasets/ffhq')
    parser.add_argument(
        '--dataset_class',
        nargs='+',
        help=
        "Set this to the torchvision.datasets class (module `dsets`). This value is only loaded if set to `None`!",
        default=dsets.ImageFolder)
    parser.add_argument('--epochs',
                        nargs='+',
                        help="Number of training epochs.",
                        default=5)
    parser.add_argument(
        '--batch_size',
        nargs='+',
        help=
        "Size of each training batch. Strongly depends on other parameters.",
        default=512)
    parser.add_argument(
        '--img_channels',
        nargs='+',
        help=
        "Number of channels in the input images. Normally 3 for RGB and 1 for grayscale.",
        default=3)
    parser.add_argument(
        '--img_shape',
        nargs='+',
        help=
        "Shape of the output images (excluding channel dimension). Can be an integer to get squares. At the moment, an image can only be square sized and a power of two.",
        default=64)
    parser.add_argument(
        '--resize',
        nargs='+',
        help="If `True`, resize images; if `False`, crop (to the center).",
        default=True)

    parser.add_argument('--data_mean',
                        nargs='+',
                        help="Data is normalized to this mean (per channel).",
                        default=0.0)
    parser.add_argument(
        '--data_std',
        nargs='+',
        help="Data is normalized to this standard deviation (per channel).",
        default=1.0)
    parser.add_argument('--float_dtype',
                        nargs='+',
                        help="Float precision as `torch.dtype`.",
                        default=torch.float32)
    parser.add_argument(
        '--g_input',
        nargs='+',
        help="Size of the generator's random input vectors (`z` vector).",
        default=128)

    # GAN hacks
    parser.add_argument(
        '--g_flip_labels',
        nargs='+',
        help="Switch labels for the generator's training step.",
        default=False)
    parser.add_argument(
        '--d_noisy_labels_prob',
        nargs='+',
        help="Probability to switch labels when training the discriminator.",
        default=0.0)
    parser.add_argument(
        '--smooth_labels',
        nargs='+',
        help="Replace discrete labels with slightly different continuous ones.",
        default=False)

    # Values in this paragraph can be either a single value (e.g. an `int`) or a 2-`tuple` of the same type.
    # If a single value, that value will be applied to both the discriminator and generator network.
    # If a 2-`tuple`, the first value will be applied to the discriminator, the second to the generator.
    parser.add_argument(
        '--features',
        nargs='+',
        help="Relative size of the network's internal features.",
        default=64)
    parser.add_argument(
        '--optimizer',
        nargs='+',
        help="Optimizer class. GAN hacks recommends `(optim.SGD, optim.Adam)`.",
        default=optim.Adam)
    parser.add_argument(
        '--lr',
        nargs='+',
        help=
        "Optimizer learning rate. (Second optimizer argument, so not necessarily learning rate.)",
        default=0.0002)
    parser.add_argument(
        '--optim_param',
        nargs='+',
        help=
        "Third optimizer argument. (For example, `betas` for `Adam` or `momentum` for `SGD`.)",
        default=((0.5, 0.999), ))
    parser.add_argument(
        '--optim_kwargs',
        nargs='+',
        help="Any further optimizer keyword arguments as a dictionary.",
        default={})
    parser.add_argument(
        '--normalization',
        nargs='+',
        help=
        "Kind of normalization. Must be a `Norm` or in `('b', 'v', 's', 'i', 'a', 'n')`. Usually, spectral normalization is used in the discriminator while virtual batch normalization is used in the generator.",
        default=Norm.BATCH)
    parser.add_argument(
        '--activation',
        nargs='+',
        help=
        "Activation between hidden layers. GAN hacks recommends `nn.LeakyReLU`.",
        default=(nn.LeakyReLU, nn.ReLU))
    parser.add_argument('--activation_kwargs',
                        nargs='+',
                        help="Activation keyword arguments.",
                        default=({
                            'negative_slope': 0.2,
                            'inplace': True
                        }, {
                            'inplace': True
                        }))
    params = vars(parser.parse_args())
    for key, val in params.items():
        if type(val) is list:
            if len(val) == 1:
                params[key] = val[0]
            else:
                params[key] = tuple(val)

    # Process parameters

    num_imgs = int(params['num_imgs'])

    # Model parameters as tuples. If it is a tuple, give the class to return as well.
    # If the class is given as `'eval'`, the parameter is literally evaluated if either
    # the tuple or its content begins with a symbol in '({['.
    tuple_params = (
        ('features', int),
        ('optimizer', 'eval'),
        ('lr', float),
        ('optim_param', 'eval'),
        ('optim_kwargs', 'eval'),
        ('normalization', 'eval'),
        ('activation', 'eval'),
        ('activation_kwargs', 'eval'),
    )

    # Parameters that we do *not* want to save (or load).
    # We list these instead of the model parameters as those should be easier to extend.
    static_params = [
        'exp_name',
        'append_time',
        'load_dir',
        'save_dir',
        'load_exp',
        'params_file',
        'models_file',
        'load_weights_only',
        'save_params',
        'save_weights_only',
        'checkpoint_period',
        'num_workers',
        'num_gpus',
        'cuda_device_id',
        'seed',
        'ensure_reproducibility',
        'flush_denormals',
    ]

    def string_to_class(string):
        """
        Resolve a dotted name like `'torch.optim.Adam'` to the named object.

        A bare name (no dot) is looked up among the builtins; otherwise the
        first component is taken from this module's globals and the rest are
        resolved with `getattr`. Non-string arguments are returned unchanged.
        """
        if type(string) is str:
            string = string.split('.')
            if len(string) == 1:
                # `__builtins__` is a plain dict in imported modules (and a
                # module only in `__main__`), so `getattr` on it would fail;
                # the `builtins` module works in both cases.
                import builtins
                m = builtins
            else:
                m = globals()[string[0]]
                for part in string[1:-1]:
                    m = getattr(m, part)
            return getattr(m, string[-1])
        else:
            return string

    def argstring(string):
        """
        Convert the strings `'True'`, `'False'` and `'None'` to their values.

        Any other argument — string or not — is returned unchanged.
        """
        conversions = {'True': True, 'False': False, 'None': None}
        if string in conversions:
            return conversions[string]
        return string

    # Experiment name

    append_time = argstring(params['append_time'])
    exp_name = argstring(params['exp_name'])
    if not exp_name or append_time:
        if exp_name is not str:
            exp_name = ''
        exp_name = ''.join(
            (exp_name, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')))

    # Load parameters

    load_dir = argstring(params['load_dir'])
    save_dir = argstring(params['save_dir'])
    if save_dir is None:
        save_dir = load_dir

    load_exp = argstring(params['load_exp'])

    params_file = argstring(params['params_file'])
    load_params = params_file and (load_exp or type(params_file) is str)

    dataset_root = argstring(params['dataset_root'])
    dataset_class = string_to_class(params['dataset_class'])

    # Check whether these parameters are `None`.
    # If yes, check that parameters loading is enabled. Otherwise do not update them.
    if dataset_root is None:
        assert load_params, '`dataset_root` cannot be `None` if not loading parameters.'
    else:
        static_params.append('dataset_root')
    if dataset_class is None:
        assert load_params, '`dataset_class` cannot be `None` if not loading parameters.'
    else:
        static_params.append('dataset_class')

    if params_file and (load_exp or type(params_file) is str):
        if type(params_file) is str:
            params_path = Path(params_file)
        elif type(load_exp) is bool:  #
            params_path = Path('{}/params_{}.pt'.format(load_dir, exp_name))
        else:
            params_path = Path('{}/params_{}.pt'.format(load_dir, load_exp))

        params_path = params_path.expanduser()
        upd_params = torch.load(params_path)
        params.update(upd_params)
        del upd_params
    elif params_file == '':
        print(
            "`params_file` is an empty string (`''`). Parameters were not loaded. "
            'Set to `False` to suppress this warning or to `True` to let `load_exp` handle loading.'
        )

    # Hardware and multiprocessing

    num_gpus = argstring(params['num_gpus'])
    cuda_device_id = int(params['cuda_device_id'])
    if num_gpus is None:
        num_gpus = torch.cuda.device_count()
        print('Using {} GPUs.'.format(num_gpus))
    else:
        num_gpus = int(num_gpus)
    use_gpus = num_gpus > 0
    multiple_gpus = num_gpus > 1
    if use_gpus:
        assert torch.cuda.is_available(), 'CUDA is not available. ' \
                'Check what is wrong or set `num_gpus` to `0` to run on CPU.'  # Never check for this again
        device = torch.device('cuda:' + str(cuda_device_id))
    else:
        device = torch.device('cpu')

    num_workers = int(params['num_workers'])
    if not num_workers:
        num_workers = mp.cpu_count()
        print('Using {} worker threads.'.format(num_workers))

    # Load model

    models_file = argstring(params['models_file'])
    models_cp = None
    if models_file and (load_exp or type(models_file) is str):
        if type(models_file) is str:
            models_path = Path(models_file)
        elif type(load_exp) is bool:
            models_path = Path('{}/models_{}.tar'.format(load_dir, exp_name))
        else:
            models_path = Path('{}/models_{}.tar'.format(load_dir, load_exp))
        models_path = models_path.expanduser()
        models_cp = torch.load(models_path, map_location=device)
    elif models_file == '':
        print(
            "`models_file` is an empty string (`''`). Models were not loaded. "
            'Set to `False` to suppress this warning or to `True` to let `load_exp` handle loading.'
        )

    # Reproducibility

    seed = argstring(params['seed'])
    if seed is None:
        seed = np.random.randint(10000)
    else:
        seed = int(seed)
    print('Seed: {}.'.format(seed))
    params['saved_seed'] = seed
    np.random.seed(seed)
    torch.manual_seed(seed)

    ensure_reproducibility = argstring(params['ensure_reproducibility'])
    torch.backends.cudnn.deterministic = ensure_reproducibility
    if ensure_reproducibility:
        torch.backends.cudnn.benchmark = False  # This is the default but do it anyway

    flush_denormals = argstring(params['flush_denormals'])
    set_flush_success = torch.set_flush_denormal(flush_denormals)
    if flush_denormals and not set_flush_success:
        print('Not able to flush denormals. `flush_denormals` set to `False`.')
        flush_denormals = False

    # Dataset root

    dataset_root = Path(dataset_root).expanduser()

    # Floating point precision

    float_dtype = string_to_class(params['float_dtype'])
    if float_dtype is torch.float16:
        print(
            'PyTorch does not support half precision well yet. Be careful and assume errors.'
        )
    torch.set_default_dtype(float_dtype)

    # Parameters we do not need to process

    load_weights_only = argstring(params['load_weights_only'])
    save_weights_only = argstring(params['save_weights_only'])
    checkpoint_period = int(params['checkpoint_period'])
    num_eval_imgs = int(params['num_eval_imgs'])

    epochs = int(params['epochs'])
    batch_size = int(params['batch_size'])
    img_channels = int(params['img_channels'])
    resize = argstring(params['resize'])

    data_mean = float(params['data_mean'])
    data_std = float(params['data_std'])
    g_input = int(params['g_input'])

    g_flip_labels = argstring(params['g_flip_labels'])
    d_noisy_labels_prob = float(params['d_noisy_labels_prob'])
    smooth_labels = argstring(params['smooth_labels'])

    assert 0.0 <= d_noisy_labels_prob <= 1.0, \
            'Invalid probability for `d_noisy_labels`. Must be between 0 and 1 inclusively.'

    # Single or tuple parameters

    def param_as_ntuple(key, n=2, return_type=None):
        """
        Return `params[key]` as an `n`-tuple, converting each entry.

        If `return_type` is `None`, entries pass through unchanged. If it is
        the string `'eval'`, string entries that start with one of `'({['`
        are literally evaluated (and a whole-value string of that shape is
        evaluated first); otherwise `return_type` is called on each entry.
        A scalar is repeated `n` times; a tuple/list shorter than `n` is
        padded by repeating its last entry.
        """
        if return_type is None:

            def return_func(x):
                return x
        else:
            return_func = return_type
        val = params[key]
        if return_type == 'eval':
            # A whole-value string like "(a, b)" becomes a real tuple first.
            if type(val) is str and val[0] in '({[':
                val = literal_eval(val)

            # Redefine the converter: evaluate entry strings that look like
            # tuple/dict/list literals, pass everything else through.
            def return_func(x):
                if type(x) is str and x[0] in '({[':
                    return literal_eval(str(x))
                else:
                    return x

        if type(val) in (tuple, list):
            assert 0 < len(
                val
            ) <= n, 'Tuples should have length {} (`{}` is `{}`).'.format(
                n, key, val)
            if len(val) < n:
                if len(val) > 1:
                    print('`{}` is `{}`. Length is less than {}; '.format(
                        key, val, n) +
                          'last entry has been repeated to fit length.')
                # Pad with the last entry so the result always has length `n`.
                return tuple(
                    map(return_func,
                        tuple(val) + (val[-1], ) * (n - len(val))))
            else:
                return tuple(map(return_func, val))
        # Scalar: repeat the single converted value `n` times.
        return (return_func(val), ) * n

    def ispow2(x):
        """Return whether `x` is a positive power of two (2**k, k >= 0)."""
        if x <= 0:
            # log2 is undefined (or -inf) here; the original crashed on 0
            # because `int(float('-inf'))` raises OverflowError.
            return False
        log2 = np.log2(x)
        return log2 == int(log2)

    img_shape = param_as_ntuple('img_shape', return_type=int)
    assert img_shape[0] == img_shape[
        1], '`img_shape` must be square (same width and height).'
    assert ispow2(img_shape[0]), '`img_shape` must be a power of two (2^n).'

    d_params = {}
    g_params = {}
    for key in tuple_params:
        if type(key) is tuple:
            key, ret_type = key
            d_params[key], g_params[key] = param_as_ntuple(
                key, return_type=ret_type)
        else:
            d_params[key], g_params[key] = param_as_ntuple(key)

    # Normalization and class parameters

    for p in d_params, g_params:
        normalization = p['normalization']
        if isinstance(normalization,
                      str) and normalization.lower() in ('b', 'v', 's', 'i',
                                                         'a', 'n'):
            normalization = {
                'b': Norm.BATCH,
                'v': Norm.VIRTUAL_BATCH,
                's': Norm.SPECTRAL,
                'i': Norm.INSTANCE,
                'a': Norm.AFFINE_INSTANCE,
                'n': Norm.NONE
            }[normalization]
        if not isinstance(normalization, Norm):
            try:
                normalization = Norm(normalization)
            except ValueError:
                normalization = string_to_class(normalization)
            finally:
                assert isinstance(normalization, Norm), \
                        "Unknown normalization. Must be a `Norm` or in `('b', 'v', 's', 'i', 'a', 'n')`."
        p['normalization'] = normalization

        p['optimizer'] = string_to_class(p['optimizer'])
        p['activation'] = string_to_class(p['activation'])

    save_models_path_str = '{}/models_{}_{{}}_steps.tar'.format(
        save_dir, exp_name)

    # Generate example batch

    example_noise = torch.randn(batch_size, g_input, 1, 1, device=device)

    # Model helper methods

    @weak_module
    class VirtualBatchNorm2d(nn.Module):
        """
        Virtual batch normalization over 4D `(N, C, H, W)` inputs.

        Statistics are a convex combination of the current batch's and a
        fixed reference batch's moments, weighted so the reference batch
        dominates; this makes a sample's normalization nearly independent
        of the rest of its batch (a GAN training technique).
        """

        def __init__(self, num_features, eps=1e-5, affine=True):
            # num_features: channel count C of the expected (N, C, H, W) input.
            # eps: added under the square root for numerical stability.
            # affine: if True, learn a per-channel scale and shift.
            super().__init__()
            self.num_features = num_features
            self.eps = eps
            self.affine = affine
            if self.affine:
                # Broadcastable per-channel scale and shift.
                self.weight = nn.Parameter(torch.Tensor(1, num_features, 1, 1))
                self.bias = nn.Parameter(torch.Tensor(1, num_features, 1, 1))
            else:
                self.register_parameter('weight', None)
                self.register_parameter('bias', None)
            self.reset_parameters(True)

        def reset_parameters(self, all=False):
            # With `all=True` the cached mixing coefficients are also cleared,
            # so they are recomputed from the next reference batch seen.
            if self.affine:
                nn.init.uniform_(self.weight)
                nn.init.zeros_(self.bias)
            if all:
                self.in_coef = None
                self.ref_coef = None

        @weak_script_method
        def forward(self, input, ref_batch):
            # Returns BOTH the normalized input and the normalized reference
            # batch, so the caller can thread the reference batch onward.
            self._check_input_dim(input)
            if self.in_coef is None:
                # Lazily derive the mixing weights from the reference batch
                # size: the current sample contributes 1/(len(ref_batch) + 1).
                self._check_input_dim(ref_batch)
                self.in_coef = 1 / (len(ref_batch) + 1)
                self.ref_coef = 1 - self.in_coef

            mean, std, ref_mean, ref_std = self.calculate_statistics(
                input, ref_batch)
            return self.normalize(input, mean,
                                  std), self.normalize(ref_batch, ref_mean,
                                                       ref_std)

        @weak_script_method
        def calculate_statistics(self, input, ref_batch):
            # Mixed moments for the input; pure moments for the reference.
            in_mean, in_sqmean = self.calculate_means(input)
            ref_mean, ref_sqmean = self.calculate_means(ref_batch)

            mean = self.in_coef * in_mean + self.ref_coef * ref_mean
            sqmean = self.in_coef * in_sqmean + self.ref_coef * ref_sqmean

            # Var = E[x^2] - E[x]^2; eps keeps the sqrt argument positive.
            std = torch.sqrt(sqmean - mean**2 + self.eps)
            ref_std = torch.sqrt(ref_sqmean - ref_mean**2 + self.eps)
            return mean, std, ref_mean, ref_std

        # TODO could be @staticmethod, but check @weak_script_method first
        @weak_script_method
        def calculate_means(self, batch):
            # Per-channel mean and mean-of-squares, reduced over the batch dim.
            mean = torch.mean(batch, 0, keepdim=True)
            sqmean = torch.mean(batch**2, 0, keepdim=True)
            return mean, sqmean

        @weak_script_method
        def normalize(self, batch, mean, std):
            # NOTE(review): assumes `affine=True`; with `affine=False` the
            # weight/bias are `None` and this would fail -- confirm intended.
            return ((batch - mean) / std) * self.weight + self.bias

        @weak_script_method
        def _check_input_dim(self, input):
            if input.dim() != 4:
                raise ValueError('expected 4D input (got {}D input)'.format(
                    input.dim()))

    def powers(n, b=2):
        """Yield `n` consecutive pairs `(b**i, b**(i + 1))` for i = 0..n-1."""
        prev = 1
        for _ in range(n):
            cur = prev * b
            yield prev, cur
            prev = cur

    def layer_with_norm(layer, norm, features):
        """Pair `layer` with the normalization module requested by `norm`.

        Spectral normalization wraps the layer itself; `Norm.NONE` returns
        the layer alone; every other kind appends a normalization module
        sized for `features` channels.
        """
        if norm is Norm.SPECTRAL:
            return (nn.utils.spectral_norm(layer), )
        if norm is Norm.NONE:
            return (layer, )
        if norm is Norm.BATCH:
            norm_module = nn.BatchNorm2d(features)
        elif norm is Norm.VIRTUAL_BATCH:
            norm_module = VirtualBatchNorm2d(features)
        elif norm is Norm.INSTANCE:
            norm_module = nn.InstanceNorm2d(features)
        elif norm is Norm.AFFINE_INSTANCE:
            norm_module = nn.InstanceNorm2d(features, affine=True)
        else:
            raise ValueError("Unknown normalization `'{}'`".format(norm))
        return (layer, norm_module)

    # Define and initialize generator

    # Generator

    class Generator(nn.Module):
        """
        DCGAN-style transposed-convolution generator network.

        When `normalization` is `Norm.VIRTUAL_BATCH` a `reference_batch`
        must be supplied and layers are kept in a `ModuleList` so `forward`
        can thread the reference batch through each `VirtualBatchNorm2d`;
        for any other normalization the layers collapse into a plain
        `Sequential`.
        """

        def __init__(self,
                     normalization,
                     activation,
                     activation_kwargs,
                     img_channels,
                     img_shape,
                     features,
                     g_input,
                     reference_batch=None):
            super().__init__()
            self.layers = self.build_layers(normalization, activation,
                                            activation_kwargs, img_channels,
                                            img_shape, features, g_input)
            if normalization is not Norm.VIRTUAL_BATCH:
                self.reference_batch = None  # we can test for VBN with this invariant
                self.layers = nn.Sequential(*self.layers)
            elif reference_batch is None:
                raise ValueError('Normalization is virtual batch norm, but '
                                 '`reference_batch` is `None` or missing.')
            else:
                self.reference_batch = reference_batch  # never `None`
                self.layers = nn.ModuleList(self.layers)

        @staticmethod
        def build_layers(norm, activation, activation_kwargs, img_channels,
                         img_shape, features, g_input):
            """
            Return a list of the layers for the generator network.

            Example for a 64 x 64 image:
            >>> Generator.build_layers(Norm.BATCH, nn.ReLU, {'inplace': True},
                                       img_channels=3, img_shape=(64, 64), features=64, g_input=128)
            [
                # input size is 128 (given by `g_input`)
                nn.ConvTranspose2d(g_input, features * 8, 4, 1, 0, bias=False),
                nn.BatchNorm2d(features * 8),
                nn.ReLU(True),
                # state size is (features * 8) x 4 x 4
                nn.ConvTranspose2d(features * 8, features * 4, 4, 2, 1, bias=False),
                nn.BatchNorm2d(features * 4),
                nn.ReLU(True),
                # state size is (features * 4) x 8 x 8
                nn.ConvTranspose2d(features * 4, features * 2, 4, 2, 1, bias=False),
                nn.BatchNorm2d(features * 2),
                nn.ReLU(True),
                # state size is (features * 2) x 16 x 16
                nn.ConvTranspose2d(features * 2, features, 4, 2, 1, bias=False),
                nn.BatchNorm2d(features),
                nn.ReLU(True),
                # state size is (features) x 32 x 32
                nn.ConvTranspose2d(features, img_channels, 4, 2, 1, bias=False),
                nn.Tanh()
                # output size is 3 x 64 x 64 (given by `img_channels` and `img_shape`)
            ]
            """
            # Deepest feature multiplier: 2^(log2(img_size) - 3), e.g. 8 for
            # a 64-px image. Relies on `img_shape[0]` being a power of two.
            j = 2**(int(np.log2(img_shape[0])) - 3)
            # input size is (g_input)
            layers = [
                *layer_with_norm(
                    nn.ConvTranspose2d(
                        g_input, features * j, 4, 1, 0, bias=False), norm,
                    features * j),
                activation(**activation_kwargs)
            ]
            # state size is (features * 2^n) x 4 x 4
            # each further layer halves feature size and doubles image size
            while j > 1:
                i = j
                j //= 2
                layers.extend((*layer_with_norm(
                    nn.ConvTranspose2d(
                        features * i, features * j, 4, 2, 1, bias=False), norm,
                    features * j), activation(**activation_kwargs)))
            # state size is (features) x (img_shape[0] / 2) x (img_shape[1] / 2)
            layers.extend((nn.ConvTranspose2d(features,
                                              img_channels,
                                              4,
                                              2,
                                              1,
                                              bias=False), nn.Tanh()))
            # output size is (img_channels) x (img_shape[0]) x (img_shape[1])
            return layers

        @weak_script_method
        def forward(self, input):
            # Separation is for performance reasons
            if self.reference_batch is None:
                return self.layers(input)
            else:
                # VBN
                # Thread the reference batch through every layer alongside
                # the input; `VirtualBatchNorm2d` consumes and returns both.
                ref_batch = self.reference_batch
                for layer in self.layers:
                    if not isinstance(layer, VirtualBatchNorm2d):
                        input = layer(input)
                        ref_batch = layer(ref_batch)
                    else:
                        input, ref_batch = layer(input, ref_batch)
                return input

    # Initialization

    def init_weights(module):
        """DCGAN-style init: N(0, 0.02) conv weights, N(1, 0.02) norm scales.

        Intended for use with `net.apply(init_weights)`. `ConvBase` and
        `BatchNormBase` are presumably base classes (or tuples) covering
        the conv and batch-norm module families -- TODO confirm.
        """
        if isinstance(module, ConvBase):
            nn.init.normal_(module.weight.data, 0.0, 0.02)
        elif isinstance(module, BatchNormBase):
            nn.init.normal_(module.weight.data, 1.0, 0.02)
            nn.init.constant_(module.bias.data, 0)

    g_net = Generator(g_params['normalization'], g_params['activation'],
                      g_params['activation_kwargs'], img_channels, img_shape,
                      g_params['features'], g_input,
                      example_noise.to(device,
                                       float_dtype)).to(device, float_dtype)

    # Load models' checkpoints

    if models_cp is not None:
        g_net.load_state_dict(models_cp['g_net_state_dict'])

    if multiple_gpus:
        g_net = nn.DataParallel(g_net, list(range(num_gpus)))

    if models_cp is None:
        g_net.apply(init_weights)

    real_label = 1
    fake_label = 0

    # Load optimizers' checkpoints

    if models_cp is not None:
        if not load_weights_only:
            try:
                g_optim_state_dict = models_cp['g_optim_state_dict']
            except KeyError:
                print(
                    "One of the optimizers' state dicts was not found; probably because "
                    "only the models' weights were saved. Set `load_weights_only` to `True`."
                )
            g_optimizer.load_state_dict(g_optim_state_dict)
            g_net.train()
        else:
            g_net.eval()

    def generate_fakes(batch_size, start_count, g_input, g_net, device,
                       float_dtype, zfill_len, save_dir):
        """Generate `batch_size` fakes with `g_net` and save numbered PNGs.

        Files are written to `save_dir` as zero-padded indices starting at
        `start_count`.
        """
        noise = torch.randn(batch_size, g_input, 1, 1,
                            device=device).to(device, float_dtype)
        with torch.no_grad():
            fakes = g_net(noise).detach().cpu()

        for offset, fake in enumerate(fakes):
            name = str(start_count + offset).zfill(zfill_len)
            tvutils.save_image(fake,
                               Path('{}/{}.png'.format(save_dir, name)))

    # Save images generated on noise.
    zfill_len = len(str(num_imgs))

    num_full_gens = num_imgs // batch_size
    for i in map(lambda x: x * batch_size, range(num_full_gens)):
        generate_fakes(batch_size, i, g_input, g_net, device, float_dtype,
                       zfill_len, save_dir)
    generate_fakes(num_imgs - num_full_gens * batch_size,
                   num_full_gens * batch_size, g_input, g_net, device,
                   float_dtype, zfill_len, save_dir)
Exemplo n.º 11
0
 def setUp(self):
     """Remember the current default dtype and switch to float64 for the test."""
     self.prev_type = torch.get_default_dtype()
     torch.set_default_dtype(torch.float64)
Exemplo n.º 12
0
 def __exit__(self, *args):
     """Restore the default dtype saved by `__enter__`."""
     torch.set_default_dtype(self.old_dtype)
Exemplo n.º 13
0
 def __enter__(self):
     """Save the current default dtype, then install `self.new_dtype`."""
     self.old_dtype = torch.get_default_dtype()
     torch.set_default_dtype(self.new_dtype)
import numpy as np
import math
import torch as tf
import torch.utils.data
import torch.nn as nn
import torch.optim as optim
import os
import gc
import time
import sys
from ctypes import *
from class_and_function import *
from torch.utils.cpp_extension import load

# NOTE: `tf` is an (unusual) alias for torch here, not TensorFlow.
default_dtype = tf.float32
tf.set_default_dtype(default_dtype)
tf.set_printoptions(precision=10)
# Bare `torch` is in scope because of `import torch.utils.data` above.
device = tf.device('cuda' if torch.cuda.is_available() else 'cpu')
#device = tf.device('cpu')
#torch.set_printoptions(edgeitems=9999)

if (device != tf.device('cpu')):
    print("cuDNN version: ", tf.backends.cudnn.version())
    # tf.backends.cudnn.enabled = False
    #tf.backends.cudnn.benchmark = True
    MULTIPLIER = tf.cuda.device_count()
else:
    MULTIPLIER = 1

# NOTE(review): this unconditionally overrides the GPU count computed above,
# forcing single-device behavior -- confirm this is intended.
MULTIPLIER = 1
#if (hvd.rank() == 0):
Exemplo n.º 15
0
 def test_dtype_inference(self):
     """A complex literal must be inferred as cdouble under a double default."""
     # issue: https://github.com/pytorch/pytorch/issues/36834
     torch.set_default_dtype(torch.double)
     x = torch.tensor([3., 3. + 5.j])
     self.assertEqual(x.dtype, torch.cdouble)
def main(args):
    """
    Aggregate per-property ensemble predictions and append summary metrics.

    For each batch and property, the per-run predicted means and targets
    saved by earlier runs are loaded from `results/` and averaged into an
    ensemble prediction. ROC-AUC, R^2 and RMSE are computed per property,
    averaged per batch, and the mean +- std over batches is appended to
    the summary text file.

    Note: dead locals from the original (unused confidence-curve and
    F1/MLL scaffolding) have been removed; behavior is unchanged.
    """
    warnings.filterwarnings('ignore')
    torch.set_default_dtype(torch.float64)

    extra = 'single'
    extra_dir = ''

    filename = args.dataname + '_' + args.model_name + '_' + extra
    task_type = 'classification'
    run_number = 1  # runs averaged into the ensemble per batch
    batches = [0,]
    n_properties = properties_map[args.dataname]

    with open('results/{}/{}/summary/{}_ensemble.txt'.format(args.dataname, task_type, filename), 'a') as f:
        r2_scores_list = []
        rmses_list = []
        roc_aucs_list = []

        for batch in batches:
            r2_scores = []
            roc_aucs = []
            rmses = []

            for p in range(n_properties):
                mns = []
                targets = []
                for i in range(run_number):
                    filestart = '{}{}_{}_{}_{}_'.format(args.dataname, args.num, args.model_name, (batch*run_number+i), p)
                    mn = np.load('results/{}/{}/{}/{}{}mean.npy'.format(args.dataname, task_type, args.model_name, extra_dir, filestart))
                    target = np.load('results/{}/{}/{}/{}{}target.npy'.format(args.dataname, task_type, args.model_name, extra_dir, filestart))
                    mns.append(mn)
                    targets.append(target)

                # Ensemble prediction: average the per-run means. Targets are
                # averaged the same way (identical across runs in practice).
                mean = np.mean(np.array(mns), axis=0)
                target = np.mean(np.array(targets), axis=0)

                roc_aucs.append(roc_auc_score(target, mean))
                r2_scores.append(r2_score(target, mean))
                rmses.append(np.sqrt(mean_squared_error(target, mean)))

            # Average over properties for this batch.
            roc_aucs_list.append(np.mean(np.array(roc_aucs)))
            r2_scores_list.append(np.mean(np.array(r2_scores)))
            rmses_list.append(np.mean(np.array(rmses)))

        r2_scores_list = np.array(r2_scores_list)
        rmses_list = np.array(rmses_list)
        roc_aucs_list = np.array(roc_aucs_list)

        f.write('\n R^2 score: {:.4f}+- {:.4f}'.format(np.mean(r2_scores_list), np.std(r2_scores_list)))
        f.write('\n RMSE: {:.4f}+- {:.4f} \n'.format(np.mean(rmses_list), np.std(rmses_list)))
        f.write('\n ROC-AUC: {:.4f}+- {:.4f} \n'.format(np.mean(roc_aucs_list), np.std(roc_aucs_list)))

        f.flush()
Exemplo n.º 17
0
 def _setup(self):
     """
     Run any global setups for the pytorch lib.
     """
     # `self.dtypemap` presumably maps precision names ('float', ...) to
     # torch dtypes -- TODO confirm against the defining class.
     torch.set_default_dtype(self.dtypemap["float"])
import math
import numpy as np
import matplotlib.pyplot as plt
import torch as tc
from torch.distributions import StudentT

# Device selection: pin CUDA to `device_number` when a GPU is available.
device_number = 0
if tc.cuda.is_available():
    tc.cuda.set_device(device_number)

# All newly created tensors default to double precision from here on.
tc.set_default_dtype(tc.float64)

# wrapper: ch, chfield and basechange


class X:
    """Wrap observation counts with a trivial (all-ones) category reindexing.

    `reidx` presumably maps each observation to a category id; here every
    observation is placed in category 1, so `ncat` is always 1 and `sums`
    is the plain total -- the commented `bincount` hints at the intended
    per-category generalization.
    """

    def __init__(self, x):
        self.x = x
        self.reidx = tc.ones_like(x)
        self.ncat = int(self.reidx.max().item())
        self.sums = self.x.sum()  # tc.bincount(self.reidx, self.x)[1:]


class P:
    """Hyper-parameter container for the tau priors of the mutation model."""

    def __init__(self, mutations: X, t_mh_nr_iter=100, taua=None, taub=None):
        self.t_mh_nr_iter = t_mh_nr_iter  # Metropolis-Hastings iteration count
        self.mutau_ihsf = 0
        # Default priors are sized by the number of mutation categories.
        if taua is None:
            taua = tc.ones(mutations.ncat + 1, 1)
        if taub is None:
            taub = tc.full([mutations.ncat, 1], 10.)
        self.taua = taua
        self.taub = taub
Exemplo n.º 19
0
 def setUp(self):
     """Reset the default dtype to float32 so each test starts from a known state."""
     super(TestTypePromotion, self).setUp()
     torch.set_default_dtype(torch.float32)
     self.device = 'cpu'
def train(args):
    """Train a graph model with an adversarial two-dataset scheme.

    Loads two views of the dataset — one for node classification ('nc')
    and one for link prediction ('lp') — and alternates three
    optimization phases per epoch on a single shared model.

    NOTE(review): ``args.task`` is temporarily rewritten (tracked via
    ``reserve_mark``) so that ``data`` is loaded under 'nc' and
    ``data1`` under 'lp', then restored. Confirm this matches the
    caller's expectation, since ``args`` is mutated in place.

    Args:
        args: namespace with seed, cuda, task, dataset, optimizer,
            lr/scheduler, save and logging settings (mutated in place).
    """
    # Reproducibility: seed numpy and torch (and CUDA when enabled).
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if int(args.double_precision):
        torch.set_default_dtype(torch.float64)
    if int(args.cuda) >= 0:
        torch.cuda.manual_seed(args.seed)
    args.device = 'cuda:' + str(args.cuda) if int(args.cuda) >= 0 else 'cpu'
    args.patience = args.epochs if not args.patience else  int(args.patience)
    logging.getLogger().setLevel(logging.INFO)
    # Optional file logging under a dated directory (uses LOG_DIR env var).
    if args.save:
        if not args.save_dir:
            dt = datetime.datetime.now()
            date = f"{dt.year}_{dt.month}_{dt.day}"
            models_dir = os.path.join(os.environ['LOG_DIR'], args.task, date)
            save_dir = get_dir_name(models_dir)
        else:
            save_dir = args.save_dir
        logging.basicConfig(level=logging.INFO,
                            handlers=[
                                logging.FileHandler(os.path.join(save_dir, 'log.txt')),
                                logging.StreamHandler()
                            ])

    logging.info(f'Using: {args.device}')
    logging.info("Using seed {}.".format(args.seed))

    # reserve_mark == 1 means args.task was temporarily overridden and
    # must be restored after the corresponding load_data call.
    reserve_mark = 0

    # Force the first load to happen under the 'nc' task.
    if args.task == 'nc':
        reserve_mark = 0
    else:
        args.task = 'nc'
        reserve_mark = 1
    # Load data
    data = load_data(args, os.path.join('data/', args.dataset))
    args.n_nodes, args.feat_dim = data['features'].shape
    if args.task == 'nc':
        Model = ADVNCModel
        args.n_classes = int(data['labels'].max() + 1)
        logging.info(f'Num classes: {args.n_classes}')
    else:
        args.nb_false_edges = len(data['train_edges_false'])
        args.nb_edges = len(data['train_edges'])
        if args.task == 'lp':
            Model = ADVLPModel
        else:
            Model = RECModel
            # No validation for reconstruction task
            args.eval_freq = args.epochs + 1

    #transfer loading
    if reserve_mark == 1:
        args.task = 'lp'
        # reset reserve mark
        reserve_mark = 0

    # Force the second load to happen under the 'lp' task.
    if args.task == 'lp':
        reserve_mark = 0
    else:
        args.task = 'lp'
        reserve_mark = 1

    data1 = load_data(args, os.path.join('data/', args.dataset))
    args.n_nodes, args.feat_dim = data1['features'].shape
    if args.task == 'nc':
        Model = ADVNCModel
        args.n_classes = int(data1['labels'].max() + 1)
        logging.info(f'Num classes: {args.n_classes}')
    else:
        print('*****')
        args.nb_false_edges = len(data1['train_edges_false'])
        args.nb_edges = len(data1['train_edges'])
        if args.task == 'lp':
            Model = ADVLPModel
        else:
            Model = RECModel
            # No validation for reconstruction task
            args.eval_freq = args.epochs + 1

    # Restore the original task if it was overridden above.
    if reserve_mark == 1:
        args.task = 'nc'

    # Final model class selection follows the (restored) task.
    if args.task == 'nc':
        Model = ADVNCModel
    else:
        Model = ADVLPModel





    # Default: step the LR schedule once over the whole run.
    if not args.lr_reduce_freq:
        args.lr_reduce_freq = args.epochs

    # Model and optimizer
    model = Model(args)
    logging.info(str(model))
    # Two optimizers: one over all parameters, one over the encoder only.
    # NOTE(review): optimizer_en / lr_scheduler_en are created but never
    # stepped below — confirm whether that is intentional.
    optimizer = getattr(optimizers, args.optimizer)(params=model.parameters(), lr=args.lr,
                                                    weight_decay=args.weight_decay)
    optimizer_en = getattr(optimizers, args.optimizer)(params=model.encoder.parameters(), lr=args.lr,
                                                    weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer,
        step_size=int(args.lr_reduce_freq),
        gamma=float(args.gamma)
    )
    lr_scheduler_en = torch.optim.lr_scheduler.StepLR(
        optimizer_en,
        step_size=int(args.lr_reduce_freq),
        gamma=float(args.gamma)
    )
    tot_params = sum([np.prod(p.size()) for p in model.parameters()])
    logging.info(f"Total number of parameters: {tot_params}")
    # Move the model and both datasets' tensors to the selected device.
    if args.cuda is not None and int(args.cuda) >= 0 :
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
        model = model.to(args.device)
        for x, val in data.items():
            if torch.is_tensor(data[x]):
                data[x] = data[x].to(args.device)
        for x, val in data1.items():
            if torch.is_tensor(data1[x]):
                data1[x] = data1[x].to(args.device)
    # Train model
    t_total = time.time()
    counter = 0
    best_val_metrics = model.init_metric_dict()
    best_test_metrics = None
    best_emb = None
    for epoch in range(args.epochs):
        t = time.time()
        model.train()
        # if epoch%3==0:

        # model.save_net()
        # model.save_emb()
        # lr_scheduler.step()

    # if epoch%3==1:
    #     if epoch > 100:
        # Phase 1: minimize the task loss on the second dataset (data1).
        optimizer.zero_grad()
        # model.load_emb()
        embeddings1 = model.encode(data1['features'], data1['adj_train_norm'])
        train_metrics1 = model.compute_metrics1(embeddings1, data1, 'train')
        loss1 = train_metrics1['loss']
        loss1.backward()
        if args.grad_clip is not None:
            max_norm = float(args.grad_clip)
            all_params = list(model.parameters())
            for param in all_params:
                torch.nn.utils.clip_grad_norm_(param, max_norm)
        optimizer.step()
        # model.load_net()
    #     # lr_scheduler.step()

        # Phase 2: adversarial step on the first dataset — maximize the
        # gap between the true loss and the shuffled-label loss.
        optimizer.zero_grad()
        embeddings = model.encode(data['features'], data['adj_train_norm'])
        train_metrics = model.compute_metrics(embeddings, data, 'train')
        loss = -(train_metrics['loss'] - train_metrics['loss_shuffle'])
        loss.backward()
        optimizer.step()
    #
    # # if epoch%3==2:
        # Phase 3: same gap minimized, with encoder output detached so
        # only the head (non-encoder) parameters receive gradients.
        optimizer.zero_grad()
        embeddings2 = model.encode(data['features'], data['adj_train_norm']).detach_()
        train_metrics2 = model.compute_metrics(embeddings2, data, 'train')
        loss2 = (train_metrics2['loss'] - train_metrics2['loss_shuffle'])
        loss2.backward()
        optimizer.step()
        lr_scheduler.step()
        # if epoch<100:
        #     train_metrics2 = train_metrics
        if (epoch + 1) % args.log_freq == 0:
            logging.info(" ".join(['Epoch: {:04d}'.format(epoch + 1),
                                   'lr: {}'.format(lr_scheduler.get_lr()[0]),
                                   format_metrics(train_metrics1, 'train'),
                                   format_metrics(train_metrics2, 'train'),
                                   'time: {:.4f}s'.format(time.time() - t)
                                   ]))
            if not best_val_metrics == None:
                logging.info(" ".join(["Val set results:", format_metrics(best_val_metrics, 'val')]))
                logging.info(" ".join(["Test set results:", format_metrics(best_test_metrics, 'test')]))
        # Periodic evaluation on both datasets; model selection is driven
        # by the data1 ('lp' view) validation metrics.
        if (epoch + 1) % args.eval_freq == 0:
            model.eval()
            embeddings = model.encode(data['features'], data['adj_train_norm'])
            val_metrics = model.compute_metrics(embeddings, data, 'val')
            embeddings1 = model.encode(data1['features'], data1['adj_train_norm'])
            val_metrics1 = model.compute_metrics1(embeddings1, data1, 'val')
            if (epoch + 1) % args.log_freq == 0:
                logging.info(" ".join(['Epoch: {:04d}'.format(epoch + 1), format_metrics(val_metrics, 'val'), format_metrics(val_metrics1, 'val')]))

            embeddings = model.encode(data['features'], data['adj_train_norm'])
            test_metrics = model.compute_metrics(embeddings, data, 'test')
            embeddings1 = model.encode(data1['features'], data1['adj_train_norm'])
            test_metrics1 = model.compute_metrics1(embeddings1, data1, 'test')
            if (epoch + 1) % args.log_freq == 0:
                logging.info(" ".join(['Epoch: {:04d}'.format(epoch + 1), format_metrics(test_metrics, 'test'),
                                       format_metrics(test_metrics1, 'test')]))
            if model.has_improved(best_val_metrics, val_metrics1):
                best_test_metrics = model.compute_metrics1(embeddings1, data1, 'test')
                best_emb = embeddings1.cpu()
                if args.save:
                    np.save(os.path.join(save_dir, 'embeddings.npy'), best_emb.detach().numpy())
                best_val_metrics = val_metrics1
                counter = 0
            # else:
            #     counter += 1
            #     if counter == args.patience and epoch > args.min_epochs:
            #         logging.info("Early stopping")
            #         break

    logging.info("Optimization Finished!")
    logging.info("Total time elapsed: {:.4f}s".format(time.time() - t_total))
    # If no evaluation ever improved, fall back to final-epoch embeddings.
    if not best_test_metrics:
        model.eval()
        best_emb = model.encode(data['features'], data['adj_train_norm'])
        best_test_metrics = model.compute_metrics(best_emb, data, 'test')
    logging.info(" ".join(["Val set results:", format_metrics(best_val_metrics, 'val')]))
    logging.info(" ".join(["Test set results:", format_metrics(best_test_metrics, 'test')]))
    if args.save:
        np.save(os.path.join(save_dir, 'embeddings.npy'), best_emb.cpu().detach().numpy())
        if hasattr(model.encoder, 'att_adj'):
            filename = os.path.join(save_dir, args.dataset + '_att_adj.p')
            pickle.dump(model.encoder.att_adj.cpu().to_dense(), open(filename, 'wb'))
            print('Dumped attention adj: ' + filename)

        json.dump(vars(args), open(os.path.join(save_dir, 'config.json'), 'w'))
        torch.save(model.state_dict(), os.path.join(save_dir, 'model.pth'))
        logging.info(f"Saved model in {save_dir}")
Exemplo n.º 21
0
 def setUp(self):
     """Switch the global default dtype to double precision for this suite."""
     super(TestTypePromotionDefaultDouble, self).setUp()
     # torch.double is an alias of torch.float64.
     torch.set_default_dtype(torch.float64)
Exemplo n.º 22
0
    def train(self,
              environment,
              model_type='DQN',
              writer=None,
              total_episodes=100,
              pretrain=100,
              batch_size=64,
              frame_skip=4,
              lr=1e-4,
              explore_start=1.0,
              explore_stop=0.01,
              decay_rate=0.001,
              gamma=0.99,
              freq=5):
        """Train a DQN/DDDQN agent with prioritized replay on `environment`.

        Args:
            environment: game wrapper providing episodes, observations
                and `make_action` (VizDoom-style interface).
            model_type: 'DQN' or anything else for DDDQN.
            writer: optional tensorboard SummaryWriter.
            total_episodes: number of training episodes.
            pretrain: transitions to collect before learning starts.
            batch_size: replay minibatch size.
            frame_skip: frames repeated per chosen action.
            lr: RMSprop learning rate.
            explore_start/explore_stop/decay_rate: epsilon-greedy schedule.
            gamma: discount factor.
            freq: checkpoint frequency in episodes.

        Saves model checkpoints under 'models/<scenario>/' every `freq`
        episodes.
        """
        print('...Using device...', self.device)
        # Default dtype for all tensors created below.
        torch.set_default_dtype(torch.float)

        n_actions = len(self.actions)

        # Online and target networks of the selected architecture.
        if model_type == 'DQN':
            self.model = DQN(stack_size=self.stack_size,
                             n_actions=n_actions).to(self.device)
            self.target_model = DQN(stack_size=self.stack_size,
                                    n_actions=n_actions).to(self.device)
        else:
            self.model = DDDQN(stack_size=self.stack_size,
                               n_actions=n_actions).to(self.device)
            self.target_model = DDDQN(stack_size=self.stack_size,
                                      n_actions=n_actions).to(self.device)

        self.target_model.eval()

        print('...Filling replay memory...')
        self._pretrain(environment, iter_num=pretrain)

        print('...Training loop...')
        # Target network is synced after max_tau steps.
        max_tau = 150
        global_i = 0

        # act_to_ind = {tuple(action): i for i, action in enumerate(self.actions)}

        optimizer = torch.optim.RMSprop(self.model.parameters(), lr=lr)
        criterion = nn.MSELoss()
        # Moving average of kills over the last 10 episodes (for logging).
        kill_count_ma = deque(maxlen=10)

        for episode in range(total_episodes):
            tau = 0
            decay_step = 0
            episode_rewards = []
            episode_loss = 0
            episode_len = 1

            environment.game.new_episode()
            obs_cur, obs_prev = environment.init_observations()

            state = environment.get_state()
            state, stacked_frames = stack_frames(None, state, True,
                                                 self.stack_size,
                                                 self.resolution)

            is_finished = environment.is_episode_finished()

            while not is_finished:
                tau += 1
                decay_step += 1

                # Epsilon-greedy action from the online network.
                action, explore_probability = predict_action(
                    explore_start, explore_stop, decay_rate, decay_step, state,
                    self.model, self.device, self.actions)
                reward = environment.make_action(action, frame_skip)
                # print(reward)

                obs_cur = environment.get_observation_cur()
                reward = self._reshape_reward(reward, obs_cur, obs_prev)
                # print(reward)
                obs_prev = obs_cur.copy()

                is_finished = environment.is_episode_finished()

                episode_rewards.append(reward)

                if is_finished:
                    # Terminal transition: use a blank frame as next state.
                    next_state = np.zeros((480, 640, 3), dtype='uint8')
                    next_state, stacked_frames = stack_frames(
                        stacked_frames, next_state, False, self.stack_size,
                        self.resolution)
                    episode_reward = np.sum(episode_rewards)
                    kill_count = obs_cur['kills']
                    kill_count_ma.append(kill_count)
                    # NOTE(review): `loss` is only assigned further below;
                    # if the very first episode terminates on its first
                    # step this print raises NameError — confirm intent.
                    print(
                        "Episode: %d, Total reward: %.2f, Kill count: %.1f, Train loss: %.4f, Explore p: %.4f"
                        % (episode, episode_reward, np.mean(kill_count_ma),
                           loss, explore_probability))
                    self.append_sample(state, action, next_state, reward,
                                       is_finished)
                    # self.memory.push(state, action, next_state, reward, is_finished)

                else:
                    next_state = environment.get_state()
                    next_state, stacked_frames = stack_frames(
                        stacked_frames, next_state, False, self.stack_size,
                        self.resolution)
                    self.append_sample(state, action, next_state, reward,
                                       is_finished)
                    # self.memory.push(state, action, next_state, reward, is_finished)
                    state = next_state
                    episode_len += 1

                self.model.train()

                # Prioritized replay sample: transitions, tree indices and
                # importance-sampling weights.
                transitions, idxs, is_weights = self.memory.sample(batch_size)

                batch = Transition(*zip(*transitions))
                states_mb = torch.cat(batch.state).to(self.device)
                actions_mb = torch.cat(batch.action).to(self.device)
                rewards_mb = torch.cat(batch.reward).to(self.device)
                next_states_mb = torch.cat(batch.next_state).to(self.device)
                dones_mb = torch.cat(batch.is_finished).to(self.device)

                q_next_state = self.model.forward(next_states_mb)
                q_target_next_state = self.target_model(next_states_mb)
                q_state = self.model.forward(states_mb)

                # One-step TD target; terminal states contribute reward only.
                targets_mb = rewards_mb + (
                    gamma *
                    (1 - dones_mb) * torch.max(q_target_next_state, 1)[0])
                # actions_ids = [self.actions.index(action.int().tolist()[0]) for action in batch.action]
                # q_values_for_actions = q_state.contiguous()[np.arange(q_state.shape[0]), actions_ids]

                # Q-value of the taken action (actions are one-hot encoded
                # here — presumably; verify against append_sample).
                output = (q_state * actions_mb).sum(1)
                errors = torch.abs(output - targets_mb).cpu().data.numpy()

                # update priority
                for i in range(batch_size):
                    idx = idxs[i]
                    self.memory.update(idx, errors[i])

                optimizer.zero_grad()

                # loss = criterion(q_values_for_actions, targets_mb)
                # Importance-sampling-weighted MSE loss.
                loss = (torch.tensor(
                    is_weights, dtype=torch.float, device=self.device) *
                        criterion(output, targets_mb.detach())).mean()
                # loss = criterion(output, targets_mb.detach())
                loss.backward()

                # Gradient clipping to [-1, 1], element-wise.
                for p in self.model.parameters():
                    p.grad.data.clamp_(-1, 1)

                optimizer.step()

                episode_loss += loss.item()

                # dump train metrics to tensorboard
                if writer is not None:
                    writer.add_scalar("loss/train", loss.item(), global_i)
                    writer.add_scalar("reward/train",
                                      torch.mean(rewards_mb).item(), global_i)

                # Periodically sync the target network with the online one.
                if tau > max_tau:
                    self.target_model.load_state_dict(self.model.state_dict())
                    print('Target model updated')
                    tau = 0

                global_i += 1

            # dump episode metrics to tensorboard
            if writer is not None:
                writer.add_scalar("loss_episode/train",
                                  episode_loss / episode_len, episode)
                writer.add_scalar("reward_episode/train", episode_reward,
                                  episode)
                writer.add_scalar("kill_count/train", kill_count, episode)

            # Periodic checkpointing of the online network.
            if (episode % freq) == 0:
                model_file = 'models/' + self.scenario + '/' + model_type + '_' + str(
                    episode) + '.pth'
                torch.save(self.model.state_dict(), model_file)
                print('\nSaved model to ' + model_file)
Exemplo n.º 23
0
def evaluate(**kwargs):
    """Evaluate a trained implicit-surface model checkpoint.

    Loads the latest (or requested) checkpoint for an experiment,
    extracts a high-resolution mesh from the implicit network, and —
    optionally — evaluates camera-pose accuracy and per-image rendering
    PSNR.

    Expected kwargs: conf, exps_folder_name, evals_folder_name,
    eval_cameras, eval_rendering, expname, scan_id, timestamp,
    checkpoint, resolution.

    Side effects: writes meshes / rendered images under
    '../<evals_folder_name>/<expname>' and prints evaluation results.
    """
    torch.set_default_dtype(torch.float32)

    conf = ConfigFactory.parse_file(kwargs['conf'])
    exps_folder_name = kwargs['exps_folder_name']
    evals_folder_name = kwargs['evals_folder_name']
    eval_cameras = kwargs['eval_cameras']
    eval_rendering = kwargs['eval_rendering']

    expname = conf.get_string('train.expname') + kwargs['expname']
    # A scan_id of -1 means "not specified"; fall back to the config.
    scan_id = kwargs['scan_id'] if kwargs['scan_id'] != -1 else conf.get_int(
        'dataset.scan_id', default=-1)
    if scan_id != -1:
        expname = expname + '_{0}'.format(scan_id)

    # Resolve the experiment timestamp (latest run by default).
    if kwargs['timestamp'] == 'latest':
        if os.path.exists(
                os.path.join('../', kwargs['exps_folder_name'], expname)):
            timestamps = os.listdir(
                os.path.join('../', kwargs['exps_folder_name'], expname))
            if (len(timestamps)) == 0:
                print('WRONG EXP FOLDER')
                exit()
            else:
                timestamp = sorted(timestamps)[-1]
        else:
            print('WRONG EXP FOLDER')
            exit()
    else:
        timestamp = kwargs['timestamp']

    utils.mkdir_ifnotexists(os.path.join('../', evals_folder_name))
    expdir = os.path.join('../', exps_folder_name, expname)
    evaldir = os.path.join('../', evals_folder_name, expname)
    utils.mkdir_ifnotexists(evaldir)

    model = utils.get_class(
        conf.get_string('train.model_class'))(conf=conf.get_config('model'))
    if torch.cuda.is_available():
        model.cuda()

    dataset_conf = conf.get_config('dataset')
    if kwargs['scan_id'] != -1:
        dataset_conf['scan_id'] = kwargs['scan_id']
    eval_dataset = utils.get_class(conf.get_string('train.dataset_class'))(
        eval_cameras, **dataset_conf)

    # settings for camera optimization
    scale_mat = eval_dataset.get_scale_mat()
    if eval_cameras:
        # Learned pose embeddings: quaternion (4) + translation (3).
        num_images = len(eval_dataset)
        pose_vecs = torch.nn.Embedding(num_images, 7, sparse=True).cuda()
        pose_vecs.weight.data.copy_(eval_dataset.get_pose_init())

        gt_pose = eval_dataset.get_gt_pose()

    if eval_rendering:
        eval_dataloader = torch.utils.data.DataLoader(
            eval_dataset,
            batch_size=1,
            shuffle=False,
            collate_fn=eval_dataset.collate_fn)
        total_pixels = eval_dataset.total_pixels
        img_res = eval_dataset.img_res

    old_checkpnts_dir = os.path.join(expdir, timestamp, 'checkpoints')

    saved_model_state = torch.load(
        os.path.join(old_checkpnts_dir, 'ModelParameters',
                     str(kwargs['checkpoint']) + ".pth"))
    model.load_state_dict(saved_model_state["model_state_dict"])
    epoch = saved_model_state['epoch']

    if eval_cameras:
        data = torch.load(
            os.path.join(old_checkpnts_dir, 'CamParameters',
                         str(kwargs['checkpoint']) + ".pth"))
        pose_vecs.load_state_dict(data["pose_vecs_state_dict"])

    ####################################################################################################################
    print("evaluating...")

    model.eval()
    if eval_cameras:
        pose_vecs.eval()

    with torch.no_grad():
        if eval_cameras:
            # Align predicted cameras to ground truth (similarity transform).
            gt_Rs = gt_pose[:, :3, :3].double()
            gt_ts = gt_pose[:, :3, 3].double()

            pred_Rs = rend_util.quat_to_rot(
                pose_vecs.weight.data[:, :4]).cpu().double()
            pred_ts = pose_vecs.weight.data[:, 4:].cpu().double()

            R_opt, t_opt, c_opt, R_fixed, t_fixed = get_cameras_accuracy(
                pred_Rs, gt_Rs, pred_ts, gt_ts)

            cams_transformation = np.eye(4, dtype=np.double)
            cams_transformation[:3, :3] = c_opt * R_opt
            cams_transformation[:3, 3] = t_opt

        mesh = plt.get_surface_high_res_mesh(
            sdf=lambda x: model.implicit_network(x)[:, 0],
            resolution=kwargs['resolution'])

        # Transform to world coordinates
        if eval_cameras:
            mesh.apply_transform(cams_transformation)
        else:
            mesh.apply_transform(scale_mat)

        # Taking the biggest connected component
        components = mesh.split(only_watertight=False)
        # FIX: np.float was removed in NumPy >= 1.24; use the builtin float.
        areas = np.array([c.area for c in components], dtype=float)
        mesh_clean = components[areas.argmax()]
        mesh_clean.export(
            '{0}/surface_world_coordinates_{1}.ply'.format(evaldir, epoch),
            'ply')

    if eval_rendering:
        images_dir = '{0}/rendering'.format(evaldir)
        utils.mkdir_ifnotexists(images_dir)

        psnrs = []
        for data_index, (indices, model_input,
                         ground_truth) in enumerate(eval_dataloader):
            model_input["intrinsics"] = model_input["intrinsics"].cuda()
            model_input["uv"] = model_input["uv"].cuda()
            model_input["object_mask"] = model_input["object_mask"].cuda()

            # Use optimized poses when evaluating cameras, else GT poses.
            if eval_cameras:
                pose_input = pose_vecs(indices.cuda())
                model_input['pose'] = pose_input
            else:
                model_input['pose'] = model_input['pose'].cuda()

            # Render in pixel chunks to bound memory usage.
            split = utils.split_input(model_input, total_pixels)
            res = []
            for s in split:
                out = model(s)
                res.append({
                    'rgb_values': out['rgb_values'].detach(),
                })

            batch_size = ground_truth['rgb'].shape[0]
            model_outputs = utils.merge_output(res, total_pixels, batch_size)
            rgb_eval = model_outputs['rgb_values']
            rgb_eval = rgb_eval.reshape(batch_size, total_pixels, 3)

            # Map from [-1, 1] to [0, 1] before image export.
            rgb_eval = (rgb_eval + 1.) / 2.
            rgb_eval = plt.lin2img(rgb_eval, img_res).detach().cpu().numpy()[0]
            rgb_eval = rgb_eval.transpose(1, 2, 0)
            img = Image.fromarray((rgb_eval * 255).astype(np.uint8))
            img.save('{0}/eval_{1}.png'.format(images_dir,
                                               '%03d' % indices[0]))

            rgb_gt = ground_truth['rgb']
            rgb_gt = (rgb_gt + 1.) / 2.
            rgb_gt = plt.lin2img(rgb_gt, img_res).numpy()[0]
            rgb_gt = rgb_gt.transpose(1, 2, 0)

            # PSNR is computed only over the object mask.
            mask = model_input['object_mask']
            mask = plt.lin2img(mask.unsqueeze(-1), img_res).cpu().numpy()[0]
            mask = mask.transpose(1, 2, 0)

            rgb_eval_masked = rgb_eval * mask
            rgb_gt_masked = rgb_gt * mask

            psnr = calculate_psnr(rgb_eval_masked, rgb_gt_masked, mask)
            psnrs.append(psnr)

        psnrs = np.array(psnrs).astype(np.float64)
        print("RENDERING EVALUATION {2}: psnr mean = {0} ; psnr std = {1}".
              format("%.2f" % psnrs.mean(), "%.2f" % psnrs.std(), scan_id))
Exemplo n.º 24
0
import numpy as np
import torch
import os
import matplotlib.pyplot as plt
#from torchdiffeq import odeint
from torchdiffeq import odeint_adjoint as odeint
from linear_memory.linear_memory import LinearMemory
import linear_memory.utils as ut
from import_utils import add_path

# Double precision everywhere for the ODE/inference code below.
torch.set_default_dtype(torch.float64)

# Make the bundled pyssa / pymbvi packages importable, then import them.
add_path('pyssa')
import pyssa.ssa as ssa
import pyssa.models.standard_models as sm

add_path('pymbvi')
from pymbvi.models.mjp.autograd_partition_specific_models import SimpleGeneExpression
from pymbvi.util import num_derivative, autograd_jacobian

# torch.manual_seed(2007301620)

# Load the pre-trained checkpoint stored next to this script.
load_path = os.path.dirname(
    os.path.realpath(__file__)) + '/data/learn_linear_ode_train.pt'
data = torch.load(load_path)
model_dict = data['model_state_dict']
loss_history = data['loss_history']
print(len(loss_history))

# get simulation model
pre, post, rates = sm.get_standard_model("simple_gene_expression")
Exemplo n.º 25
0
def process__workers(gpu_id, root_cwd, q_aggr, q_dist, args, **_kwargs):
    """Worker process for multi-process RL training.

    Runs one training loop on the given GPU, sending its replay memories
    to an aggregator via `q_aggr` and receiving sampled minibatches back
    via `q_dist`.

    Args:
        gpu_id: CUDA device id for this worker (also seeds RNGs).
        root_cwd: parent working directory; a per-worker subdir is created.
        q_aggr: queue used to push (memories, is_solved) to the aggregator.
        q_dist: queue from which pre-sampled minibatch arrays are received.
        args: hyper-parameter namespace (agent class, env name, dims, ...).

    Returns:
        True when the loop exits (solved, max epochs, or interrupted).
    """
    class_agent = args.class_agent
    env_name = args.env_name
    cwd = args.cwd
    net_dim = args.net_dim
    max_step = args.max_step
    # max_memo = args.max_memo
    max_epoch = args.max_epoch
    # NOTE(review): this produces a float (e.g. 64 * 1.5 = 96.0); confirm
    # that update_parameters / random_sample tolerate a non-int batch size.
    batch_size = args.batch_size * 1.5
    gamma = args.gamma
    update_gap = args.update_gap
    reward_scale = args.reward_scale

    # Per-worker working directory and GPU binding.
    cwd = '{}/{}_{}'.format(root_cwd, cwd, gpu_id)
    os.makedirs(cwd, exist_ok=True)
    os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    # Distinct seed per worker for decorrelated exploration.
    random_seed = 42 + gpu_id
    np.random.seed(random_seed)
    torch.manual_seed(random_seed)
    torch.set_default_dtype(torch.float32)
    torch.set_num_threads(8)

    env = gym.make(env_name)
    is_solved = False

    class BufferArrayMP(BufferArray):
        # Replay buffer variant whose sampling is delegated to the
        # aggregator process through the two queues.
        def init_before_sample(self):
            # Ship this worker's memories (and solved flag) to the aggregator.
            q_aggr.put((self.memories, is_solved))
            # self.now_len = self.max_len if self.is_full else self.next_idx

        def random_sample(self, _batch_size, device=None):
            # Receive a pre-sampled minibatch from the aggregator.
            batch_arrays = q_dist.get()
            '''convert array into torch.tensor'''
            tensors = [
                torch.tensor(ary, device=device) for ary in batch_arrays
            ]
            return tensors

    '''init'''
    state_dim, action_dim, max_action, target_reward, is_discrete = get_env_info(
        env, is_print=True)
    agent = class_agent(env, state_dim, action_dim, net_dim)  # training agent
    buffer = BufferArrayMP(max_step, state_dim,
                           action_dim)  # experiment replay buffer
    recorder = Recorder(agent, max_step, max_action, target_reward, env_name,
                        **_kwargs)
    '''loop'''
    # with torch.no_grad():  # update replay buffer
    #     # rewards, steps = agent.update_buffer(
    #     #     env, buffer, max_step, max_action, reward_scale, gamma)
    #     rewards, steps = initial_exploration(
    #         env, buffer, max_step, max_action, reward_scale, gamma, action_dim)
    # recorder.show_reward(rewards, steps, 0, 0)
    try:
        for epoch in range(max_epoch):
            '''update replay buffer by interact with environment'''
            with torch.no_grad():  # for saving the GPU buffer
                rewards, steps = agent.update_buffer(env, buffer, max_step,
                                                     max_action, reward_scale,
                                                     gamma)
            '''update network parameters by random sampling buffer for stochastic gradient descent'''
            loss_a, loss_c = agent.update_parameters(buffer, max_step,
                                                     batch_size, update_gap)
            '''show/check the reward, save the max reward actor'''
            with torch.no_grad():  # for saving the GPU buffer
                '''NOTICE! Recorder saves the agent with max reward automatically. '''
                recorder.show_reward(rewards, steps, loss_a, loss_c)

                is_solved = recorder.check_reward(cwd, loss_a, loss_c)
            if is_solved:
                break
    except KeyboardInterrupt:
        print("raise KeyboardInterrupt while training.")
    # except AssertionError:  # for BipedWalker BUG 2020-03-03
    #     print("AssertionError: OpenAI gym r.LengthSquared() > 0.0f ??? Please run again.")
    #     return False

    train_time = recorder.print_and_save_npy(env_name, cwd)

    # agent.save_or_load_model(cwd, is_save=True)  # save max reward agent in Recorder
    # buffer.save_or_load_memo(cwd, is_save=True)

    draw_plot_with_npy(cwd, train_time)
    return True
Exemplo n.º 26
0
def train_vae(args, dtype=torch.float32):
    """Train a goal VAE on (state, future-state) pairs from expert trajectories.

    Pairs are sampled 5-10 steps apart from the expert data, shuffled, and
    split 95/5 into train/validation. The model is trained for ``args.iter``
    outer iterations; training loss and validation MSE are logged to
    TensorBoard and the latest weights are checkpointed every iteration.

    Args:
        args: namespace providing state_dim, output_path, expert_traj_path,
            size_per_traj, model_lr, env_name, beta, weight, iter, epoch,
            optim_batch_size, lr_decay_rate.
        dtype: torch dtype used for all tensors (also set as the global
            default dtype — note this is a process-wide side effect).
    """
    torch.set_default_dtype(dtype)
    state_dim = args.state_dim
    output_path = args.output_path

    # Generate (state, future_state) pairs from the raw expert trajectories.
    expert_traj_raw = list(pickle.load(open(args.expert_traj_path, "rb")))
    state_pairs = generate_pairs(expert_traj_raw,
                                 state_dim,
                                 args.size_per_traj,
                                 max_step=10,
                                 min_step=5)  # tune the step size if needed.

    # Shuffle, then hold out the last 5% as a validation split.
    idx = np.arange(state_pairs.shape[0])
    np.random.shuffle(idx)
    state_pairs = state_pairs[idx, :]
    split = (state_pairs.shape[0] * 19) // 20
    train_pairs = state_pairs[:split, :]
    test_pairs = state_pairs[split:, :]
    print(train_pairs.shape)
    print(test_pairs.shape)

    goal_model = VAE(state_dim, latent_dim=128)
    optimizer_vae = torch.optim.Adam(goal_model.parameters(), lr=args.model_lr)
    save_path = '{}_softbc_{}_{}'.format(
        datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"), args.env_name,
        args.beta)
    writer = SummaryWriter(log_dir=os.path.join(output_path,
                                                'runs/' + save_path))

    if args.weight:
        # With per-sample weights each row carries an extra trailing weight
        # column, so widen the effective state_dim before slicing below.
        state_dim = state_dim + 1

    # BUG FIX: both branches previously built the training tensors from the
    # FULL `state_pairs` array, so the held-out 5% leaked into training.
    # Train on the train split; validate on the held-out split only.
    state_tuples = torch.from_numpy(train_pairs).to(dtype)
    state_tuples_test = torch.from_numpy(test_pairs).to(dtype)
    if args.weight:
        s = state_tuples[:, :state_dim - 1]
        t = state_tuples[:, state_dim:2 * state_dim]
        s_test = state_tuples_test[:, :state_dim - 1]
        t_test = state_tuples_test[:, state_dim:2 * state_dim]
    else:
        s = state_tuples[:, :state_dim]
        t = state_tuples[:, state_dim:2 * state_dim]
        s_test = state_tuples_test[:, :state_dim]
        t_test = state_tuples_test[:, state_dim:2 * state_dim]

    for i in range(1, args.iter + 1):
        loss = goal_model.train(s, t, epoch=args.epoch, optimizer=optimizer_vae,
                                batch_size=args.optim_batch_size,
                                beta=args.beta, use_weight=args.weight)
        next_states = goal_model.get_next_states(s_test)
        if args.weight:
            # Last column of t_test is the sample weight; use it to weight
            # the per-sample squared error.
            val_error = (t_test[:, -1].unsqueeze(1) *
                         (t_test[:, :-1] - next_states)**2).mean()
        else:
            # NOTE(review): the final column is dropped here as well even
            # without weights — confirm the VAE predicts state_dim - 1
            # features in this mode.
            val_error = ((t_test[:, :-1] - next_states)**2).mean()
        writer.add_scalar('loss/vae', loss, i)
        writer.add_scalar('valid/vae', val_error, i)
        if i % args.lr_decay_rate == 0:
            adjust_lr(optimizer_vae, 2.)  # presumably divides LR by 2 — verify
        # Checkpoint every iteration; same path, so only the latest survives.
        torch.save(
            goal_model.state_dict(),
            os.path.join(output_path,
                         '{}_{}_vae.pt'.format(args.env_name, str(args.beta))))
Exemplo n.º 27
0
    def train(self, environment, writer=None, total_episodes=20, frame_skip=4,
        actor_lr=1e-4, critic_lr=1e-4, freq=3):
        """Run on-policy actor-critic training for `total_episodes` episodes.

        Each episode: interact with `environment` until it ends, collecting
        log-probs, rewards and critic values into instance buffers; at episode
        end run one learning update via `self.train_on_episode()` and
        periodically save the actor weights.

        Args:
            environment: project wrapper exposing `game`, `get_state`,
                `make_action`, `is_episode_finished`, observation helpers and
                a `scenario` name (API assumed from usage — confirm).
            writer: unused in this method — TODO confirm logging was intended.
            total_episodes: number of episodes to play.
            frame_skip: number of frames each chosen action is repeated for.
            actor_lr: Adam learning rate for the actor network.
            critic_lr: Adam learning rate for the critic network.
            freq: save the actor every `freq` episodes.
        """

        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)

        # Process-wide side effect: sets the default dtype for ALL torch code.
        torch.set_default_dtype(torch.float)

        for episode in range(total_episodes):

            # init new episode
            environment.game.new_episode()
            state = environment.get_state()
            obs_cur, obs_prev = environment.init_observations()

            # prepare stack of frames (True => start a fresh stack)
            state, stacked_frames = stack_frames(None, state, True, self.stack_size, self.resolution)
            # NOTE(review): Variable is deprecated in modern PyTorch; tensors
            # carry requires_grad directly — confirm the torch version in use.
            state = Variable(state, requires_grad=True)

            # check episode is finished
            is_finished = environment.is_episode_finished()

            while not is_finished:
                # sample action from stochastic softmax policy
                policy, value = self.actor(state.to(self.device)), self.critic(state.to(self.device))

                action_id = policy.sample()
                log_prob = policy.log_prob(action_id).unsqueeze(0)
                action = self.actions[action_id]

                # make action (repeated frame_skip frames) and get reward
                reward = environment.make_action(action, frame_skip)
                obs_cur = environment.get_observation_cur()
                reward = self._reshape_reward(reward, obs_cur, obs_prev)
                obs_prev = obs_cur.copy()

                # fill per-episode memory buffers (cleared after each update)
                self.log_probs.append(log_prob)
                self.rewards.append(reward)
                self.values.append(value)

                # check episode is finished
                is_finished = environment.is_episode_finished()

                if not is_finished:
                    # get new state and push it onto the existing frame stack
                    next_state = environment.get_state()
                    state, stacked_frames = stack_frames(
                        stacked_frames, next_state, False, self.stack_size, self.resolution)
                    state = Variable(state, requires_grad=True)

                else:
                    # every episode agent learns
                    print('Episode finished, training...')
                    actor_loss, critic_loss = self.train_on_episode()
                    episode_reward = sum(self.rewards)
                    kill_count = obs_cur['kills']
                    print(
                        "Episode: %d, Total reward: %.2f, Kill Count: %.1f, Actor loss: %.4f, Critic loss: %.4f" % (
                            episode, episode_reward, kill_count, actor_loss, critic_loss))
                    self.log_probs, self.rewards, self.values = [], [], []

                    # save model — only the actor is checkpointed; the critic
                    # and optimizer states are not saved (resume re-inits them)
                    if (episode % freq) == 0:
                        model_file = 'models/' + environment.scenario + '/' + 'A2C' + '_' + str(episode) + '.pth'
                        torch.save(self.actor.state_dict(), model_file)
                        print('Saved model to ' + model_file)
Exemplo n.º 28
0
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Normal, Categorical
import matplotlib.pyplot as plt
from tqdm import trange

# Run the whole script in double precision — process-wide default for all
# subsequently created tensors (presumably for numerical stability; confirm).
torch.set_default_dtype(torch.double)

# ============================================================================================

# Environment selection: exactly one env_name should be uncommented.
# env_name = 'CartPole-v0' # Discrete
env_name = "InvertedPendulum-v2"  # Continuous

# env_name = 'su_cartpole-v0'

# env_name = 'Walker2d-v2'
# env_name = 'lorenz-v0'
# Hard coded policy for the cartpole problem
# Will eventually want to build up infrastructure to develop a policy depending on:
# env.action_space
# env.observation_space

# policy = nn.Sequential(
#     nn.Linear(17, 64),
#     nn.LeakyReLU(),
#     nn.Linear(64, 64),
#     nn.LeakyReLU(),
#     nn.Linear(64, 64),
#     nn.LeakyReLU(),
Exemplo n.º 29
0
# Resolve the matching numpy/torch dtype pair from the CLI flag.
if args.dtype == 'float32':
    default_dtype = np.float32
    default_dtype_torch = torch.float32

elif args.dtype == 'float64':
    default_dtype = np.float64
    default_dtype_torch = torch.float64
else:
    raise ValueError('Unknown dtype: {}'.format(args.dtype))

# Turn numpy floating-point problems into hard errors, except underflow,
# which is common and benign, so it only warns.
np.seterr(all='raise')
np.seterr(under='warn')
np.set_printoptions(precision=8, linewidth=160)

torch.set_default_dtype(default_dtype_torch)
torch.set_printoptions(precision=8, linewidth=160)
# Let cuDNN autotune conv algorithms (best when input sizes are fixed).
torch.backends.cudnn.benchmark = True

# NOTE(review): `not args.seed` also replaces an explicit seed of 0 with a
# random one — confirm 0 is meant to signal "unset".
if not args.seed:
    args.seed = np.random.randint(1, 10**8)
np.random.seed(args.seed)
torch.manual_seed(args.seed)

# Restrict visible GPUs; the remapped first device is then always 'cuda:0'.
if args.cuda >= 0:
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.cuda)
args.device = torch.device('cpu' if args.cuda < 0 else 'cuda:0')

args.out_filename = None

Exemplo n.º 30
0
def learn(model, device, args, train_X, train_Y, test_X = None, test_Y = None, eval_train=False, eval_test=True, eval_freq = 100,  gamma = 0.999, decayEnabled = True, decayEpochStart = 10, suppressPrint = False):
    """Train `model` on pre-batched numpy data with optional LR decay.

    Args:
        model: torch model; must be indexable (e.g. nn.Sequential) because the
            last layer is inspected to decide the evaluation metric.
        device: torch device the minibatches are moved to.
        args: must provide optimizer, learnRate, momentum, epochs.
        train_X / train_Y: lists of numpy minibatches (inputs / targets).
        test_X / test_Y: optional validation minibatches.
        eval_train / eval_test: whether to evaluate each split at eval points.
        eval_freq: evaluate every `eval_freq` epochs (plus the final epoch).
        gamma: Adam beta2 coefficient.
        decayEnabled: enable 1/(1 + decay*t) learning-rate decay.
        decayEpochStart: epoch index after which decay starts applying.
        suppressPrint: silence all progress output.

    Returns:
        dict with a "results" entry holding per-epoch accuracy lists plus the
        best validation accuracy and its epoch.
    """
    global NETWORK_DATATYPE
    torch.set_default_dtype(NETWORK_DATATYPE)
    model_training_orig = model.training
    setModelMode(model, True)  # switch to train mode; restored before return

    # I) setup optimiser & loss function
    if args.optimizer == OPTIMIZER_ADAM:
        optimizer = optim.Adam(model.parameters(), lr=args.learnRate,
                               betas=(args.momentum, gamma), eps=EPSILON)
    else:
        optimizer = optim.SGD(model.parameters(), lr=args.learnRate,
                              momentum=args.momentum)
    # Regression targets have a single column -> MSE; otherwise cross-entropy.
    if train_Y[0].shape[1] == 1:
        criterion = nn.MSELoss()
    else:
        criterion = nn.CrossEntropyLoss()

    # Cosmetics: multiclass accuracy needs a softmax output head; a
    # multi-column head without softmax can only be scored by MAE.
    if train_Y[0].shape[1] > 1 and isinstance(model[-1], nn.Softmax):
        outPutType = OUT_MULTICLASS
    elif train_Y[0].shape[1] > 1:
        outPutType = OUT_MAE
    else:
        outPutType = OUT_REGRESSION
    if outPutType == OUT_REGRESSION:
        evaluation = "prediction (r^2)"
    elif outPutType == OUT_MULTICLASS:
        evaluation = "accuracy (%)"
    else:
        evaluation = "MAE"

    # results & early-stop bookkeeping (best validation accuracy + its epoch)
    results = {}
    results["epochs"] = list()
    results["train_accuracy"] = list()
    results["test_accuracy"] = list()
    results['highestAcc'] = -1.
    results['highestAcc_epoch'] = -1

    eta = args.learnRate
    decay = 0.0
    if decayEnabled:
        decay = 0.001  # eta / args.epochs
    if suppressPrint == False:
        print("Pytorch start for #epochs: " + str(args.epochs) + " with LR decay enabled at: " + str(decay), flush=True)
    t = 0
    while t < args.epochs:
        out_str = " | it: " + str(t)
        start_time = time.time()

        # 1) one full training cycle: FP, BP, weight update for ALL minibatches
        currentBatchNum = 0
        totalBatches = len(train_X)
        for batchNum in range(len(train_X)):
            b_data = train_X[batchNum]
            b_labels = train_Y[batchNum]

            # convert numpy minibatch to torch & move to the target device
            b_data = torch.from_numpy(b_data).to(device)
            b_labels = torch.from_numpy(b_labels).to(device)

            # perform full learning cycle: FP, BP and weight update
            optimizer.zero_grad()       # zero the gradient buffers
            resetCachedActivations()    # reset any saved activations before FP

            yhat = model(b_data)
            loss = criterion(yhat, b_labels)
            addRegularizerCosts(model, loss, args)  # add regularization terms

            loss.backward()
            optimizer.step()

            # update progress bar (scaled to 20 chars / 100%)
            if suppressPrint == False:
                barPos = round(20 * currentBatchNum / totalBatches)
                sys.stdout.write('\r')
                sys.stdout.write("[%-20s] %d%%" % ('=' * barPos, 5 * barPos))
                sys.stdout.flush()
            currentBatchNum += 1

        # only evaluate fit every eval_freq-th epoch (and the final one),
        # as evaluation is expensive
        if t % eval_freq == 0 or t == args.epochs - 1:
            results["epochs"].append(t)

            if eval_train or t == args.epochs - 1:
                accuracy = evalAccuracy(model, device, args, outPutType, train_X, train_Y)
                out_str = out_str + " / Training " + evaluation + ": " + str(accuracy)
                results["train_accuracy"].append(accuracy)

            if test_X is not None:  # if a test set was supplied at all
                if eval_test or t == args.epochs - 1:
                    accuracy = evalAccuracy(model, device, args, outPutType, test_X, test_Y, validEval = True)

                    if accuracy > results['highestAcc']:
                        results['highestAcc'] = accuracy
                        results['highestAcc_epoch'] = t

                    results["test_accuracy"].append(accuracy)
                    out_str = out_str + " / Test " + evaluation + ": " + str(accuracy)

        elapsed_time = time.time() - start_time
        if suppressPrint == False:
            print(out_str + " / " + str(round(elapsed_time)) + " secs (LR: " + str(eta) + ")", flush=True)

        # update learning rate once decay kicks in (t > decayEpochStart).
        # BUG FIX: previously eta was recomputed and printed but never written
        # back into the optimizer, so the decay had no effect on training.
        if t > decayEpochStart:
            eta = eta * 1 / (1 + decay * t)
            for param_group in optimizer.param_groups:
                param_group['lr'] = eta

        t += 1
    setModelMode(model, model_training_orig)  # restore caller's train/eval mode
    return ({"results": results})
Exemplo n.º 31
0
def prep_conv3d_lfads(input_dims, hyperparams, device, dtype, dt):
    """Build a Conv3d-LFADS model and its composite loss from a hyperparameter dict.

    Args:
        input_dims: input video dimensions passed straight to Conv3d_LFADS_Net
            (presumably (num_steps, width, height) per the comment below —
            confirm against the model's constructor).
        hyperparams: nested dict with 'model' and 'objective' sections.
            WARNING: mutated in place — hyperparams['model']['obs']['tau']['value']
            is divided by dt below, so repeated calls rescale it repeatedly.
        device: torch device the model and objective are moved to.
        dtype: torch dtype for the model; also set as the GLOBAL default dtype
            (process-wide side effect).
        dt: sampling interval used to rescale tau and the Poisson likelihood.

    Returns:
        (model, objective) tuple, both already moved to `device`.
    """

    # Local imports: these are project modules, imported lazily here.
    from synthetic_data import SyntheticCalciumVideoDataset
    from objective import Conv_LFADS_Loss, LogLikelihoodGaussian, LogLikelihoodPoissonSimplePlusL1
    from conv_lfads import Conv3d_LFADS_Net

    # model = Conv3d_LFADS_Net(input_dims      = input_dims,#(num_steps, width, height),
    #                          conv_dense_size = hyperparams['model']['conv_dense_size'],
    #                          channel_dims    = hyperparams['model']['channel_dims'],
    #                          factor_size     = hyperparams['model']['factor_size'],
    #                          g_encoder_size  = hyperparams['model']['g_encoder_size'],
    #                          c_encoder_size  = hyperparams['model']['c_encoder_size'],
    #                          g_latent_size   = hyperparams['model']['g_latent_size'],
    #                          u_latent_size   = hyperparams['model']['u_latent_size'],
    #                          controller_size = hyperparams['model']['controller_size'],
    #                          generator_size  = hyperparams['model']['generator_size'],
    #                          prior           = hyperparams['model']['prior'],
    #                          clip_val        = hyperparams['model']['clip_val'],
    #                          conv_dropout    = hyperparams['model']['conv_dropout'],
    #                          lfads_dropout   = hyperparams['model']['lfads_dropout'],
    #                          do_normalize_factors = hyperparams['model']['normalize_factors'],
    #                          max_norm        = hyperparams['model']['max_norm'],
    #                          device          = device).to(device)
    # Convert the observation time constant from seconds to time-steps.
    # NOTE: this mutates the caller's hyperparams dict in place.
    hyperparams['model']['obs']['tau']['value']/=float(dt)
    model = Conv3d_LFADS_Net(input_dims             = input_dims, 
                             channel_dims           = hyperparams['model']['channel_dims'], 
                             obs_encoder_size       = hyperparams['model']['obs_encoder_size'], 
                             obs_latent_size        = hyperparams['model']['obs_latent_size'],
                             obs_controller_size    = hyperparams['model']['obs_controller_size'], 
                             conv_dense_size        = hyperparams['model']['conv_dense_size'], 
                             factor_size            = hyperparams['model']['factor_size'],
                             g_encoder_size         = hyperparams['model']['g_encoder_size'], 
                             c_encoder_size         = hyperparams['model']['c_encoder_size'],
                             g_latent_size          = hyperparams['model']['g_latent_size'], 
                             u_latent_size          = hyperparams['model']['u_latent_size'],
                             controller_size        = hyperparams['model']['controller_size'], 
                             generator_size         = hyperparams['model']['generator_size'],
                             prior                  = hyperparams['model']['prior'],
                             obs_params             = hyperparams['model']['obs'],
                             deep_unfreeze_step     = hyperparams['model']['deep_unfreeze_step'], 
                             obs_early_stop_step    = hyperparams['model']['obs_early_stop_step'], 
                             generator_burn         = hyperparams['model']['generator_burn'], 
                             obs_continue_step      = hyperparams['model']['obs_continue_step'], 
                             ar1_start_step         = hyperparams['model']['ar1_start_step'], 
                             clip_val               = hyperparams['model']['clip_val'], 
                             max_norm               = hyperparams['model']['max_norm'], 
                             lfads_dropout          = hyperparams['model']['lfads_dropout'], 
                             conv_dropout           = hyperparams['model']['conv_dropout'],
                             do_normalize_factors   = hyperparams['model']['normalize_factors'], 
                             factor_bias            = hyperparams['model']['factor_bias'], 
                             device                 = device).to(device)

    # model = _CustomDataParallel(model).to(device)

    # Cast the model, then make `dtype` the global default for new tensors.
    model.to(dtype=dtype)
    torch.set_default_dtype(dtype)

    # Composite loss: Gaussian likelihood on observations, Poisson(+L1) on the
    # deep (spiking) reconstruction, weighted by the 'objective' hyperparams.
    obs_loglikelihood = LogLikelihoodGaussian()
    deep_loglikelihood = LogLikelihoodPoissonSimplePlusL1(dt=float(dt))
    objective = Conv_LFADS_Loss(obs_loglikelihood   = obs_loglikelihood,
                                deep_loglikelihood       = deep_loglikelihood,
                                loss_weight_dict         = {'kl_deep'    : hyperparams['objective']['kl_deep'],
                                                       'kl_obs'     : hyperparams['objective']['kl_obs'],
                                                       'l2'         : hyperparams['objective']['l2'],
                                                       'recon_deep' : hyperparams['objective']['recon_deep']},
                                l2_con_scale             = hyperparams['objective']['l2_con_scale'],
                                l2_gen_scale             = hyperparams['objective']['l2_gen_scale']).to(device)

    # objective = Conv_LFADS_Loss(loglikelihood=loglikelihood,
    #                             loss_weight_dict={'kl': hyperparams['objective']['kl'],
    #                                               'l2': hyperparams['objective']['l2']},
    #                                                l2_con_scale= hyperparams['objective']['l2_con_scale'],
    #                                                l2_gen_scale= hyperparams['objective']['l2_gen_scale']).to(device)


    return model, objective