def main(args):
    """Main train and evaluation function.

    Parameters
    ----------
    args: argparse.Namespace
        Arguments
    """
    formatter = logging.Formatter('%(asctime)s %(levelname)s - %(funcName)s: %(message)s',
                                  "%H:%M:%S")
    logger = logging.getLogger(__name__)
    logger.setLevel(args.log_level.upper())
    stream = logging.StreamHandler()
    stream.setLevel(args.log_level.upper())
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = get_device(is_gpu=not args.no_cuda)
    exp_dir = os.path.join(RES_DIR, args.name)
    feature_dir = os.path.join(exp_dir, 'training_features')
    logger.info("Root directory for saving and loading experiments: {}".format(exp_dir))

    if not args.is_eval_only:
        create_safe_directory(feature_dir, logger=logger)

        # Feature extraction needs a single pass over the data, one sample at a time
        args.epochs = 1
        args.batch_size = 1

        # PREPARES DATA
        data_loader = get_dataloaders(args.dataset,
                                      batch_size=args.batch_size,
                                      logger=logger, test=False)
        logger.info("Train {} with {} samples".format(args.dataset, len(data_loader.dataset)))

        # PREPARES MODEL
        args.img_size = get_img_size(args.dataset)  # stores for metadata
        model = load_model(exp_dir, filename='model.pt')
        logger.info('Num parameters in model: {}'.format(get_n_param(model)))

        # EXTRACTS FEATURES
        model = model.to(device)  # make sure model and extractor are on the same device
        fe = FeatureExtractor(model,
                              save_dir=exp_dir,
                              is_progress_bar=not args.no_progress_bar)
        fe(data_loader,
           epochs=args.epochs,
           checkpoint_every=args.checkpoint_every,
           feature_dir=feature_dir)

        # SAVE MODEL AND EXPERIMENT INFORMATION
        # save_model(trainer.model, exp_dir, metadata=vars(args))
        logger.info('Done.')
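
For context, a minimal entry point wiring up the flags this function reads might look like the following; the real parser is not shown on this page, so the argument names and defaults here are assumptions:

if __name__ == '__main__':
    import argparse

    # Hypothetical parser covering only the flags main() reads above.
    parser = argparse.ArgumentParser(description='Extract training features.')
    parser.add_argument('name', help='experiment name under RES_DIR')
    parser.add_argument('--dataset', default='mnist')
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--log-level', default='info')
    parser.add_argument('--checkpoint-every', type=int, default=10)
    parser.add_argument('--no-cuda', action='store_true')
    parser.add_argument('--no-progress-bar', action='store_true')
    parser.add_argument('--is-eval-only', action='store_true')
    main(parser.parse_args())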
Example #2
def main(args):
    """Main train and evaluation function.

    Parameters
    ----------
    args: argparse.Namespace
        Arguments
    """
    set_seed(args.seed)
    device = torch.device(
        'cuda:{}'.format(args.gpu) if torch.cuda.is_available() else 'cpu')
    exp_dir = os.path.join(RES_DIR, args.name)
    print("save and load experiments at : {}".format(exp_dir))

    if not args.is_eval_only:  # train

        create_directory(exp_dir)

        # PREPARES TRAINING DATA
        train_loader = get_dataloaders(args.dataset,
                                       batch_size=args.batch_size)

        # PREPARES MODEL
        args.img_size = get_img_size(args.dataset)  # stores for metadata
        cs = [1, 64, 128, 1024]
        model = VLAE(args, args.latent_dim, cs)

        # TRAINS
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        model = model.to(device)  # make sure trainer and viz on same device

        gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)

        reg_coeff = [args.reg_coeff0, args.reg_coeff1, args.reg_coeff2]

        trainer = Trainer(model,
                          optimizer,
                          reg_coeff,
                          device=device,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar,
                          gif_visualizer=gif_visualizer)

        trainer(args,
                train_loader,
                epochs=args.epochs,
                checkpoint_every=args.checkpoint_every)

        # SAVE MODEL AND EXPERIMENT INFORMATION
        save_model(trainer.model, exp_dir, metadata=vars(args))
        print("Model has been saved")
Example #3
def get_samples(dataset, num_samples, idcs=()):
    """Generate a number of samples from the dataset.

    Parameters
    ----------
    dataset : str
        The name of the dataset.

    num_samples : int
        The number of samples to load from the dataset.

    idcs : sequence of int, optional
        Indices of images to put at the beginning of the samples.
    """
    idcs = list(idcs)  # copy so a caller's list (or a mutable default) is never mutated
    data_loader = get_dataloaders(dataset, batch_size=1, shuffle=not idcs)

    idcs += random.sample(range(len(data_loader.dataset)),
                          num_samples - len(idcs))
    samples = torch.stack([data_loader.dataset[i][0] for i in idcs], dim=0)
    print("Selected idcs: {}".format(idcs))

    return samples
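
A quick usage sketch (the dataset name and resulting shape are illustrative):

# Pin the first two dataset images, then fill the rest at random.
samples = get_samples('mnist', 8, idcs=[0, 1])
print(samples.shape)  # e.g. torch.Size([8, 1, 32, 32])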
Example #4
def main(args):
    """Main train and evaluation function.

    Parameters
    ----------
    args: argparse.Namespace
        Arguments
    """
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s - %(funcName)s: %(message)s', "%H:%M:%S")
    logger = logging.getLogger(__name__)
    logger.setLevel(args.log_level.upper())
    stream = logging.StreamHandler()
    stream.setLevel(args.log_level.upper())
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = get_device(is_gpu=not args.no_cuda)
    exp_dir = os.path.join(RES_DIR, args.name)
    logger.info("Root directory for saving and loading experiments: {}".format(
        exp_dir))

    if not args.is_eval_only:

        create_safe_directory(exp_dir, logger=logger)

        if args.loss == "factor":
            logger.info(
                "FactorVae needs 2 batches per iteration. To replicate this "
                "behavior while being consistent, we double the batch size "
                "and the number of epochs.")
            args.batch_size *= 2
            args.epochs *= 2

        # PREPARES DATA
        train_loader = get_dataloaders(args.dataset,
                                       batch_size=args.batch_size,
                                       logger=logger)
        logger.info("Train {} with {} samples".format(
            args.dataset, len(train_loader.dataset)))

        # PREPARES MODEL
        args.img_size = get_img_size(args.dataset)  # stores for metadata
        model = init_specific_model(args.model_type, args.img_size,
                                    args.latent_dim)
        logger.info('Num parameters in model: {}'.format(get_n_param(model)))

        # TRAINS
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        model = model.to(device)  # make sure trainer and viz on same device
        gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
        loss_f = get_loss_f(args.loss,
                            n_data=len(train_loader.dataset),
                            device=device,
                            **vars(args))
        trainer = Trainer(model,
                          optimizer,
                          loss_f,
                          device=device,
                          logger=logger,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar,
                          gif_visualizer=gif_visualizer)
        trainer(
            train_loader,
            epochs=args.epochs,
            checkpoint_every=args.checkpoint_every,
        )

        # SAVE MODEL AND EXPERIMENT INFORMATION
        save_model(trainer.model, exp_dir, metadata=vars(args))

    if args.is_metrics or not args.no_test:
        model = load_model(exp_dir, is_gpu=not args.no_cuda)
        metadata = load_metadata(exp_dir)
        # TODO: currently uses the train dataset

        test_loader = get_dataloaders(metadata["dataset"],
                                      batch_size=args.eval_batchsize,
                                      shuffle=False,
                                      logger=logger)
        loss_f = get_loss_f(args.loss,
                            n_data=len(test_loader.dataset),
                            device=device,
                            **vars(args))

        use_wandb = False
        if use_wandb:
            wandb.init(project="atmlbetavae", config={"VAE_loss": args.loss})
            if args.loss == "betaH":
                wandb.config["Beta"] = loss_f.beta
        evaluator = Evaluator(model,
                              loss_f,
                              device=device,
                              logger=logger,
                              save_dir=exp_dir,
                              is_progress_bar=not args.no_progress_bar,
                              use_wandb=use_wandb)

        evaluator(test_loader,
                  is_metrics=args.is_metrics,
                  is_losses=not args.no_test)
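
The "factor" branch above doubles the batch size because FactorVAE draws two independent batches per iteration (one for the VAE update, one for its discriminator), so a single doubled loader batch can simply be split in half inside the training step. A minimal sketch, assuming a batch tensor x:

# Split one doubled batch into the two halves FactorVAE consumes.
x1, x2 = x.chunk(2, dim=0)  # x1: VAE step, x2: discriminator step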
Example #5
    m1, s1 = _calculate_activation_statistics(dataloader_original, length, model, batch_size, dims)
    print("Calculated m1 and s1")
    m2, s2 = _calculate_activation_statistics(dataloader_reconstructed, length, model, batch_size, dims)
    print("Calculated m2 and s2")

    fid_value = _calculate_frechet_distance(m1, s1, m2, s2)
    return fid_value
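
_calculate_activation_statistics and _calculate_frechet_distance are defined outside this snippet. The latter implements the standard Frechet distance between two Gaussians, FID = ||mu1 - mu2||^2 + Tr(Sigma1 + Sigma2 - 2(Sigma1 Sigma2)^{1/2}); a minimal NumPy/SciPy sketch:

import numpy as np
from scipy import linalg

def _calculate_frechet_distance(mu1, sigma1, mu2, sigma2):
    """Frechet distance between two Gaussians fitted to activations (sketch)."""
    diff = mu1 - mu2
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # discard numerical-noise imaginary parts
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * np.trace(covmean)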

if __name__ == "__main__":
    import torch
    import random
    from disvae.utils.modelIO import load_model
    import argparse
    import logging
    import sys

    MODEL_PATH = sys.argv[2] # get the model path (e.g. "results/betaH_mnist")
    MODEL_NAME = "model.pt"
    GPU_AVAILABLE = torch.cuda.is_available()

    vae_model = load_model(directory=MODEL_PATH, is_gpu=GPU_AVAILABLE, filename=MODEL_NAME)

    mode = sys.argv[1] # get the name of the dataset you want to measure FID for
    
    dataloader1 = get_dataloaders(mode, batch_size=128)[0]

    fid_value = get_fid_value(dataloader1, vae_model)

    print("FID for ", mode, ": ", fid_value)
Example #6
def main(args: argparse.Namespace):
    """Main train and evaluation function."""
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s - %(funcName)s: %(message)s', "%H:%M:%S")
    logger = logging.getLogger(__name__)
    logger.setLevel("INFO")
    stream = logging.StreamHandler()
    stream.setLevel("INFO")
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = get_device(is_gpu=not args.no_cuda)
    exp_dir = os.path.join(RES_DIR, args.name)
    logger.info(
        f"Root directory for saving and loading experiments: {exp_dir}")

    if not args.is_eval_only:

        create_safe_directory(exp_dir, logger=logger)

        if args.loss == "factor":
            logger.info(
                "FactorVae needs 2 batches per iteration. "
                "To replicate this behavior, double batch size and epochs.")
            args.batch_size *= 2
            args.epochs *= 2

        # PREPARES DATA
        train_loader = get_dataloaders(args.dataset,
                                       noise=args.noise,
                                       batch_size=args.batch_size,
                                       logger=logger)
        logger.info(
            f"Train {args.dataset} with {len(train_loader.dataset)} samples")

        # PREPARES MODEL
        args.img_size = get_img_size(args.dataset)  # stores for metadata
        model = VAE(args.img_size, args.latent_dim)
        logger.info(f'Num parameters in model: {get_n_param(model)}')

        # TRAINS
        optimizer = optim.Adam(model.parameters(), lr=args.lr)

        model = model.to(device)
        gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
        loss_f = get_loss_f(args.loss,
                            n_data=len(train_loader.dataset),
                            device=device,
                            **vars(args))

        if args.loss in ['tdGJS', 'tGJS']:
            loss_optimizer = optim.Adam(loss_f.parameters(), lr=args.lr)
        else:
            loss_optimizer = None
        logger.info(f"Loss optimizer: {loss_optimizer}")
        trainer = Trainer(model,
                          optimizer,
                          loss_f,
                          device=device,
                          logger=logger,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar,
                          gif_visualizer=gif_visualizer,
                          loss_optimizer=loss_optimizer,
                          denoise=args.noise is not None)
        trainer(
            train_loader,
            epochs=args.epochs,
            checkpoint_every=args.checkpoint_every,
        )

        # SAVE MODEL AND EXPERIMENT INFORMATION
        save_model(trainer.model, exp_dir, metadata=vars(args))

    # Eval
    model = load_model(exp_dir, is_gpu=not args.no_cuda)
    metadata = load_metadata(exp_dir)

    test_loader = get_dataloaders(metadata["dataset"],
                                  noise=args.noise,
                                  train=False,
                                  batch_size=128,
                                  logger=logger)
    loss_f = get_loss_f(args.loss,
                        n_data=len(test_loader.dataset),
                        device=device,
                        **vars(args))
    evaluator = Evaluator(model,
                          loss_f,
                          device=device,
                          is_metrics=args.is_metrics,
                          is_train=False,
                          logger=logger,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar,
                          denoise=args.noise is not None)
    evaluator(test_loader)

    # Also evaluate on the train set
    test_loader = get_dataloaders(metadata["dataset"],
                                  train=True,
                                  batch_size=128,
                                  logger=logger)
    loss_f = get_loss_f(args.loss,
                        n_data=len(test_loader.dataset),
                        device=device,
                        **vars(args))
    evaluator = Evaluator(model,
                          loss_f,
                          device=device,
                          is_metrics=args.is_metrics,
                          is_train=True,
                          logger=logger,
                          save_dir=exp_dir,
                          is_progress_bar=not args.no_progress_bar)
    evaluator(test_loader)
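
For the tdGJS/tGJS losses, loss_f is itself an nn.Module with trainable parameters (hence loss_f.parameters() above), so it gets its own Adam optimizer. Inside the training step, both sets of parameters would then be updated from the same backward pass; a sketch with illustrative names, not the repository's actual Trainer:

loss = loss_f(data, recon_batch, latent_dist)  # call signature is illustrative
optimizer.zero_grad()
if loss_optimizer is not None:
    loss_optimizer.zero_grad()
loss.backward()
optimizer.step()
if loss_optimizer is not None:
    loss_optimizer.step()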
Example #7
def main(args):
    """Main train and evaluation function.

    Parameters
    ----------
    args: argparse.Namespace
        Arguments
    """

    # Logging info
    formatter = logging.Formatter('%(asctime)s %(levelname)s - '
                                  '%(funcName)s: %(message)s',
                                  '%H:%M:%S')
    logger = logging.getLogger(__name__)
    logger.setLevel('INFO')
    stream = logging.StreamHandler()
    stream.setLevel('INFO')
    stream.setFormatter(formatter)
    logger.addHandler(stream)

    set_seed(args.seed)
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
    model_name = f'{args.name}_lr{args.lr}_z{args.latent_dim}' \
                 + f'_h{args.hidden_dim}_p{args.p_dropout}'
    model_dir = os.path.join(args.results, model_name)
    logger.info(f'Directory for saving and loading models: {model_dir}')

    if not args.eval:
        # Model directory
        new_model_dir(model_dir, logger=logger)

        # Dataloaders
        train_loader, valid_loader = get_dataloaders(
            args.data, args.t_hours, args.n_bins,
            validation=True, dynamic=args.dynamic,
            batch_size=args.bs, logger=logger)
        logger.info(
            f'Train {args.model_type}-{args.t_hours} ' +
            f'with {len(train_loader.dataset)} samples')

        # Load model
        n_tokens = len(np.load(
            os.path.join(
                args.data, '_dicts', f'{args.t_hours}_{args.n_bins}.npy'),
            allow_pickle=True).item())
        model = init_model(
            args.model_type, n_tokens, args.latent_dim, args.hidden_dim,
            p_dropout=args.p_dropout, dt=args.dt,
            weighted=args.weighted, dynamic=args.dynamic)
        logger.info(f'#params in model: {get_n_param(model)}')

        # Optimizer
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
        loss_f = BCE()
        model = model.to(device)

        # Training
        trainer = Trainer(
            model, loss_f, optimizer,
            device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)
        trainer.train(
            train_loader, valid_loader,
            epochs=args.epochs, early_stopping=args.early_stopping)

        # Save model
        metadata = vars(args)
        metadata['n_tokens'] = n_tokens
        save_model(trainer.model, model_dir, metadata=metadata)

    if args.test:
        # Load model
        model = load_model(model_dir, is_gpu=args.cuda)
        metadata = load_metadata(model_dir)

        # Dataloader
        test_loader, _ = get_dataloaders(
            metadata['data'], metadata['t_hours'], metadata['n_bins'],
            validation=False, dynamic=metadata['dynamic'], batch_size=128,
            shuffle=False, logger=logger)

        # Evaluate: reuse the Trainer's validation pass on the test set
        loss_f = BCE()
        evaluator = Trainer(
            model, loss_f,
            device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)
        evaluator._valid_epoch(test_loader)
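
BCE() is not defined in this snippet; assuming it wraps binary cross-entropy over the model's logits, a minimal stand-in could be:

import torch.nn as nn
import torch.nn.functional as F

class BCE(nn.Module):
    """Hypothetical stand-in for the BCE loss wrapper used above."""
    def forward(self, y_pred, y_true):
        return F.binary_cross_entropy_with_logits(y_pred, y_true.float())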