Example #1
0
def main():
    """CLI entry point: train an image classifier and save a checkpoint.

    Parses command-line arguments, builds dataloaders and a
    model/optimizer pair through the project's ``utils`` helpers, trains
    the model inside an ``active_session()`` context, and saves the
    resulting checkpoint to ``--save_dir``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('data_dir', help='Path to image files.', type=str)
    parser.add_argument('--save_dir', dest="save_dir", type=str, action="store",
                        default="./", help="Directory to save checkpoints")
    parser.add_argument('--arch', dest="arch", type=str, action="store",
                        default="densenet121", help="Architecture type default is densenet121")
    parser.add_argument('--learning_rate', dest="learning_rate", type=float, action="store", default=0.003)
    parser.add_argument('--epochs', dest="epochs", type=int, action="store", default=5)
    parser.add_argument('--hidden_units', dest="hidden_units", type=int, nargs='+', action="store", default=[512])
    parser.add_argument('--gpu', action='store_true')
    num_outputs = 102  # number of output classes for the classifier head
    args = parser.parse_args()
    device = utils.get_device(args.gpu)
    dataloaders, class_to_idx = utils.get_dataloaders(args.data_dir)
    model, optimizer, _hidden_layers = utils.get_model_and_optimizer(
        args.arch, args.learning_rate,
        num_outputs, device, args.hidden_units
    )
    # Bail out when no model was built (presumably an unsupported --arch
    # value — confirm against utils.get_model_and_optimizer).
    if not model:
        return

    # Remember the class mapping so it travels with the checkpoint.
    model.class_to_idx = class_to_idx
    with active_session():
        utils.train_model(
            model, optimizer, dataloaders, device,
            epochs=args.epochs, print_every=20
        )

    utils.save_model(model, args.learning_rate, args.epochs, optimizer, num_outputs, args.hidden_units, args.save_dir)
    # Removed a stray bare `plot_real_vs_fake` expression here: it was a
    # no-op statement (and a NameError if the name is not imported).

if __name__ == "__main__":
    # Script entry point: load a config, build encoder/generator models,
    # and iterate the dataset collecting per-emotion latent statistics.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', help='config file name')
    args = parser.parse_args()

    # Root directory for dataset
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input (prefer yaml.safe_load); the file handle
    # from open() is also never closed — consider a `with` block.
    cfg = yaml.load(open(args.config_file))

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # shuffle=False keeps batch order aligned with the label CSV below.
    dataloader = get_dataset(cfg, shuffle=False)
    original_lab_df, labels = get_lab_df('data/celeba/output_classif.csv')

    # Load pretrained encoder and generator from the paths in the config.
    netE, optimizerE = get_model_and_optimizer(VariationalEncoder,
                                               cfg["enc_path"], cfg)
    netG, optimizerG = get_model_and_optimizer(Generator, cfg["gen_path"], cfg)

    # Accumulates latent vectors per emotion label.
    emotion_latents = defaultdict(list)
    # lab_iter = iter(labs)
    # output_file = open("data/latent_to_emotion.csv", "w+")
    emotion_order = ["Angry", "Happy", "Neutral", "Sad", "Surprise", "Fear"]
    average_emotion = {}
    original = []
    post_recon = []
    # Inference only — no gradients needed.
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader, 0)):
            # On CPU-only machines, optionally stop early after 500 batches
            # when the config enables the speedup flag.
            if not torch.cuda.is_available(
            ) and i > 500 and cfg["speedup_emot_change"]:
                break
            X = data[0].to(device)
Example #3
0
                np.save(os.path.join(opts.log_dir, 'test_masks', wsi_name), save_array)
                mask_pil = Image.fromarray(save_mask)
                mask_pil.save(os.path.join(opts.log_dir, wsi_name + '_mask.png'))


if __name__ == '__main__':
    # Script entry point: set up Horovod-distributed training or evaluation
    # depending on the parsed options.
    opts = get_options()
    pprint(vars(opts))
    # Run horovod init
    init(opts)
    file_writer = setup_logger(opts)

    train_sampler, valid_sampler, test_sampler, preprocessor = start(opts)

    model, optimizer, compression = get_model_and_optimizer(opts)

    if opts.evaluate:
        # Validate input explicitly: `assert` is stripped under `python -O`,
        # and the original message already named ValueError as the intent.
        if not opts.model_dir:
            raise ValueError(
                "No model_dir given for evaluation (--model_dir <type=str>)")

        # Only rank 0 prints, to avoid duplicated output across workers.
        if hvd.rank() == 0:
            print('Preparing evaluation...')
        test(opts, model, optimizer, file_writer, compression, test_sampler)
        if hvd.rank() == 0:
            print('Evaluation is done')
    else:
        if hvd.rank() == 0:
            print('Preparing training...')
        train(opts, model, optimizer, file_writer, compression, train_sampler,
              valid_sampler, preprocessor)
        if hvd.rank() == 0:
            print('Training is done')
from models import Generator, Discriminator, VariationalEncoder, DiscriminatorWGAN
from utils import get_dataset, get_model_and_optimizer, save_images, reparameterize, loss_function_kld, \
    plot_real_vs_fake

if __name__ == "__main__":
    # Script entry point: load a config and set up a WGAN-style training
    # loop with generator, discriminator and variational encoder.
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', help='config file name')
    args = parser.parse_args()

    # Root directory for dataset
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input (prefer yaml.safe_load); the file handle
    # from open() is also never closed — consider a `with` block.
    cfg = yaml.load(open(args.config_file))
    dataloader = get_dataset(cfg)
    # NOTE(review): `> 0` is redundant — is_available() already returns bool.
    device = torch.device("cuda:0" if torch.cuda.is_available() > 0 else "cpu")

    # Load the three networks and their optimizers from config paths.
    netG, optimizerG = get_model_and_optimizer(Generator, cfg["gen_path"], cfg)
    netD, optimizerD = get_model_and_optimizer(DiscriminatorWGAN, cfg["dis_path"], cfg)
    netE, optimizerE = get_model_and_optimizer(VariationalEncoder, cfg["enc_path"], cfg)

    calc_BCE_loss = nn.BCELoss()
    calc_MSE_loss = nn.MSELoss()

    # Fixed batch used for visualizing progress across epochs.
    test_batch = next(iter(dataloader))

    # Keep the first 64 images for the fixed visualization grid.
    test_imgs = test_batch[0].to(device)[:64]
    iteration = 0
    diter = 5  # presumably discriminator iterations per generator step — confirm
    start = time()
    # label = torch.full((64,), cfg["real_label"], device=device)

    for epoch in range(cfg["num_epoch"]):
Example #5
0
def train(config, model_dir):
    """Given a Configuration object containing training settings and hyperparameters, the train method launches a Trainer instance
    which trains a neural network model.

    Arguments:
        config {Configuration} -- Configuration object of settings, from a JSON file.
        model_dir {string} -- Path to the target directory of logs and results

    Returns:
        loss {double} -- The final validation or training loss, depending on the Trainer object.
    """

    # Record time (used below to timestamp the saved model file)
    now = datetime.now()

    # Set a fixed seed for reproducibility
    if config.cuda:
        torch.cuda.manual_seed(2222)
    else:
        torch.manual_seed(2222)

    params = config.params
    pretrain = config.pretrain
    data_path = config.data_path
    source = config.source
    mode = config.mode
    trainer_name = config.trainer_name
    model_name = config.model_name
    optimizer_name = config.optimizer_name

    n_epochs = params.n_epochs
    # NOTE(review): `lr`, `momentum`, `batch_size` and `use_val` were read
    # here but never used in this function (presumably consumed downstream
    # via `params`/`config`); the unused locals have been removed.

    use_transform = utils.str_to_bool(config.transform)

    # Define a loss function. reduction='none' is elementwise loss, later summed manually
    criterion = nn.MSELoss(reduction='none')

    ############### INITIALISE MODEL AND OPTIMIZER ######################
    # Define a model and optimizer pair
    model, optimizer = utils.get_model_and_optimizer(model_name,
                                                     optimizer_name, pretrain,
                                                     params)

    ############## GET DATALOADERS ########################
    # Get dataset of recovery curves
    logging.info("Loading the datasets...")
    dataset = utils.get_dataset(source, data_path, model_dir, mode,
                                use_transform, params)
    logging.info("- Loading complete.")

    # Initialize a Regressor training object
    logging.info("Initializing trainer object...")
    trainer = utils.get_trainer(trainer_name, model, config, criterion,
                                optimizer, dataset, model_dir)
    logging.info("- Initialization complete.")

    ################ TRAIN THE MODEL ######################
    logging.info("Starting training for {} epoch(s)...".format(n_epochs))
    trainer.train()
    logging.info("- Training complete.")

    # Persist the full trained model object, named by start timestamp.
    torch.save(trainer.model,
               os.path.join(model_dir,
                            now.strftime("%Y%m%d-%H%M") + ".pt"))

    return trainer.loss
Example #6
0
import tensorflow as tf
import numpy as np
from pprint import pprint
import pdb
from model import DCGMM
from options import get_options
from dataset_utils import get_train_and_val_dataset, get_template_and_image_dataset
from utils import get_model_and_optimizer, setup_normalizing_run
from logging_utils import setup_logger
from train import train
from eval import eval_mode

if __name__ == '__main__':
    # Script entry point: dispatch to inference or training based on options.
    opts = get_options()
    pprint(vars(opts))

    if opts.eval_mode:
        # Inference: normalize, load model parts (optimizer unused), evaluate.
        setup_normalizing_run(opts)
        e_step, m_step, _ = get_model_and_optimizer(opts)
        template_dataset, image_dataset = get_template_and_image_dataset(opts)
        eval_mode(opts, e_step, m_step, template_dataset, image_dataset)
    else:
        # Training: set up logging, model/optimizer and datasets, then train.
        tb_logger, logdir = setup_logger(opts)
        e_step, m_step, optimizer = get_model_and_optimizer(opts)
        train_dataset, val_dataset = get_train_and_val_dataset(opts)
        train(opts, e_step, m_step, optimizer, train_dataset, val_dataset,
              tb_logger, logdir)