  argparser.add_argument('--reduce_dim', default=False, type=ast.literal_eval)

  argparser.add_argument('--lr', default=1e-4, type=float)
  argparser.add_argument('--smoothing_epsilon', default=0.1, type=float)
  argparser.add_argument('--save_path', default='convnet/', type=str)
  argparser.add_argument('--plot_attention', default=False, type=ast.literal_eval)
  argparser.add_argument('--eval_step', default=10, type=int)
  argparser.add_argument('--max_epochs', default=500, type=int)
  argparser.add_argument('--train_acc_step', default=5, type=int)
  argparser.add_argument('--load_model', default=False, type=ast.literal_eval)
  argparser.add_argument('--clip_grad', default=False, type=ast.literal_eval)
  argparser.add_argument('--weight_decay', default=0., type=float)  # L2 regularization -> 0.01
  argparser.add_argument('--l1_reg', default=0., type=float)  # L1 regularization -> 0.001
  argparser.add_argument('--warmup', default=100, type=int)
  argparser.add_argument('--clip_grad_val', default=0.1, type=float)

  argparser.add_argument('--logfile', default='_convnet_experiments_feedback_logs.txt', type=str)
  args = argparser.parse_args()

  # logging.basicConfig(stream=sys.stdout, level=logging.INFO)
  logging.basicConfig(filename=args.logfile, filemode='a', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

  settings = u.populate_configuration(settings, vars(args))

  global plotter
  plotter = u.VisdomPlotter(env_name='ConvNet Experiments')

  if not os.path.isdir(settings['save_path']):
    os.makedirs(settings['save_path'])

  launch_experiment(settings)
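
A note on the type=ast.literal_eval pattern used throughout these snippets: plain type=bool is an argparse trap, because bool('False') is True (any non-empty string is truthy), whereas ast.literal_eval('False') parses the literal and returns False. A minimal, self-contained illustration:

import argparse
import ast

parser = argparse.ArgumentParser()
parser.add_argument('--naive', default=False, type=bool)               # bool('False') -> True
parser.add_argument('--parsed', default=False, type=ast.literal_eval)  # literal_eval('False') -> False

args = parser.parse_args(['--naive', 'False', '--parsed', 'False'])
print(args.naive, args.parsed)  # prints: True False
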
Example No. 2
parser.add_argument('-plot', action="store_true", default=False, help='Enable plotting')
parser.add_argument('-trainTestBS', type=int, help='Ratio of Train BS to test BS')

args = parser.parse_args()
device = torch.device('cuda')

# torch.manual_seed(0)
# torch.cuda.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(0)
torch.backends.cudnn.benchmark = True
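# cudnn.benchmark=True lets cuDNN time several convolution algorithms on the first
# batches and cache the fastest one per input shape -- a speed-up when input sizes
# are fixed, at the cost of run-to-run reproducibility. The seeding/deterministic
# block left commented out above is the usual switch for repeatable runs instead.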

plotter = None
if args.plot:
    plotter = utils.VisdomPlotter(env_name=args.name, modality='hallucinating')
    plotter.argsTile(args.__dict__)

# Ready the training and validation data
dataset = dataDef.twoStreamDataset(args.modality1, args.modality2)
trainSampler, validationSampler = dataDef.testTrainSplit(len(dataset), args.valRatio)
trainLoader = torch.utils.data.DataLoader(dataset, sampler=trainSampler, batch_size=args.batchSize, num_workers=8)
valLoader = torch.utils.data.DataLoader(dataset, sampler=validationSampler, batch_size=args.trainTestBS * args.batchSize, num_workers=8)

# Initialize our network
if args.modality1 == 'PAN':
    net1 = modelDef.panNet()
    net2 = modelDef.msNet()
    model = modelDef.halucinationNet(net1, net2)
    testModel = modelDef.panNet()
elif args.modality1 == 'MS':
    # The excerpt cuts off here; by symmetry with the PAN branch above, the
    # MS branch presumably swaps the two sub-networks (an assumption):
    net1 = modelDef.msNet()
    net2 = modelDef.panNet()
    model = modelDef.halucinationNet(net1, net2)
    testModel = modelDef.msNet()
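
dataDef.testTrainSplit is not shown in these excerpts; judging by how its two return values are passed as sampler= arguments to the DataLoaders above, a plausible minimal implementation (an assumption, not the repository's actual code) splits shuffled indices into two SubsetRandomSamplers:

import numpy as np
from torch.utils.data import SubsetRandomSampler

def test_train_split(dataset_len, val_ratio):
    # Hypothetical stand-in for dataDef.testTrainSplit: shuffle all indices,
    # carve off val_ratio of them for validation, return (train, validation).
    indices = np.random.permutation(dataset_len)
    n_val = int(dataset_len * val_ratio)
    return SubsetRandomSampler(indices[n_val:]), SubsetRandomSampler(indices[:n_val])
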
Example No. 3
    # Data transform
    data_transform = transforms.Compose([
        transforms.Resize((config.hyperparameters.image_size,
                           config.hyperparameters.image_size),
                          interpolation=1),
        transforms.ToTensor()
    ])

    # Get data loaders
    test_loaders_list = dataset_utils.dataloaders.get_testevaluators(
        config, data_transform, batch_size, test_folds, nrof_folds,
        ['bbt_ep01'])

    plotter = utils.VisdomPlotter(config.visdom.server,
                                  env_name='evaluation',
                                  port=config.visdom.port)

    if not test_loaders_list:
        print('No datasets selected for evaluation.')
        print('Evaluation terminated.')
        exit(0)

    # Launch evaluation
    for test_name, test_loader, eval_function in test_loaders_list:
        print('\nEvaluation on {}'.format(test_name))
        eval_function(test_loader,
                      model,
                      device,
                      test_name,
                      plotter=plotter,
Example No. 4
    argparser.add_argument('--lr', default=1e-4, type=float)
    argparser.add_argument('--smoothing_epsilon', default=0.1, type=float)
    argparser.add_argument('--save_path', default='transformer/', type=str)
    argparser.add_argument('--plot_attention',
                           default=False,
                           type=ast.literal_eval)
    argparser.add_argument('--eval_step', default=10, type=int)
    argparser.add_argument('--max_epochs', default=500, type=int)
    argparser.add_argument('--num_device', default=0, type=int)

    argparser.add_argument('--logfile',
                           default='_transformer_experiments_logs.txt',
                           type=str)
    args = argparser.parse_args()

    # logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logging.basicConfig(filename=args.logfile,
                        filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s')

    settings = u.populate_configuration(settings, vars(args))

    global plotter
    plotter = u.VisdomPlotter(env_name='Transformer Experiments')

    if not os.path.isdir(settings['save_path']):
        os.makedirs(settings['save_path'])

    launch_experiment(settings)
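
u.populate_configuration is not shown either; given that it receives the defaults dict and vars(args) (the parsed Namespace as a dict), it presumably overlays the command-line values onto the defaults. A sketch under that assumption:

def populate_configuration(settings, overrides):
    # Hypothetical equivalent of u.populate_configuration: overlay parsed
    # CLI arguments onto the default settings without mutating the original.
    merged = dict(settings)
    merged.update(overrides)
    return merged
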
Example No. 5
    with open(args.config) as json_config_file:
        config = utils.AttrDict(json.load(json_config_file))
    print('Description of video {}.'.format(config.dataset.movie.movie_path))
    print('CONFIGURATION:\t{}'.format(args.config))

    # Load model
    print('Loading model from checkpoint {}'.format(config.model.checkpoint_path))
    checkpoint = torch.load(config.model.checkpoint_path)
    embedding_size = checkpoint['embedding_size']

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    model = models.load_model(config.model.model_arch,
                              device,
                              embedding_size=embedding_size)
    model.load_state_dict(checkpoint['model_state_dict'])

    plotter = utils.VisdomPlotter(config.visdom.server, env_name='video_annotation', port=config.visdom.port)

    vd_utils.annotate_video(config.dataset.movie.movie_path,
                            config.dataset.movie.dataset_path,
                            config.output.video_dir,
                            model,
                            device,
                            max_frame=config.dataset.movie.num_frame,
                            tracker_max_age=config.hyperparameters.tracker_max_age,
                            plotter=plotter)
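
Note the round trip this snippet relies on: the checkpoint stores embedding_size alongside the weights, so the architecture can be rebuilt before load_state_dict is called. The pattern in isolation (build_model is a hypothetical factory standing in for models.load_model):

import torch

# Saving: persist whatever is needed to reconstruct the model, not just weights.
torch.save({'embedding_size': 128,
            'model_state_dict': model.state_dict()}, 'checkpoint.pth')

# Loading: rebuild the architecture first, then restore the weights into it.
checkpoint = torch.load('checkpoint.pth')
model = build_model(embedding_size=checkpoint['embedding_size'])
model.load_state_dict(checkpoint['model_state_dict'])
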
Example No. 6
import numpy as np
import utils
import pandas as pd
import argparse
import pickle
import os

parser = argparse.ArgumentParser(
    description='Consistency-driven Multi-Label Image Classification')
parser.add_argument('--name',
                    default='',
                    type=str,
                    help='Performance filename')
args = parser.parse_args()

plotter = utils.VisdomPlotter(env_name=args.name)

paramFile = os.path.join('runs', args.name, "parameters.csv")
perfFile = os.path.join('runs', args.name, "performance.csv")
mediaFile = os.path.join('runs', args.name, "media.pkl")

# Display parameters
paramsDict = {}
with open(paramFile) as f:
    for line in f:
        key, value = line.strip().split(',', 1)  # drop the trailing newline; split on the first comma only
        paramsDict[key] = value

plotter.argsTile(paramsDict)

# Display performance
performanceDF = pd.read_csv(perfFile, sep=',', header=0)
Example No. 7
    argparser.add_argument('--max_dec_in_seq_len', default=600, type=int)
    argparser.add_argument('--scaling', default=True, type=ast.literal_eval)

    argparser.add_argument('--lr', default=1e-4, type=float)
    argparser.add_argument('--save_path', default='pretraining/', type=str)
    argparser.add_argument('--eval_step', default=10, type=int)
    argparser.add_argument('--train_score_step', default=2, type=int)
    argparser.add_argument('--max_epochs', default=50, type=int)

    argparser.add_argument('--logfile',
                           default='_transformer_pretraining_logs.txt',
                           type=str)
    args = argparser.parse_args()

    # logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    logging.basicConfig(filename=args.logfile,
                        filemode='a',
                        level=logging.INFO,
                        format='%(asctime)s - %(levelname)s - %(message)s')

    settings = u.populate_configuration(settings, vars(args))

    global plotter
    plotter = u.VisdomPlotter(env_name='Pretrainer Plots')

    if not os.path.isdir(settings['save_path']):
        os.mkdir(settings['save_path'])

    rep = input('Start Transformer Parrot Pretraining? (y or n): ')
    if rep == 'y':
        pretrain_transformer_parrot(settings)
Example No. 8
                    type=int,
                    help='Ratio of Train BS to test BS')

args = parser.parse_args()
device = torch.device('cuda')

# torch.manual_seed(0)
# torch.cuda.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(0)
torch.backends.cudnn.benchmark = True

plotter = None
if args.plot:
    plotter = utils.VisdomPlotter(env_name=args.name, modality='twoStream')
    plotter.argsTile(args.__dict__)

# Ready the training and validation data
dataset = dataDef.twoStreamDataset(args.modality1, args.modality2)
trainSampler, validationSampler = dataDef.testTrainSplit(
    len(dataset), args.valRatio)
trainLoader = torch.utils.data.DataLoader(dataset,
                                          sampler=trainSampler,
                                          batch_size=args.batchSize,
                                          num_workers=8)
valLoader = torch.utils.data.DataLoader(dataset,
                                        sampler=validationSampler,
                                        batch_size=args.trainTestBS * args.batchSize,
                                        num_workers=8)
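
Giving the validation loader a batch trainTestBS times larger than the training batch works because evaluation holds no gradient buffers. The eval loop itself is not part of this excerpt; it would typically look like the following sketch, reusing the model, device and valLoader defined above:

model.eval()                      # dropout off, batch-norm uses running stats
with torch.no_grad():             # no autograd state, so the larger batch fits in memory
    for inputs, targets in valLoader:
        outputs = model(inputs.to(device))
        # ... accumulate metrics on outputs vs. targets
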
Example No. 9
def main(args):

    print('CONFIGURATION:\t{}'.format(args.config))
    with open(args.config) as json_config_file:
        config = utils.AttrDict(json.load(json_config_file))

    # Set up output directory
    # subdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')
    subdir = config.visdom.environment_name
    model_dir = os.path.join(os.path.expanduser(config.output.output_dir), subdir)
    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)
    else:
        raise Exception('Environment name {} already taken.'.format(subdir))
    config_filename = path_leaf(args.config)
    copyfile(args.config, os.path.join(model_dir, config_filename))

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    data_transform = transforms.Compose([
        transforms.Resize((config.hyperparameters.image_size, config.hyperparameters.image_size), interpolation=1),
        transforms.ToTensor()
    ])

    test_batch_size = (config.hyperparameters.people_per_batch * config.hyperparameters.images_per_person) // 2
    nrof_folds = config.dataset.cross_validation.num_fold
    fold_tool = utils.FoldGenerator(nrof_folds,
                                    config.dataset.cross_validation.num_train_folds,
                                    config.dataset.cross_validation.num_val_folds)
    train_folds, val_folds, test_folds = fold_tool.get_fold()

    ###########################
    # SET UP DATALOADERS HERE #
    ###########################

    source_loader = dataset_utils.get_coxs2v_trainset(config.dataset.coxs2v.still_dir,
                                                      config.dataset.coxs2v.video2_dir,
                                                      config.dataset.coxs2v.video2_pairs,
                                                      train_folds,
                                                      nrof_folds,
                                                      data_transform,
                                                      config.hyperparameters.people_per_batch,
                                                      config.hyperparameters.images_per_person)

    target_loader = dataset_utils.get_coxs2v_trainset(config.dataset.coxs2v.still_dir,
                                                      config.dataset.coxs2v.video4_dir,
                                                      config.dataset.coxs2v.video4_pairs,
                                                      val_folds,
                                                      nrof_folds,
                                                      data_transform,
                                                      config.hyperparameters.people_per_batch,
                                                      config.hyperparameters.images_per_person)

    test_loaders_list = dataloaders.get_testevaluators(config,
                                                       data_transform,
                                                       test_batch_size,
                                                       test_folds,
                                                       nrof_folds,
                                                       is_vggface2=False,
                                                       is_lfw=True,
                                                       is_cox_video1=False,
                                                       is_cox_video2=True,
                                                       is_cox_video3=False,
                                                       is_cox_video4=True)
    ###################
    # DATALOADERS END #
    ###################

    # Set up training model
    print('Building training model')
    if config.model.checkpoint:
        print('Loading from checkpoint {}'.format(config.model.checkpoint_path))
        checkpoint = torch.load(config.model.checkpoint_path)
        embedding_size = checkpoint['embedding_size']
        # start_epoch = checkpoint['epoch']
        start_epoch = 0
    else:
        embedding_size = config.model.embedding_size
        start_epoch = 0

    model = models.load_model(config.model.model_arch,
                              embedding_size=embedding_size,
                              imgnet_pretrained=config.model.pretrained_imagenet)

    optimizer = optim.SGD(model.parameters(), lr=config.hyperparameters.learning_rate, momentum=0.9, nesterov=True, weight_decay=2e-4)

    scheduler = lr_scheduler.ExponentialLR(optimizer, config.hyperparameters.learning_rate_decay_factor)

    if config.model.checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
        # optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        # scheduler.load_state_dict(checkpoint['scheduler_state_dict'])

    model = model.to(device)

    plotter = utils.VisdomPlotter(env_name=config.visdom.environment_name, port=config.visdom.port)

    print('Quadruplet loss training mode.')
    miner = miners.SemihardNegativeQuadrupletSelector(config.hyperparameters.margin)

    loss = losses.QuadrupletLoss2(config.hyperparameters.margin,
                                  config.hyperparameters.margin2,
                                  lamda=config.hyperparameters.lamda)

    trainer = Quadruplet_Trainer(model,
                                 miner,
                                 loss,
                                 optimizer,
                                 scheduler,
                                 device,
                                 plotter,
                                 config.hyperparameters.margin,
                                 config.model.embedding_size,
                                 config.visdom.log_interval)

    # Loop over epochs
    print('Training Launched.')
    for epoch in range(start_epoch, config.hyperparameters.n_epochs):

        # Validation
        for test_name, test_loader in test_loaders_list:
            print('\nEvaluation on {}'.format(test_name))
            trainer.Evaluate(test_loader,
                             name=test_name,
                             nrof_folds=nrof_folds,
                             val_far=config.hyperparameters.val_far)

        # Training
        print('\nTrain Epoch {}'.format(epoch))
        trainer.Train_Epoch(source_loader, target_loader)

        # Save model
        if not (epoch + 1) % config.output.save_interval:
            if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
                os.makedirs(model_dir)
            model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
            print('\nSave model at {}'.format(model_file_path))

            torch.save({'epoch': epoch,
                        'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'embedding_size': config.model.embedding_size
                        }, model_file_path)

    print('Finish.')
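
utils.state_dict_to_cpu is not shown in these excerpts; its role is evidently to detach the checkpoint from the GPU so it can later be loaded on CPU-only machines. A plausible implementation (an assumption, not the repository's code):

def state_dict_to_cpu(state_dict):
    # Copy every tensor to host memory so torch.load of the resulting
    # checkpoint works without a CUDA device present.
    return {key: value.cpu() for key, value in state_dict.items()}
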
Example No. 10
                    type=int,
                    help='Ratio of Train BS to test BS')

args = parser.parse_args()
device = torch.device('cuda')

# torch.manual_seed(0)
# torch.cuda.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(0)
torch.backends.cudnn.benchmark = True

plotter = None
if args.plot:
    plotter = utils.VisdomPlotter(env_name=args.name, modality='finalNet')
    plotter.argsTile(args.__dict__)

# Ready the training and validation data
dataset = dataDef.twoStreamDataset(args.modality1, args.modality1)
trainSampler, validationSampler = dataDef.testTrainSplit(
    len(dataset), args.valRatio)
trainLoader = torch.utils.data.DataLoader(dataset,
                                          sampler=trainSampler,
                                          batch_size=args.batchSize,
                                          num_workers=8)
valLoader = torch.utils.data.DataLoader(dataset,
                                        sampler=validationSampler,
                                        batch_size=args.trainTestBS * args.batchSize,
                                        num_workers=8)
Example No. 11
def main(args):

    print('Feature extractor training.')
    print('CONFIGURATION:\t{}'.format(args.config))
    with open(args.config) as json_config_file:
        config = utils.AttrDict(json.load(json_config_file))

    # Set up output directory
    experiment_name = generate_experiment_name(config)
    model_dir = os.path.join(os.path.expanduser(config.output.output_dir), experiment_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    print('Model saved at {}'.format(model_dir))

    config_filename = path_leaf(args.config)
    copyfile(args.config, os.path.join(model_dir, config_filename))

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    source_loader = dataloaders.get_traindataloaders(config.source_dataset,
                                                     config)
    target_loader = dataloaders.get_traindataloaders(config.target_dataset,
                                                     config)
    evaluators_list = dataloaders.get_evaluators(config.evaluation_datasets,
                                                 config)

    # Set up training model
    print('Building training model')
    if config.model.checkpoint:
        checkpoint_path = config.model.checkpoint_path
    else:
        checkpoint_path = None
    model = models.load_model(config.model.model_arch,
                              device,
                              checkpoint_path=checkpoint_path,
                              embedding_size=config.model.embedding_size,
                              imgnet_pretrained=config.model.pretrained_imagenet)

    optimizer = optim.SGD(model.parameters(), lr=config.hyperparameters.learning_rate, momentum=0.9, nesterov=True, weight_decay=2e-4)

    scheduler = lr_scheduler.ExponentialLR(optimizer, config.hyperparameters.learning_rate_decay_factor)

    model = model.to(device)

    plotter = utils.VisdomPlotter(config.visdom.server, env_name=experiment_name, port=config.visdom.port)

    print('Fitting source dataset.')
    gmixture = clustering.distance_supervised_gaussian_mixture(source_loader,
                                                               model,
                                                               device,
                                                               _plotter=plotter,
                                                               name='Source Gaussians')

    print('Fitting target dataset.')
    clustering.update_gaussian_mixture(gmixture,
                                       target_loader,
                                       model,
                                       device,
                                       _plotter=plotter,
                                       name='Target Gaussians')

    print('DualTriplet loss training mode.')
    miner = miners.get_miner(config.miner,
                             config.hyperparameters.margin,
                             config.hyperparameters.people_per_batch,
                             plotter,
                             deadzone_ratio=config.hyperparameters.deadzone_ratio)
    miner.gmixture = gmixture

    loss = losses.DualtripletLoss(config.hyperparameters.margin,
                                  config.hyperparameters.lamda,
                                  plotter)

    model_trainer = trainer.Dualtriplet_Trainer(model,
                                                miner,
                                                loss,
                                                optimizer,
                                                scheduler,
                                                device,
                                                plotter,
                                                config.hyperparameters.margin,
                                                config.model.embedding_size,
                                                batch_size=config.hyperparameters.batch_size)

    if not os.path.isdir(model_dir):  # Create the model directory if it doesn't exist
        os.makedirs(model_dir)

    # Loop over epochs
    epoch = 0
    print('Training Launched.')
    while epoch < config.hyperparameters.n_epochs:

        # Validation
        for evaluator in evaluators_list:
            print('\nEvaluation on {}'.format(evaluator.test_name))
            evaluator.evaluate(model,
                               device,
                               plotter=plotter,
                               epoch=epoch)

        # Training
        print('\nExperimentation {}'.format(config.experiment))
        print('Train Epoch {}'.format(epoch))
        model_trainer.Train_Epoch(source_loader, target_loader, epoch)

        # Save model
        # if not (epoch + 1) % config.output.save_interval:
        #
        #     model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
        #     print('\nSave model at {}'.format(model_file_path))
        #     torch.save({'epoch': epoch,
        #                 'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
        #                 'optimizer_state_dict': optimizer.state_dict(),
        #                 'scheduler_state_dict': scheduler.state_dict(),
        #                 'embedding_size': config.model.embedding_size
        #                 }, model_file_path)

        epoch += 1

    model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
    print('\nSave model at {}'.format(model_file_path))
    torch.save({'epoch': epoch,
                'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'embedding_size': config.model.embedding_size
                }, model_file_path)
    print('Finish.')
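
The checkpoint written above carries optimizer and scheduler state as well as the weights, so a run can be resumed exactly where it stopped. The restore side, which appears commented out in some of these snippets, would look like:

checkpoint = torch.load(model_file_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
start_epoch = checkpoint['epoch'] + 1  # continue after the saved epoch
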
Example No. 12
def main(args):

    print('Feature extractor training.')
    print('CONFIGURATION:\t{}'.format(args.config))
    with open(args.config) as json_config_file:
        config = utils.AttrDict(json.load(json_config_file))

    # Set up output directory
    experiment_name = generate_experiment_name(config)
    model_dir = os.path.join(os.path.expanduser(config.output.output_dir), experiment_name)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    print('Model saved at {}'.format(model_dir))

    config_filename = utils.path_leaf(args.config)
    copyfile(args.config, os.path.join(model_dir, config_filename))

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")
    # device = torch.device("cpu")

    # Get dataloaders
    train_loader = dataloaders.get_traindataloaders(config.train_dataset,
                                                    config)
    evaluators_list = dataloaders.get_evaluators(config.evaluation_datasets,
                                                 config)

    # Set up training model
    print('Building training model')
    if config.model.checkpoint:
        checkpoint_path = config.model.checkpoint_path
    else:
        checkpoint_path = None
    model = models.load_model(config.model.model_arch,
                              device,
                              checkpoint_path=checkpoint_path,
                              embedding_size=config.model.embedding_size,
                              imgnet_pretrained=config.model.pretrained_imagenet)

    optimizer = optim.SGD(model.parameters(), lr=config.hyperparameters.learning_rate, momentum=0.9, nesterov=True, weight_decay=2e-4)

    # scheduler = lr_scheduler.StepLR(optimizer, 5, gamma=0.1)
    # scheduler = lr_scheduler.ExponentialLR(optimizer, config.hyperparameters.learning_rate_decay_factor)
    scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.hyperparameters.n_epochs, eta_min=1e-6)

    plotter = utils.VisdomPlotter(config.visdom.server, env_name=experiment_name, port=config.visdom.port)

    miner = miners.FunctionSemihardTripletSelector(config.hyperparameters.margin, plotter)

    loss = nn.TripletMarginLoss(config.hyperparameters.margin, swap=config.hyperparameters.triplet_swap)

    my_trainer = trainer.Triplet_Trainer(model,
                                         miner,
                                         loss,
                                         optimizer,
                                         scheduler,
                                         device,
                                         plotter,
                                         config.hyperparameters.margin,
                                         config.model.embedding_size,
                                         evaluation.pair_evaluate,
                                         batch_size=config.hyperparameters.batch_size)

    # Loop over epochs
    epoch = 0
    print('Training Launched.')
    while epoch < config.hyperparameters.n_epochs:

        # Validation
        for evaluator in evaluators_list:
            print('\nEvaluation on {}'.format(evaluator.test_name))
            evaluator.evaluate(model,
                               device,
                               plotter=plotter,
                               epoch=epoch)

        # Training
        print('\nTrain Epoch {}'.format(epoch))
        my_trainer.Train_Epoch(train_loader, epoch)

        # Save model
        if not (epoch + 1) % config.output.save_interval:
            model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
            print('\nSave model at {}'.format(model_file_path))

            torch.save({'epoch': epoch,
                        'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'scheduler_state_dict': scheduler.state_dict(),
                        'embedding_size': config.model.embedding_size,
                        }, model_file_path)

        epoch += 1

    # Final save.
    model_file_path = os.path.join(model_dir, 'model_{}.pth'.format(epoch))
    print('\nSave model at {}'.format(model_file_path))
    torch.save({'epoch': epoch,
                'model_state_dict': utils.state_dict_to_cpu(model.state_dict()),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
                'embedding_size': config.model.embedding_size,
                }, model_file_path)
    print('Finish.')

    return model
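
This example replaces the exponential decay used elsewhere with cosine annealing over the whole run (T_max equal to n_epochs, floor at eta_min=1e-6). A self-contained sketch of the schedule it produces:

import torch
from torch import optim
from torch.optim import lr_scheduler

opt = optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = lr_scheduler.CosineAnnealingLR(opt, T_max=10, eta_min=1e-6)
for epoch in range(10):
    opt.step()                         # stand-in for a real training epoch
    sched.step()
    print(epoch, sched.get_last_lr())  # decays along half a cosine from 0.1 toward 1e-6
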
Example No. 13
                    type=int,
                    help='Ratio of Train BS to test BS')

args = parser.parse_args()
device = torch.device('cuda')

# torch.manual_seed(0)
# torch.cuda.manual_seed(0)
# torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
# np.random.seed(0)
torch.backends.cudnn.benchmark = True

plotter = None
if args.plot:
    plotter = utils.VisdomPlotter(env_name=args.name, modality=args.modality)
    plotter.argsTile(args.__dict__)

# Ready the training and validation data
dataset = dataDef.genericDataset(args.modality)
trainSampler, validationSampler = dataDef.testTrainSplit(
    len(dataset), args.valRatio)
trainLoader = torch.utils.data.DataLoader(dataset,
                                          sampler=trainSampler,
                                          batch_size=args.batchSize,
                                          num_workers=8)
valLoader = torch.utils.data.DataLoader(dataset,
                                        sampler=validationSampler,
                                        batch_size=args.trainTestBS * args.batchSize,
                                        num_workers=8)