Exemple #1
0
def main():
    """Entry point: parse the config, then either evaluate on the test set or train."""
    # Parse the run arguments and load the JSON configuration file.
    config = fetch_args()

    # Make sure the experiment output directories exist.
    create_dirs([config.tensorboard_log_dir, config.checkpoint_dir])

    print('Create the data generator.')
    train_data_loader = DataLoader(config, 'train')
    valid_data_loader = DataLoader(config, 'valid')

    print('Create the model.')
    model = Model(config)
    if config.pretrained_model_checkpoint is not None:
        # Warm-start from a previously saved checkpoint.
        model.load(config.pretrained_model_checkpoint)

    if config.evaluate:
        # Evaluation-only mode: score the test split and stop.
        print('Predicting on test set.')
        evaluator = Evaluator(model.model, DataLoader(config, 'test'), config)
        evaluator.evaluate()
        exit(0)

    print('Create the trainer')
    trainer = Trainer(model.model, train_data_loader, valid_data_loader, config)

    print('Start training the model.')
    trainer.train()
def main():
    """Build the TF session, data, model, logger and trainer, then train."""
    # capture the config path from the run arguments
    # then process the JSON config file
    try:
        args = get_args()
        config = process_config_file(args.config)
    # Fix: narrowed the original bare `except:` so KeyboardInterrupt/SystemExit
    # propagate, and report the underlying error (consistent with the other
    # entry points in this file).
    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # create the experiment dirs
    create_dirs([config.summary_dir, config.checkpoints_dir])
    # create tf session
    sess = tf.Session()
    # create data loader
    data = DataLoader(config)

    # create an instance of the model
    model = Model(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all the components to it
    trainer = Trainer(sess, model, data, logger, config)
    # load model if it exists
    model.load(sess)
    # train the model
    trainer.train()
Exemple #3
0
def eval():
    """Evaluate the configured model on the test set and print loss/CER.

    NOTE(review): the name shadows the builtin ``eval``; kept unchanged so
    existing callers keep working, but consider renaming at the call sites.
    """
    try:
        args = get_args()
        config = process_config(args.config)
    # Fix: narrowed the original bare `except:` so KeyboardInterrupt/SystemExit
    # propagate, and report the underlying error.
    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # Resolve the Model class for the configured architecture dynamically.
    model_types = import_module('models.' + config.architecture + '_model')
    Model = getattr(model_types, 'Model')

    # create tensorflow session
    sess = tf.Session()

    # data generator in evaluation mode, reading the test split
    data_loader = DataGenerator(config, eval_phase=True, eval_on_test_data=True)

    # create instance of the model you want
    model = Model(data_loader, config)

    # trainer restores the best checkpoint (load_best=True); no logger needed
    trainer = Trainer(sess, model, config, None, data_loader, load_best=True)

    # here we evaluate on the test dataset
    test_loss, test_cer = trainer.test(tqdm_enable=True)
    print('\nTest set Loss:', test_loss)
    print('Test set CER:', round(test_cer*100, 2), '%')
Exemple #4
0
def init() -> None:
    """
    Initialise every component required to train the model and start the run.
    """
    # merge CLI arguments over the static configuration (args win on clashes)
    args = get_args()
    static_config = process_config()
    config = {**static_config, **args}

    # model under training
    model = VGG16(config)

    # one shard loader per data split
    loaders = {mode: TFRecordShardLoader(config, mode=mode)
               for mode in ("train", "val", "test")}

    # the estimator-style trainer drives the whole run
    trainer = Trainer(config, model,
                      loaders["train"], loaders["val"], loaders["test"])
    trainer.run()
 def __init__(self, params, session, name="GAN", mode="train"):
     """Set up generator/discriminator templates, base-Trainer state and data layout."""
     # Variable-sharing templates scoped under "<name>/generator" and
     # "<name>/discriminator".
     gen_scope = name + '/generator'
     disc_scope = name + '/discriminator'
     self.generator = tf.make_template(gen_scope, Generator)
     self.discriminator = tf.make_template(disc_scope, Discriminator)
     Trainer.__init__(self, session, params, name=name, mode=mode)
     self.build_eval_graph()
     # Reorder axes NHWC -> NCHW for both splits
     # (assumes Trainer.__init__ populated train_X/test_X — TODO confirm).
     nchw = (0, 3, 1, 2)
     self.train_X = np.transpose(self.train_X, nchw)
     self.test_X = np.transpose(self.test_X, nchw)
def main():
    """Select the GPU from config, build TF session/data/model, and train."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # Fix: environment variable names are case-sensitive; the original
    # "CUDA_VISIBLE_devices" spelling is ignored by CUDA, so the GPU mask
    # was never actually applied.
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session restricted to the configured GPU
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpu
    sess = tf.Session(config=gpuconfig)
    # create your data generator
    data = DataGenerator(config)

    # create an instance of the model you want
    model = invariant_basic(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config)
    # load model if exists
    model.load(sess)
    # here you train your model
    trainer.train()
Exemple #7
0
def main():
    """Load the hard-coded JSON config, build TF components, and train.

    Fix: removed a leftover ``pdb.set_trace()`` debugger breakpoint that
    halted every run, plus the dead commented-out argument-parsing code.
    """
    # The config path is fixed rather than taken from run arguments.
    config = get_config_from_json('configs/config.json')

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
    sess = tf.Session()
    # create instance of the model you want
    model = Model(config)
    # create your data generator
    data = DataGenerator(config)
    # create tensorboard logger
    logger = Logger(sess, config)
    # create trainer and pass all previous components to it
    trainer = Trainer(sess, model, data, config, logger)

    # here you train your model
    trainer.train()
Exemple #8
0
def main():
    """Load the hard-coded JSON config, build TF components, run 150 epochs."""
    # Run arguments are intentionally not parsed here; the config path is fixed.
    config, _ = get_config_from_json('configs/config.json')

    # Session, model, data and trainer (no summary dirs or tensorboard logger
    # are created in this variant).
    sess = tf.Session()
    model = Model(config)
    data = DataGenerator(config)
    trainer = Trainer(sess, config, model, data)

    # Fixed-length training loop, reporting likelihood/diversity per epoch.
    for _epoch in range(150):
        _losses, _samples, likelihood, diversity = trainer.train_epoch()
        print(likelihood, diversity)
Exemple #9
0
 def __init__(self, args, worker_id, tasks, par_list):
     """Pin this worker process to a CUDA device and build its Trainer.

     Args:
         args: run arguments; ``args.device`` is set here as a side effect.
         worker_id: index of this worker, used for round-robin GPU assignment.
         tasks: shared task collection consumed by the worker.
         par_list: parameter list held by this worker.
     """
     super().__init__()
     self.tasks = tasks
     # Fix: use isinstance() instead of the `type(x) == list` anti-pattern
     # so list subclasses are handled as well.
     if isinstance(args.gpu_number, list):
         # round-robin GPU assignment across workers
         args.device = torch.device(
             'cuda', args.gpu_number[worker_id % len(args.gpu_number)])
     else:
         args.device = torch.device('cuda', args.gpu_number)
     self.trainer = Trainer(args, worker_id=worker_id)
     self.par_list = par_list
Exemple #10
0
def main(args):
    """Set up the experiment, wire model/data/optimizer/loggers together, and train.

    Args:
        args: parsed run arguments; fields read here include device, epoch,
            decay_step, gamma and log_period_as_iter.
    """
    # setup_experiments creates the export directory and may augment args
    export_root, args = setup_experiments(args)
    device = args.device
    model_checkpoint_path = os.path.join(export_root, 'models')

    dataloaders = dataloaders_factory(args)
    model = model_factory(args)

    # TensorBoard writer; closed once training finishes below
    writer = SummaryWriter(os.path.join(export_root, 'logs'))

    # training-time scalar graphs
    train_loggers = [
        MetricGraphPrinter(writer,
                           key='ce_loss',
                           graph_name='ce_loss',
                           group_name='Train'),
        MetricGraphPrinter(writer,
                           key='epoch',
                           graph_name='Epoch',
                           group_name='Train')
    ]
    # validation metric graphs plus checkpoint loggers (most recent model and
    # the best model by mean_iou)
    val_loggers = [
        MetricGraphPrinter(writer,
                           key='mean_iou',
                           graph_name='mIOU',
                           group_name='Validation'),
        MetricGraphPrinter(writer,
                           key='acc',
                           graph_name='Accuracy',
                           group_name='Validation'),
        RecentModelLogger(model_checkpoint_path),
        BestModelLogger(model_checkpoint_path, metric_key='mean_iou'),
    ]

    # criterion = nn.CrossEntropyLoss(ignore_index=IGNORE_LABEL, weight=torch.Tensor(CLASS_WEIGHT).to(device))
    criterion = nn.CrossEntropyLoss()
    optimizer = create_optimizer(model, args)
    # step decay of the learning rate every decay_step epochs
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.decay_step,
                                          gamma=args.gamma)

    # num_classes=42 is hard-coded — presumably the dataset's label count;
    # TODO confirm against the data loaders
    trainer = Trainer(model,
                      dataloaders,
                      optimizer,
                      criterion,
                      args.epoch,
                      args,
                      num_classes=42,
                      log_period_as_iter=args.log_period_as_iter,
                      train_loggers=train_loggers,
                      val_loggers=val_loggers,
                      lr_scheduler=scheduler,
                      device=device)
    trainer.train()
    writer.close()
Exemple #11
0
def train(cfg):
    """Train a model described by `cfg` and periodically evaluate it.

    Raises:
        KeyError: if cfg.OUTPUT_DIR already exists (refuses to overwrite a run).
    """
    # output — refuse to clobber a previous experiment
    output_dir = cfg.OUTPUT_DIR
    if os.path.exists(output_dir):
        raise KeyError("Existing path: ", output_dir)
    else:
        os.makedirs(output_dir)

    # snapshot the full config next to the run outputs for reproducibility
    with open(os.path.join(output_dir, 'config.yaml'), 'w') as f_out:
        print(cfg, file=f_out)

    # logger
    logger = make_logger("project", output_dir, 'log')

    # device — restrict visible GPUs before anything touches CUDA
    num_gpus = 0
    if cfg.DEVICE == 'cuda':
        os.environ['CUDA_VISIBLE_DEVICES'] = cfg.DEVICE_ID
        num_gpus = len(cfg.DEVICE_ID.split(','))
        logger.info("Using {} GPUs.\n".format(num_gpus))
    cudnn.benchmark = True
    device = torch.device(cfg.DEVICE)

    # data — query/gallery loaders suggest retrieval-style evaluation
    train_loader, query_loader, gallery_loader, num_classes = make_loader(cfg)

    # model — wrap in DataParallel only when more than one GPU is visible
    model = make_model(cfg, num_classes=num_classes)
    if num_gpus > 1:
        model = nn.DataParallel(model)

    # solver
    criterion = make_loss(cfg, num_classes)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_scheduler(cfg, optimizer)

    # do_train
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      criterion=criterion,
                      logger=logger,
                      scheduler=scheduler,
                      device=device)

    trainer.run(start_epoch=0,
                total_epoch=cfg.SOLVER.MAX_EPOCHS,
                train_loader=train_loader,
                query_loader=query_loader,
                gallery_loader=gallery_loader,
                print_freq=cfg.SOLVER.PRINT_FREQ,
                eval_period=cfg.SOLVER.EVAL_PERIOD,
                out_dir=output_dir)

    print('Done.')
def train_all():
    """Train every registered architecture once, in a fixed order."""
    print("Training all!")
    # Trainer is invoked for its side effects; the instances are discarded.
    for model in (DeepDog(), CNN(), CNNPool(), CNNDropout()):
        Trainer(model)
Exemple #13
0
def main(unused_argv):
    """Run the Aracati U-Net: evaluate, restore-and-train, or train from scratch."""
    tf.logging.set_verbosity(3)

    try:
        config = process_config()
    # Fix: the original bare `except: exit(0)` silently swallowed every error
    # (including KeyboardInterrupt) with no message; narrow it and report why.
    except Exception as e:
        print("invalid configuration: %s" % e)
        exit(0)

    create_dirs(config, [config.checkpoint_dir, config.evaluate_dir, config.presentation_dir, config.summary_dir])

    session = tf.Session()
    K.set_session(session)

    if config.mode == "evaluate":
        # Evaluation-only: no data generator or logger, just model + session.
        model = Unet(config, is_evaluating=True)
        trainer = Trainer(config, None, None, model, session)

        # Load every test input, then wrap each image as a batch of one.
        sat_data = [Aracati.load_data(file, is_grayscale=False) for file in
                    sorted(glob.glob("./datasets/aracati/test/input/*.png"))]
        sat_data = [sat_data[i:i+1] for i in range(len(sat_data))]

        model.load(session)
        trainer.evaluate_data(sat_data, Aracati.save_data)

    else:
        data = Aracati(config)
        model = Unet(config)
        logger = Logger(config, session)
        trainer = Trainer(config, data, logger, model, session)

        # "restore" continues training from the latest checkpoint.
        if config.mode == "restore":
            model.load(session)

        trainer.train()
Exemple #14
0
def main():
    """Train the RNN model, restore the best checkpoint, and report SO2 test error."""
    # %% directories and configs (Windows-style path concatenation, kept as-is)
    model_name = "RNN_points_256units_residual_preFCNN_GRID"
    SAVEDIR = os.getcwd() + "\\TEST\\" + model_name + "\\saved_model\\"
    LOGDIR = os.getcwd() + "\\TEST\\" + model_name + "\\"

    data_opts = cfg.default_data_opts()
    model_config = cfg.default_model_config(data_opts, SAVEDIR)
    trainer_cfg = cfg.default_trainer_config(LOGDIR)
    # %% Load training and eval data
    train_path = os.getcwd() + "\\DATA\\Training.mat"
    test_path = os.getcwd() + "\\DATA\\Testing.mat"
    # Fix: removed a stray trailing `""` literal (implicit empty-string
    # concatenation — an obvious typo).
    umx_path = os.getcwd() + "\\DATA\\UMX_spectra.mat"

    training_data = DataHandler(data_opts, trainer_cfg, train_path, umx_path, train_flag=True)
    test_val_data = DataHandler(data_opts, trainer_cfg, test_path, umx_path, train_flag=False, split=0.1)

    # %% TRAIN AND VALIDATE ======================================================
    sess = tf.Session()
    model = RNN_Model(model_config)

    trainer = Trainer(sess, model, training_data, test_val_data, trainer_cfg)

    trainer.train()

    # %% RESTORE THE BEST MODEL AND TEST
    model.load(sess)

    features_batch, labels_batch, sp_batch, so2_batch = test_val_data.get_test_data()

    input_dict = {model.NN_input_features: features_batch,
                  model.NN_input_labels: labels_batch,
                  model.NN_input_train: False}

    eig_out_dict = sess.run(model.predictions, feed_dict=input_dict)

    # flatten the network output and the reference data to matching shapes
    eig_out = eig_out_dict['eigenparams']
    out_flat = ut.flatten_data(eig_out, model_config)
    spectra_flat = ut.flatten_data(sp_batch, model_config)
    so2_labels_flat = ut.flatten_data(so2_batch, model_config)

    # convert eigenparameters to SO2 and compute absolute-error statistics
    so2 = ut.eigenparams_to_so2(spectra_flat, out_flat, test_val_data.umx_sp)
    abs_err_so2 = np.abs(so2 - so2_labels_flat)
    mean_abs_err_so2 = np.mean(abs_err_so2)
    std_abs_err_so2 = np.std(abs_err_so2)

    print('mean so2 TEST error: %f' % mean_abs_err_so2)
Exemple #15
0
 def __init__(self, args, worker_id, iter, best_iter, best_score,
              population, finish_tasks):
     """PBT worker: record shared state, pin to a CUDA device, build a Trainer.

     Args:
         args: run arguments; ``args.device`` is set here as a side effect.
         worker_id: index of this worker, used for round-robin GPU assignment.
         iter: shared iteration counter.
         best_iter: shared best-iteration record.
         best_score: shared best-score record.
         population: shared population of candidates.
         finish_tasks: shared collection of completed tasks.
     """
     super().__init__()
     self.iter = iter
     self.best_iter = best_iter
     self.verbose = args.verbose
     self.best_score = best_score
     self.population = population
     self.finish_tasks = finish_tasks
     # Fix: use isinstance() instead of the `type(x) == list` anti-pattern
     # so list subclasses are handled as well.
     if isinstance(args.gpu_number, list):
         # round-robin GPU assignment across workers
         args.device = torch.device(
             'cuda', args.gpu_number[worker_id % len(args.gpu_number)])
     else:
         args.device = torch.device('cuda', args.gpu_number)
     self.trainer = Trainer(args, worker_id=worker_id)
Exemple #16
0
def main():
    """Resolve the configured architecture's Model class, set up dirs, and train."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)
    # Fix: narrowed the original bare `except:` so KeyboardInterrupt/SystemExit
    # propagate, and report the underlying error.
    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # Resolve the Model class for the configured architecture dynamically
    model_types = import_module('models.' + config.architecture + '_model')
    Model = getattr(model_types, 'Model')

    # create the experiments dirs; wipe previous outputs when restarting
    if config.train_from_start:
        for stale_dir in (config.summary_dir, config.checkpoint_dir,
                          config.best_model_dir):
            if os.path.exists(stale_dir):
                shutil.rmtree(stale_dir)
    create_dirs(
        [config.summary_dir, config.checkpoint_dir, config.best_model_dir])

    # create tensorflow session
    sess = tf.Session()

    # create your data generator
    data_loader = DataGenerator(config)

    # create instance of the model you want
    model = Model(data_loader, config)

    # tensorboard logger recording per-epoch train/test loss and CER scalars
    logger = DefinedSummarizer(sess,
                               summary_dir=config.summary_dir,
                               scalar_tags=[
                                   'train/loss_per_epoch',
                                   'train/cer_per_epoch',
                                   'test/loss_per_epoch', 'test/cer_per_epoch'
                               ])

    # create trainer and pass all previous components to it
    trainer = Trainer(sess, model, config, logger, data_loader)

    # here we train our model
    trainer.train()
Exemple #17
0
def main(args):
    """Build model/loss/data from CLI args and run training.

    NOTE(review): the ``eval()`` calls below construct class names from
    ``args.arch`` / ``args.dataset``; this executes arbitrary expressions if
    those arguments are untrusted — consider an explicit name-to-class mapping
    instead.
    """
    logger = Logger()
    if args.dataset != 'func':
        # classification path: cross-entropy + accuracy metric
        loss = cross_entropy_loss
        metrics = [accuracy]
        if args.arch[:6] == 'deeper':
            # "deeperN" encodes a depth suffix after the literal "deeper"
            model = eval(args.arch[:6].title() + args.dataset.title() + 'CNN')(
                int(args.arch[6:]))
            identifier = type(
                model).__name__ + args.arch[6:] + '_' + args.dataset + '_'
        else:
            model = eval(args.arch.title() + args.dataset.title() + 'CNN')()
            identifier = type(model).__name__ + '_' + args.dataset + '_'
        data_loader = eval(args.dataset.title() + 'Loader')(args.batch_size,
                                                            args.rand_label,
                                                            args.noise)
    else:
        # function-regression path: MSE on samples of a target function
        loss = mse_loss
        metrics = []
        model = eval(args.arch.title() + 'FC')()
        data_loader = FunctionDataLoader(args.target_func,
                                         batch_size=args.batch_size,
                                         n_sample=1024,
                                         x_range=(0, 1))
        identifier = type(model).__name__ + '_' + args.target_func + '_'

    model.summary()
    optimizer = optim.Adam(model.parameters())
    # carve a validation split off the training loader
    data_loader, valid_data_loader = split_validation(data_loader,
                                                      args.validation_split)
    trainer = Trainer(model,
                      loss,
                      metrics,
                      data_loader=data_loader,
                      valid_data_loader=valid_data_loader,
                      optimizer=optimizer,
                      epochs=args.epochs,
                      logger=logger,
                      save_dir=args.save_dir,
                      save_freq=args.save_freq,
                      resume=args.resume,
                      verbosity=args.verbosity,
                      identifier=identifier,
                      with_cuda=not args.no_cuda,
                      save_grad=args.save_grad)
    trainer.train()
    print(logger)
def main():
    """Grid-search learning rate and decay for the invariant_basic model."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # Fix: environment variable names are case-sensitive; the original
    # "CUDA_VISIBLE_devices" spelling is ignored by CUDA, so the GPU mask
    # was never actually applied.
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf
    import numpy as np
    # fixed seeds for reproducible runs
    tf.set_random_seed(100)
    np.random.seed(100)
    base_summary_folder = config.summary_dir
    base_exp_name = config.exp_name
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # sweep lr over 8 octaves and decay over four values; each combination
    # gets its own summary sub-directory
    for lr in [0.00008 * (2**i) for i in range(8)]:
        for decay in [0.6, 0.7, 0.8, 0.9]:
            config.learning_rate = lr
            config.decay_rate = decay
            config.exp_name = base_exp_name + " lr={0}_decay={1}".format(
                lr, decay)
            curr_dir = os.path.join(base_summary_folder,
                                    "lr={0}_decay={1}".format(lr, decay))
            config.summary_dir = curr_dir
            create_dirs([curr_dir])
            # create your data generator
            data = DataGenerator(config)
            gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                                       log_device_placement=False)
            gpuconfig.gpu_options.visible_device_list = config.gpu
            sess = tf.Session(config=gpuconfig)
            # create an instance of the model you want
            model = invariant_basic(config, data)
            # create trainer and pass all the previous components to it
            trainer = Trainer(sess, model, data, config)
            # here you train your model
            acc, loss = trainer.train()
            # reset the graph between runs so variables don't accumulate
            sess.close()
            tf.reset_default_graph()

    doc_utils.summary_10fold_results(config.summary_dir)
Exemple #19
0
def main():
    """Torch variant of the training entry point (TF session code kept commented)."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # restrict CUDA to the configured GPU before importing torch
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
#     import tensorflow as tf
    import torch

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # create tensorflow session
#     gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
#     gpuconfig.gpu_options.visible_device_list = config.gpus_list
#     gpuconfig.gpu_options.allow_growth = True
#     sess = tf.Session(config=gpuconfig)
    if config.cuda:
        print(f'Using GPU : {torch.cuda.get_device_name(int(config.gpu))}')
    else:
        print(f'Using CPU')
    # create your data generator
    data = DataGenerator(config)
#     data = torch.from_numpy(data)

    # create an instance of the model you want
    model = invariant_basic(config, data)
    if config.cuda:
        model = model.cuda()

    # debug aid: report which device each parameter ended up on
    for name, param in model.named_parameters():
#         if param.device.type != 'cuda':
        print(f'{name}, device type {param.device.type}')

    # create trainer and pass all the previous components to it
#     trainer = Trainer(sess, model, data, config)
    trainer = Trainer(model, data, config)
    # load model if exists
#     model.load(sess)
    # here you train your model
    trainer.train()
def main():
    """Train invariant_basic on QM9, then evaluate the restored best model."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config, dataset_name='QM9')

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # restrict CUDA to the configured GPU before importing tensorflow
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf
    import numpy as np
    # fixed seeds for reproducible runs
    tf.set_random_seed(100)
    np.random.seed(100)
    print("lr = {0}".format(config.hyperparams.learning_rate))
    print("decay = {0}".format(config.hyperparams.decay_rate))
    if config.target_param is not False:  # (0 == False) while (0 is not False)
        print("target parameter: {0}".format(config.target_param))
    print(config.architecture)
    # create the experiments dirs and record the config used for this run
    create_dirs([config.summary_dir, config.checkpoint_dir])
    doc_utils.doc_used_config(config)

    # create your data generator
    data = DataGenerator(config)
    gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)
    gpuconfig.gpu_options.visible_device_list = config.gpus_list
    gpuconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=gpuconfig)
    # create an instance of the model you want
    model = invariant_basic(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(sess, model, data, config)
    # here you train your model
    trainer.train()
    # test model, restore best model
    test_dists, test_loss = trainer.test(load_best_model=True)
    sess.close()
    tf.reset_default_graph()

    # record the final test metrics alongside the summaries
    doc_utils.summary_qm9_results(config.summary_dir, test_dists, test_loss,
                                  trainer.best_epoch)
Exemple #21
0
def main():
    """Run repeated 10-fold cross-validation experiments with TensorFlow."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config)

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # restrict CUDA to the configured GPU before importing tensorflow
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
    import tensorflow as tf
    import numpy as np
    # fixed seeds for reproducible runs
    tf.set_random_seed(100)
    np.random.seed(100)
    print("lr = {0}".format(config.learning_rate))
    print("decay = {0}".format(config.decay_rate))
    print(config.architecture)
    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir])
    # num_exp independent repetitions of 10-fold cross-validation
    for exp in range(1, config.num_exp + 1):
        for fold in range(1, 11):
            print("Experiment num = {0}\nFold num = {1}".format(exp, fold))
            # create your data generator for this fold
            config.num_fold = fold
            data = DataGenerator(config)
            gpuconfig = tf.ConfigProto(allow_soft_placement=True,
                                       log_device_placement=False)
            gpuconfig.gpu_options.visible_device_list = config.gpus_list
            gpuconfig.gpu_options.allow_growth = True
            sess = tf.Session(config=gpuconfig)
            # create an instance of the model you want
            model = invariant_basic(config, data)
            # create trainer and pass all the previous components to it
            trainer = Trainer(sess, model, data, config)
            # here you train your model
            acc, loss = trainer.train()
            doc_utils.doc_results(acc, loss, exp, fold, config.summary_dir)
            # reset the graph between folds so variables don't accumulate
            sess.close()
            tf.reset_default_graph()

    doc_utils.summary_10fold_results(config.summary_dir)
 def __init__(self,
              sess,
              model,
              wgan,
              params,
              name="classifier",
              mode="train"):
     '''
     Initialise a classifier trainer that holds an adversarial WGAN trainer.

     Args:
         sess: tf.Session().
         model: function that takes the input placeholder, the is_training
             placeholder and the params dictionary.
         wgan: trainer for the adversarial generator.
         params: dictionary of hyperparameters for the model and training.
         name: scope/checkpoint name for this trainer.
         mode: "train" or an evaluation mode, forwarded to the Trainer base.
     '''
     self.model = model
     self.gan = wgan
     Trainer.__init__(self, sess, params, name, mode)
     # adversarial artifacts are stored under <checkpoint_dir>/adversarial
     self.adversarial_dir = os.path.join(self.checkpoint_dir, "adversarial")
     check_folder(self.adversarial_dir)
Exemple #23
0
def main(args):
    """Dispatch to PBT, classical ML (SVM/RF), plain training, or evaluation."""
    if args.pbt_training:
        # Train using the population-based training algorithm
        Pbt_trainer(args)
    elif args.SVM:
        # Train a support vector machine
        SVM(args)
    elif args.RF:
        # Train a random forest
        RF(args)
    elif args.is_train:
        # Standard single-model training
        Trainer(args).train()
    else:
        # Load the best performing model and evaluate it on the test set
        Eval_obj(args).demo()
Exemple #24
0
def main():
    """Load config, build the Biaxial model pipeline, and train."""
    try:
        args = get_args()
        config = process_config(args.config)

    # Fix: narrowed the original bare `except:` and report the cause.
    except Exception as e:
        print("Missing or invalid arguments %s" % e)
        exit(0)

    create_dirs([config.summary_dir, config.checkpoint_dir])
    # Fix: `sess` was commented out but still used below (Logger, Trainer,
    # model.load), which raised NameError at runtime; restore the session.
    sess = tf.Session()
    print("Loading dataset")
    data = DataLoader(config, args.preprocess)
    print("Finished loading dataset")
    # prime the loader's first batch before building the trainer
    data.next_batch()
    model = Biaxial(config)
    logger = Logger(sess, config)
    trainer = Trainer(sess, model, data, config, logger)
    # restore a checkpoint if one exists
    model.load(sess)
    trainer.train()
 def __init__(self,
              sess,
              model,
              generator,
              params,
              name="classifier",
              mode="train"):
     '''
     Initialise a classifier trainer with an adversarial example generator.

     Args:
         sess: tf.Session().
         model: function that takes the input placeholder, the is_training
             placeholder and the params dictionary, and returns a tensor
             containing the logits.
         generator: trainer for the adversarial generator; must not be None
             when adversarial_mode == "gan".
         params: dictionary of hyperparameters for the model and training.
         name: scope/checkpoint name for this trainer.
         mode: "train" or an evaluation mode, forwarded to the Trainer base.
     '''
     self.model = model
     self.generator = generator
     Trainer.__init__(self, sess, params, name, mode)
     # adversarial artifacts are stored under <checkpoint_dir>/adversarial
     self.adversarial_dir = os.path.join(self.checkpoint_dir, "adversarial")
     check_folder(self.adversarial_dir)
Exemple #26
0
def build_model_and_trainer(config, data_loader):
    """Build the backbone + full model and a Trainer wired to a data loader.

    Returns:
        (backbone, full_model, trainer) tuple.

    NOTE(review): the `data_loader` argument is immediately overwritten by a
    fresh `Dataloader(config=config)` below, so the passed-in loader is never
    used — confirm whether that shadowing is intentional (the TODO suggests
    this function is unfinished).
    """
    network_builder = get_network_builder(config=config)
    backbone = network_builder.define_backbone(model_name="backbone")
    full_model = network_builder.build_full(backbone=backbone,
                                            model_name="full")
    # TODO: Finish implementing the trainer
    data_loader = Dataloader(config=config)
    trainer = Trainer(data_loader=data_loader,
                      config=config,
                      backbone=backbone,
                      full=full_model)
    return backbone, full_model, trainer
Exemple #27
0
def train(args):
    """Train a DeepLab model described by the YAML config at ``args.conf``."""
    with open(args.conf) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # Only use CUDA when it is both requested and actually available.
    config['network']['use_cuda'] = config['network'][
        'use_cuda'] and torch.cuda.is_available()
    config['checkname'] = 'deeplab-' + str(config['network']['backbone'])

    trainer = Trainer(config)

    training_cfg = trainer.config['training']
    print('Starting Epoch:', training_cfg['start_epoch'])
    print('Total Epoches:', training_cfg['epochs'])

    for epoch in range(training_cfg['start_epoch'], training_cfg['epochs']):
        trainer.training(epoch)
        # Validate on the last epoch of every val_interval-sized window.
        val_interval = config['training']['val_interval']
        if not training_cfg['no_val'] and epoch % val_interval == val_interval - 1:
            trainer.validation(epoch)

    trainer.writer.close()
def main():
    """Run repeated 10-fold cross-validation experiments with PyTorch."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config, args.dataset_name)

    except Exception as e:
        print("missing or invalid arguments {}".format(e))
        exit(0)

    # os.environ['CUDA_LAUNCH_BLOCKING'] = "1"  # TODO uncomment only for CUDA error debugging
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu

    # fixed seeds for reproducible runs
    torch.manual_seed(100)
    np.random.seed(100)
    # torch.backends.cudnn.deterministic = True  # can impact performance
    # torch.backends.cudnn.benchmark = False  # can impact performance

    print("lr = {0}".format(config.hyperparams.learning_rate))
    print("decay = {0}".format(config.hyperparams.decay_rate))
    print(config.architecture)
    # create the experiments dirs and record the config used for this run
    create_dirs([config.summary_dir, config.checkpoint_dir])
    doc_utils.doc_used_config(config)
    # num_exp independent repetitions of 10-fold cross-validation
    for exp in range(1, config.num_exp + 1):
        for fold in range(1, 11):
            print("Experiment num = {0}\nFold num = {1}".format(exp, fold))
            # create your data generator for this fold
            config.num_fold = fold
            data = DataGenerator(config)
            # create an instance of the model you want
            model_wrapper = ModelWrapper(config, data)
            # create trainer and pass all the previous components to it
            trainer = Trainer(model_wrapper, data, config)
            # here you train your model
            trainer.train()

    doc_utils.summary_10fold_results(config.summary_dir)
Exemple #29
0
def main():
    """Train the PyTorch model on QM9, then evaluate the restored best model."""
    # capture the config path from the run arguments
    # then process the json configuration file
    try:
        args = get_args()
        config = process_config(args.config, dataset_name='QM9')

    except Exception as e:
        print("missing or invalid arguments %s" % e)
        exit(0)

    # restrict CUDA to the configured GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu

    # fixed seeds for reproducible runs
    torch.manual_seed(100)
    np.random.seed(100)
    # torch.backends.cudnn.deterministic = True  # can impact performance
    # torch.backends.cudnn.benchmark = False  # can impact performance

    print("lr = {0}".format(config.hyperparams.learning_rate))
    print("decay = {0}".format(config.hyperparams.decay_rate))
    if config.target_param is not False:  # (0 == False) while (0 is not False)
        print("target parameter: {0}".format(config.target_param))
    print(config.architecture)
    # create the experiments dirs and record the config used for this run
    create_dirs([config.summary_dir, config.checkpoint_dir])
    doc_utils.doc_used_config(config)

    data = DataGenerator(config)
    # create an instance of the model you want
    model_wrapper = ModelWrapper(config, data)
    # create trainer and pass all the previous components to it
    trainer = Trainer(model_wrapper, data, config)
    # here you train your model
    trainer.train()
    # test model, restore best model
    test_dists, test_loss = trainer.test(load_best_model=True)

    # record the final test metrics alongside the summaries
    doc_utils.summary_qm9_results(config.summary_dir, test_dists, test_loss,
                                  trainer.best_epoch)
def main():
    """CLI entry: train every model with --all, or one selected with -m/--model."""
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", dest="model")
    parser.add_argument("-a", "--all", action="store_true", dest="all")
    args = parser.parse_args()
    print(args)

    if args.all:
        # NOTE(review): execution deliberately (?) continues below even after
        # train_all() — confirm whether an early return was intended here.
        train_all()

    if args.model:
        model = retrieve_option_model(args.model)
    else:
        print("No Model specified!")
        model = retrieve_option_model("")
    Trainer(model)