Example #1
0
def main():
    """Entry point: load config, build the TF model, and run training.

    Reads the experiment configuration from the ``--config`` CLI argument,
    prepares output/checkpoint directories and logging, then trains.
    Exits with status 1 if no usable config file is supplied.
    """
    # Fix seeds for reproducibility across TF and NumPy.
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        # Guard against process_config returning None (consistent with the
        # other entry points in this file).
        if config is None:
            raise ValueError("config could not be processed")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger (also snapshots the model/train sources into the log)
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir+'/',
                        filepath=os.path.abspath(__file__), package_files=[path1, path2])

    logger.info(config)

    # load data
    train_loader, test_loader = load_pytorch(config)

    # define computational graph
    sess = tf.Session()

    model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
    trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

    trainer.train()
def main():
    """Parse the config, construct an SDNet model, then train and/or validate it.

    Exits with status 1 when no usable config file is supplied via ``--config``.
    """

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        if config is None:
            raise Exception()
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # Ensure output locations exist before any logging/checkpointing.
    for out_dir in (config.summary_dir, config.checkpoint_dir):
        makedirs(out_dir)

    logger = get_logger('log', logpath=config.summary_dir, filepath=os.path.abspath(__file__))

    # Semi-supervised setup: separate labelled/unlabelled training streams.
    labelled_loader, unlabelled_loader, test_loader = load_pytorch(config)

    net = SDNet(config.image_size, config.num_anatomical_factors, config.num_modality_factors, config.num_classes)
    print(net)
    trainer = Trainer(net, labelled_loader, unlabelled_loader, test_loader, config, logger)

    if config.train:
        trainer.train()

    if config.validation:
        # Restore the saved weights, then run one evaluation pass.
        checkpoint_path = os.path.join(config.checkpoint_dir, 'model.pth')
        trainer.resume(checkpoint_path)
        trainer.test_epoch(debug=False)
Example #3
0
def main():
    """Entry point: dispatch to classification or regression training.

    Reads the experiment configuration from the ``--config`` CLI argument,
    prepares directories/logging, builds the model matching ``config.mode``,
    and trains it. Exits with status 1 when no usable config is supplied;
    raises NotImplementedError for an unknown mode.
    """
    # Fix seeds for reproducibility across TF and NumPy.
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        if config is None:
            raise Exception()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger (snapshots both task variants' model/train sources)
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'classification/model.py')
    path2 = os.path.join(path, 'classification/train.py')
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    logger = get_logger('log',
                        logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2, path3, path4])

    logger.info(config)

    # Define computational graph.
    sess = tf.Session()

    if config.mode == "classification":
        train_loader, test_loader = load_pytorch(config)

        model_ = ClassificationModel(config,
                                     _CLASSIFICATION_INPUT_DIM[config.dataset],
                                     len(train_loader.dataset),
                                     attack=False)
        trainer = ClassificationTrainer(sess, model_, train_loader,
                                        test_loader, config, logger)

    elif config.mode == "regression":
        train_loader, test_loader, std_train = generate_data_loader(config)
        # Trainer needs the training-targets std for de-normalising metrics.
        config.std_train = std_train

        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader,
                                    config, logger)

    else:
        print("Please choose either 'classification' or 'regression'.")
        raise NotImplementedError()

    # choose one of the following four functions
    # 1. train the model
    trainer.train()
Example #4
0
def main():
    """Parse the config, build a PolyGNN model, then train and/or validate it.

    Exits with status 1 when no usable config file is supplied via ``--config``.
    """

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        if config is None:
            raise Exception()
    except:
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # Create output locations up front.
    for out_dir in (config.summary_dir, config.checkpoint_dir):
        makedirs(out_dir)

    logger = get_logger('log',
                        logpath=config.summary_dir,
                        filepath=os.path.abspath(__file__))

    train_loader, test_loader = load_pytorch(config)

    net = PolyGNN(state_dim=128,
                  n_adj=4,
                  coarse_to_fine_steps=config.coarse_to_fine_steps,
                  get_point_annotation=False)

    trainer = Trainer(net, train_loader, test_loader, config, logger)

    if config.train:
        trainer.train()

    if config.validation:
        # Restore saved weights, then run a single plotting evaluation pass.
        checkpoint_path = os.path.join(config.checkpoint_dir, 'model.pth')
        trainer.resume(checkpoint_path)
        trainer.test_epoch(cur_epoch=999, plot=True)
Example #5
0
def gradient_check():
    """Benchmark `trainer.grad_check` across batch sizes, without and with
    preconditioning.

    Reads the experiment configuration from the ``--config`` CLI argument,
    then runs one sweep over all batch sizes for ``precon=False`` followed by
    one for ``precon=True``, printing per-run wall-clock times.
    Exits with status 1 when no usable config is supplied.
    """
    # Fix seeds for reproducibility across TF and NumPy.
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        # Guard against process_config returning None (consistent with the
        # other entry points in this file).
        if config is None:
            raise ValueError("config could not be processed")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir+'/',
                        filepath=os.path.abspath(__file__), package_files=[path1, path2])

    logger.info(config)

    batch_sizes = [1,4,16,32,64,128,256,512,1024]

    # The original code duplicated the whole sweep for precon=False/True;
    # run the shared helper once per setting instead.
    for precon in (False, True):
        _grad_check_sweep(config, logger, batch_sizes, precon)


def _grad_check_sweep(config, logger, batch_sizes, precon):
    """Run one gradient-check per batch size, resetting the TF graph between runs."""
    for bs in batch_sizes:
        start_time = time.time()
        print("processing batch size {}".format(bs))

        # load data
        train_loader, test_loader = load_pytorch(config)

        # define computational graph
        sess = tf.Session()

        model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
        trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)

        trainer.grad_check(sess, bs, precon)
        print('batch size {} takes {} secs to finish'.format(
            bs, time.time()-start_time))
        # Clear the graph so successive Sessions don't accumulate nodes.
        tf.reset_default_graph()
Example #6
0
else:
    states = torch.load(opt.ckpt)
    opt.log_dir = states['opt'].log_dir

os.makedirs('%s/gen_vis/' % opt.log_dir, exist_ok=True)

# tensorboard writer
tboard_dir = os.path.join(opt.log_dir, 'tboard')
try:
    writer = SummaryWriter(log_dir=tboard_dir)
except:
    writer = SummaryWriter(logdir=tboard_dir)

# setups starts here
# logger
logger = utils.get_logger(logpath=os.path.join(opt.log_dir, 'logs'),
                          filepath=__file__)
logger.info(opt)

# store cmd
cmd = utils.store_cmd(opt=opt)

# set seed
random.seed(opt.seed)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
logger.info("[*] Random Seed: %d" % opt.seed)
# setups ends here

# setup datasets
train_data, test_data = data_utils.load_dataset(opt)
train_generator = data_utils.get_data_generator(train_data,
Example #7
0
                    type=str,
                    default='',
                    help='path to model to evaluate')
# CNN backbone used for feature extraction during evaluation.
parser.add_argument('--cnn_model',
                    type=str,
                    default='resnet101',
                    help='resnet101, resnet152')
# Pickled training-run metadata ("infos") to restore options from.
parser.add_argument('--infos_path',
                    type=str,
                    default='',
                    help='path to infos to evaluate')
opts.add_eval_options(parser)

opt = parser.parse_args()

logger, log_dir = utils.get_logger(model_name=opt.id, log_path='eval_logs')

# Load infos saved at training time.
with open(opt.infos_path, 'rb') as f:
    infos = utils.pickle_load(f)

# Override and collect parameters:
# options in `replace` are taken from the current CLI invocation,
# those in `ignore` are dropped entirely.
replace = [
    'input_fc_dir', 'input_att_dir', 'input_box_dir', 'input_label_h5',
    'input_json', 'batch_size', 'id', 'num_cnn', 'use_mrc_feat', 'add_self',
    'frc_first'
]
ignore = ['start_from']

for k in vars(infos['opt']).keys():
    if k in replace:
Example #8
0
def main():
    """Entry point: dispatch to classification, segmentation, or regression.

    Reads the experiment configuration from the ``--config`` CLI argument,
    prepares directories/logging, builds the model for ``config.mode``, then
    trains and/or validates according to ``config.train``/``config.validation``.
    Exits with status 1 when no usable config is supplied; raises
    NotImplementedError for an unknown mode.
    """
    # Fix seeds for reproducibility across TF and NumPy.
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        if config is None:
            raise Exception()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger (snapshots task sources plus the config file itself)
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'classification/model.py')
    path2 = os.path.join(path, 'classification/train.py')
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    path5 = os.path.join(path, args.config)
    logger = get_logger('log',
                        logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2, path3, path4, path5])

    logger.info(config)

    # Define computational graph.
    sess = tf.Session()

    if config.mode == "classification":
        train_loader, test_loader = load_pytorch(config)

        model_ = ClassificationModel(config,
                                     _CLASSIFICATION_INPUT_DIM[config.dataset],
                                     len(train_loader.dataset), config.mode)
        trainer = ClassificationTrainer(sess, model_, train_loader,
                                        test_loader, config, logger)

    elif config.mode == "segmentation":
        train_loader, test_loader = load_pytorch(config)

        # Segmentation reuses the classification model with per-pixel outputs,
        # hence the image_size^2 scaling of the dataset-size argument.
        model_ = ClassificationModel(
            config, [config.image_size, config.image_size, 1],
            config.total_num_images * config.image_size * config.image_size,
            config.mode, config.num_classes)

        trainer = ClassificationTrainer(sess, model_, train_loader,
                                        test_loader, config, logger)

    elif config.mode == "regression":
        train_loader, test_loader, std_train = generate_data_loader(config)
        # Trainer needs the training-targets std for de-normalising metrics.
        config.std_train = std_train

        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader,
                                    config, logger)

    else:
        # Fixed message: 'segmentation' is also a supported mode above.
        print("Please choose one of 'classification', 'segmentation' "
              "or 'regression'.")
        raise NotImplementedError()

    if config.train:
        trainer.train()

    if config.validation:
        trainer.load_checkpoint(config.checkpoint)
        trainer.test_epoch_with_misc_metrics()
def main():
    """Entry point: run the regression experiment over multiple data splits.

    Reads the experiment configuration from the ``--config`` CLI argument,
    then trains ``n_runs`` independent models — each on a fresh data split
    seeded by the run index — and logs per-run and aggregate (mean / standard
    error) test RMSE and log-likelihood. Exits with status 1 when no usable
    config is supplied.
    """
    # Fix seeds for reproducibility across TF and NumPy.
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)

        if config is None:
            raise Exception()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed.
        print("Add a config file using \'--config file_name.json\'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    logger = get_logger('log',
                        logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path3, path4])

    logger.info(config)

    # Define computational graph
    rmse_results, ll_results = [], []
    n_runs = 10

    for i in range(1, n_runs + 1):
        sess = tf.Session()

        # Perform data splitting again with the provided seed.
        train_loader, test_loader, std_train = generate_data_loader(config,
                                                                    seed=i)
        # Trainer needs the training-targets std for de-normalising metrics.
        config.std_train = std_train

        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader,
                                    config, logger)

        trainer.train()

        rmse, ll = trainer.get_result()

        rmse_results.append(float(rmse))
        ll_results.append(float(ll))

        # Clear the graph so successive runs start from a fresh Session.
        tf.reset_default_graph()

    for i, (rmse_result, ll_result) in enumerate(zip(rmse_results,
                                                     ll_results)):
        logger.info("\n## RUN {}".format(i))
        logger.info('# Test rmse = {}'.format(rmse_result))
        logger.info('# Test log likelihood = {}'.format(ll_result))

    # Report mean and standard error (std / sqrt(n_runs)) across runs.
    logger.info("Results (mean/std. errors):")
    logger.info("Test rmse = {}/{}".format(np.mean(rmse_results),
                                           np.std(rmse_results) / n_runs**0.5))
    logger.info("Test log likelihood = {}/{}".format(
        np.mean(ll_results),
        np.std(ll_results) / n_runs**0.5))