def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2])
    logger.info(config)

    # load data
    train_loader, test_loader = load_pytorch(config)

    # define computational graph
    sess = tf.Session()
    model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
    trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)
    trainer.train()
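# The helpers used above (get_args, process_config, makedirs, get_logger) are not
# shown in this file. As an illustration only, here is a minimal sketch of what
# get_args and process_config are assumed to do: parse the --config flag and expose
# the JSON file's keys as attributes. Names and details below are assumptions, not
# the repository's actual implementation.
import argparse
import json
from types import SimpleNamespace


def get_args():
    # Parse the single --config command-line flag used by main().
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', required=True, help='path to a JSON config file')
    return parser.parse_args()


def process_config(config_path):
    # Load the JSON file and expose its keys as attributes (config.dataset, ...).
    with open(config_path) as f:
        return SimpleNamespace(**json.load(f))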
def main():
    config = None
    try:
        args = get_args()
        config = process_config(args.config)
        if config is None:
            raise Exception()
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    logger = get_logger('log', logpath=config.summary_dir,
                        filepath=os.path.abspath(__file__))

    # load data
    train_labelled_data_loader, train_unlabelled_data_loader, test_loader = load_pytorch(config)

    # build the model
    model = SDNet(config.image_size, config.num_anatomical_factors,
                  config.num_modality_factors, config.num_classes)
    print(model)

    trainer = Trainer(model, train_labelled_data_loader, train_unlabelled_data_loader,
                      test_loader, config, logger)

    # train and/or evaluate, depending on the config flags
    if config.train:
        trainer.train()
    if config.validation:
        trainer.resume(os.path.join(config.checkpoint_dir, 'model.pth'))
        trainer.test_epoch(debug=False)
def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)
        if config is None:
            raise Exception()
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'classification/model.py')
    path2 = os.path.join(path, 'classification/train.py')
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2, path3, path4])
    logger.info(config)

    # Define computational graph.
    sess = tf.Session()
    if config.mode == "classification":
        train_loader, test_loader = load_pytorch(config)
        model_ = ClassificationModel(config, _CLASSIFICATION_INPUT_DIM[config.dataset],
                                     len(train_loader.dataset), attack=False)
        trainer = ClassificationTrainer(sess, model_, train_loader, test_loader, config, logger)
    elif config.mode == "regression":
        train_loader, test_loader, std_train = generate_data_loader(config)
        config.std_train = std_train
        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader, config, logger)
    else:
        print("Please choose either 'classification' or 'regression'.")
        raise NotImplementedError()

    # choose one of the following four functions
    # 1. train the model
    trainer.train()
def main():
    config = None
    try:
        args = get_args()
        config = process_config(args.config)
        if config is None:
            raise Exception()
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    logger = get_logger('log', logpath=config.summary_dir,
                        filepath=os.path.abspath(__file__))

    # load data
    train_loader, test_loader = load_pytorch(config)

    # build the model
    model = PolyGNN(state_dim=128,
                    n_adj=4,
                    coarse_to_fine_steps=config.coarse_to_fine_steps,
                    get_point_annotation=False)

    trainer = Trainer(model, train_loader, test_loader, config, logger)

    # train and/or evaluate, depending on the config flags
    if config.train:
        trainer.train()
    if config.validation:
        trainer.resume(os.path.join(config.checkpoint_dir, 'model.pth'))
        trainer.test_epoch(cur_epoch=999, plot=True)
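# trainer.resume() above restores weights from 'model.pth' before evaluation. Its
# implementation is not shown here; the snippet below is only a sketch of the usual
# PyTorch pattern it is assumed to follow (torch.save of a state_dict during training,
# torch.load plus load_state_dict on resume). Function and key names are assumptions.
import torch


def save_checkpoint(model, checkpoint_path):
    # Persist only the model weights (state_dict), the lighter-weight convention.
    torch.save({'model_state_dict': model.state_dict()}, checkpoint_path)


def resume(model, checkpoint_path, device='cpu'):
    # Restore the saved weights into an already-constructed model instance.
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])
    model.eval()  # evaluation mode for test_epoch()
    return model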
def gradient_check():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    try:
        args = get_args()
        config = process_config(args.config)
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'core/model.py')
    path2 = os.path.join(path, 'core/train.py')
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2])
    logger.info(config)

    batch_sizes = [1, 4, 16, 32, 64, 128, 256, 512, 1024]

    # Run the gradient check over all batch sizes, first without and then with
    # preconditioning, rebuilding the graph for each configuration.
    for precon in (False, True):
        for bs in batch_sizes:
            start_time = time.time()
            print("processing batch size {}".format(bs))

            # load data
            train_loader, test_loader = load_pytorch(config)

            # define computational graph
            sess = tf.Session()
            model_ = Model(config, _INPUT_DIM[config.dataset], len(train_loader.dataset))
            trainer = Trainer(sess, model_, train_loader, test_loader, config, logger)
            trainer.grad_check(sess, bs, precon)

            print('batch size {} takes {} secs to finish'.format(bs, time.time() - start_time))
            tf.reset_default_graph()
def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)
        if config is None:
            raise Exception()
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path1 = os.path.join(path, 'classification/model.py')
    path2 = os.path.join(path, 'classification/train.py')
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    path5 = os.path.join(path, args.config)
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path1, path2, path3, path4, path5])
    logger.info(config)

    # Define computational graph.
    sess = tf.Session()
    if config.mode == "classification":
        train_loader, test_loader = load_pytorch(config)
        model_ = ClassificationModel(config, _CLASSIFICATION_INPUT_DIM[config.dataset],
                                     len(train_loader.dataset), config.mode)
        trainer = ClassificationTrainer(sess, model_, train_loader, test_loader, config, logger)
    elif config.mode == "segmentation":
        train_loader, test_loader = load_pytorch(config)
        model_ = ClassificationModel(
            config,
            [config.image_size, config.image_size, 1],
            config.total_num_images * config.image_size * config.image_size,
            config.mode,
            config.num_classes)
        trainer = ClassificationTrainer(sess, model_, train_loader, test_loader, config, logger)
    elif config.mode == "regression":
        train_loader, test_loader, std_train = generate_data_loader(config)
        config.std_train = std_train
        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader, config, logger)
    else:
        print("Please choose one of 'classification', 'segmentation' or 'regression'.")
        raise NotImplementedError()

    # train and/or evaluate, depending on the config flags
    if config.train:
        trainer.train()
    if config.validation:
        trainer.load_checkpoint(config.checkpoint)
        trainer.test_epoch_with_misc_metrics()
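# For reference, these are the config keys that main() above reads in the
# segmentation branch. The values are placeholders chosen for illustration, not
# taken from the repository; process_config is assumed to map a JSON file with
# these keys onto an attribute-accessible object.
example_segmentation_config = {
    "mode": "segmentation",        # "classification" | "segmentation" | "regression"
    "dataset": "example_dataset",  # placeholder name
    "image_size": 128,             # images are treated as image_size x image_size x 1
    "total_num_images": 1000,
    "num_classes": 2,
    "train": True,                 # run trainer.train()
    "validation": True,            # load config.checkpoint and run the test epoch
    "checkpoint": "experiments/example/checkpoint/model.ckpt",  # placeholder path
    "summary_dir": "experiments/example/summary",
    "checkpoint_dir": "experiments/example/checkpoint",
}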
def main():
    tf.set_random_seed(1231)
    np.random.seed(1231)

    config = None
    try:
        args = get_args()
        config = process_config(args.config)
        if config is None:
            raise Exception()
    except:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    makedirs(config.summary_dir)
    makedirs(config.checkpoint_dir)

    # set logger
    path = os.path.dirname(os.path.abspath(__file__))
    path3 = os.path.join(path, 'regression/model.py')
    path4 = os.path.join(path, 'regression/train.py')
    logger = get_logger('log', logpath=config.summary_dir + '/',
                        filepath=os.path.abspath(__file__),
                        package_files=[path3, path4])
    logger.info(config)

    # Define computational graph
    rmse_results, ll_results = [], []
    n_runs = 10
    for i in range(1, n_runs + 1):
        sess = tf.Session()

        # Perform data splitting again with the provided seed.
        train_loader, test_loader, std_train = generate_data_loader(config, seed=i)
        config.std_train = std_train

        model_ = RegressionModel(config, _REGRESSION_INPUT_DIM[config.dataset],
                                 len(train_loader.dataset))
        trainer = RegressionTrainer(sess, model_, train_loader, test_loader, config, logger)
        trainer.train()

        rmse, ll = trainer.get_result()
        rmse_results.append(float(rmse))
        ll_results.append(float(ll))
        tf.reset_default_graph()

    # Report per-run results (run indices match the seeds used above).
    for i, (rmse_result, ll_result) in enumerate(zip(rmse_results, ll_results), start=1):
        logger.info("\n## RUN {}".format(i))
        logger.info('# Test rmse = {}'.format(rmse_result))
        logger.info('# Test log likelihood = {}'.format(ll_result))

    # Report mean and standard error over the n_runs runs.
    logger.info("Results (mean/std. errors):")
    logger.info("Test rmse = {}/{}".format(
        np.mean(rmse_results), np.std(rmse_results) / n_runs**0.5))
    logger.info("Test log likelihood = {}/{}".format(
        np.mean(ll_results), np.std(ll_results) / n_runs**0.5))
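# How these scripts are presumably invoked: the standard entry-point guard plus the
# --config flag expected by get_args(). The config file name below is illustrative.
#
#     python main.py --config configs/regression.json
#
if __name__ == '__main__':
    main()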