def main(_):
    """Entry point: dispatch on ``args.mode`` to train, test, or predict.

    Relies on module-level names defined elsewhere in this file:
    ``args``, ``model_configs``, ``MyModel``, ``load_data``,
    ``load_testing_images``, ``tf``, and ``np``.
    """
    session = tf.compat.v1.Session()
    model = MyModel(session, model_configs)

    if args.mode == "train":
        # Only the training split is needed here; test split is discarded.
        x_train, y_train, _, _ = load_data(args.data_dir)
        model.train(x_train, y_train, 200)
    elif args.mode == "test":
        # Testing on public testing dataset
        _, _, x_test, y_test = load_data(args.data_dir)
        model.evaluate(x_test, y_test)
    elif args.mode == "predict":
        # Predicting and storing results on private testing dataset
        x_test = load_testing_images(args.data_dir)
        predictions = model.predict_prob(x_test)
        np.save("../predictions.npy", predictions)
# PyTorch-based driver variant (train / test / predict dispatch).
# NOTE(review): this line is truncated — it ends with a dangling
# `elif args.mode == 'predict':` whose body is not visible here, so the
# statement is syntactically incomplete as shown; confirm against the full file.
# NOTE(review): `raise('No Checkpoint file specified! ...')` raises a plain
# string, which is itself a TypeError at runtime — should be e.g.
# `raise ValueError(...)`.
# NOTE(review): the test-results format string contains the typo "Correctt".
if __name__ == '__main__': model = MyModel(model_configs) if args.mode == 'train': print('----- training mode ----') train,test,orig_trainset = load_data(args.data_dir,train_aug=training_configs['train_augmentation']) # augment the train data with config train,valid = train_valid_split(train,orig_trainset,train_ratio=1) if args.resume_checkpoint is not None: checkpoint = torch.load('../saved_models/' + args.resume_checkpoint) epoch,accuracy_type,prev_accuracy = (checkpoint[k] for k in ['epoch','accuracy_type','accuracy']) print('RESUME---> Loading model from Epoch %d with %s Accuracy %f' %(epoch,accuracy_type,prev_accuracy)) else: checkpoint = None model.train(train, training_configs,valid=None,test=test,checkpoint=checkpoint) # note test data is used only to evaluate model performance during training model.evaluate(test) elif args.mode == 'test': # Testing on public testing dataset _, test, _ = load_data(args.data_dir,None) if args.checkpoint is not None: checkpoint = torch.load('../saved_models/' + args.checkpoint) print('Loading Model--->') else: raise('No Checkpoint file specified! Specify one with --checkpoint') model.network.load_state_dict(checkpoint['net']) test_accuracy, correct, total = model.evaluate(test) print("[%s%s test results] Model Accuracy %f, Total Correctt %d, Total Test Samples %d" %(args.checkpoint,utils.get_time(),test_accuracy,correct,total)) elif args.mode == 'predict':
parser.add_argument("--test_file", help="path to the test file")
parser.add_argument("--save_dir", help="path to save the results")
args = parser.parse_args()

if __name__ == '__main__':
    model = MyModel(model_configs, training_configs)

    def _weights_path():
        # Checkpoint directory: <save_dir>/<version>/ (trailing separator kept).
        return os.path.join(args.save_dir, model_configs["version"], "")

    if args.mode == 'train':
        x_train, y_train, x_test, y_test = load_data(args.data_dir)
        x_train, y_train, x_valid, y_valid = train_valid_split(x_train, y_train)
        model.train(x_train, y_train, x_valid, y_valid)
        model.save_weights(_weights_path())
        model.evaluate(x_test, y_test)
    elif args.mode == 'test':
        # Testing on public testing dataset
        model.load_weights(_weights_path())
        _, _, x_test, y_test = load_data(args.data_dir)
        model.evaluate(x_test, y_test)
    elif args.mode == 'predict':
        # Predicting and storing results on private testing dataset
        model.load_weights(_weights_path())
        x_test = load_testing_images(args.test_file)
        predictions = model.predict_prob(x_test)
        np.save("final_pred_" + model_configs["version"] + ".npy", predictions)
# Driver: train / test / predict dispatch for MyModel.
# Relies on module-level `args`, `model_configs`, `training_configs`,
# `MyModel`, `load_data`, `train_valid_split`, `load_testing_images`,
# `csv`, `os`, and `np` defined elsewhere in this file.
model = MyModel(model_configs)
if args.mode == 'train':
    x_train, y_train, x_test, y_test = load_data(args.data_dir)
    x_train, y_train, x_valid, y_valid = train_valid_split(x_train, y_train)
    train_stats = model.train(x_train, y_train, training_configs, x_valid,
                              y_valid)

    # FIX: the original passed a bare `open(...)` handle straight to
    # csv.writer, leaking the file descriptor and risking unflushed rows.
    # Use a context manager, and newline="" as the csv module requires so
    # rows are not double-spaced on Windows.
    stats_path = os.path.join(model_configs["save_dir"],
                              model_configs['name']) + ".csv"
    with open(stats_path, "w", newline="") as stats_file:
        stats_writer = csv.writer(stats_file)
        for key, val in train_stats.items():
            stats_writer.writerow([key, val])

    score, loss = model.evaluate(x_test, y_test)
    print("The test score is: {:.3f}% ({:.4f})".format(score * 100, loss))
elif args.mode == 'test':
    model.load()
    # Testing on public testing dataset
    _, _, x_test, y_test = load_data(args.data_dir)
    score, loss = model.evaluate(x_test, y_test)
    print("The test score is: {:.3f}% ({:.4f})".format(score * 100, loss))
elif args.mode == 'predict':
    model.load()
    # Predicting and storing results on private testing dataset
    x_test = load_testing_images(args.data_dir)
    predictions = model.predict_prob(x_test)
    np.save(args.result_dir, predictions)