import os

import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import models

# create_csv, FaceRecog, BinaryClassifier, Task1_loader, train, load_checkpoint
# and validate are project-local imports (module paths not shown in the source).

def main():
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_1/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # test images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)

    preprocess = FaceRecog(margin=7)
    batch_size = 55

    # model = models.resnet18(pretrained=False)
    model = models.resnet34(pretrained=False)
    clssf = BinaryClassifier(model, freeze=False)

    train_data = Task1_loader("./Task_1/train.csv", phase='train', preprocess=preprocess)
    test_data = Task1_loader("./Task_1/test.csv", phase='test', preprocess=preprocess)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    criterion = nn.BCELoss()
    # optimizer = optim.SGD(model.parameters(), lr=0.0018, momentum=0.27)
    # Optimize the full classifier (wrapper head included), not just the bare backbone.
    optimizer = optim.Adam(clssf.parameters(), lr=0.0018, weight_decay=0.0015)

    train(clssf, train_loader, valid_loader, criterion, optimizer, 10, device='cpu')
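# `BinaryClassifier` is imported from elsewhere in this project and is not shown
# here. A minimal sketch of what it is assumed to look like: a wrapper that swaps
# the ResNet `fc` head for a single sigmoid unit, so its output matches
# nn.BCELoss. The head layout and the exact `freeze` semantics are assumptions,
# not the project's code.
import torch
import torch.nn as nn

class BinaryClassifier(nn.Module):
    def __init__(self, backbone, freeze=True):
        super().__init__()
        if freeze:
            # Freeze the backbone weights before attaching the new head,
            # so only the head stays trainable.
            for p in backbone.parameters():
                p.requires_grad = False
        # Replace the 1000-way ImageNet head with a single-logit head.
        backbone.fc = nn.Linear(backbone.fc.in_features, 1)
        self.backbone = backbone

    def forward(self, x):
        # Sigmoid keeps outputs in [0, 1], as nn.BCELoss expects.
        return torch.sigmoid(self.backbone(x)).squeeze(1)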
def main():
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_2_3/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # test images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)

    preprocess = FaceRecog(margin=7)
    batch_size = 35

    model = models.resnet34(pretrained=False)
    clssf = BinaryClassifier(model, freeze=False)

    train_data = Task1_loader(root_dir_train + "train.csv", phase='train', preprocess=preprocess)
    test_data = Task1_loader(root_dir_test + "test.csv", phase='test', preprocess=preprocess)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    criterion = nn.BCELoss()
    optimizer = optim.Adam(clssf.parameters(), lr=0.0018, weight_decay=0.0015)

    # Restore and evaluate the full classifier, not the bare backbone:
    # BCELoss needs the sigmoid output of the classifier head.
    load_checkpoint(clssf, 'checkpoints/task1_84_7.pkl', optimizer)
    valid_loss, valid_acc = validate(clssf, valid_loader, criterion, 'cpu')
    print(f'val loss: {valid_loss:.4f} '
          f'val acc: {valid_acc*100:.4f}%')
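# `load_checkpoint` is also defined elsewhere. A minimal sketch matching the
# call above; the checkpoint keys ('model_state', 'optimizer_state') are
# hypothetical, not the project's actual format.
import torch

def load_checkpoint(model, path, optimizer=None):
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state'])                  # assumed key
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state'])      # assumed key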
def main():
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_2_3/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # test images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)
    # preprocess()

    batch_size = 35

    model = models.resnet50(pretrained=True)
    clssf = BinaryClassifier(model)

    train_data = Task1_loader(root_dir_train + "train.csv", phase='train')
    test_data = Task1_loader(root_dir_test + "test.csv", phase='test')
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    # criterion = nn.CrossEntropyLoss()
    criterion = nn.BCELoss()
    # Optimize the classifier that is actually trained below.
    optimizer = optim.SGD(clssf.parameters(), lr=0.001, momentum=0.2)
    # optimizer = optim.SGD(model.parameters(), lr=0.0015, momentum=0.26)

    train(clssf, train_loader, valid_loader, criterion, optimizer, 10, device='cpu')
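# The `train` and `validate` helpers called by the three mains above are
# external too. A sketch of a conventional BCE loop consistent with how they
# are called; the `(images, labels)` batch format and the 0.5 decision
# threshold are assumptions.
import torch

def validate(model, loader, criterion, device):
    # Mean BCE loss and accuracy over the validation set.
    model.eval()
    total_loss, correct, seen = 0.0, 0, 0
    with torch.no_grad():
        for images, labels in loader:
            images, labels = images.to(device), labels.float().to(device)
            outputs = model(images)
            total_loss += criterion(outputs, labels).item() * labels.size(0)
            correct += ((outputs > 0.5).float() == labels).sum().item()
            seen += labels.size(0)
    return total_loss / seen, correct / seen

def train(model, train_loader, valid_loader, criterion, optimizer, epochs, device):
    model.to(device)
    for epoch in range(epochs):
        model.train()
        for images, labels in train_loader:
            images, labels = images.to(device), labels.float().to(device)
            optimizer.zero_grad()
            loss = criterion(model(images), labels)
            loss.backward()
            optimizer.step()
        valid_loss, valid_acc = validate(model, valid_loader, criterion, device)
        print(f'epoch {epoch}: val loss {valid_loss:.4f}, val acc {valid_acc*100:.2f}%')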
        'Missing arguments, you should set:\
- The model unique identifier\
- The directory to store all generated outputs\
- The training file\
Received: ' + ' '.join(sys.argv)
    ]
    # Add the provided unique id.
    if len(sys.argv) > 1:
        result['modelid'] = sys.argv[1]
    print(json.dumps(result))
    sys.exit(result['status'])

modelid = sys.argv[1]
directory = sys.argv[2]

# Sklearn binary classifier - logistic regression.
binary_classifier = BinaryClassifier(modelid, directory)
# TensorFlow binary classifier - NN.
#binary_classifier = BinaryClassifierTensorFlow(modelid, directory)
# TensorFlow binary classifier - logistic regression.
#binary_classifier = BinaryClassifierSkflow(modelid, directory)
# TensorFlow binary classifier - deep neural network.
#binary_classifier = BinaryClassifierDNN(modelid, directory)

result = binary_classifier.train_dataset(sys.argv[3])
print(json.dumps(result))
sys.exit(result['status'])
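# The sklearn `BinaryClassifier` used by these CLI scripts lives in another
# module. A minimal logistic-regression sketch of the assumed interface; the
# CSV layout (label in the last column) and the result keys are illustrative
# assumptions, not the project's actual contract.
import numpy as np
from sklearn.linear_model import LogisticRegression

class BinaryClassifier:
    def __init__(self, modelid, directory):
        self.modelid = modelid
        self.directory = directory  # where generated outputs would be stored
        self.model = LogisticRegression()

    def train_dataset(self, filepath):
        data = np.loadtxt(filepath, delimiter=',')
        X, y = data[:, :-1], data[:, -1]
        self.model.fit(X, y)
        # status 0 signals success to the calling process.
        return {'status': 0, 'modelid': self.modelid,
                'accuracy': self.model.score(X, y)}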
- The minimum deviation to accept the model as valid (defaults to 0.02)\
- The number of times the evaluation will run (defaults to 100)\
Received: ' + ' '.join(sys.argv)
    ]
    # Add the provided unique id.
    if len(sys.argv) > 1:
        result['modelid'] = sys.argv[1]
    print(json.dumps(result))
    sys.exit(result['status'])

modelid = sys.argv[1]
directory = sys.argv[2]

# Sklearn binary classifier - logistic regression.
binary_classifier = BinaryClassifier(modelid, directory)
# TensorFlow binary classifier - NN.
#binary_classifier = BinaryClassifierTensorFlow(modelid, directory)
# TensorFlow binary classifier - logistic regression.
#binary_classifier = BinaryClassifierSkflow(modelid, directory)
# TensorFlow binary classifier - deep neural network.
#binary_classifier = BinaryClassifierDNN(modelid, directory)

result = binary_classifier.evaluate_dataset(sys.argv[3], float(sys.argv[4]),
                                            float(sys.argv[5]), int(sys.argv[6]))
print(json.dumps(result))
sys.exit(result['status'])
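# A sketch of what `evaluate_dataset` might do, consistent with the arguments
# above: run the evaluation several times and accept the model only if the
# accuracy clears a minimum and its deviation stays small. Treating sys.argv[4]
# as a minimum-accuracy threshold is an assumption, as is the CSV layout.
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

def evaluate_dataset(filepath, min_accuracy, min_deviation=0.02, runs=100):
    data = np.loadtxt(filepath, delimiter=',')
    X, y = data[:, :-1], data[:, -1]
    scores = []
    for _ in range(runs):
        # Fresh random split per run, so the deviation reflects sampling noise.
        X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2)
        scores.append(LogisticRegression().fit(X_tr, y_tr).score(X_te, y_te))
    mean, dev = float(np.mean(scores)), float(np.std(scores))
    accepted = mean >= min_accuracy and dev <= min_deviation
    return {'status': 0 if accepted else 1, 'accuracy': mean, 'deviation': dev}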
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns

from BinaryClassifier import BinaryClassifier

train = pd.read_csv("./Dataset/train.csv").drop(
    ["PassengerId", "Name", "Ticket", "Embarked", "Cabin"], axis=1)
test = pd.read_csv("./Dataset/test.csv").drop(
    ["PassengerId", "Name", "Ticket", "Embarked", "Cabin"], axis=1)
train_labels = train["Survived"]

def gender_to_int(sample):
    return int(sample == "male")

train_gender = [[gender_to_int(sample)] for sample in train["Sex"].values]
test_gender = [[gender_to_int(sample)] for sample in test["Sex"].values]

# Drop the label from the train features: it is absent from test and
# would otherwise leak into the inputs.
train_features = np.concatenate([train.drop(["Sex", "Survived"], axis=1).values, train_gender], axis=1)
test_features = np.concatenate([test.drop(["Sex"], axis=1).values, test_gender], axis=1)

# Shuffle once (not per epoch), so the take/skip validation split stays fixed.
train_ds = tf.data.Dataset.from_tensor_slices(
    (train_features, train_labels)).shuffle(100, reshuffle_each_iteration=False)
val_ds = train_ds.take(200).batch(200)
train_ds = train_ds.skip(200).batch(64)
test_ds = tf.data.Dataset.from_tensor_slices(test_features).batch(64)

model = BinaryClassifier([8, 4, 1])
# BinaryAccuracy thresholds the sigmoid output at 0.5; plain Accuracy would
# compare raw probabilities against the 0/1 labels and stay near zero.
model.compile(tf.keras.optimizers.Adadelta(0.0001),
              tf.keras.losses.MeanSquaredError(),
              [tf.keras.metrics.BinaryAccuracy()])
# Train on the full dataset; the original `train_ds.take(1)` fit on a single
# batch and appears to be a debugging leftover.
model.fit(train_ds, epochs=20, verbose=2, validation_data=val_ds)

#for sample in train_ds.take(1):
#    print(sample)
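# `BinaryClassifier` here is built from a list of layer widths ([8, 4, 1]).
# A minimal Keras sketch of that assumed interface; the ReLU hidden
# activations are an assumption, the final sigmoid keeps outputs in [0, 1].
import tensorflow as tf

class BinaryClassifier(tf.keras.Model):
    def __init__(self, units):
        super().__init__()
        # All widths but the last are hidden layers; the last is the output.
        self.hidden = [tf.keras.layers.Dense(u, activation='relu')
                       for u in units[:-1]]
        self.out = tf.keras.layers.Dense(units[-1], activation='sigmoid')

    def call(self, x):
        for layer in self.hidden:
            x = layer(x)
        return self.out(x)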
TEST_IMAGES = ['./images/5.jpg', './images/6.jpg']
TEST_TRUTHS = ['./images/5_mask.jpg', './images/6_mask.jpg']

# Change this to run a different pipeline
RUN_PIPELINE = 2

if RUN_PIPELINE == 1:
    # Pipeline 1
    ## This trains on the train images with 3 gaussians and a threshold of 0.5,
    ## then tests the model on the test images.
    print("Running pipeline 1")
    APPLE_CLASSIFIER = BinaryClassifier(gabor=False, log=True)
    APPLE_CLASSIFIER.load_train_images(TRAIN_IMAGES, TRAIN_TRUTHS, display=False)
    APPLE_CLASSIFIER.train(k=3)
    APPLE_CLASSIFIER.set_threshold(0.5)
    APPLE_CLASSIFIER.test(TEST_IMAGES, truths=TEST_TRUTHS, display=True)
elif RUN_PIPELINE == 2:
    # Pipeline 2
    ## This tunes the model over a variable number of gaussians (max 5) and threshold
    ## (between 0.01 and 0.99 in steps of 0.01), finds the optimum parameters and
    ## trains the model on these. Finally, the model is tested against the test images.
    print("Running pipeline 2")
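    # The body of pipeline 2 is truncated here. A sketch of the grid search it
    # describes, assuming (hypothetically) that test() returns a comparable
    # score when display=False; this loop is a reconstruction, not the
    # project's code.
    best_score, best_k, best_t = None, None, None
    APPLE_CLASSIFIER = BinaryClassifier(gabor=False, log=True)
    APPLE_CLASSIFIER.load_train_images(TRAIN_IMAGES, TRAIN_TRUTHS, display=False)
    for k in range(1, 6):                      # variable number of gaussians, max 5
        APPLE_CLASSIFIER.train(k=k)
        for step in range(1, 100):             # thresholds 0.01 .. 0.99 in steps of 0.01
            t = step / 100
            APPLE_CLASSIFIER.set_threshold(t)
            # Score each candidate on the training images to find the optimum.
            score = APPLE_CLASSIFIER.test(TRAIN_IMAGES, truths=TRAIN_TRUTHS, display=False)
            if best_score is None or score > best_score:
                best_score, best_k, best_t = score, k, t
    # Retrain with the optimum parameters and test against the test images.
    APPLE_CLASSIFIER.train(k=best_k)
    APPLE_CLASSIFIER.set_threshold(best_t)
    APPLE_CLASSIFIER.test(TEST_IMAGES, truths=TEST_TRUTHS, display=True)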