def main():
    """Train a 2048-feature FCN head on an Xception backbone for Task 1 (GPU).

    Builds the feature extractor by stripping the stock classification
    layer, trains with BCE loss and Adam for 100 epochs on CUDA.
    """
    n_batch = 20

    # Backbone: drop the original output layer and replace the final
    # pooling with global average pooling so the trunk emits a 2048-d
    # feature vector for the FCN head.
    backbone = get_model("xception", pretrained=False)
    backbone = nn.Sequential(*list(backbone.children())[:-1])
    backbone[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1))
    net = FCN(backbone, 2048)
    net.cuda()

    train_set = Task1_loader("./Task_1/train.csv", phase='train')
    eval_set = Task1_loader("./Task_1/test.csv", phase='test')
    loader_train = DataLoader(train_set, batch_size=n_batch, shuffle=True, num_workers=8)
    loader_valid = DataLoader(eval_set, batch_size=n_batch, shuffle=False, num_workers=8)

    loss_fn = nn.BCELoss()
    opt = optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-5)
    train(net, loader_train, loader_valid, loss_fn, opt, 100, device='cuda')
def main():
    """Train a ResNet-34-backed BinaryClassifier on Task 1 faces (CPU).

    Regenerates the train/test CSV index, applies face-crop preprocessing,
    and trains with BCE loss and Adam for 10 epochs.
    """
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_1/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # eval images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)

    preprocess = FaceRecog(margin=7)  # face detector/cropper, 7px margin
    batch_size = 55

    model = models.resnet34(pretrained=False)
    clssf = BinaryClassifier(model, freeze=False)

    train_data = Task1_loader("./Task_1/train.csv", phase='train', preprocess=preprocess)
    test_data = Task1_loader("./Task_1/test.csv", phase='test', preprocess=preprocess)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    criterion = nn.BCELoss()
    # BUG FIX: optimize the parameters of the network actually being
    # trained (clssf, which wraps the backbone), not just the bare
    # backbone — otherwise any layers BinaryClassifier adds on top of
    # the ResNet never receive gradient updates.
    optimizer = optim.Adam(clssf.parameters(), lr=0.0018, weight_decay=0.0015)
    train(clssf, train_loader, valid_loader, criterion, optimizer, 10, device='cpu')
def main():
    """Validate a saved Task-1 checkpoint on the Task 2/3 evaluation split (CPU).

    Rebuilds the CSV index, restores model + optimizer state from the
    checkpoint, runs one validation pass, and prints loss and accuracy.
    """
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_2_3/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # eval images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)

    preprocess = FaceRecog(margin=7)  # face detector/cropper, 7px margin
    batch_size = 35

    model = models.resnet34(pretrained=False)
    # NOTE(review): clssf is constructed here but validate() is run on the
    # bare backbone — confirm whether the checkpoint targets model or clssf.
    clssf = BinaryClassifier(model, freeze=False)

    train_data = Task1_loader(root_dir_train + "train.csv", phase='train', preprocess=preprocess)
    test_data = Task1_loader(root_dir_test + "test.csv", phase='test', preprocess=preprocess)
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    criterion = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.0018, weight_decay=0.0015)
    load_checkpoint(model, 'checkpoints/task1_84_7.pkl', optimizer)

    valid_loss, accuracy = validate(model, valid_loader, criterion, 'cpu')
    # BUG FIX: print the name actually bound by the tuple unpack above —
    # the original referenced an undefined `valid_acc` (NameError at runtime).
    print(f'val loss: {valid_loss:04f} '
          f'val acc: {accuracy*100:.4f}%')
def main():
    """Fine-tune a pretrained ResNet-50 BinaryClassifier on Task 1 (CPU).

    Trains on the Task 1 development split and validates against the
    Task 2/3 evaluation split, BCE loss with SGD for 10 epochs.
    """
    root_dir_train = "./Task_1/"
    root_dir_test = "./Task_2_3/"
    train_path = os.path.join(root_dir_train, "development/")  # train images
    test_path = os.path.join(root_dir_test, "evaluation/")     # eval images
    create_csv(root_dir_train, root_dir_test, train_path, test_path)

    batch_size = 35

    model = models.resnet50(pretrained=True)
    clssf = BinaryClassifier(model)

    train_data = Task1_loader(root_dir_train + "train.csv", phase='train')
    test_data = Task1_loader(root_dir_test + "test.csv", phase='test')
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
    valid_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

    criterion = nn.BCELoss()
    # BUG FIX: optimize the parameters of the network actually being
    # trained (clssf, which wraps the backbone), not just the bare
    # backbone — otherwise any layers BinaryClassifier adds on top of
    # the ResNet never receive gradient updates.
    optimizer = optim.SGD(clssf.parameters(), lr=0.001, momentum=0.2)
    train(clssf, train_loader, valid_loader, criterion, optimizer, 10, device='cpu')
# Restore a from-scratch ResNeXt snapshot and set up Task 1 training:
# hyperparameters, data loaders, BCE loss and RMSprop.
# NOTE(review): `model_dir` must be defined earlier in this file — not
# visible in this chunk.
model_path = os.path.join(model_dir, "scratch_resnext")
model = torch.load(model_path)
use_gpu = torch.cuda.is_available()
if use_gpu:
    model = model.cuda()

# Hyperparameters.
batch_size = 16
epochs = 500
lr = 1e-4
momentum = 0
w_decay = 1e-5
step_size = 50  # LR-scheduler step (used elsewhere)
gamma = 0.5     # LR-scheduler decay factor (used elsewhere)

train_data = Task1_loader("./Task_1/train.csv", phase='train')
test_data = Task1_loader("./Task_1/test.csv", phase='test')
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=8)
# BUG FIX: validate on the held-out test split — the original built
# val_loader from train_data, leaving test_data entirely unused, so
# "validation" metrics were computed on the training set.
val_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=8)

activation = nn.Sigmoid()
criterion = nn.BCELoss()
optimizer = optim.RMSprop(model.parameters(), lr=lr, momentum=momentum, weight_decay=w_decay)
y_true = y_targets y_pred = y_preds return roc_curve(y_true, y_pred) """ model = get_model("xception", pretrained=True) model = nn.Sequential(*list(model.children())[:-1]) # Remove original output layer model[0].final_block.pool = nn.Sequential(nn.AdaptiveAvgPool2d(1)) # xcep model = FCN(model, 2048) """ model = torch.load("./checkpoints/89.pkl") test_data = Task1_loader("./Task_1/test.csv", phase='test') valid_loader = DataLoader(test_data, batch_size=55, shuffle=False, num_workers=8) y_pred = [] test_y = [] with torch.no_grad(): for batch in valid_loader: # Move the validation batch to the GPU inputs = Variable(batch['X']) labels = Variable(batch['Y'])