Example #1
def main(args):
    print(torch.cuda.device_count(), 'gpus available')
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ("image", )),
    ])

    print("Reading data...")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                             train_transforms,
                                             split="train")
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           train_transforms,
                                           split="val")
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'),
                                            train_transforms,
                                            split="test")

    torch.save(
        {
            'train_dataset': train_dataset,
            'val_dataset': val_dataset,
            'test_dataset': test_dataset,
        }, os.path.join(args.data, 'datasets.pth'))
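Note: all of these scripts rely on a TransformByKeys helper that is not part of the listing. A minimal sketch of one plausible implementation, assuming each dataset sample is a dict keyed by "image" (and "landmarks"):

class TransformByKeys:
    """Apply a torchvision transform only to the named keys of a sample dict."""

    def __init__(self, transform, names):
        self.transform = transform  # e.g. transforms.ToTensor()
        self.names = names          # keys to transform, e.g. ("image",)

    def __call__(self, sample):
        for name in self.names:
            if name in sample:
                sample[name] = self.transform(sample[name])
        return sample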
Example #2
def main(args):
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image",)),
        TransformByKeys(transforms.ToTensor(), ("image",)),
        TransformByKeys(transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), ("image",)),
    ])

    print("Reading data...")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'), train_transforms, split="train")
    train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                       shuffle=True, drop_last=True)
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'), train_transforms, split="val")
    val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                     shuffle=False, drop_last=False)

    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    model = models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)
    model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, amsgrad=True)
    loss_fn = fnn.mse_loss

    # 2. train & validate
    print("Ready for training...")
    best_val_loss = np.inf
    for epoch in range(args.epochs):
        train_loss = train(model, train_dataloader, loss_fn, optimizer, device=device)
        val_loss = validate(model, val_dataloader, loss_fn, device=device)
        print("Epoch #{:2}:\ttrain loss: {:5.2}\tval loss: {:5.2}".format(epoch, train_loss, val_loss))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            with open(f"{args.name}_best.pth", "wb") as fp:
                torch.save(model.state_dict(), fp)

    # 3. predict
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'), train_transforms, split="test")
    test_dataloader = data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                      shuffle=False, drop_last=False)

    with open(f"{args.name}_best.pth", "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    test_predictions = predict(model, test_dataloader, device)
    with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
        pickle.dump({"image_names": test_dataset.image_names,
                     "landmarks": test_predictions}, fp)

    create_submission(args.data, test_predictions, f"{args.name}_submit.csv")
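Note: the train/validate helpers called by these scripts are not included in the listing. A minimal sketch inferred from the call sites, assuming batches are dicts with "image" and "landmarks" tensors (later examples pass extra writer/epoch/scheduler arguments, so the exact signatures vary):

import numpy as np
import torch
import tqdm

def train(model, loader, loss_fn, optimizer, device):
    model.train()
    losses = []
    for batch in tqdm.tqdm(loader, desc="training..."):
        images = batch["image"].to(device)
        landmarks = batch["landmarks"].to(device)
        pred_landmarks = model(images).view(landmarks.shape)
        loss = loss_fn(pred_landmarks, landmarks)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return np.mean(losses)

def validate(model, loader, loss_fn, device):
    model.eval()
    losses = []
    with torch.no_grad():
        for batch in tqdm.tqdm(loader, desc="validation..."):
            images = batch["image"].to(device)
            landmarks = batch["landmarks"].to(device)
            pred_landmarks = model(images).view(landmarks.shape)
            losses.append(loss_fn(pred_landmarks, landmarks).item())
    return np.mean(losses)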
Example #3
def main(args):
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ("image", )),
    ])

    print("Reading data...")
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           train_transforms,
                                           split="data")
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=4,
                                     pin_memory=True,
                                     shuffle=False,
                                     drop_last=False)

    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    model = models.resnext50_32x4d(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)
    model.to(device)

    MODEL_FILENAME = "./rexnext300_best.pth"
    with open(MODEL_FILENAME, "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    loss_fn = fnn.mse_loss

    # 2. evaluate on the full training data
    print("Ready for evaluation...")
    print(len(val_dataloader.dataset))

    val_loss = validate(model, val_dataloader, loss_fn, device=device)
    print("val loss over the full split: {:5.2}".format(val_loss))
Example #4
def main(args):
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        AffineAugmenter(min_scale=0.9, max_offset=0.1, rotate=True),
        BrightnessContrastAugmenter(brightness=0.3, contrast=0.3),
        BlurAugmenter(max_kernel=5),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ("image", )),
    ])
    test_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ("image", )),
    ])

    print("Reading data...")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                             train_transforms,
                                             split="train")
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       num_workers=4,
                                       pin_memory=True,
                                       shuffle=True,
                                       drop_last=True)
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           test_transforms,
                                           split="val")
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=4,
                                     pin_memory=True,
                                     shuffle=False,
                                     drop_last=False)

    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    model = models.resnext50_32x4d(pretrained=True)
    # for param in model.parameters():
    #     param.requires_grad = False

    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)

    # model.fc = nn.Sequential(
    #     # nn.BatchNorm1d(model.fc.in_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
    #     # nn.Linear(model.fc.in_features, model.fc.in_features, bias=True),
    #     # nn.ReLU(),
    #     nn.BatchNorm1d(model.fc.in_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
    #     nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True))

    model.to(device)

    # optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, weight_decay=0.01, amsgrad=True)
    optimizer = RAdam(model.parameters(),
                      lr=args.learning_rate)  # , weight_decay=0.01)

    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=3)
    # optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.2)

    loss_fn = fnn.mse_loss

    # 2. train & validate
    print("Ready for training...")
    best_val_loss = np.inf
    for epoch in range(args.epochs):
        train_loss = train(model,
                           train_dataloader,
                           loss_fn,
                           optimizer,
                           device=device)
        val_loss = validate(model, val_dataloader, loss_fn, device=device)
        scheduler.step(val_loss)  # let ReduceLROnPlateau react to the validation loss
        print("Epoch #{:2}:\ttrain loss: {:5.2}\tval loss: {:5.2}".format(
            epoch, train_loss, val_loss))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            with open(f"{args.name}_best.pth", "wb") as fp:
                torch.save(model.state_dict(), fp)
        # with open(f"{args.name}_{epoch}_{train_loss:7.4}_{val_loss:7.4}.pth", "wb") as fp:
        #     torch.save(model.state_dict(), fp)

    # 3. predict
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'),
                                            test_transforms,
                                            split="test")
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=args.batch_size,
                                      num_workers=4,
                                      pin_memory=True,
                                      shuffle=False,
                                      drop_last=False)

    with open(f"{args.name}_best.pth", "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    test_predictions = predict(model, test_dataloader, device)
    with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
        pickle.dump(
            {
                "image_names": test_dataset.image_names,
                "landmarks": test_predictions
            }, fp)

    create_submission(args.data, test_predictions, f"{args.name}_submit.csv")

    if args.draw:
        print("Drawing landmarks...")
        directory = os.path.join("result",
                                 test_dataset.image_names[0].split('.')[0])
        if not os.path.exists(directory):
            os.makedirs(directory)
        random_idxs = np.random.choice(len(test_dataset.image_names),
                                       size=1000,
                                       replace=False)
        for i, idx in enumerate(random_idxs, 1):
            image = cv2.imread(test_dataset.image_names[idx])
            image = draw_landmarks(image, test_predictions[idx])
            cv2.imwrite(os.path.join("result", test_dataset.image_names[idx]),
                        image)
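Example #4 also pipes samples through custom augmenters (AffineAugmenter, BrightnessContrastAugmenter, BlurAugmenter) whose code is not shown. As one illustration, a hedged sketch of a BlurAugmenter-style transform, assuming the sample still holds a raw OpenCV image under "image"; the kernel range and probability are assumptions:

import cv2
import numpy as np

class BlurAugmenter:
    def __init__(self, max_kernel=5, p=0.5):
        self.max_kernel = max_kernel  # largest (odd) Gaussian kernel size
        self.p = p                    # probability of applying the blur

    def __call__(self, sample):
        if np.random.rand() < self.p:
            # pick an odd kernel size in [3, max_kernel]
            k = 2 * np.random.randint(1, self.max_kernel // 2 + 1) + 1
            sample["image"] = cv2.GaussianBlur(sample["image"], (k, k), 0)
        return sample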
Example #5
def main(args):
    # 1. prepare data & models
    # applying additional transforms did not improve the results;
    # the only change is the normalization parameters
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(
                # means and stds are taken from the PyTorch documentation:
                # the same values the network was pretrained with on ImageNet
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            ),
            ("image", ),
        ),
    ])
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    print("Creating model...")
    model = models.resnext50_32x4d(pretrained=True)
    in_features = model.fc.in_features
    fc = nn.Sequential(nn.Linear(in_features, 2 * NUM_PTS))  # new regression "head"
    model.fc = fc
    # if a network already fine-tuned on the contest dataset is available, load it
    if args.pretrained_model:
        print(f"Load best_state_dict {args.pretrained_model}")
        state_dict = torch.load(args.pretrained_model, map_location="cpu")
        model.load_state_dict(state_dict)
        del state_dict

    model.to(device)
    print(model)

    factor = 0.1**(1 / 2)  # lr decay factor; two plateau steps multiply lr by 0.1
    # AdamW is chosen as the optimizer, with mild weight decay
    optimizer = optim.AdamW(model.parameters(),
                            lr=args.learning_rate,
                            amsgrad=True,
                            weight_decay=0.05)
    loss_fn = fnn.mse_loss
    # the lr is reduced on validation-loss plateaus via ReduceLROnPlateau
    scheduler = ReduceLROnPlateau(
        optimizer,
        mode='min',
        patience=1,
        factor=factor,
    )

    print(loss_fn)
    print(optimizer)
    print(scheduler)

    print("Reading data...")
    print("Read train landmark dataset")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                             train_transforms,
                                             split="train")
    print("Create picture loader for test dataset")
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       num_workers=0,
                                       pin_memory=True,
                                       shuffle=True,
                                       drop_last=True)
    print("Read val landmark dataset")
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           train_transforms,
                                           split="val")
    print("Create picture loader for val dataset")
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=0,
                                     pin_memory=True,
                                     shuffle=False,
                                     drop_last=False)

    # 2. train & validate
    print("Ready for training...")
    best_val_loss = np.inf
    for epoch in range(args.epochs):
        train_loss = train(model,
                           train_dataloader,
                           loss_fn,
                           optimizer,
                           device=device)
        val_loss = validate(model, val_dataloader, loss_fn, device=device)
        print("Epoch #{:2}:\ttrain loss: {:5.5}\tval loss: {:5.5}".format(
            epoch + 1, train_loss, val_loss))
        scheduler.step(val_loss)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            with open(f"{args.name}_best.pth", "wb") as fp:
                torch.save(model.state_dict(), fp)

    # 3. predict
    print("Start predict")
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'),
                                            train_transforms,
                                            split="test")
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=args.batch_size,
                                      num_workers=0,
                                      pin_memory=True,
                                      shuffle=False,
                                      drop_last=False)

    with open(f"{args.name}_best.pth", "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    test_predictions = predict(model, test_dataloader, device)
    with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
        pickle.dump(
            {
                "image_names": test_dataset.image_names,
                "landmarks": test_predictions
            }, fp)

    create_submission(args.data, test_predictions, f"{args.name}_submit.csv")
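A quick check of the factor = 0.1 ** (1 / 2) choice above: ReduceLROnPlateau multiplies the learning rate by factor, so two consecutive plateau reductions shrink it by exactly one order of magnitude.

factor = 0.1 ** (1 / 2)      # ~0.3162
lr = 1e-3
print(lr * factor)           # ~3.16e-4 after one reduction
print(lr * factor ** 2)      # 1.0e-4 after two reductions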
Example #6
def main(args):
    print(torch.cuda.device_count(), 'gpus available')
    # 0. Initializing training
    if args.fold is not None:
        if args.fold_prefix is None:
            print('Please add fold-prefix to arguments')
            return
        folder_name = f'{args.name}_{args.fold_prefix:02d}_fold{args.fold}'
        checkpoint_path = os.path.join(args.data, 'checkpoints', folder_name)
        log_path = os.path.join(args.data, 'logs', folder_name)
        if not os.path.exists(checkpoint_path):
            os.mkdir(checkpoint_path)
    else:
        for i in range(100):
            folder_name = f'{args.name}_{i:02d}'
            checkpoint_path = os.path.join(args.data, 'checkpoints', folder_name)
            log_path = os.path.join(args.data, 'logs', folder_name)
            if not os.path.exists(checkpoint_path):
                os.mkdir(checkpoint_path)
                break
    if args.checkpoint is None:
        training_state = {
            'best_checkpoints': [],
            'best_scores': [],
            'epoch': []
        }
    else:
        # loading checkpoint
        from_checkpoint = f'{args.name}_{args.checkpoint:02d}'
        parent_checkpoint_path = os.path.join(args.data, 'checkpoints', from_checkpoint)
        training_state = torch.load(os.path.join(parent_checkpoint_path, 'training_state.pth'))
        training_state['from_checkpoint'] = from_checkpoint
        print(f'Using checkpoint {from_checkpoint}')
    print(f'Results can be found in {folder_name}')
    writer = SummaryWriter(log_dir=log_path)

    # 1. prepare data & models
    if args.name == 'senet154':
        crop_size = 224
    else:
        crop_size = CROP_SIZE
    train_transforms = transforms.Compose([
        # HorizontalFlip(p=0.5),
        ScaleMinSideToSize((crop_size, crop_size)),
        CropCenter(crop_size),
        TransformByKeys(transforms.ToPILImage(), ('image',)),
        TransformByKeys(transforms.ToTensor(), ('image',)),
        TransformByKeys(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ('image',)),
    ])
    test_transforms = transforms.Compose([
        ScaleMinSideToSize((crop_size, crop_size)),
        CropCenter(crop_size),
        TransformByKeys(transforms.ToPILImage(), ('image',)),
        TransformByKeys(transforms.ToTensor(), ('image',)),
        TransformByKeys(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ('image',)),
    ])
    albu_transforms = albu.Compose([
        albu.Blur(p=0.1),
        albu.MultiplicativeNoise(p=0.1, per_channel=True),
        albu.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=20, p=0.2),
        albu.ChannelShuffle(p=0.2)
    ], keypoint_params=albu.KeypointParams(format='xy'))
    print('\nTransforms:')
    print(albu_transforms)
    print(train_transforms)

    print('\nReading data...')
    datasets = torch.load(os.path.join(args.data, 'datasets.pth'))
    for d in datasets:
        datasets[d].transforms = train_transforms
    if args.fold is None:
        print('Using predefined data split')
        train_dataset = datasets['train_dataset']
        val_dataset = datasets['val_dataset']
    else:
        print(f'Using fold {args.fold}')
        train_dataset = FoldDatasetDataset(datasets['train_dataset'], datasets['val_dataset'], train_transforms,
                                           albu_transforms, split='train', fold=args.fold, seed=42)
        val_dataset = FoldDatasetDataset(datasets['train_dataset'], datasets['val_dataset'], train_transforms,
                                         None, split='val', fold=args.fold, seed=42)

    test_dataset = datasets['test_dataset']
    test_dataset.transforms = test_transforms

    train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=32, pin_memory=True,
                                       shuffle=True, drop_last=True)
    val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=32, pin_memory=True,
                                     shuffle=False, drop_last=False)

    print('Creating model...')
    device = torch.device('cuda:0') if args.gpu else torch.device('cpu')
    if args.name == 'senet154':
        model = pretrainedmodels.senet154(num_classes=1000, pretrained='imagenet')
        model.last_linear = nn.Linear(model.last_linear.in_features, 2 * NUM_PTS, bias=True)
    else:
        model = models.resnext50_32x4d(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)
    model = nn.DataParallel(model)
    print(f'Using {torch.cuda.device_count()} gpus')
    if args.checkpoint is not None:
        model.load_state_dict(training_state['best_checkpoints'][0])
    model.to(device)

    # optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, nesterov=True)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    # optimizer = RAdam(model.parameters(), lr=args.learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5, factor=0.5)
    # scheduler = None
    print(f'Optimizer: {optimizer}')
    print(f'Scheduler: {scheduler}')
    loss_fn = fnn.mse_loss

    # 2. train & validate
    print('Ready for training...')
    if args.checkpoint is None:
        start_epoch = 0
        best_val_loss = np.inf
    else:
        start_epoch = training_state['epoch'][0]
        best_val_loss = training_state['best_scores'][0]

    for epoch in range(start_epoch, start_epoch + args.epochs):
        train_loss = train(model, train_dataloader, loss_fn, optimizer, device, writer, epoch)
        val_loss = validate(model, val_dataloader, loss_fn, device, writer, epoch, scheduler)
        print('Epoch #{:2}:\ttrain loss: {:5.2}\tval loss: {:5.2}'.format(epoch, train_loss, val_loss))
        print(f'Learning rate = {optimizer.param_groups[0]["lr"]}')
        if len(training_state['best_scores']) == 0:
            # deepcopy: state_dict() returns references to the live tensors,
            # so a stored checkpoint would otherwise track later updates
            training_state['best_checkpoints'].append(copy.deepcopy(model.state_dict()))
            training_state['best_scores'].append(val_loss)
            training_state['epoch'].append(epoch)
            with open(os.path.join(checkpoint_path, 'training_state.pth'), 'wb') as fp:
                torch.save(training_state, fp)
        elif len(training_state['best_scores']) < 3 or val_loss < training_state['best_scores'][-1]:
            # find the insertion index that keeps best_scores sorted ascending
            cur_val_index = len(training_state['best_scores'])
            for i, score in enumerate(training_state['best_scores']):
                if val_loss < score:
                    cur_val_index = i
                    break
            training_state['best_scores'].insert(cur_val_index, val_loss)
            training_state['best_checkpoints'].insert(cur_val_index, copy.deepcopy(model.state_dict()))
            training_state['epoch'].insert(cur_val_index, epoch)
            if len(training_state['best_scores']) > 3:
                training_state['best_scores'] = training_state['best_scores'][:3]
                training_state['best_checkpoints'] = training_state['best_checkpoints'][:3]
                training_state['epoch'] = training_state['epoch'][:3]
            with open(os.path.join(checkpoint_path, 'training_state.pth'), 'wb') as fp:
                torch.save(training_state, fp)
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            with open(os.path.join(checkpoint_path, f'{args.name}_best.pth'), 'wb') as fp:
                torch.save(model.state_dict(), fp)
    print('Training finished')
    print(f'Best val losses = {training_state["best_scores"]} at epochs {training_state["epoch"]}')
    print()

    # 3. predict
    test_dataloader = data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=16, pin_memory=True,
                                      shuffle=False, drop_last=False)

    with open(os.path.join(checkpoint_path, f'{args.name}_best.pth'), 'rb') as fp:
        best_state_dict = torch.load(fp, map_location='cpu')
        model.load_state_dict(best_state_dict)

    test_predictions = predict(model, test_dataloader, device)
    with open(os.path.join(checkpoint_path, f'{args.name}_test_predictions.pkl'), 'wb') as fp:
        pickle.dump({'image_names': test_dataset.image_names,
                     'landmarks': test_predictions}, fp)

    create_submission(args.data, test_predictions, os.path.join(checkpoint_path, f'{args.name}_submit.csv'))
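Since this example keeps several checkpoints in memory, note that model.state_dict() returns references to the live parameter tensors. A small helper (an addition, not part of the original script) that snapshots detached CPU copies instead of relying on copy.deepcopy:

def snapshot_state_dict(model):
    # clone to CPU so the stored checkpoint cannot track later weight updates
    return {k: v.detach().cpu().clone() for k, v in model.state_dict().items()}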
Example #7
def main(args):

    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.39963884, 0.31994772, 0.28253724],
                                 std=[0.33419772, 0.2864468, 0.26987]),
            ("image", )),
    ])

    print("Reading data...")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                             train_transforms,
                                             split="train")
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       num_workers=4,
                                       pin_memory=True,
                                       shuffle=True,
                                       drop_last=True)
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           train_transforms,
                                           split="val")
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=4,
                                     pin_memory=True,
                                     shuffle=False,
                                     drop_last=False)

    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")

    #    model = models.wide_resnet101_2(pretrained=True)
    #     fc_layers = nn.Sequential(
    #                 nn.Linear(model.fc.in_features, model.fc.in_features),
    #                 nn.ReLU(inplace=True),
    #                 nn.Dropout(p=0.1),
    #                 nn.Linear(model.fc.in_features,  2 * NUM_PTS),
    #                 nn.ReLU(inplace=True),
    #                 nn.Dropout(p=0.1))
    #     model.fc = fc_layers

    model = models.resnext101_32x8d(pretrained=True)

    #   Uncomment to train with a frozen feature extractor:
    #     for param in model.parameters():
    #         param.requires_grad = False

    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)

    if args.checkpoint is not None:
        model.load_state_dict(torch.load(args.checkpoint, map_location="cpu"))
        print('Pretrained weights loaded')

    model.to(device)
    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           amsgrad=True)
    loss_fn = fnn.mse_loss
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)

    # 2. train & validate
    writer = SummaryWriter()

    print("Ready for training...")
    best_val_loss = np.inf
    for epoch in range(args.epochs):

        train_loss = train(model,
                           train_dataloader,
                           loss_fn,
                           optimizer,
                           device=device,
                           epoch=epoch,
                           writer=writer)

        val_loss = validate(model,
                            val_dataloader,
                            loss_fn,
                            device=device,
                            epoch=epoch,
                            writer=writer)
        scheduler.step()

        print("Epoch #{:2}:\ttrain loss: {:5.2}\tval loss: {:5.2}".format(
            epoch, train_loss, val_loss))
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            with open(f"{args.name}_best.pth", "wb") as fp:
                torch.save(model.state_dict(), fp)

    # 3. predict
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'),
                                            train_transforms,
                                            split="test")
    test_dataloader = data.DataLoader(test_dataset,
                                      batch_size=args.batch_size,
                                      num_workers=4,
                                      pin_memory=True,
                                      shuffle=False,
                                      drop_last=False)

    with open(f"{args.name}_best.pth", "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    test_predictions = predict(model, test_dataloader, device)
    with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
        pickle.dump(
            {
                "image_names": test_dataset.image_names,
                "landmarks": test_predictions
            }, fp)

    create_submission(args.data, test_predictions, f"{args.name}_submit.csv")
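The commented-out lines above hint at training with a frozen feature extractor. Spelled out, the usual pattern looks like this (a sketch reusing the names from this example; the replacement fc head stays trainable because it is created after the freeze):

model = models.resnext101_32x8d(pretrained=True)
for param in model.parameters():
    param.requires_grad = False  # freeze the pretrained backbone
model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)  # fresh, trainable head
optimizer = optim.Adam((p for p in model.parameters() if p.requires_grad),
                       lr=args.learning_rate, amsgrad=True)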
Example #8
def main(args):
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]), ("image", )),
    ])

    test_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image", )),
        TransformByKeys(transforms.ToTensor(), ("image", )),
        TransformByKeys(
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225]), ("image", )),
    ])

    print(datetime.datetime.now())
    print("Reading data...")
    train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                             train_transforms,
                                             split="train")
    train_dataloader = data.DataLoader(train_dataset,
                                       batch_size=args.batch_size,
                                       num_workers=4,
                                       pin_memory=True,
                                       shuffle=True,
                                       drop_last=True)
    val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'),
                                           test_transforms,
                                           split="val")
    val_dataloader = data.DataLoader(val_dataset,
                                     batch_size=args.batch_size,
                                     num_workers=4,
                                     pin_memory=True,
                                     shuffle=False,
                                     drop_last=False)

    print(datetime.datetime.now())
    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    model = torch.hub.load(
        'facebookresearch/WSL-Images', 'resnext101_32x8d_wsl'
    )  # models.resnext50_32x4d(pretrained=True) # resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)

    if os.path.isfile(f"{args.name}_best.pth"):
        print("Loading saved model " + f"{args.name}_best.pth")
        with open(f"{args.name}_best.pth", "rb") as fp:
            best_state_dict = torch.load(fp, map_location="cpu")
            model.load_state_dict(best_state_dict)

    model.to(device)

    optimizer = optim.Adam(model.parameters(),
                           lr=args.learning_rate,
                           amsgrad=True)
    loss_fn = fnn.l1_loss  # alternatives tried: WingLoss(), fnn.mse_loss
    loss_val = fnn.mse_loss

    # 2. train & validate
    print("Ready for training...")
    best_val_loss = np.inf
    for epoch in range(args.epochs):
        train_loss = train(model,
                           train_dataloader,
                           loss_fn,
                           optimizer,
                           device=device)
        val_loss = validate(model, val_dataloader, loss_fn, device=device)
        val_loss_mse = validate(model, val_dataloader, loss_val, device=device)
        print(
            "Epoch #{:2}:\ttrain loss: {:5.4}\tval loss: {:5.4}\tval mse: {:5.4}"
            .format(epoch, train_loss, val_loss, val_loss_mse))

        # save every epoch's results (not only the best) to compare several submissions on Kaggle;
        # restore `if val_loss < best_val_loss:` to keep only the best checkpoint
        if True:
            best_val_loss = val_loss
            with open(f"{args.name}_best.pth", "wb") as fp:
                torch.save(model.state_dict(), fp)

            with open(f"{args.name}_" + str(epoch) + ".pth", "wb") as fp:
                torch.save(model.state_dict(), fp)

            # 3. predict
            print('Predict')
            test_dataset = ThousandLandmarksDataset(os.path.join(
                args.data, 'test'),
                                                    test_transforms,
                                                    split="test")
            test_dataloader = data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              num_workers=4,
                                              pin_memory=True,
                                              shuffle=False,
                                              drop_last=False)

            #with open(f"{args.name}_best.pth", "rb") as fp:
            #    best_state_dict = torch.load(fp, map_location="cpu")
            #    model.load_state_dict(best_state_dict)

            test_predictions = predict(model, test_dataloader, device)
            with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
                pickle.dump(
                    {
                        "image_names": test_dataset.image_names,
                        "landmarks": test_predictions
                    }, fp)

            create_submission(args.data, test_predictions,
                              f"{args.name}_submit_" + str(epoch) + ".csv")
Example #9
def main(args):
    # 1. prepare data & models
    train_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        Cutout(10),
        RandomBlur(),
        TransformByKeys(transforms.ToPILImage(), ("image",)),
        TransformByKeys(transforms.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.02), ("image",)),
        TransformByKeys(transforms.ToTensor(), ("image",)),
        TransformByKeys(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ("image",)),
    ])

    val_transforms = transforms.Compose([
        ScaleMinSideToSize((CROP_SIZE, CROP_SIZE)),
        CropCenter(CROP_SIZE),
        TransformByKeys(transforms.ToPILImage(), ("image",)),
        TransformByKeys(transforms.ToTensor(), ("image",)),
        TransformByKeys(transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ("image",)),
    ])

    print("Creating model...")
    device = torch.device("cuda: 0") if args.gpu else torch.device("cpu")
    model = models.resnet50(pretrained=True)

    if args.freeze > 0:
        # freeze the first (args.freeze + 4) child modules of the backbone
        for ct, child in enumerate(model.children(), 1):
            if ct <= args.freeze + 4:
                for param in child.parameters():
                    param.requires_grad = False

    model.fc = nn.Linear(model.fc.in_features, 2 * NUM_PTS, bias=True)

    startEpoch = args.cont
    if startEpoch > 0:
        with open(f"{args.name}_best_{startEpoch}.pth", "rb") as fp:
            best_state_dict = torch.load(fp, map_location="cpu")
            model.load_state_dict(best_state_dict)

    model.to(device)

    if args.test:
        val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'), val_transforms, split="train")
        val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                         shuffle=False, drop_last=False)
        val_loss_fn = fnn.mse_loss

        val_full = validate_full(model, val_dataloader, val_loss_fn, device=device)

        res = dict(sorted(val_full.items(), key=lambda x: x[1], reverse=True)[:100])
        js = json.dumps(res)
        with open(f"{args.name}.json", "w") as f:
            f.write(js)
        print(res)
        return

    if not args.predict:
        print("Reading data...")
        train_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'), train_transforms, split="train")
        train_dataloader = data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                           shuffle=True, drop_last=True)
        val_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'train'), val_transforms, split="val")
        val_dataloader = data.DataLoader(val_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                         shuffle=False, drop_last=False)

        optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=0.0001, nesterov=True)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True)
        train_loss_fn = nn.SmoothL1Loss(reduction="mean")
        val_loss_fn = fnn.mse_loss

        # 2. train & validate
        print("Ready for training...")
        best_val_loss = np.inf
        for epoch in range(startEpoch, args.epochs):
            train_loss = train(model, train_dataloader, train_loss_fn, optimizer, device=device)
            val_loss = validate(model, val_dataloader, val_loss_fn, device=device)
            scheduler.step(val_loss)
            print("Epoch #{:2}:\ttrain loss: {:.5f}\tval loss: {:.5f}".format(epoch, train_loss, val_loss))
            with open(f"{args.name}_res.txt", 'a+') as file:
                file.write("Epoch #{:2}:\ttrain loss: {:.5f}\tval loss: {:.5f}\n".format(epoch, train_loss, val_loss))

            if val_loss < best_val_loss:
                best_val_loss = val_loss
                with open(f"{args.name}_best.pth", "wb") as fp:
                    torch.save(model.state_dict(), fp)

            if epoch > startEpoch and epoch % 5 == 0:
                # periodic snapshot every 5 epochs; does not affect best_val_loss
                with open(f"{args.name}_best_{epoch}.pth", "wb") as fp:
                    torch.save(model.state_dict(), fp)

    # 3. predict
    test_dataset = ThousandLandmarksDataset(os.path.join(args.data, 'test'), val_transforms, split="test")
    test_dataloader = data.DataLoader(test_dataset, batch_size=args.batch_size, num_workers=4, pin_memory=True,
                                      shuffle=False, drop_last=False)

    with open(f"{args.name}_best.pth", "rb") as fp:
        best_state_dict = torch.load(fp, map_location="cpu")
        model.load_state_dict(best_state_dict)

    model.eval()

    test_predictions = predict(model, test_dataloader, device)
    with open(f"{args.name}_test_predictions.pkl", "wb") as fp:
        pickle.dump({"image_names": test_dataset.image_names,
                     "landmarks": test_predictions}, fp)

    create_submission(args.data, test_predictions, f"{args.name}_submit.csv")
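The Cutout and RandomBlur augmenters used in this last example are also not shown. A hedged sketch of a Cutout-style transform operating on the raw (H, W, C) image array before ToPILImage; patch placement and probability are assumptions:

import numpy as np

class Cutout:
    def __init__(self, size=10, p=0.5):
        self.size = size  # side length of the square patch to erase
        self.p = p

    def __call__(self, sample):
        if np.random.rand() < self.p:
            img = sample["image"]
            h, w = img.shape[:2]
            y = np.random.randint(0, max(1, h - self.size))
            x = np.random.randint(0, max(1, w - self.size))
            img[y:y + self.size, x:x + self.size] = 0  # zero out the patch
            sample["image"] = img
        return sample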