# Example #1 (score: 0)
def main(config):
    """Evaluate a trained encoder on each configured validation dataset.

    Builds the encoder named by ``config['model_name']``, restores its
    weights — either from an explicit checkpoint file (``weights`` key)
    or from a saved epoch snapshot — then runs ``validate`` over every
    dataset listed under the validation section of the config.
    """
    # NOTE(review): the misspelled key 'validataion' is used consistently
    # here; it presumably matches the config files — do not "fix" it alone.
    val_cfg = config['validataion']

    encoder = getattr(encoders, config['model_name'])(
        out_features=config['features'], device=device)

    # Prefer an explicit checkpoint path; otherwise load an epoch snapshot.
    if 'weights' in val_cfg:
        state = torch.load(val_cfg['weights'])['state_dict']
        encoder.load_state_dict(state)
    else:
        load_weights(encoder, config['prefix'], 'model', val_cfg['epoch'])

    eval_transforms = Transforms(input_size=config['input_size'], train=False)

    for name in val_cfg['dataset']:
        print(name.upper())
        loader = DataLoader(
            insightface.Test(folder=val_cfg['folder'],
                             dataset=name,
                             transforms=eval_transforms),
            batch_size=val_cfg['batch_size'],
            num_workers=1,
            shuffle=False)

        validate(loader, encoder, flip_image=val_cfg['flip'])
# Example #2 (score: 0)
def main(config):
    """Train an encoder plus margin head as described by ``config``.

    Builds the model and margin module, optionally resumes both from an
    epoch snapshot, wraps them in ``nn.DataParallel`` when CUDA is
    available and ``config['parallel']`` is set, then runs the epoch
    loop under a ``MultiStepLR`` schedule, saving weights every
    ``config['save_freq']`` epochs.

    Raises:
        ValueError: if ``config['criterion']`` or ``config['optimizer']``
            names an unsupported option.  (Previously an unknown value
            left ``criterion``/``optimizer`` unbound and the function
            crashed later with a confusing ``NameError``.)
    """
    model = getattr(encoders,
                    config['model_name'])(out_features=config['features'],
                                          device=device)

    margin = getattr(margins,
                     config['margin_name'])(in_features=config['features'],
                                            out_features=config['num_classes'],
                                            device=device)

    if config['snapshot']['use']:
        load_weights(model, config['prefix'], 'model',
                     config['snapshot']['epoch'])
        load_weights(margin, config['prefix'], 'margin',
                     config['snapshot']['epoch'])

    if torch.cuda.is_available() and config['parallel']:
        model = nn.DataParallel(model)
        margin = nn.DataParallel(margin)

    if config['criterion'] == 'FocalLoss':
        criterion = FocalLoss(gamma=2)
    elif config['criterion'] == 'CrossEntropyLoss':
        criterion = nn.CrossEntropyLoss()
    else:
        raise ValueError(
            'Unsupported criterion: {!r}'.format(config['criterion']))

    # Both optimizers update the encoder and the margin head jointly.
    param_groups = [{'params': model.parameters()},
                    {'params': margin.parameters()}]
    if config['optimizer'] == 'SGD':
        optimizer = optim.SGD(param_groups,
                              lr=config['learning_rate'],
                              momentum=config['momentum'],
                              weight_decay=config['weight_decay'])
    elif config['optimizer'] == 'Adam':
        optimizer = optim.Adam(param_groups,
                               lr=config['learning_rate'],
                               weight_decay=config['weight_decay'])
    else:
        raise ValueError(
            'Unsupported optimizer: {!r}'.format(config['optimizer']))

    lr_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=config['milestones'], gamma=0.1)

    transforms = Transforms(input_size=config['input_size'], train=True)
    data_loader = DataLoader(insightface.Train(
        folder=config['train']['folder'],
        dataset=config['train']['dataset'],
        transforms=transforms),
                             batch_size=config['batch_size'],
                             num_workers=config['num_workers'],
                             shuffle=True)

    # Resume the epoch counter from the snapshot when one was loaded.
    start_epoch = config['snapshot']['epoch'] if config['snapshot']['use'] else 0
    for epoch in range(start_epoch, config['num_epochs']):
        print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))

        train(data_loader, model, margin, criterion, optimizer, epoch, config)
        lr_scheduler.step()

        if (epoch + 1) % config['save_freq'] == 0:
            save_weights(model, config['prefix'], 'model', epoch + 1,
                         config['parallel'])
            save_weights(margin, config['prefix'], 'margin', epoch + 1,
                         config['parallel'])
# Example #3 (score: 0) — NOTE: the snippet below is truncated at its start
            'path': os.path.join(path_test_dir, row.path)
        } for _, row in test_dataset_paths.iterrows()
                 if int(row.frame) in config['frames']]
        test_df = pd.DataFrame(paths)

        test_loader = DataLoader(idrnd.TestAntispoofDataset(
            test_df, Transforms(input_size=config['input_size'], train=False),
            config['tta']),
                                 batch_size=config['batch_size'],
                                 num_workers=config['num_workers'],
                                 shuffle=False)

        model = getattr(encoders, config['encoder'])(device=device,
                                                     out_features=1,
                                                     pretrained=False)
        load_weights(model, config['prefix'], 'model', 'best')
        model.eval()

        with torch.no_grad():
            for batch, video, frame in test_loader:
                batch = batch.to(device)
                probability = torch.sigmoid(model(batch).view(-1))

                samples.extend(video)
                frames.extend(frame.numpy())
                probabilities.extend(probability.cpu().numpy())

    # save
    predictions = pd.DataFrame.from_dict({
        'id': samples,
        'frame': frames,
def main(config):
    """Train the anti-spoofing encoder and track the best checkpoint.

    Builds the encoder, optionally resumes from an epoch snapshot,
    trains with Adam + ``ReduceLROnPlateau``, validates each epoch over
    a grid of thresholds, and saves both per-epoch weights and a
    ``'best'`` checkpoint whenever the validation loss improves.

    Epoch 0 is a warm-up pass that optimizes only the linear head
    (``linear_params()``) before the full network is trained.
    """
    model = getattr(models,
                    config['encoder'])(device=device,
                                       out_features=config['out_features'],
                                       pretrained=config['pretrained'])

    start_epoch = 0
    if config['snapshot']['use']:
        load_weights(model, config['prefix'], 'model',
                     config['snapshot']['epoch'])
        start_epoch = config['snapshot']['epoch']

    if torch.cuda.is_available() and config['parallel']:
        model = nn.DataParallel(model)

    criterion = getattr(losses, config['loss'])()
    optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])

    lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                        factor=0.5,
                                                        patience=2,
                                                        min_lr=1e-6)

    train_df, test_df = idrnd.load_dataset(config['train']['folder'],
                                           test_size=0.05)

    train_loader = DataLoader(idrnd.TrainAntispoofDataset(
        train_df, Transforms(input_size=config['input_size'], train=True)),
                              batch_size=config['batch_size'],
                              num_workers=config['num_workers'],
                              shuffle=True)

    test_loader = DataLoader(idrnd.TrainAntispoofDataset(
        test_df, Transforms(input_size=config['input_size'], train=False),
        config['tta']),
                             batch_size=config['batch_size'],
                             num_workers=config['num_workers'],
                             shuffle=False)

    thresholds = np.linspace(0.001, 0.6, num=config['thresholds'])
    best_threshold = 0.5
    best_epoch = 0
    best_score = np.inf
    best_loss = np.inf

    for epoch in range(start_epoch, config['num_epochs']):
        if epoch == 0:
            # BUG FIX: the original accessed ``model.module`` directly,
            # which raises AttributeError when the model is not wrapped
            # in DataParallel (parallel disabled or CUDA unavailable).
            base = model.module if isinstance(model, nn.DataParallel) else model
            opt = optim.Adam(base.linear_params(),
                             lr=config['learning_rate'])
            train(train_loader, model, criterion, opt, epoch, config)
        else:
            train(train_loader, model, criterion, optimizer, epoch, config)

        loss, accuracy, score = validation(test_loader, model, criterion,
                                           thresholds)

        current_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(' Validation:'
              ' Time: {}'
              ' Epoch: {}'
              ' Loss: {:.4f}'.format(current_time, epoch + 1, loss))

        # Report the threshold that minimizes the validation score.
        best_index = np.argmin(score)
        print(' Threshold: {:.4f}'
              ' Accuracy: {:.5f}'
              ' Score: {:.5f}'.format(thresholds[best_index],
                                      accuracy[best_index], score[best_index]))

        # Checkpoint selection is driven by validation loss, not score.
        if best_loss > loss:
            best_threshold = thresholds[best_index]
            best_score = score[best_index]
            best_loss = loss
            best_epoch = epoch + 1
            save_weights(model, config['prefix'], 'model', 'best',
                         config['parallel'])

        # The warm-up epoch used a throwaway optimizer; only step the
        # plateau scheduler once the real optimizer is in use.
        if epoch != 0:
            lr_scheduler.step(loss)

        save_weights(model, config['prefix'], 'model', epoch + 1,
                     config['parallel'])

    print(' Best threshold: {:.4f}'
          ' Best score: {:.5f}'
          ' Best loss: {:.4f}'
          ' Best epoch: {}'.format(best_threshold, best_score, best_loss,
                                   best_epoch))
 def load_margin(self, prefix, epoch):
     """Restore the margin head's weights from the checkpoint saved
     under *prefix* for the given *epoch*."""
     target = self.margin
     load_weights(target, prefix, 'margin', epoch)
 def load_model(self, prefix, epoch):
     """Restore the encoder's weights from the checkpoint saved under
     *prefix* for the given *epoch*."""
     target = self.model
     load_weights(target, prefix, 'model', epoch)