Code example #1
0
File: test.py  Project: Yalda-Afshar/sigver
def main(args):
    """Extract CNN features with a trained model and run WD classifier folds.

    Loads the model checkpoint, extracts features for the whole dataset,
    splits users into exploitation/development sets, trains/tests per-user
    classifiers for ``args.folds`` random folds, prints EER statistics, and
    optionally pickles the per-fold result dicts to ``args.save_path``.

    Returns:
        list: one results dict (from ``training.train_test_all_users``) per fold.
    """
    exp_users = range(*args.exp_users)
    dev_users = range(*args.dev_users)

    # The two user sets must be disjoint so development users never leak
    # into the exploitation (evaluation) set.
    assert set(exp_users).isdisjoint(dev_users), \
        'Exploitation set and Development set must not overlap'

    # map_location keeps tensors on CPU regardless of where they were saved.
    state_dict, class_weights, forg_weights = torch.load(
        args.model_path, map_location=lambda storage, loc: storage)
    device = torch.device('cuda', args.gpu_idx) if torch.cuda.is_available() else torch.device('cpu')

    print('Using device: {}'.format(device))

    # NOTE(review): .eval() is commented out, so any dropout/batch-norm layers
    # run in training mode during feature extraction — confirm this is intended.
    base_model = models.available_models[args.model]().to(device)#.eval()

    base_model.load_state_dict(state_dict)

    def process_fn(batch):
        # Forward one batch through the CNN; batch[0] holds the images.
        # (Renamed local: 'input' shadowed the builtin.)
        inputs = batch[0].to(device)
        return base_model(inputs)

    x, y, yforg, user_mapping, filenames = load_dataset(args.data_path)

    features = extract_features(x, process_fn, args.batch_size, args.input_size)

    data = (features, y, yforg)

    exp_set = get_subset(data, exp_users)
    dev_set = get_subset(data, dev_users)

    # Fixed seed so fold splits are reproducible across runs.
    rng = np.random.RandomState(1234)

    eer_u_list = []
    eer_list = []
    all_results = []
    for _ in range(args.folds):
        classifiers, results = training.train_test_all_users(exp_set,
                                                             dev_set,
                                                             svm_type=args.svm_type,
                                                             C=args.svm_c,
                                                             gamma=args.svm_gamma,
                                                             num_gen_train=args.gen_for_train,
                                                             num_forg_from_exp=args.forg_from_exp,
                                                             num_forg_from_dev=args.forg_from_dev,
                                                             num_gen_test=args.gen_for_test,
                                                             rng=rng)
        this_eer_u, this_eer = results['all_metrics']['EER_userthresholds'], results['all_metrics']['EER']
        all_results.append(results)
        eer_u_list.append(this_eer_u)
        eer_list.append(this_eer)
    # Report mean +- std over folds, as percentages.
    print('EER (global threshold): {:.2f} (+- {:.2f})'.format(np.mean(eer_list) * 100, np.std(eer_list) * 100))
    print('EER (user thresholds): {:.2f} (+- {:.2f})'.format(np.mean(eer_u_list) * 100, np.std(eer_u_list) * 100))

    if args.save_path is not None:
        print('Saving results to {}'.format(args.save_path))
        with open(args.save_path, 'wb') as f:
            pickle.dump(all_results, f)
    return all_results
Code example #2
0
File: train.py  Project: atinesh-s/sigver
def main(args):
    """Train or evaluate a signature CNN with an optional forgery head.

    With ``--test``, loads a checkpoint and reports validation metrics;
    otherwise trains from scratch. When ``--forg`` is set, a second linear
    head predicts genuine-vs-forgery alongside user classification.
    """
    # Setup logging
    logdir = pathlib.Path(args.logdir)
    if not logdir.exists():
        logdir.mkdir()

    if args.visdomport is not None:
        logger = VisdomLogger(port=args.visdomport)
    else:
        logger = None

    device = torch.device('cuda', args.gpu_idx) if torch.cuda.is_available() else torch.device('cpu')
    print('Using device: {}'.format(device))

    # Seed CPU and all GPU RNGs for reproducibility.
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)

    print('Loading Data')

    x, y, yforg, usermapping, filenames = util.load_dataset(args.dataset_path)
    data = util.get_subset((x, y, yforg), subset=range(*args.users))
    if not args.forg:
        # Without the forgery head, train only on genuine signatures.
        data = util.remove_forgeries(data, forg_idx=2)

    train_loader, val_loader = setup_data_loaders(data, args.batch_size, args.input_size)

    print('Initializing Model')

    n_classes = len(np.unique(data[1]))

    base_model = models.available_models[args.model]().to(device)
    classification_layer = nn.Linear(base_model.feature_space_size, n_classes).to(device)
    if args.forg:
        forg_layer = nn.Linear(base_model.feature_space_size, 1).to(device)
    else:
        forg_layer = nn.Module()  # Stub module with no parameters

    if args.test:
        print('Testing')
        base_model_params, classification_params, forg_params = torch.load(args.checkpoint)
        base_model.load_state_dict(base_model_params)

        classification_layer.load_state_dict(classification_params)
        if args.forg:
            forg_layer.load_state_dict(forg_params)
        val_acc, val_loss, val_forg_acc, val_forg_loss = test(val_loader, base_model, classification_layer,
                                                              device, args.forg, forg_layer)
        if args.forg:
            # Bug fix: the adjacent string literals previously concatenated to
            # "...%,Val forg loss" (no separator); a trailing space was added.
            print('Val loss: {:.4f}, Val acc: {:.2f}%, '
                  'Val forg loss: {:.4f}, Val forg acc: {:.2f}%'.format(val_loss,
                                                                        val_acc * 100,
                                                                        val_forg_loss,
                                                                        val_forg_acc * 100))
        else:
            print('Val loss: {:.4f}, Val acc: {:.2f}%'.format(val_loss, val_acc * 100))

    else:
        print('Training')
        train(base_model, classification_layer, forg_layer, train_loader, val_loader,
              device, logger, args, logdir)
Code example #3
0
from sigver.preprocessing.normalize import crop_center_multiple

# Setup for running adversarial attacks against one or more trained models.
# (The per-model attack loop that fills the accumulators below is not shown
# in this excerpt.)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run adversarial attacks')
    parser.add_argument('--dataset-path', required=True)
    parser.add_argument('--models-path', required=True, nargs='*')
    parser.add_argument('--save-path', required=True, nargs='*')
    parser.add_argument('--users', default=None, nargs=2, type=int)
    # Bug fix: type=int is required — without it a user-supplied --seed
    # arrives as a string and np.random.RandomState(str) raises.
    parser.add_argument('--seed', default=1234, type=int)

    args = parser.parse_args()

    rng = np.random.RandomState(args.seed)

    # Load and split the dataset
    x, y, yforg, user_mapping, filenames = load_dataset(args.dataset_path)

    # One output file must be given per model so results can be saved pairwise.
    assert len(args.models_path) == len(
        args.save_path), 'Inform one save file for each model'

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    batch_size = 32
    input_size = (150, 220)
    x = crop_center_multiple(
        x, input_size
    )  # For the attacks, we will consider the inputs at size 150, 220

    # Accumulators filled by the (truncated) per-model loop below.
    all_cnn_features = []
    all_models = []
    all_classifiers = []
    all_thresholds_rbf = []
Code example #4
0
File: view_sigs.py  Project: Yalda-Afshar/sigver
#!/usr/bin/env python3
"""Display a single signature image from a preprocessed dataset.

Usage: view_sigs.py <dataset.npz> <index>
"""

import pickle
import numpy as np
import sys
import sigver.datasets.util as util
from PIL import Image

if __name__ == "__main__":
    x, y, yforg, usermapping, filenames = util.load_dataset(sys.argv[1])
    index = int(sys.argv[2])

    # First (only) channel of the selected signature image.
    sig = x[index][0]
    img = Image.fromarray(sig, 'L')
    img.show()

    print(sig.shape)
    print(y[index])
    print(yforg[index])
Code example #5
0
def main(args):
    """Meta-train (MAML) a signature-verification model, or test a checkpoint.

    Flow: load data -> choose dev/validation user splits -> build the model
    and its MAML wrapper -> optional supervised pre-training -> MAML
    meta-training with per-epoch validation -> reload the best checkpoint
    and run the final test via ``test_and_save``.

    With ``--test``, skips training and only evaluates ``args.checkpoint``.
    """
    rng = np.random.RandomState(args.seed)

    if args.test:
        assert args.checkpoint is not None, 'Please inform the checkpoint (trained model)'

    # Use an auto-generated log directory unless one was given explicitly.
    if args.logdir is None:
        logdir = get_logdir(args)
    else:
        logdir = pathlib.Path(args.logdir)
    if not logdir.exists():
        logdir.mkdir()

    print('Writing logs to {}'.format(logdir))

    device = torch.device(
        'cuda',
        args.gpu_idx) if torch.cuda.is_available() else torch.device('cpu')

    if args.port is not None:
        logger = VisdomLogger(port=args.port)
    else:
        logger = None

    print('Loading Data')
    x, y, yforg, usermapping, filenames = load_dataset(args.dataset_path)

    dev_users = range(args.dev_users[0], args.dev_users[1])
    if args.devset_size is not None:
        # Randomly select users from the dev set
        dev_users = rng.choice(dev_users, args.devset_size, replace=False)

    if args.devset_sk_size is not None:
        assert args.devset_sk_size <= len(
            dev_users), 'devset-sk-size should be smaller than devset-size'

        # Randomly select users from the dev set to have skilled forgeries (others don't)
        dev_sk_users = set(
            rng.choice(dev_users, args.devset_sk_size, replace=False))
    else:
        # By default every dev user has skilled forgeries available.
        dev_sk_users = set(dev_users)

    print('{} users in dev set; {} users with skilled forgeries'.format(
        len(dev_users), len(dev_sk_users)))

    # Validation users: explicit range > canonical exploitation set > default.
    # NOTE(review): the fixed ranges (0-300, 300-350, and 350-881 below) look
    # like a dataset-specific user split — confirm against the dataset used.
    if args.exp_users is not None:
        val_users = range(args.exp_users[0], args.exp_users[1])
        print('Testing with users from {} to {}'.format(
            args.exp_users[0], args.exp_users[1]))
    elif args.use_testset:
        val_users = range(0, 300)
        print('Testing with Exploitation set')
    else:
        val_users = range(300, 350)

    print('Initializing model')
    base_model = models.available_models[args.model]().to(device)
    weights = base_model.build_weights(device)
    # NOTE(review): args.num_updates is passed twice — presumably the number
    # of inner-loop updates for training and for testing; confirm against the
    # MAML constructor signature.
    maml = MAML(base_model,
                args.num_updates,
                args.num_updates,
                args.train_lr,
                args.meta_lr,
                args.meta_min_lr,
                args.epochs,
                args.learn_task_lr,
                weights,
                device,
                logger,
                loss_function=balanced_binary_cross_entropy,
                is_classification=True)

    if args.checkpoint:
        params = torch.load(args.checkpoint)
        maml.load(params)

    if args.test:
        # Test-only mode: evaluate the loaded checkpoint and exit.
        test_and_save(args, device, logdir, maml, val_users, x, y, yforg)
        return

    # Pretraining
    if args.pretrain_epochs > 0:
        print('Pre-training')
        data = util.get_subset((x, y, yforg), subset=range(350, 881))

        wrapped_model = PretrainWrapper(base_model, weights)

        if not args.pretrain_forg:
            # Without the forgery loss, pre-train on genuine signatures only.
            data = util.remove_forgeries(data, forg_idx=2)

        train_loader, val_loader = pretrain.setup_data_loaders(
            data, 32, args.input_size)
        n_classes = len(np.unique(y))

        classification_layer = nn.Linear(base_model.feature_space_size,
                                         n_classes).to(device)
        if args.pretrain_forg:
            forg_layer = nn.Linear(base_model.feature_space_size, 1).to(device)
        else:
            forg_layer = nn.Module()  # Stub module with no parameters

        # Fixed hyper-parameters for the supervised pre-training phase.
        pretrain_args = argparse.Namespace(lr=0.01,
                                           lr_decay=0.1,
                                           lr_decay_times=1,
                                           momentum=0.9,
                                           weight_decay=0.001,
                                           forg=args.pretrain_forg,
                                           lamb=args.pretrain_forg_lambda,
                                           epochs=args.pretrain_epochs)
        print(pretrain_args)
        pretrain.train(wrapped_model,
                       classification_layer,
                       forg_layer,
                       train_loader,
                       val_loader,
                       device,
                       logger,
                       pretrain_args,
                       logdir=None)

    # MAML training

    trainset = MAMLDataSet(data=(x, y, yforg),
                           subset=dev_users,
                           sk_subset=dev_sk_users,
                           num_gen_train=args.num_gen,
                           num_rf_train=args.num_rf,
                           num_gen_test=args.num_gen_test,
                           num_rf_test=args.num_rf_test,
                           num_sk_test=args.num_sk_test,
                           input_shape=args.input_size,
                           test=False,
                           rng=np.random.RandomState(args.seed))

    val_set = MAMLDataSet(data=(x, y, yforg),
                          subset=val_users,
                          num_gen_train=args.num_gen,
                          num_rf_train=args.num_rf,
                          num_gen_test=args.num_gen_test,
                          num_rf_test=args.num_rf_test,
                          num_sk_test=args.num_sk_test,
                          input_shape=args.input_size,
                          test=True,
                          rng=np.random.RandomState(args.seed))

    loader = DataLoader(trainset,
                        batch_size=args.meta_batch_size,
                        shuffle=True,
                        num_workers=2,
                        collate_fn=trainset.collate_fn)

    print('Training')
    best_val_acc = 0
    with tqdm(initial=0, total=len(loader) * args.epochs) as pbar:
        # When resuming from a checkpoint, log its validation metrics as the
        # epoch-0 baseline before any further training.
        if args.checkpoint is not None:
            postupdate_accs, postupdate_losses, preupdate_losses = test_one_epoch(
                maml, val_set, device, args.num_updates)

            if logger:
                for i in range(args.num_updates):
                    logger.scalar('val_postupdate_loss_{}'.format(i), 0,
                                  np.mean(postupdate_losses, axis=0)[i])

                    logger.scalar('val_postupdate_acc_{}'.format(i), 0,
                                  np.mean(postupdate_accs, axis=0)[i])

        for epoch in range(args.epochs):
            # Per-step loss weights (multi-step loss annealed over epochs).
            loss_weights = get_per_step_loss_importance_vector(
                args.num_updates, args.msl_epochs, epoch)

            n_batches = len(loader)
            for step, item in enumerate(loader):
                item = move_to_gpu(*item, device=device)
                # item[0:2] / item[2:4] are presumably the (inputs, labels)
                # pairs for the inner and outer loop — confirm against
                # meta_learning_step. The fractional epoch (epoch + step/n)
                # is passed for scheduling inside MAML.
                maml.meta_learning_step((item[0], item[1]), (item[2], item[3]),
                                        loss_weights, epoch + step / n_batches)
                pbar.update(1)

            maml.scheduler.step()

            # Validate after every epoch and log per-update-step curves.
            postupdate_accs, postupdate_losses, preupdate_losses = test_one_epoch(
                maml, val_set, device, args.num_updates)

            if logger:
                for i in range(args.num_updates):
                    logger.scalar('val_postupdate_loss_{}'.format(i),
                                  epoch + 1,
                                  np.mean(postupdate_losses, axis=0)[i])

                    logger.scalar('val_postupdate_acc_{}'.format(i), epoch + 1,
                                  np.mean(postupdate_accs, axis=0)[i])

                logger.save(logdir / 'train_curves.pickle')
            # Track metrics after the final inner-loop update step ([-1]).
            this_val_loss = np.mean(postupdate_losses, axis=0)[-1]
            this_val_acc = np.mean(postupdate_accs, axis=0)[-1]

            # Keep the checkpoint with the best validation accuracy so far.
            if this_val_acc > best_val_acc:
                best_val_acc = this_val_acc
                torch.save(maml.parameters, logdir / 'best_model.pth')
            print('Epoch {}. Val loss: {:.4f}. Val Acc: {:.2f}%'.format(
                epoch, this_val_loss, this_val_acc * 100))

    # Re-load best parameters and test with 10 folds
    params = torch.load(logdir / 'best_model.pth')
    maml.load(params)

    test_and_save(args, device, logdir, maml, val_users, x, y, yforg)
Code example #6
0
File: example.py  Project: Yalda-Afshar/sigver
# Example: load two preprocessed signatures and a trained SigNet model.
# (The feature-extraction step is truncated below in this excerpt.)
#canvas_size = (952, 1360)  # Maximum signature size

# If GPU is available, use it:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: {}'.format(device))

## Load and pre-process the signature
#original = img_as_ubyte(imread('data/some_signature.png', as_gray=True))
#processed = preprocess_signature(original, canvas_size)
#
## Note: the image needs to be a pytorch tensor with pixels in the range [0, 1]
#input = torch.from_numpy(processed).view(1, 1, 150, 220)
#input = input.float().div(255).to(device)

# Load two signatures (indices taken from the command line) from a
# preprocessed .npz dataset instead of raw image files (commented code above).
x, y, yforg, usermapping, filenames = util.load_dataset(
    "persian_1_115_150-220.npz")
i = int(sys.argv[1])
j = int(sys.argv[2])
# Each image becomes a 1x1x150x220 float tensor divided by 255.
input1 = torch.from_numpy(x[i][0]).view(1, 1, 150, 220)
input1 = input1.float().div(255).to(device)
input2 = torch.from_numpy(x[j][0]).view(1, 1, 150, 220)
input2 = input2.float().div(255).to(device)

# Load the model (eval mode: inference only).
#state_dict, _, _ = torch.load('pre_trained/signet_f_lambda_0.95.pth')
state_dict, _, _ = torch.load('signet_with_forgery_150-220/model_best.pth')
base_model = SigNet().to(device).eval()
base_model.load_state_dict(state_dict)

# Extract features
with torch.no_grad(
Code example #7
0
"""Export every image of a preprocessed .npz dataset as an individual image
file (intensity-inverted), named after its original filename."""

import argparse
import os
from skimage.io import imsave

from sigver.datasets.util import load_dataset
from tqdm import tqdm

parser = argparse.ArgumentParser()
parser.add_argument('--data-path', required=True)
parser.add_argument('--save-path', required=True)

args = parser.parse_args()

data = load_dataset(args.data_path)
# exist_ok avoids the check-then-create race of the original
# `if not exists: mkdir` pattern, and also creates missing parents.
os.makedirs(args.save_path, exist_ok=True)

x, _, _, _, filenames = data

for img, filename in tqdm(zip(x, filenames), total=len(x)):
    full_name = os.path.join(args.save_path, filename)
    # 255 - img flips intensities — presumably the dataset stores inverted
    # (light-on-dark) images; confirm against the preprocessing step.
    imsave(full_name, 255 - img.squeeze())