Example #1
def train(lr, l2, momentum, patience, latent_size, n_hidden, hidden_size, n_frames, model, ncoef, dropout_prob, epochs, batch_size, n_workers, cuda, train_hdf_file, valid_hdf_file, valid_n_cycles, cp_path, softmax):

	if cuda:
		device=get_freer_gpu()

	train_dataset=Loader_test(hdf5_name=train_hdf_file, max_nb_frames=int(n_frames))
	train_loader=torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=False, num_workers=n_workers, worker_init_fn=set_np_randomseed)

	valid_dataset = Loader(hdf5_name = valid_hdf_file, max_nb_frames = int(n_frames), n_cycles=valid_n_cycles)
	valid_loader=torch.utils.data.DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, num_workers=n_workers, worker_init_fn=set_np_randomseed)

	if model == 'resnet_stats':
		model = model_.ResNet_stats(n_z=int(latent_size), nh=int(n_hidden), n_h=int(hidden_size), proj_size=len(train_dataset.speakers_list), ncoef=ncoef, dropout_prob=dropout_prob, sm_type=softmax)
	elif model == 'resnet_mfcc':
		model = model_.ResNet_mfcc(n_z=int(latent_size), nh=int(n_hidden), n_h=int(hidden_size), proj_size=len(train_dataset.speakers_list), ncoef=ncoef, dropout_prob=dropout_prob, sm_type=softmax)
	elif model == 'resnet_lstm':
		model = model_.ResNet_lstm(n_z=int(latent_size), nh=int(n_hidden), n_h=int(hidden_size), proj_size=len(train_dataset.speakers_list), ncoef=ncoef, dropout_prob=dropout_prob, sm_type=softmax)
	elif model == 'resnet_small':
		model = model_.ResNet_small(n_z=int(latent_size), nh=int(n_hidden), n_h=int(hidden_size), proj_size=len(train_dataset.speakers_list), ncoef=ncoef, dropout_prob=dropout_prob, sm_type=softmax)
	elif model == 'resnet_large':
		model = model_.ResNet_large(n_z=int(latent_size), nh=int(n_hidden), n_h=int(hidden_size), proj_size=len(train_dataset.speakers_list), ncoef=ncoef, dropout_prob=dropout_prob, sm_type=softmax)

	if cuda:
		model=model.cuda(device)
	else:
		device=None

	optimizer=optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=l2)

	trainer=TrainLoop(model, optimizer, train_loader, valid_loader, patience=int(patience), verbose=-1, device=device, cp_name=get_file_name(cp_path), save_cp=False, checkpoint_path=cp_path, pretrain=False, cuda=cuda)

	return trainer.train(n_epochs=epochs)
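Several of these examples hand a set_np_randomseed helper to DataLoader's worker_init_fn, but its definition never appears in the excerpts. A minimal sketch, assuming it only derives a distinct NumPy seed per worker (the repo's actual version may differ):

import numpy as np
import torch

def set_np_randomseed(worker_id):
    # Hypothetical stand-in: reseed NumPy per DataLoader worker so that
    # NumPy-driven augmentation is not duplicated across workers.
    np.random.seed((torch.initial_seed() + worker_id) % (2 ** 32))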
Example #2
def plot_real(n_tests, data_path):

    real_loader = Loader(hdf5_name=data_path)

    n_rows, n_cols = (n_tests, 15)
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols, n_rows))

    for i in range(n_tests):

        img_idx = np.random.randint(len(real_loader))
        real_sample = real_loader[img_idx].squeeze()
        real_sample = real_sample.transpose(0, 1)

        print(real_sample.size())

        for ax, img in zip(axes[i, :].flatten(), real_sample):
            img = denorm(img.transpose(0, -1))

            ax.axis('off')
            ax.set_adjustable('box')  # 'box-forced' was removed in modern matplotlib

            ax.imshow(img, aspect='equal')

        plt.subplots_adjust(wspace=0, hspace=0)

    save_fn = 'real.pdf'
    plt.savefig(save_fn)

    plt.close()
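denorm is not defined in this excerpt; if the frames are normalized to [-1, 1], as is common for generator outputs, a plausible inverse is the following sketch:

def denorm(x):
    # Assumed inverse of tanh-style normalization: map [-1, 1] back to
    # [0, 1] so imshow renders the frame correctly.
    return (x + 1.0) / 2.0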
Example #3
def plot_real(n_tests, data_path):

    real_loader = Loader(hdf5_name=data_path)

    n_rows, n_cols = (n_tests, 30)
    fig, axes = plt.subplots(n_rows, n_cols, figsize=(n_cols, n_rows))

    intra_mse = []

    for i in range(n_tests):

        img_idx = np.random.randint(len(real_loader))
        real_sample = real_loader[img_idx].squeeze()

        for ax, img in zip(axes[i, :].flatten(), real_sample):
            ax.axis('off')
            ax.set_adjustable('box')  # 'box-forced' was removed in modern matplotlib

            ax.imshow(img, cmap="gray", aspect='equal')

        plt.subplots_adjust(wspace=0, hspace=0)

        mse = 0.0
        count = 0
        for j in range(1, real_sample.size(0)):
            # Binarize consecutive frames by sign; copies are taken because
            # .numpy() shares memory with the tensor and the assignments
            # below would otherwise mutate the sample in place.
            a = real_sample[j].numpy().copy().squeeze()
            a[a > 0] = 1.
            a[a < 0] = 0.
            b = real_sample[j - 1].numpy().copy().squeeze()
            b[b > 0] = 1.
            b[b < 0] = 0.
            count += 1
            mse += mean_squared_error(a, b)

        mse /= count

        intra_mse.append(np.asarray(mse))

    save_fn = 'real.pdf'
    plt.savefig(save_fn)

    plt.close()

    return intra_mse
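The commented-out torch call that was in the original loop hints at a pure-torch alternative; a compact sketch that matches the loop's sign-based binarization:

import torch

def intra_seq_mse(sample):
    # Binarize frames (positive -> 1, everything else -> 0) and average the
    # squared difference between consecutive frames, as the loop above does.
    binary = (sample > 0).float()
    return (binary[1:] - binary[:-1]).pow(2).mean().item()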
Example #4
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if args.n_classes > 2:
    assert args.lists_path is not None, 'Pass the path for the lists of utterances per attack'
    train_dataset = Loader_mcc(
        hdf5_clean=args.train_hdf_path + 'train_clean.hdf',
        hdf5_attack=args.train_hdf_path + 'train_attack.hdf',
        max_nb_frames=args.n_frames,
        n_cycles=args.n_cycles,
        file_lists_path=args.lists_path)
else:
    train_dataset = Loader(hdf5_clean=args.train_hdf_path + 'train_clean.hdf',
                           hdf5_attack=args.train_hdf_path +
                           'train_attack.hdf',
                           max_nb_frames=args.n_frames,
                           n_cycles=args.n_cycles)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.workers)

if args.valid_hdf_path is not None:
    if args.n_classes > 2:
        valid_dataset = Loader_mcc(
            hdf5_clean=args.valid_hdf_path + 'valid_clean.hdf',
            hdf5_attack=args.valid_hdf_path + 'valid_attack.hdf',
            max_nb_frames=args.n_frames,
            n_cycles=args.valid_n_cycles,
            file_lists_path=args.lists_path)
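One fragile detail in these loaders: the HDF paths are built by string concatenation, so args.train_hdf_path must end with a separator. A safer variant using os.path.join:

import os

# Equivalent paths without assuming a trailing slash on the directory argument.
hdf5_clean = os.path.join(args.train_hdf_path, 'train_clean.hdf')
hdf5_attack = os.path.join(args.train_hdf_path, 'train_attack.hdf')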
Example #5
def train(lr, l2, momentum, smoothing, warmup, model, emb_size, n_hidden,
          hidden_size, dropout_prob, epochs, batch_size, valid_batch_size,
          n_workers, cuda, data_path, valid_data_path, hdf_path,
          valid_hdf_path, checkpoint_path, softmax, pretrained,
          pretrained_path, max_gnorm, stats, log_dir, eval_every, ablation):

    args_dict = locals()

    cp_name = get_cp_name(checkpoint_path)

    rproj_size = -1

    if pretrained_path != 'none':
        print('\nLoading pretrained model from: {}\n'.format(pretrained_path))
        ckpt = torch.load(pretrained_path,
                          map_location=lambda storage, loc: storage)
        dropout_prob, n_hidden, hidden_size, emb_size = ckpt[
            'dropout_prob'], ckpt['n_hidden'], ckpt['hidden_size'], ckpt[
                'emb_size']
        if 'r_proj_size' in ckpt:
            rproj_size = ckpt['r_proj_size']
        print('\nUsing pretrained config for discriminator. Ignoring args.')

    args_dict['dropout_prob'], args_dict['n_hidden'], args_dict[
        'hidden_size'], args_dict[
            'emb_size'] = dropout_prob, n_hidden, hidden_size, emb_size

    if log_dir != 'none':
        writer = SummaryWriter(log_dir=os.path.join(log_dir, cp_name),
                               comment=model,
                               purge_step=0)
        writer.add_hparams(hparam_dict=args_dict,
                           metric_dict={'best_eer': 0.0})
    else:
        writer = None

    if stats == 'cars':
        mean, std = [0.4461, 0.4329, 0.4345], [0.2888, 0.2873, 0.2946]
    elif stats == 'cub':
        mean, std = [0.4782, 0.4925, 0.4418], [0.2330, 0.2296, 0.2647]
    elif stats == 'sop':
        mean, std = [0.5603, 0.5155, 0.4796], [0.2939, 0.2991, 0.3085]
    elif stats == 'imagenet':
        mean, std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
    else:
        raise ValueError('Unknown stats option: {}'.format(stats))

    if hdf_path != 'none':
        transform_train = transforms.Compose([
            transforms.ToPILImage(),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(10),
            transforms.RandomPerspective(p=0.1),
            transforms.RandomGrayscale(p=0.1),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        trainset = Loader(hdf_path, transform_train)
    else:
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(10),
            transforms.RandomPerspective(p=0.1),
            transforms.RandomGrayscale(p=0.1),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        trainset = datasets.ImageFolder(data_path, transform=transform_train)

    train_loader = torch.utils.data.DataLoader(
        trainset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=n_workers,
        worker_init_fn=set_np_randomseed,
        pin_memory=True)

    if valid_hdf_path != 'none':
        transform_test = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        validset = Loader(valid_hdf_path, transform_test)
    else:
        transform_test = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean, std=std)
        ])
        validset = datasets.ImageFolder(valid_data_path,
                                        transform=transform_test)

    valid_loader = torch.utils.data.DataLoader(validset,
                                               batch_size=valid_batch_size,
                                               shuffle=True,
                                               num_workers=n_workers,
                                               pin_memory=True)

    nclasses = trainset.n_classes if isinstance(trainset, Loader) else len(
        trainset.classes)

    if model == 'vgg':
        model_ = vgg.VGG('VGG19',
                         nh=n_hidden,
                         n_h=hidden_size,
                         dropout_prob=dropout_prob,
                         sm_type=softmax,
                         n_classes=nclasses,
                         emb_size=emb_size,
                         r_proj_size=rproj_size)
    elif model == 'resnet':
        model_ = resnet.ResNet50(nh=n_hidden,
                                 n_h=hidden_size,
                                 dropout_prob=dropout_prob,
                                 sm_type=softmax,
                                 n_classes=nclasses,
                                 emb_size=emb_size,
                                 r_proj_size=rproj_size)
    elif model == 'densenet':
        model_ = densenet.DenseNet121(nh=n_hidden,
                                      n_h=hidden_size,
                                      dropout_prob=dropout_prob,
                                      sm_type=softmax,
                                      n_classes=nclasses,
                                      emb_size=emb_size,
                                      r_proj_size=rproj_size)

    if pretrained_path != 'none':
        if ckpt['sm_type'] == 'am_softmax':
            del ckpt['model_state']['out_proj.w']
        elif ckpt['sm_type'] == 'softmax':
            del ckpt['model_state']['out_proj.w.weight']
            del ckpt['model_state']['out_proj.w.bias']

        print(model_.load_state_dict(ckpt['model_state'], strict=False))
        print('\n')

    if pretrained:
        print('\nLoading pretrained encoder from torchvision\n')
        if model == 'vgg':
            model_pretrained = torchvision.models.vgg19(pretrained=True)
        elif model == 'resnet':
            model_pretrained = torchvision.models.resnet50(pretrained=True)
        elif model == 'densenet':
            model_pretrained = torchvision.models.densenet121(pretrained=True)

        print(
            model_.load_state_dict(model_pretrained.state_dict(),
                                   strict=False))
        print('\n')

    if cuda:
        device = get_freer_gpu()
        model_ = model_.cuda(device)
        torch.backends.cudnn.benchmark = True

    optimizer = TransformerOptimizer(optim.SGD(model_.parameters(),
                                               lr=lr,
                                               momentum=momentum,
                                               weight_decay=l2,
                                               nesterov=True),
                                     lr=lr,
                                     warmup_steps=warmup)

    trainer = TrainLoop(model_,
                        optimizer,
                        train_loader,
                        valid_loader,
                        max_gnorm=max_gnorm,
                        label_smoothing=smoothing,
                        verbose=-1,
                        cp_name=cp_name,
                        save_cp=True,
                        checkpoint_path=checkpoint_path,
                        ablation=ablation,
                        cuda=cuda,
                        logger=writer)

    for i in range(5):

        print(' ')
        print('Hyperparameters:')
        print('Selected model: {}'.format(model))
        print('Embedding size: {}'.format(emb_size))
        print('Hidden layer size: {}'.format(hidden_size))
        print('Number of hidden layers: {}'.format(n_hidden))
        print('Random projection size: {}'.format(rproj_size))
        print('Dropout rate: {}'.format(dropout_prob))
        print('Batch size: {}'.format(batch_size))
        print('LR: {}'.format(lr))
        print('Momentum: {}'.format(momentum))
        print('l2: {}'.format(l2))
        print('Label smoothing: {}'.format(smoothing))
        print('Warmup iterations: {}'.format(warmup))
        print('Softmax Mode is: {}'.format(softmax))
        print('Pretrained: {}'.format(pretrained))
        print('Pretrained path: {}'.format(pretrained_path))
        print('Evaluate every {} iterations.'.format(eval_every))
        print('Ablation Mode: {}'.format(ablation))
        print(' ')

        if i > 0:
            print(' ')
            print('Trial {}'.format(i + 1))
            print(' ')

        try:
            cost = trainer.train(n_epochs=epochs,
                                 save_every=epochs + 10,
                                 eval_every=eval_every)

            print(' ')
            print('Best e2e EER in file ' + cp_name +
                  ' was: {}'.format(cost[0]))
            print('Best cos EER in file ' + cp_name +
                  ' was: {}'.format(cost[1]))
            print(' ')

            if log_dir != 'none':
                writer.add_hparams(hparam_dict=args_dict,
                                   metric_dict={'best_eer': cost[0]})

            return cost[0]
        except Exception:
            print("Error:", sys.exc_info())

    print('Returning dummy cost due to failures while training.')
    return 0.99
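TransformerOptimizer is used here as an SGD wrapper with warmup, but its source is not shown. A minimal sketch under the assumption of a plain linear warmup (the real class may instead implement the inverse-square-root Noam schedule):

class TransformerOptimizer:
    # Hypothetical sketch: scale the learning rate linearly from 0 to lr
    # over warmup_steps updates, then keep it constant.
    def __init__(self, optimizer, lr, warmup_steps):
        self.optimizer = optimizer
        self.base_lr = lr
        self.warmup_steps = max(1, warmup_steps)
        self.step_num = 0

    def step(self):
        self.step_num += 1
        scale = min(1.0, self.step_num / self.warmup_steps)
        for group in self.optimizer.param_groups:
            group['lr'] = self.base_lr * scale
        self.optimizer.step()

    def zero_grad(self):
        self.optimizer.zero_grad()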
Example #6
parser.add_argument('--checkpoint-epoch', type=int, default=None, metavar='N', help='epoch to load for checkpointing. If None, training starts from scratch')
parser.add_argument('--checkpoint-path', type=str, default=None, metavar='Path', help='Path for checkpointing')
parser.add_argument('--generator-path', type=str, default=None, metavar='Path', help='Path for frames generator params')
parser.add_argument('--ndiscriminators', type=int, default=8, help='Number of discriminators. Default=8')
parser.add_argument('--nadir-slack', type=float, default=1.5, metavar='nadir', help='factor for nadir-point update. Only used in hyper mode (default: 1.5)')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--save-every', type=int, default=5, metavar='N', help='how many batches to wait before logging training status. (default: 5)')
parser.add_argument('--n-workers', type=int, default=4)
parser.add_argument('--gen-arch', choices=['linear', 'conv'], default='linear', help='Linear or convolutional generator')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--optimizer', choices=['adam', 'amsgrad', 'rmsprop'], default='adam', help='Select optimizer (Default is adam).')
parser.add_argument('--average-mode', action='store_true', default=False, help='Disables hypervolume maximization and uses average loss instead')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

train_data_set = Loader(hdf5_name=args.data_path)
train_loader = DataLoader(train_data_set, batch_size=args.batch_size, shuffle=False, num_workers=args.n_workers)

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if args.gen_arch == 'conv':
    generator = models_zoo.Generator_conv(args.cuda)
elif args.gen_arch == 'linear':
    generator = models_zoo.Generator_linear(args.cuda)

frames_generator = models_zoo.frames_generator().eval()

gen_state = torch.load(args.generator_path, map_location=lambda storage, loc: storage)
frames_generator.load_state_dict(gen_state['model_state'])
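The generator checkpoint is assumed to be a dict holding the weights under a 'model_state' key; a matching save call (illustrative) is one line:

torch.save({'model_state': frames_generator.state_dict()}, args.generator_path)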
Example #7
print(args, '\n')

if args.cuda:
    torch.backends.cudnn.benchmark = True

if args.hdf_path:
    transform_train = transforms.Compose([
        transforms.ToPILImage(),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(30),
        transforms.RandomPerspective(p=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    trainset = Loader(args.hdf_path, transform_train)
else:
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(30),
        transforms.RandomPerspective(p=0.2),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std)
    ])
    trainset = datasets.ImageFolder(args.data_path, transform=transform_train)

train_loader = torch.utils.data.DataLoader(trainset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.n_workers,
                                           worker_init_fn=set_np_randomseed,
                                           pin_memory=True)
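mean and std are not defined in this excerpt. An illustrative way to compute the per-channel statistics Normalize expects, assuming the dataset yields equally sized (image, label) pairs in [0, 1]:

import torch

def channel_stats(dataset):
    # Accumulate per-channel first and second moments over the dataset.
    loader = torch.utils.data.DataLoader(dataset, batch_size=256)
    n, mean, sq = 0, torch.zeros(3), torch.zeros(3)
    for x, _ in loader:
        x = x.view(x.size(0), 3, -1)
        mean += x.mean(dim=2).sum(dim=0)
        sq += x.pow(2).mean(dim=2).sum(dim=0)
        n += x.size(0)
    mean /= n
    std = (sq / n - mean.pow(2)).sqrt()
    return mean, std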
Example #8
                    type=int,
                    default=1,
                    metavar='N',
                    help='Verbose is activated if > 0')
parser.add_argument('--logdir',
                    type=str,
                    default=None,
                    metavar='Path',
                    help='Path for checkpointing')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
    torch.backends.cudnn.benchmark = True

trainset = Loader(hdf5_name=args.train_hdf_file, max_nb_frames=args.n_frames)
train_loader = torch.utils.data.DataLoader(trainset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.n_workers,
                                           worker_init_fn=set_np_randomseed,
                                           pin_memory=True)

validset = Loader(hdf5_name=args.valid_hdf_file, max_nb_frames=args.n_frames)
valid_loader = torch.utils.data.DataLoader(validset,
                                           batch_size=args.valid_batch_size,
                                           shuffle=True,
                                           num_workers=args.n_workers,
                                           pin_memory=True)

args.nclasses = trainset.nclasses
Example #9
if args.cuda:
    device = get_freer_gpu()
    if args.model == 'resnet_qrnn':
        import cupy
        cupy.cuda.Device(int(str(device).split(':')[-1])).use()
else:
    device = None

if args.logdir:
    from torch.utils.tensorboard import SummaryWriter
    writer = SummaryWriter(log_dir=os.path.join(args.logdir, args.cp_name),
                           comment=args.model,
                           purge_step=0)
    writer.add_hparams(hparam_dict=args_dict, metric_dict={'best_eer': 0.0})
else:
    writer = None

train_dataset = Loader(hdf5_name=args.train_hdf_file,
                       max_nb_frames=args.n_frames,
                       delta=args.delta)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)

valid_dataset = Loader_valid(hdf5_name=args.valid_hdf_file,
                             max_nb_frames=args.n_frames,
                             delta=args.delta)
valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                           batch_size=args.valid_batch_size,
                                           shuffle=True,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)
Example #10
def train(lr, l2, momentum, max_gnorm, warmup, input_size, n_hidden,
          hidden_size, dropout_prob, smoothing, n_cycles, epochs, batch_size,
          valid_batch_size, n_workers, cuda, train_hdf_path, valid_hdf_path,
          cp_path, logdir):

    hp_dict = {
        'lr': lr,
        'l2': l2,
        'momentum': momentum,
        'max_gnorm': max_gnorm,
        'warmup': warmup,
        'input_size': input_size,
        'n_hidden': n_hidden,
        'hidden_size': hidden_size,
        'dropout_prob': dropout_prob,
        'smoothing': smoothing,
        'n_cycles': n_cycles,
        'epochs': epochs,
        'batch_size': batch_size,
        'valid_batch_size': valid_batch_size,
        'n_workers': n_workers,
        'cuda': cuda,
        'train_hdf_path': train_hdf_path,
        'valid_hdf_path': valid_hdf_path,
        'cp_path': cp_path
    }

    cp_name = get_file_name(cp_path)

    if logdir:
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir=os.path.join(logdir, cp_name),
                               purge_step=0)
        writer.add_hparams(hparam_dict=hp_dict, metric_dict={'best_eer': 0.5})
    else:
        writer = None

    train_dataset = Loader(hdf5_clean=train_hdf_path + 'train_clean.hdf',
                           hdf5_attack=train_hdf_path + 'train_attack.hdf',
                           label_smoothing=smoothing,
                           n_cycles=n_cycles)
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True,
                                               num_workers=n_workers)

    valid_dataset = Loader(hdf5_clean=valid_hdf_path + 'valid_clean.hdf',
                           hdf5_attack=valid_hdf_path + 'valid_attack.hdf',
                           n_cycles=1)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=valid_batch_size,
                                               shuffle=False,
                                               num_workers=n_workers)

    model = model_.MLP(n_in=input_size,
                       nh=n_hidden,
                       n_h=hidden_size,
                       dropout_prob=dropout_prob)

    if cuda:
        device = get_freer_gpu()
        model = model.cuda(device)

    optimizer = TransformerOptimizer(optim.SGD(model.parameters(),
                                               lr=lr,
                                               momentum=momentum,
                                               weight_decay=l2,
                                               nesterov=True),
                                     lr=lr,
                                     warmup_steps=warmup)

    trainer = TrainLoop(model,
                        optimizer,
                        train_loader,
                        valid_loader,
                        max_gnorm=max_gnorm,
                        verbose=-1,
                        cp_name=cp_name,
                        save_cp=True,
                        checkpoint_path=cp_path,
                        cuda=cuda,
                        logger=writer)

    for i in range(5):

        if i > 0:
            print(' ')
            print('Trial {}'.format(i + 1))
            print(' ')

        try:
            cost = trainer.train(n_epochs=epochs, save_every=epochs + 10)

            print(' ')
            print('Best EER in file ' + cp_name + ' was: {}'.format(cost))
            print(' ')
            print('With hyperparameters:')
            print('Hidden layer size: {}'.format(int(hidden_size)))
            print('Number of hidden layers: {}'.format(int(n_hidden)))
            print('Dropout rate: {}'.format(dropout_prob))
            print('Batch size: {}'.format(batch_size))
            print('LR: {}'.format(lr))
            print('Warmup iterations: {}'.format(warmup))
            print('Momentum: {}'.format(momentum))
            print('l2: {}'.format(l2))
            print('Max. Grad. norm: {}'.format(max_gnorm))
            print('Label smoothing: {}'.format(smoothing))
            print(' ')

            if logdir:
                writer.add_hparams(hparam_dict=hp_dict,
                                   metric_dict={'best_eer': cost})

            return cost
        except Exception:
            pass

    print('Returning dummy cost due to failures while training.')
    cost = 0.99
    if logdir:
        writer.add_hparams(hparam_dict=hp_dict, metric_dict={'best_eer': cost})
    return cost
Example #11
if args.cuda:
	device = get_freer_gpu()
else:
	device = torch.device('cpu')

if args.logdir:
	writer = SummaryWriter(log_dir=args.logdir, purge_step=0)
else:
	writer = None

torch.manual_seed(args.seed)
if args.cuda:
	torch.cuda.manual_seed(args.seed)

train_dataset = Loader(hdf5_clean=args.train_hdf_path+'train_clean.hdf', hdf5_attack=args.train_hdf_path+'train_attack.hdf', label_smoothing=args.smoothing, n_cycles=args.n_cycles)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)

valid_dataset = Loader(hdf5_clean=args.valid_hdf_path+'valid_clean.hdf', hdf5_attack=args.valid_hdf_path+'valid_attack.hdf', n_cycles=1)
valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.valid_batch_size, shuffle=False, num_workers=args.workers)

model = model_.MLP(n_in=args.input_size, nh=args.n_hidden, n_h=args.hidden_size, dropout_prob=args.dropout_prob)

if args.pretrained_path is not None:
	ckpt = torch.load(args.pretrained_path, map_location=lambda storage, loc: storage)
	model = model_.MLP(n_in=ckpt['input_size'], nh=ckpt['n_hidden'], n_h=ckpt['hidden_size'], dropout_prob=ckpt['dropout_prob'])
	try:
		model.load_state_dict(ckpt['model_state'], strict=True)
	except RuntimeError as err:
		print("Runtime Error: {0}".format(err))
		model.load_state_dict(ckpt['model_state'], strict=False)
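load_state_dict returns the keys it could not match, which is worth printing after the strict=False fallback above:

result = model.load_state_dict(ckpt['model_state'], strict=False)
print('missing keys:', result.missing_keys)
print('unexpected keys:', result.unexpected_keys)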
Example #12
parser.add_argument('--n-workers', type=int, default=4, metavar='N', help='Workers for data loading. Default is 4')
parser.add_argument('--model', choices=['vgg', 'resnet', 'densenet'], default='resnet')
parser.add_argument('--save-every', type=int, default=1, metavar='N', help='how many epochs to wait before logging training status. Default is 1')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
parser.add_argument('--no-cp', action='store_true', default=False, help='Disables checkpointing')
parser.add_argument('--verbose', type=int, default=1, metavar='N', help='Verbose is activated if > 0')
parser.add_argument('--softmax', choices=['softmax', 'am_softmax'], default='softmax', help='Softmax type')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

if args.cuda:
	torch.backends.cudnn.benchmark=True

if args.hdf_path:
	transform_train = transforms.Compose([transforms.ToPILImage(), transforms.RandomCrop(84, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
	trainset = Loader(args.hdf_path, transform_train)
else:
	transform_train = transforms.Compose([transforms.RandomCrop(84, padding=4), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])	
	trainset = datasets.ImageFolder(args.data_path, transform=transform_train)

train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=args.n_workers, worker_init_fn=set_np_randomseed, pin_memory=True)

transform_test = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
validset = datasets.ImageFolder(args.valid_data_path, transform=transform_test)
valid_loader = torch.utils.data.DataLoader(validset, batch_size=args.valid_batch_size, shuffle=True, num_workers=args.n_workers, pin_memory=True)

if args.model == 'vgg':
	model = vgg.VGG('VGG16', sm_type=args.softmax)
elif args.model == 'resnet':
	model = resnet.ResNet18(sm_type=args.softmax)
elif args.model == 'densenet':
	model = densenet.DenseNet121(sm_type=args.softmax)
Example #13
def train(lr, l2, max_gnorm, momentum, margin, lambda_, swap, latent_size, n_frames, model, ncoef, epochs, batch_size, valid_batch_size, n_workers, cuda, train_hdf_file, valid_hdf_file, cp_path, softmax, delta, logdir):

	if cuda:
		device=get_freer_gpu()
		if model == 'resnet_qrnn':
			import cupy
			cupy.cuda.Device(int(str(device).split(':')[-1])).use()

	cp_name = get_file_name(cp_path)

	if logdir:
		from torch.utils.tensorboard import SummaryWriter
		writer = SummaryWriter(log_dir=os.path.join(logdir, cp_name), comment=model, purge_step=0)
	else:
		writer = None

	train_dataset = Loader(hdf5_name = train_hdf_file, max_nb_frames = int(n_frames), delta = delta)
	train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=n_workers, worker_init_fn=set_np_randomseed)

	valid_dataset = Loader_valid(hdf5_name = valid_hdf_file, max_nb_frames = int(n_frames), delta = delta)
	valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=valid_batch_size, shuffle=True, num_workers=n_workers, worker_init_fn=set_np_randomseed)

	if model == 'resnet_mfcc':
		model=model_.ResNet_mfcc(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_34':
		model=model_.ResNet_34(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_lstm':
		model=model_.ResNet_lstm(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_qrnn':
		model=model_.ResNet_qrnn(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_stats':
		model=model_.ResNet_stats(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_large':
		model = model_.ResNet_large(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_small':
		model = model_.ResNet_small(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'resnet_2d':
		model = model_.ResNet_2d(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN':
		model = model_.TDNN(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_att':
		model = model_.TDNN_att(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_multihead':
		model = model_.TDNN_multihead(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_lstm':
		model = model_.TDNN_lstm(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_aspp':
		model = model_.TDNN_aspp(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_mod':
		model = model_.TDNN_mod(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'TDNN_multipool':
		model = model_.TDNN_multipool(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
	elif model == 'transformer':
		model = model_.transformer_enc(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)

	if cuda:
		model=model.to(device)
	else:
		device=None

	optimizer=optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=l2)

	trainer=TrainLoop(model, optimizer, train_loader, valid_loader, max_gnorm=max_gnorm, margin=margin, lambda_=lambda_, verbose=-1, device=device, cp_name=cp_name, save_cp=True, checkpoint_path=cp_path, swap=swap, softmax=True, pretrain=False, mining=True, cuda=cuda, logger=writer)

	return trainer.train(n_epochs=epochs)
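get_freer_gpu appears throughout these examples but is never defined. A common implementation queries nvidia-smi for free memory; this is a sketch, not necessarily the repo's version:

import subprocess
import torch

def get_freer_gpu():
    # Pick the CUDA device with the most free memory reported by nvidia-smi.
    out = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.free',
         '--format=csv,noheader,nounits']).decode()
    free = [int(v) for v in out.strip().split('\n')]
    return torch.device('cuda:{}'.format(free.index(max(free))))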
Example #14
                    default=False,
                    help='Enables PASE updates')
parser.add_argument('--verbose',
                    type=int,
                    default=1,
                    metavar='N',
                    help='Verbose is activated if > 0')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

train_dataset = Loader(hdf5_name=args.train_hdf_file,
                       max_len=args.max_len,
                       vad=args.vad,
                       ncoef=args.ncoef if args.mfcc else None)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           drop_last=True,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)

if args.valid_hdf_file is not None:
    valid_dataset = Loader_valid(hdf5_name=args.valid_hdf_file,
                                 max_len=args.max_len,
                                 vad=args.vad,
                                 ncoef=args.ncoef if args.mfcc else None)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
Example #15
parser.add_argument('--sgd',
                    action='store_true',
                    default=False,
                    help='enables SGD - *MGD only* ')
parser.add_argument('--job-id',
                    type=str,
                    default=None,
                    help='Arbitrary id to be written on checkpoints')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

trainset = Loader(args.data_path)
train_loader = torch.utils.data.DataLoader(trainset,
                                           batch_size=args.batch_size,
                                           num_workers=args.workers)

generator = Generator().train()

disc_list = []
for i in range(args.ndiscriminators):

    if args.optimizer == 'adam':
        disc = Discriminator(optim.Adam, args.optimizer, args.lr,
                             (args.beta1, args.beta2)).train()
    elif args.optimizer == 'amsgrad':
        disc = Discriminator(optim.Adam,
                             args.optimizer,
Example #16
if args.cuda:
    torch.cuda.manual_seed(args.seed)

if args.mine_triplets:
    train_dataset = Loader_mining(hdf5_name=args.train_hdf_file,
                                  max_nb_frames=args.n_frames,
                                  n_cycles=args.n_cycles,
                                  augment=True)
elif args.softmax != 'none':
    train_dataset = Loader_softmax(hdf5_name=args.train_hdf_file,
                                   max_nb_frames=args.n_frames,
                                   n_cycles=args.n_cycles,
                                   augment=True)
else:
    train_dataset = Loader(hdf5_name=args.train_hdf_file,
                           max_nb_frames=args.n_frames,
                           n_cycles=args.n_cycles,
                           augment=True)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)

if args.valid_hdf_file is not None:
    valid_dataset = Loader_softmax(hdf5_name=args.valid_hdf_file,
                                   max_nb_frames=args.n_frames,
                                   n_cycles=args.valid_n_cycles,
                                   augment=False)
    valid_loader = torch.utils.data.DataLoader(
        valid_dataset,
Example #17
else:
    device = None

train_dataset = Loader_mining(hdf5_name=args.train_hdf_file,
                              max_nb_frames=args.n_frames,
                              n_cycles=args.n_cycles,
                              delta=args.delta)

train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)

valid_dataset = Loader(hdf5_name=args.valid_hdf_file,
                       max_nb_frames=args.n_frames,
                       n_cycles=args.valid_n_cycles,
                       delta=args.delta)
valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=args.workers,
                                           worker_init_fn=set_np_randomseed)

if args.model == 'resnet_mfcc':
    model = model_.ResNet_mfcc(n_z=args.latent_size,
                               proj_size=len(train_dataset.speakers_list) if
                               args.softmax != 'none' or args.pretrain else 0,
                               ncoef=args.ncoef,
                               sm_type=args.softmax,
                               delta=args.delta)
elif args.model == 'resnet_lstm':
    model = model_.ResNet_lstm(n_z=args.latent_size,
                               proj_size=len(train_dataset.speakers_list) if
                               args.softmax != 'none' or args.pretrain else 0,
                               ncoef=args.ncoef,
                               sm_type=args.softmax,
                               delta=args.delta)
Example #18
                        choices=['cnn', 'vgg', 'resnet', 'densenet', 'tdnn'],
                        default='resnet')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='Disables GPU use')
    parser.add_argument('--workers',
                        type=int,
                        default=4,
                        metavar='N',
                        help='Data load workers (default: 4)')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    testset = Loader(hdf5_name=args.data_path, max_nb_frames=args.n_frames)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.workers)

    args.nclasses = testset.nclasses

    args_dict = parse_args_for_log(args)
    print('\n')
    for key in args_dict:
        print('{}: {}'.format(key, args_dict[key]))
    print('\n')

    idx_to_class = {}
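The excerpt stops as idx_to_class is being filled. When the classes come from an ImageFolder-style class_to_idx mapping (class_to_idx is an assumed variable here), the inversion is a one-liner:

idx_to_class = {v: k for k, v in class_to_idx.items()}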
Example #19
	parser.add_argument('--classifier-path', type=str, default=None, metavar='Path', help='Path to pretrained classifier on MNIST')
	parser.add_argument('--data-path', type=str, default='./test.hdf', metavar='Path', help='Path to hdf file containing stacked MNIST. Can be generated with gen_data.py')
	parser.add_argument('--out-file', type=str, default='./test_data_coverage.p', metavar='Path', help='files for dumping coverage data')
	parser.add_argument('--batch-size', type=int, default=512, metavar='Path', help='batch size')
	parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
	parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
	args = parser.parse_args()
	args.cuda = not args.no_cuda and torch.cuda.is_available()

	if args.classifier_path is None:
		raise ValueError('No classifier path given. Use --classifier-path to point to a pretrained classifier.')
	
	classifier = cnn().eval()
	classifier_state = torch.load(args.classifier_path, map_location=lambda storage, loc: storage)
	classifier.load_state_dict(classifier_state['model_state'])

	if args.cuda:
		classifier = classifier.cuda()

	print('Cuda Mode is: {}'.format(args.cuda))

	testset = Loader(args.data_path)
	test_loader = torch.utils.data.DataLoader(testset, batch_size=args.batch_size, num_workers=args.workers)

	freqs = compute_freqs_real_data(test_loader, classifier, cuda=args.cuda)
	freqs /= freqs.sum()

	with open(args.out_file, "wb") as pfile:
		pickle.dump(freqs, pfile)
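The dumped frequency vector can be read back later with the matching pickle call:

import pickle

with open(args.out_file, 'rb') as pfile:
    freqs = pickle.load(pfile)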