import torch
import torchvision as tv
from torch.utils.data import DataLoader

from src.utils import makedirs, tensor2cuda, load_model, LabelDict
from argument import parser
from src.visualization import VanillaBackprop
from src.attack import FastGradientSignUntargeted
from src.model.madry_model import WideResNet

import matplotlib.pyplot as plt

max_epsilon = 4.7

perturbation_type = 'l2'

out_num = 5

img_folder = 'img'
makedirs(img_folder)

args = parser()

label_dict = LabelDict(args.dataset)

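# CIFAR-10 test split; ToTensor scales pixel values to [0, 1].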
te_dataset = tv.datasets.CIFAR10(args.data_root,
                                 train=False,
                                 transform=tv.transforms.ToTensor(),
                                 download=True)

te_loader = DataLoader(te_dataset,
                       batch_size=args.batch_size,
                       shuffle=False,
                       num_workers=4)
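
# The listing above stops after building the test loader; in the full script the
# imported attack and visualization classes are presumably applied to batches
# from te_loader. A minimal, hypothetical continuation follows — the load_model()
# signature and the perturb()/generate_gradients() method names are assumptions
# for illustration, not taken from this listing.
model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)
model = load_model(model, args.load_checkpoint)  # assumed helper signature
model.eval()

attack = FastGradientSignUntargeted(model,
                                    max_epsilon,
                                    max_epsilon / 4,  # illustrative step size
                                    min_val=0,
                                    max_val=1,
                                    max_iters=10,
                                    _type=perturbation_type)

data, label = next(iter(te_loader))
data, label = tensor2cuda(data), tensor2cuda(label)
adv_data = attack.perturb(data, label)  # assumed method name

vbp = VanillaBackprop(model)
# assumed method name; yields input-gradient saliency for the first out_num images
grads = vbp.generate_gradients(adv_data[:out_num], label[:out_num])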
Example #2

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import transforms
from efficientnet_pytorch import EfficientNet

# Project-local helpers assumed importable from the surrounding repository:
# makedirs, create_logger, print_args, model_selection,
# DatasetFolderWithPaths, ImageFolderWithPaths, Trainer.


def main(args):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    makedirs(args.log_root)
    makedirs(args.model_folder)

    # Expose the log directory under the attribute name used downstream;
    # args.model_folder is already set above, so no aliasing is needed for it.
    args.log_folder = args.log_root

    logger = create_logger(args.log_root, args.todo, 'info')

    print_args(args, logger)
    if args.model == 'enet':
        model = EfficientNet.from_pretrained('efficientnet-b5', num_classes=2)
    elif args.model == 'xception':
        model, *_ = model_selection(modelname='xception',
                                    num_out_classes=2,
                                    init_checkpoint=args.init_load)
    else:
        raise NotImplementedError

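    # Optionally resume from a checkpoint; map tensors to CPU when no GPU is present.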
    if args.load_checkpoint is not None:
        if device.type == 'cpu':
            checkpoint = torch.load(args.load_checkpoint,
                                    map_location=torch.device('cpu'))
        else:
            checkpoint = torch.load(args.load_checkpoint)
        model.load_state_dict(checkpoint)

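    # Replicate the model across all visible GPUs.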
    if torch.cuda.device_count() > 1:
        print('GPUs: ', torch.cuda.device_count())
        model = nn.DataParallel(model)

    model = model.to(device)

    trainer = Trainer(args, logger)

    if args.todo == 'train':
        if args.array:
            transform = transforms.Normalize(mean=[0.5], std=[0.5])

            def npy_loader(path):
                sample = torch.from_numpy(np.load(path))
                return sample

            # Build the training set
            print("Initializing dataset...")
            train_set = DatasetFolderWithPaths(root=args.data_root,
                                               loader=npy_loader,
                                               extensions='.npy',
                                               transform=transform)
            print("Dataset is successful")

            # Build the validation set
            val_set = DatasetFolderWithPaths(root=args.val_root,
                                             loader=npy_loader,
                                             extensions='.npy',
                                             transform=transform)
        else:
            transform = transforms.Compose([
                transforms.Resize((299, 299)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
            ])
            train_set = ImageFolderWithPaths(args.data_root,
                                             transform=transform)
            val_set = ImageFolderWithPaths(args.val_root, transform=transform)
        logger.info('Train Total: %d' % len(train_set))
        logger.info('Val Total: %d' % len(val_set))
        #logger.info( "Classes: {}".format(' '.join(map(str, train_set.classes))))

        tr_loader = DataLoader(train_set,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=args.nworkers,
                               pin_memory=torch.cuda.is_available())
        te_loader = DataLoader(val_set,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=args.nworkers,
                               pin_memory=torch.cuda.is_available())

        trainer.train(model,
                      tr_loader,
                      te_loader,
                      device,
                      adv_train=args.adv)

    elif args.todo == 'test':
        if args.array:
            transform = transforms.Normalize(mean=[0.5], std=[0.5])

            def npy_loader(path):
                sample = torch.from_numpy(np.load(path))
                return sample

            te_dataset = DatasetFolderWithPaths(root=args.data_root,
                                                loader=npy_loader,
                                                extensions='.npy',
                                                transform=transform)
        else:
            transform = transforms.Compose([
                transforms.Resize((299, 299)),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
            ])
            te_dataset = ImageFolderWithPaths(args.data_root,
                                              transform=transform)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=args.nworkers,
                               pin_memory=torch.cuda.is_available())

        std_acc, loss = trainer.test(model,
                                     te_loader,
                                     device,
                                     adv_test=args.adv)
        print("std acc: {:4f}".format(std_acc * 100))
    else:
        raise NotImplementedError
Example #3

import os

import torch
import torchvision as tv
from torch.utils.data import DataLoader

# Project-local helpers assumed importable: makedirs, create_logger,
# print_args, Trainer; WideResNet and FastGradientSignUntargeted are
# imported as in Example #1.


def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)

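    # Untargeted iterated FGSM: args.k gradient steps of size args.alpha,
    # projected back into the epsilon-ball of the chosen perturbation type.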
    attack = FastGradientSignUntargeted(model,
                                        args.epsilon,
                                        args.alpha,
                                        min_val=0,
                                        max_val=1,
                                        max_iters=args.k,
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':
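        # Standard CIFAR-10 augmentation: 4-pixel zero padding, random
        # 32x32 crop, random horizontal flip.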
        transform_train = tv.transforms.Compose([
            tv.transforms.RandomCrop(32,
                                     padding=4,
                                     fill=0,
                                     padding_mode='constant'),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ToTensor(),
        ])
        tr_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=True,
                                         transform=transform_train,
                                         download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=4)

        # evaluation during training
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=4)

        checkpoint = torch.load(args.load_checkpoint)
        model.load_state_dict(checkpoint)

        std_acc, adv_acc = trainer.test(model,
                                        te_loader,
                                        adv_test=True,
                                        use_pseudo_label=False)

        print(f"std acc: {std_acc * 100:.3f}%, adv_acc: {adv_acc * 100:.3f}%")

    else:
        raise NotImplementedError
Example #4

import argparse
import os

import torch

import utils  # project-local: provides makedirs() and get_logger()

parser = argparse.ArgumentParser()
# ... earlier add_argument calls (e.g. for --save and --resume, used below)
# are truncated in this listing ...
parser.add_argument('--prec',
                    type=str,
                    default='single',
                    choices=['single', 'double'])
parser.add_argument('--approach',
                    type=str,
                    default='ocflow',
                    choices=['ocflow'])

args = parser.parse_args()

if args.prec == 'double':
    argPrec = torch.float64
else:
    argPrec = torch.float32

# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'),
                          filepath=os.path.abspath(__file__))
logger.info(args)

device = torch.device('cpu')  # only supports cpu

figNum = 1
lw = 2  # linewidth
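# drop the last 12 characters of the checkpoint filename to build the title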
strTitle = 'eval_' + os.path.basename(args.resume)[:-12]

fontsize = 18
title_fontsize = 22

Example #5

import os

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision as tv
from torch.utils.data import DataLoader
from torchvision import models

# Project-local helpers assumed importable: makedirs, create_logger,
# print_args, Trainer, FastGradientSignUntargeted.


def main(args):

    save_folder = '%s_%s' % (args.dataset, args.affix)

    log_folder = os.path.join(args.log_root, save_folder)
    model_folder = os.path.join(args.model_root, save_folder)

    makedirs(log_folder)
    makedirs(model_folder)

    setattr(args, 'log_folder', log_folder)
    setattr(args, 'model_folder', model_folder)

    logger = create_logger(log_folder, args.todo, 'info')

    print_args(args, logger)

    # model = WideResNet(depth=34, num_classes=10, widen_factor=10, dropRate=0.0)
    model = models.resnet50(pretrained=True)
    num_classes = 10
    model.fc = nn.Linear(model.fc.in_features, num_classes)

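    # Same untargeted iterated-FGSM configuration as in Example #3.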
    attack = FastGradientSignUntargeted(model,
                                        args.epsilon,
                                        args.alpha,
                                        min_val=0,
                                        max_val=1,
                                        max_iters=args.k,
                                        _type=args.perturbation_type)

    if torch.cuda.is_available():
        model.cuda()

    trainer = Trainer(args, logger, attack)

    if args.todo == 'train':
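        # Hand-built pad-then-crop augmentation: zero-pad each side by 4 via
        # F.pad, then take a random 32x32 crop and a random horizontal flip.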
        transform_train = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Lambda(lambda x: F.pad(
                x.unsqueeze(0),
                (4, 4, 4, 4), mode='constant', value=0).squeeze()),
            tv.transforms.ToPILImage(),
            tv.transforms.RandomCrop(32),
            tv.transforms.RandomHorizontalFlip(),
            tv.transforms.ToTensor(),
        ])
        tr_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=True,
                                         transform=transform_train,
                                         download=True)

        tr_loader = DataLoader(tr_dataset,
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=48)

        # evaluation during training
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=48)

        trainer.train(model, tr_loader, te_loader, args.adv_train)
    elif args.todo == 'test':
        te_dataset = tv.datasets.CIFAR10(args.data_root,
                                         train=False,
                                         transform=tv.transforms.ToTensor(),
                                         download=True)

        te_loader = DataLoader(te_dataset,
                               batch_size=args.batch_size,
                               shuffle=False,
                               num_workers=48)

        checkpoint = torch.load(args.load_checkpoint)
        model.load_state_dict(checkpoint)

        std_acc, adv_acc = trainer.test(model,
                                        te_loader,
                                        adv_test=True,
                                        use_pseudo_label=False)

        print("std acc: %.4f, adv_acc: %.4f" % (std_acc * 100, adv_acc * 100))

    else:
        raise NotImplementedError
Example #6

import os
import time

import torch
from torchtext import data  # legacy torchtext API (BucketIterator)

# Project-local helpers assumed importable: create_fields,
# get_dataset_and_extractor, train_validation_split, StarSpace,
# InnerProductSimilarity, NegativeSampling, MarginRankingLoss, TableLogger,
# save_vocab, makedirs.


def train(train_file, model_path, file_format, train_mode, epochs, batch_size,
          d_embed, n_negative, log_every, lr, val_every, gpu,
          validation_split):
    # torch.cuda.device() is only a context manager and has no effect when
    # called bare; set_device() actually selects the target GPU.
    torch.cuda.set_device(gpu)

    lhs_field, rhs_field = create_fields(train_mode)
    train_dataset, batch_extractor = get_dataset_and_extractor(
        train_file, file_format, lhs_field, rhs_field)

    # Avoid shadowing this function's own name with the split result.
    train_split, validation = train_validation_split(
        train_dataset, validation_size=validation_split)

    print('Num entity pairs train:', len(train_split))
    print('Num entity pairs validation:', len(validation))

    lhs_field.build_vocab(train_split)
    rhs_field.build_vocab(train_split)

    n_lhs = len(lhs_field.vocab)
    n_rhs = len(rhs_field.vocab)

    print('Num LHS features:', n_lhs)
    print('Num RHS features:', n_rhs)

    train_iter, val_iter = data.BucketIterator.splits((train_split, validation),
                                                      batch_size=batch_size,
                                                      device=gpu)

    # TODO: implement loading from snapshot
    model = StarSpace(d_embed=d_embed,
                      n_input=n_lhs,
                      n_output=n_rhs,
                      similarity=InnerProductSimilarity(),
                      max_norm=20,
                      aggregate=torch.sum)

    model.cuda()

    neg_sampling = NegativeSampling(n_output=n_rhs, n_negative=n_negative)

    criterion = MarginRankingLoss(margin=1., aggregate=torch.mean)
    opt = torch.optim.Adam(model.parameters(), lr=lr)

    logger = TableLogger(headers=[
        'time', 'epoch', 'iterations', 'loss', 'accuracy', 'val_accuracy'
    ])

    makedirs(model_path)

    iterations = 0
    start = time.time()
    train_iter.repeat = False
    best_val_acc = -1

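    # Training loop: push the true RHS above sampled negatives via the margin
    # ranking loss, tracking accuracy over all candidate RHS entities.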
    for epoch in range(epochs):
        n_correct, n_total = 0, 0
        for batch in train_iter:

            model.train()
            opt.zero_grad()

            iterations += 1

            # TODO: add input/output extractor function for different tasks
            # TODO: add prediction candidate generator function for different tasks
            # TODO: add correctness/accuracy function for different tasks
            # TODO: clean up accuracy computation

            # extract entity pairs from batch
            lhs, rhs = batch_extractor(batch)

            # get similarity for positive entity pairs
            lhs_repr, pos_rhs_repr = model(lhs, rhs)  # B x dim, B x dim
            positive_similarity = model.similarity(
                lhs_repr, pos_rhs_repr).squeeze(1)  # B x 1

            # get similarity for negative entity pairs
            n_samples = batch.batch_size * n_negative
            neg_rhs = neg_sampling.sample(n_samples)
            if lhs.is_cuda:
                neg_rhs = neg_rhs.cuda()
            _, neg_rhs_repr = model(output=neg_rhs)  # (B * n_negative) x dim
            neg_rhs_repr = neg_rhs_repr.view(batch.batch_size, n_negative,
                                             -1)  # B x n_negative x dim
            negative_similarity = model.similarity(
                lhs_repr, neg_rhs_repr).squeeze(1)  # B x n_negative

            # calculate accuracy of predictions in the current batch
            candidate_rhs = torch.autograd.Variable(
                torch.arange(0, n_rhs).long().expand(batch.batch_size,
                                                     -1))  # B x n_output
            if lhs.is_cuda:
                candidate_rhs = candidate_rhs.cuda()
            _, candidate_rhs_repr = model(output=candidate_rhs.view(
                batch.batch_size * n_rhs))  # B x dim, (B * n_output) x dim
            candidate_rhs_repr = candidate_rhs_repr.view(
                batch.batch_size, n_rhs, -1)  # B x n_output x dim
            similarity = model.similarity(
                lhs_repr, candidate_rhs_repr).squeeze(1)  # B x n_output
            n_correct += (torch.max(similarity, dim=-1)[1].view(
                rhs.size()).data == rhs.data).sum().item()
            n_total += batch.batch_size
            train_acc = 100. * n_correct / n_total

            # calculate loss
            loss = criterion(positive_similarity, negative_similarity)

            loss.backward()
            opt.step()

            # evaluate performance on validation set
            if iterations % val_every == 0:
                model.eval()

                # calculate accuracy on validation set
                n_val_correct = 0
                for val_batch_idx, val_batch in enumerate(val_iter):
                    val_lhs, val_rhs = batch_extractor(val_batch)

                    val_candidate_rhs = torch.autograd.Variable(
                        torch.arange(0, n_rhs).long().expand(
                            val_batch.batch_size, -1))  # B x n_output
                    if val_lhs.is_cuda:
                        val_candidate_rhs = val_candidate_rhs.cuda()
                    val_lhs_repr, val_candidate_rhs_repr = model(
                        val_lhs,
                        val_candidate_rhs.view(
                            val_batch.batch_size *
                            n_rhs))  # B x dim, (B * n_output) x dim
                    val_candidate_rhs_repr = val_candidate_rhs_repr.view(
                        val_batch.batch_size, n_rhs, -1)  # B x n_output x dim
                    similarity = model.similarity(
                        val_lhs_repr,
                        val_candidate_rhs_repr).squeeze(1)  # B x n_output
                    n_val_correct += (torch.max(similarity, dim=-1)[1].view(
                        val_rhs.size()).data == val_rhs.data).sum().item()
                val_acc = 100. * n_val_correct / len(validation)

                # log progress, including validation metrics
                logger.log(time=time.time() - start,
                           epoch=epoch,
                           iterations=iterations,
                           loss=loss.item(),
                           accuracy=train_acc,
                           val_accuracy=val_acc)

                # found a better model
                if val_acc > best_val_acc:
                    best_val_acc = val_acc

                    # save model
                    torch.save(model, os.path.join(model_path, 'model.pt'))

                    # save vocabulary for both entity fields
                    save_vocab(lhs_field,
                               os.path.join(model_path, 'lhs_vocab.pkl'))
                    save_vocab(rhs_field,
                               os.path.join(model_path, 'rhs_vocab.pkl'))

            elif iterations % log_every == 0:
                # log training progress
                logger.log(time=time.time() - start,
                           epoch=epoch,
                           iterations=iterations,
                           loss=loss.item(),
                           accuracy=train_acc)
Example #7

import os

import tensorflow as tf  # TF 1.x API (tf.train.Saver, tf.Session, ...)

# Module-level objects assumed defined elsewhere in the original script: args,
# config, sess_config, and project-local helpers (makedirs, read_json_dict,
# load_glove_embedding, DataReader, Evaluator, get_model, print_title,
# run_train, run_evaluate, run_test, save_json, save_result_v1, save_result_v2).


def main():
    makedirs(config.temp_dir)
    makedirs(config.result_dir)
    makedirs(config.train_log_dir)
    makedirs(config.valid_log_dir)

    print('preparing data...')
    config.word_2_id, config.id_2_word = read_json_dict(config.vocab_dict)
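    # Clamp the configured vocabulary sizes to what the dictionary provides.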
    config.vocab_size = min(config.vocab_size, len(config.word_2_id))
    config.oov_vocab_size = min(config.oov_vocab_size,
                                len(config.word_2_id) - config.vocab_size)

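    # GloVe vectors are needed only for training; at eval time just the
    # embedding width is read from the file's first line.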
    embedding_matrix = None
    if args.do_train:
        if os.path.exists(config.glove_file):
            print('loading embedding matrix from file: {}'.format(
                config.glove_file))
            embedding_matrix, config.word_em_size = load_glove_embedding(
                config.glove_file, list(config.word_2_id.keys()))
            print('shape of embedding matrix: {}'.format(
                embedding_matrix.shape))
    else:
        if os.path.exists(config.glove_file):
            with open(config.glove_file, 'r', encoding='utf-8') as fin:
                line = fin.readline()
                config.word_em_size = len(line.strip().split()) - 1

    data_reader = DataReader(config)
    evaluator = Evaluator('tgt')

    print('building model...')
    model = get_model(config, embedding_matrix)
    saver = tf.train.Saver(max_to_keep=10)

    if args.do_train:
        print('loading data...')
        train_data = data_reader.read_train_data()
        valid_data = data_reader.read_valid_data()

        print_title('Trainable Variables')
        for v in tf.trainable_variables():
            print(v)

        print_title('Gradients')
        for g in model.gradients:
            print(g)

        with tf.Session(config=sess_config) as sess:
            model_file = args.model_file
            if model_file is None:
                model_file = tf.train.latest_checkpoint(config.result_dir)
            if model_file is not None:
                print('loading model from {}...'.format(model_file))
                saver.restore(sess, model_file)
            else:
                print('initializing from scratch...')
                tf.global_variables_initializer().run()

            train_writer = tf.summary.FileWriter(config.train_log_dir,
                                                 sess.graph)
            valid_writer = tf.summary.FileWriter(config.valid_log_dir,
                                                 sess.graph)

            valid_log_history = run_train(sess,
                                          model,
                                          train_data,
                                          valid_data,
                                          saver,
                                          evaluator,
                                          train_writer,
                                          valid_writer,
                                          verbose=True)
            save_json(
                valid_log_history,
                os.path.join(config.result_dir, 'valid_log_history.json'))

    if args.do_eval:
        print('loading data...')
        valid_data = data_reader.read_valid_data()

        with tf.Session(config=sess_config) as sess:
            model_file = args.model_file
            if model_file is None:
                model_file = tf.train.latest_checkpoint(config.result_dir)
            if model_file is not None:
                print('loading model from {}...'.format(model_file))
                saver.restore(sess, model_file)

                predicted_ids, alignment_history, valid_loss, valid_accu = run_evaluate(
                    sess, model, valid_data, verbose=True)
                print(
                    'average valid loss: {:>.4f}, average valid accuracy: {:>.4f}'
                    .format(valid_loss, valid_accu))

                print_title('Saving Result')
                if not config.beam_search:
                    save_result_v1(predicted_ids, alignment_history,
                                   config.id_2_word, config.valid_data,
                                   config.valid_result)
                else:
                    save_result_v2(predicted_ids, config.id_2_word,
                                   config.valid_result)
                evaluator.evaluate(config.valid_data, config.valid_result,
                                   config.to_lower)
            else:
                print('model not found!')

    if args.do_test:
        print('loading data...')
        test_data = data_reader.read_test_data()

        with tf.Session(config=sess_config) as sess:
            model_file = args.model_file
            if model_file is None:
                model_file = tf.train.latest_checkpoint(config.result_dir)
            if model_file is not None:
                print('loading model from {}...'.format(model_file))
                saver.restore(sess, model_file)

                predicted_ids, alignment_history = run_test(sess,
                                                            model,
                                                            test_data,
                                                            verbose=True)

                print_title('Saving Result')
                if not config.beam_search:
                    save_result_v1(predicted_ids, alignment_history,
                                   config.id_2_word, config.test_data,
                                   config.test_result)
                else:
                    save_result_v2(predicted_ids, config.id_2_word,
                                   config.test_result)
                evaluator.evaluate(config.test_data, config.test_result,
                                   config.to_lower)
            else:
                print('model not found!')