Example 1
def main(config):
    # Initialize generator and discriminator
    generator = MODELS[config.generator](2, 1)
    discriminator = MODELS[config.discriminator](nc_in=1)
    vgg = Vgg16(requires_grad=False)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
        vgg.cuda()
    criterion = {
        'criterion_GAN': torch.nn.MSELoss(),
        'criterion_pixelwise': torch.nn.L1Loss(),
        'L1LossMaskedMean': L1LossMaskedMean(),
        'VGGLoss': VGGLoss(vgg),
        'StyleLoss': StyleLoss(vgg)
    }

    helper = config.helper
    print("helper:\t" + helper)
    syn = HELPER[helper](generator, discriminator,
                         criterion, config)
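    # pick the most recent checkpoint; reverse-sorted glob is lexicographic, so it
    # matches numeric epoch order only if the epoch numbers are zero-padded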
    weight_files = sorted(glob(join(syn.config.load_model_path, 'checkpoint_epoch_*.pth')), reverse=True)

    print("loaded:" + weight_files[0])
    syn.load_generator_history_weight(weight_files[0])

    syn.move_to_cuda()
    test(syn)
Example 2
    def __init__(self, hparams):
        super().__init__()

        print("Initializing our HandGAN model...")

        # Workaround from https://github.com/PyTorchLightning/pytorch-lightning/issues/3998
        # Needed when loading the model from a checkpoint, where save_hyperparameters() does not work
        if isinstance(hparams, dict):
            hparams = Namespace(**hparams)

        #self.save_hyperparameters()
        self.hparams = hparams

        # Used to initialize the networks
        init = mutils.Initializer(init_type=hparams.init_type,
                                  init_gain=hparams.init_gain)

        # Network architecture
        # Two generators, one for each domain:
        self.g_ab = init(generator(hparams))  # g_ab: translates domain A to domain B
        self.g_ba = init(generator(hparams))  # g_ba: translates domain B to domain A

        # Discriminators:
        self.d_a = init(discriminator(hparams))  # d_a: domain A discriminator
        self.d_b = init(discriminator(hparams))  # d_b: domain B discriminator

        # For the perceptual discriminator we will need a feature extractor
        if hparams.netD == 'perceptual':
            self.vgg_net = Vgg16().eval()

        # For validation we will need Inception network to compute FID metric
        if hparams.valid_interval > 0.0:
            block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
            self.inception_net = InceptionV3([block_idx]).eval()

        if hparams.ganerated:
            model = SilNet.load_from_checkpoint(
                "./weights/silnet_pretrained.ckpt")
            mutils.set_requires_grad(model, requires_grad=False)
            self.silnet = model.unet.eval()

        # ImagePool from where we randomly get generated images in both domains
        self.fake_a_pool = mutils.ImagePool(hparams.pool_size)
        self.fake_b_pool = mutils.ImagePool(hparams.pool_size)

        # Criterions
        self.crit_cycle = torch.nn.L1Loss()
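        # 'lsgan' presumably selects a least-squares (MSE-style) adversarial objective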
        self.crit_discr = DiscriminatorLoss('lsgan')

        if hparams.lambda_idt > 0.0:
            self.crit_idt = torch.nn.L1Loss()

        if hparams.ganerated:
            self.crit_geom = torch.nn.BCEWithLogitsLoss()
Example 3
def main(config):
    # Initialize generator and discriminator
    helper = config.helper
    print("helper:\t" + helper)
    if 'Kumar' in helper or 'Dakai' in helper:
        generator = MODELS[config.generator](n_channels=1, n_classes=1)
        discriminator = MODELS[config.discriminator](in_channels=2)
    else:
        # Initialize generator and discriminator
        generator = MODELS[config.generator](nc_in=2, nc_out=1)
        discriminator = MODELS[config.discriminator](nc_in=2)

    # criterion_GAN = torch.nn.L1Loss()
    if 'Lcon' in helper:
        vgg = Vgg16(requires_grad=False)
        if config.use_cuda:
            torch.cuda.set_device(config.gpu)
            vgg.cuda()
        criterion = {
            'criterion_GAN': torch.nn.MSELoss(),
            'criterion_pixelwise': torch.nn.L1Loss(),
            'L1LossMaskedMean': L1LossMaskedMean(),
            'VGGLoss': VGGLoss(vgg),
            'StyleLoss': StyleLoss(vgg)
        }
    else:
        criterion = {
            'criterion_GAN': torch.nn.MSELoss(),
            'criterion_pixelwise': torch.nn.L1Loss(),
            'L1LossMaskedMean': L1LossMaskedMean(),
        }

    syn = HELPER[helper](generator, discriminator, criterion, config)
    syn.move_to_cuda()
    weight_files = sorted(glob(
        join(syn.config.load_model_path, 'checkpoint_epoch_*.pth')),
                          reverse=True)
    if len(weight_files) > 0:
        syn.load_generator_history_weight(weight_files[0])
    else:
        exit(0)
    test_dataset = SYNDataLoader(root=syn.config.data_root,
                                 split_radio=syn.config.split_radio,
                                 split='test',
                                 data_type="SYN",
                                 config=syn.config)
    test_old(syn, test_dataset, 'final_test_epoch')
Example 4
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = [
            'G_GAN', 'G_L1', 'G_content_low', 'G_content_deep', 'G_style',
            'D_real', 'D_fake'
        ]
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['real_A', 'fake_B', 'real_B']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.isTrain:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf,
                                      opt.netG, opt.norm, not opt.no_dropout,
                                      opt.init_type, opt.init_gain,
                                      self.gpu_ids)

        if self.isTrain:  # define a discriminator; conditional GANs take both input and output images, so D has input_nc + output_nc channels
            self.netD = networks.define_D(opt.input_nc + opt.output_nc,
                                          opt.ndf, opt.netD, opt.n_layers_D,
                                          opt.norm, opt.init_type,
                                          opt.init_gain, self.gpu_ids)

        if self.isTrain:
            # define loss functions
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            self.mse_loss = torch.nn.MSELoss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(),
                                                lr=opt.lr,
                                                betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_D)

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.vgg16 = Vgg16(requires_grad=False).to(device)
Example 5
def main(config):
    # Initialize generator and discriminator
    helper = config.helper
    if 'Kumar' in helper or 'Dakai' in helper:
        generator = MODELS[config.generator](n_channels=1, n_classes=1)
        discriminator = MODELS[config.discriminator](in_channels=2)
    else:
        # Initialize generator and discriminator
        generator = MODELS[config.generator](nc_in=2, nc_out=1)
        discriminator = MODELS[config.discriminator](nc_in=2)

    vgg = Vgg16(requires_grad=False)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
        vgg.cuda()
    criterion = {
        'criterion_GAN': torch.nn.L1Loss(),
        'criterion_pixelwise': torch.nn.L1Loss(),
        'L1LossMaskedMean': L1LossMaskedMean(),
        'VGGLoss': VGGLoss(vgg),
        'StyleLoss': StyleLoss(vgg),
        "L2LossMaskedMean": L2LossMaskedMean()
    }

    print("helper:\t" + helper)
    syn = HELPER[helper](generator, discriminator, criterion, config)
    # weight_files = sorted(glob(join(
    #     '../log/' + syn.config.data_name + '/' + syn.config.generator + '_' + syn.config.helper + '/' + syn.config.discriminator + '_' + str(
    #         syn.config.gpu) + '/checkpoint',
    #     'checkpoint_epoch_*.pth')), reverse=True)
    weight_files = sorted(glob(
        join(syn.config.load_model_path, 'checkpoint_epoch_*.pth')),
                          reverse=True)

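    # weight_files[0] is the latest checkpoint; this assumes at least one exists,
    # otherwise the indexing below raises IndexError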
    print("loaded:" + weight_files[0])
    syn.load_generator_history_weight(weight_files[0])
    syn.move_to_cuda()
    vali_dataset = SYNDataLoader(root=syn.config.data_root,
                                 split_radio=config.split_radio,
                                 split='vali',
                                 config=syn.config,
                                 data_type="SYN")
    test(syn, vali_dataset)
    exit(0)
Example 6
import numpy as np
from PIL import Image

# load_image, numpy_to_variable, variable_to_numpy, Vgg16 and DispersionAttack_gpu
# are assumed to come from this repository's own utility and model modules


def _save_img(imgs_np, file_path):
    img_np = np.transpose((imgs_np[0] * 255).astype(np.uint8), (1, 2, 0))
    img_pil = Image.fromarray(img_np)
    img_pil.save(file_path)
    return

# Resnet152 [4, 5, 6, 7]
# Vgg16 [2, 7, 14, 21, 28]
image_np = np.expand_dims(load_image(data_format='channels_first', fpath='./images/example.png', abs_path=True), axis=0)
image = numpy_to_variable(image_np)
_save_img(image_np, './temp_ori.png')

model = Vgg16()
internal = [i for i in range(29)]
attack = DispersionAttack_gpu(model, epsilon=16./255, step_size=1./255, steps=200)
adv = attack(image, attack_layer_idx_list=[14], internal=internal)

adv_np = variable_to_numpy(adv)
_save_img(adv_np, './temp_adv.png')

diff_np = np.abs(image_np - adv_np)
_save_img(diff_np, './temp_diff.png')

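# rescale the perturbation to [0, 1] so it is clearly visible when saved as an image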
diff_amp_np = diff_np / diff_np.max()
_save_img(diff_amp_np, './temp_diff_amp_{0:.2f}.png'.format(1./diff_np.max()))

Example 7
def run(train_loader, val_loader, epochs, lr, momentum, weight_decay, lr_step,
        k1, k2, es_patience, log_dir):
    model = Vgg16()

    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'
    model.to(device)

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=weight_decay)

    lr_scheduler = ExponentialLR(optimizer, gamma=0.975)
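    # the scheduler is stepped every `lr_step` iterations by the event handler registered below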

    # criterion = VAELoss(k1=k1, k2=k2).to(device)

    def update_fn(engine, batch):
        x, y = _prepare_batch(batch, device=device, non_blocking=True)

        model.train()

        optimizer.zero_grad()

        output = model(x)

        # Compute loss
        loss = F.nll_loss(output, y)

        loss.backward()

        optimizer.step()

        return {
            "batchloss": loss.item(),
        }

    trainer = Engine(update_fn)

    try:
        GpuInfo().attach(trainer)
    except RuntimeError:
        print(
            "INFO: By default, this example can log GPU information (used memory, utilization). "
            "Since the pynvml package is not installed, GPU information won't be logged. "
            "To enable it, install it with `pip install pynvml`.")

    trainer.add_event_handler(Events.ITERATION_COMPLETED(every=lr_step),
                              lambda engine: lr_scheduler.step())

    metric_names = [
        'batchloss',
    ]

    def output_transform(x, name):
        return x[name]

    for n in metric_names:
        # We compute running average values on the output (batch loss) across all devices
        RunningAverage(output_transform=partial(output_transform, name=n),
                       epoch_bound=False,
                       device=device).attach(trainer, n)

    exp_name = datetime.now().strftime("%Y%m%d-%H%M%S")
    log_path = log_dir + "/vgg_vae/{}".format(exp_name)

    tb_logger = TensorboardLogger(log_dir=log_path)

    tb_logger.attach(trainer,
                     log_handler=OutputHandler(tag="training",
                                               metric_names=metric_names),
                     event_name=Events.ITERATION_COMPLETED)

    tb_logger.attach(trainer,
                     log_handler=OptimizerParamsHandler(optimizer, "lr"),
                     event_name=Events.ITERATION_STARTED)

    ProgressBar(persist=True,
                bar_format="").attach(trainer,
                                      event_name=Events.EPOCH_STARTED,
                                      closing_event_name=Events.COMPLETED)
    ProgressBar(persist=False, bar_format="").attach(trainer,
                                                     metric_names=metric_names)

    # val process definition
    def loss_output_transform(output):
        return output

    def acc_output_transform(output):
        return output

    customed_loss = Loss(loss_fn=F.nll_loss,
                         output_transform=loss_output_transform,
                         device=device)
    customed_accuracy = Accuracy(output_transform=acc_output_transform,
                                 device=device)

    metrics = {'Loss': customed_loss, 'Accuracy': customed_accuracy}

    def val_update_fn(engine, batch):
        model.eval()
        with torch.no_grad():
            x, y = _prepare_batch(batch, device=device, non_blocking=True)
            output = model(x)
            return output, y

    val_evaluator = Engine(val_update_fn)

    for name, metric in metrics.items():
        metric.attach(val_evaluator, name)

    def run_evaluation(engine):
        val_evaluator.run(val_loader)

    trainer.add_event_handler(Events.EPOCH_COMPLETED, run_evaluation)
    trainer.add_event_handler(Events.COMPLETED, run_evaluation)

    ProgressBar(persist=False, desc="Train evaluation").attach(val_evaluator)

    # Log val metrics:
    tb_logger.attach(val_evaluator,
                     log_handler=OutputHandler(tag="val",
                                               metric_names=list(
                                                   metrics.keys()),
                                               another_engine=trainer),
                     event_name=Events.EPOCH_COMPLETED)

    # trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())

    # Store the best model
    def default_score_fn(engine):
        score = engine.state.metrics['Accuracy']
        return score

    best_model_handler = ModelCheckpoint(dirname=log_path,
                                         filename_prefix="best",
                                         n_saved=3,
                                         score_name="val_acc",
                                         score_function=default_score_fn)
    val_evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {
        'model': model,
    })

    # Add early stopping
    es_handler = EarlyStopping(patience=es_patience,
                               score_function=default_score_fn,
                               trainer=trainer)
    val_evaluator.add_event_handler(Events.COMPLETED, es_handler)

    setup_logger(es_handler._logger)
    setup_logger(logging.getLogger("ignite.engine.engine.Engine"))

    def empty_cuda_cache(engine):
        torch.cuda.empty_cache()
        import gc
        gc.collect()

    trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
    val_evaluator.add_event_handler(Events.COMPLETED, empty_cuda_cache)

    trainer.run(train_loader, max_epochs=epochs)
Example 8
def main():

    # load option
    parser = TrainOptions()
    opts = parser.parse()

    # create output folder
    if not os.path.exists(os.path.join(opts.output_dir, 'model', opts.name)):
        os.mkdir(os.path.join(opts.output_dir, 'model', opts.name))

    # data loader
    print('\n--- load {} dataset from {} ---'.format(opts.reg_phase,
                                                     opts.dataroot))
    dataset = SingleStageDataset(opts, opts.reg_phase)
    loader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opts.batch_size,
                                         shuffle=True,
                                         num_workers=opts.nThreads,
                                         drop_last=True)
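    # cycle() (presumably itertools.cycle) repeats the loader indefinitely,
    # so next() below never raises StopIteration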
    loader_iterator = iter(cycle(loader))
    dataset_other = SingleStageDataset(opts, opts.reg_phase)
    loader_other = torch.utils.data.DataLoader(dataset_other,
                                               batch_size=opts.batch_size,
                                               shuffle=True,
                                               num_workers=opts.nThreads,
                                               drop_last=True)
    loader_other_iterator = iter(cycle(loader_other))

    # model to be regularized
    opts.load = os.path.join(opts.output_dir, 'model', opts.load)
    print('\n--- load {}-th stage ArtEditing model from {} ---'.format(
        opts.reg_stage, opts.load))
    model = Model(opts)
    _, _ = model.load(opts.load)
    model = model.bicycle[opts.reg_stage]
    output_dim = opts.input_dim[opts.reg_stage + 1]
    model.cuda()
    model.eval()

    # perceptual model
    print('\n--- create perceptual model')
    vgg = Vgg16(requires_grad=False)
    vgg.cuda()
    vgg.eval()

    # regularizer
    print('\n--- create the regularizer')
    adain_optimize = AdaINOptimize(opts, model, vgg, output_dim)
    if opts.reg_load != '':
        opts.reg_load = os.path.join(opts.output_dir, 'model', opts.reg_load)
        print('  load the regularizer from {}'.format(opts.reg_load))
        ep0 = adain_optimize.load(opts.reg_load)
        ep0 += 1
    else:
        ep0 = 0
    adain_optimize.cuda()
    print('start the training at epoch {}'.format(ep0 + 1))

    # tensorboard
    tf_board = SummaryWriter(
        logdir=os.path.join(opts.output_dir, 'tfboard', opts.name))

    # start the training
    for it in range(ep0, opts.n_ep):
        inp, out = next(loader_iterator)

        # determine input output
        inp = inp.cuda()
        out = out.cuda()

        # refine loop for this inp/out pair
        _ = adain_optimize(inp, out, loader_other_iterator)

        # display
        adain_optimize.write_display(tf_board, it)

        if (it + 1) % (opts.n_ep // 100) == 0:
            print('Iteration {}/{}'.format(it + 1, opts.n_ep))

        # write model file
        if (it + 1) % (opts.n_ep // 10) == 0:
            adain_optimize.save(
                os.path.join(opts.output_dir, 'model', opts.name,
                             '{}.pth'.format(it + 1)), it)
    tf_board.close()
    return
Example 9
def main(config):
    # device = torch.device("cuda")
    helper = config.helper
    print("helper:\t" + helper)
    if 'Kumar' in helper or 'Dakai' in helper:
        generator = MODELS[config.generator](n_channels=1, n_classes=1)
        discriminator = MODELS[config.discriminator](in_channels=2)
    else:
        # Initialize generator and discriminator
        generator = MODELS[config.generator](nc_in=2, nc_out=1)
        discriminator = MODELS[config.discriminator](nc_in=2)

    # criterion_GAN = torch.nn.L1Loss()
    vgg = Vgg16(requires_grad=False)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
        vgg.cuda()
    criterion = {
        'criterion_GAN': torch.nn.MSELoss(),
        'criterion_pixelwise': torch.nn.L1Loss(),
        'L1LossMaskedMean': L1LossMaskedMean(),
        'VGGLoss': VGGLoss(vgg),
        'StyleLoss': StyleLoss(vgg)
    }

    syn = HELPER[helper](generator, discriminator, criterion, config)
    # if 'allGateDilateRicherContextRefine' == syn.config.generator:
    #     optimizer_G = Optimizer(name=syn.config.learning_algorithm,
    #                             model=syn.generator.refine_net,
    #                             lr=syn.config.learning_rate)
    # else:
    optimizer_G = Optimizer(name=syn.config.learning_algorithm,
                            model=syn.generator,
                            lr=syn.config.learning_rate)
    optimizer_D = Optimizer(name=syn.config.learning_algorithm,
                            model=syn.discriminator,
                            lr=syn.config.learning_rate)
    syn.move_to_cuda()
    model_optimizer = {
        'G': optimizer_G,
        'D': optimizer_D,
    }
    # if 'allGateDilateRicherContextRefine' == syn.config.generator:
    #     syn.load_pretrained_coarse_weight()
    epo = syn.load_lastest_weight(model_optimizer)
    train_dataset = SYNDataLoader(root=syn.config.data_root,
                                  split_radio=syn.config.split_radio,
                                  split='train',
                                  data_type="SYN",
                                  config=syn.config)
    vali_dataset = SYNDataLoader(root=syn.config.data_root,
                                 split_radio=syn.config.split_radio,
                                 split='vali',
                                 data_type="SYN",
                                 config=syn.config)
    test_dataset = SYNDataLoader(root=syn.config.data_root,
                                 split_radio=syn.config.split_radio,
                                 split='test',
                                 data_type="SYN",
                                 config=syn.config)
    train_loader = DataLoader(train_dataset,
                              batch_size=syn.config.train_batch_size,
                              shuffle=True,
                              pin_memory=True,
                              num_workers=syn.config.workers)
    vali_loader = DataLoader(vali_dataset,
                             batch_size=syn.config.train_batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=syn.config.workers)
    # test_loader = DataLoader(test_dataset, batch_size=syn.config.test_batch_size, shuffle=False, pin_memory=True,
    #                          num_workers=syn.config.workers)

    # decay_epoch = [60, 90, 120, 150, 180, 210, 240]
    decay_epoch = [60, 70, 80, 90, 120, 150]
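    # index of the first decay milestone that lies after the resume epoch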
    decay_next = (np.array(decay_epoch) - epo > 0).argmax(axis=0)
    decay_e = decay_epoch[decay_next]
    for epoch in range(epo, syn.config.epochs):
        # increase boundary loss factor
        syn.boundary_loss_factor = min(
            syn.config.boundary_loss_factor /
            syn.config.max_boundary_loss_factor_epoch * epoch,
            syn.config.boundary_loss_factor)
        print("\nIncrease the boundary loss factor to %.3f" %
              (syn.boundary_loss_factor))
        train_critics = train(syn, train_loader, model_optimizer, epoch)
        syn.write_summary(epoch, train_critics)
        syn.plot_train_loss(epoch, train_critics)
        syn.save_model_checkpoint(epoch, model_optimizer)
        if (epoch + 1) % syn.config.validate_every_epoch == 0:
            vali_critics = valid(syn, vali_loader, epoch)
            syn.write_summary(epoch, vali_critics)
            syn.plot_vali_loss(epoch, vali_critics)
            # syn.write_summary(epoch, test_critics)
        # decay after start_decay_at
        # if (epoch + 1) % decay_e == 0:
        #     for g in model_optimizer['G'].param_groups:
        #         current_lr = max(g['lr'] * 0.5, syn.config.min_lrate)
        #         print("Decaying the learning ratio to %.8f" % (current_lr))
        #         g['lr'] = current_lr
        #     decay_next += 1
        #     decay_e = decay_epoch[decay_next]
        #     print("Next decay will be in the %d th epoch" % (decay_e))

    test_old(syn, test_dataset, epoch)
    syn.summary_writer.close()
Example 10
def main():
    params = {
        'batch_size': 8,
        'attack_model': 'inception_v3',
        'input_dir_path': '/home/yantao/datasets/imagenet_100image/original',
        'output_dir_path': 'images_adv',
        'attack_num_steps': 1000,
        'step_size': 2,
        'learning_rate': 5e-2,
        'attack_type': 'ori_trans',  # ori / opt / ori_trans
    }

    if params['attack_model'] == 'vgg16':
        params['IMAGE_SIZE'] = 224
        attack_layer_idx = 14  # 0 ~ 28
        model = Vgg16(attack_layer_idx=attack_layer_idx)
    elif params['attack_model'] == 'resnet152':
        params['IMAGE_SIZE'] = 224
        attack_layer_idx = 8  # 0 ~ 8
        model = Resnet152(attack_layer_idx=attack_layer_idx)
    elif params['attack_model'] == 'inception_v3':
        params['IMAGE_SIZE'] = 299
        attack_layer_idx = [7]  # 0 ~ 13
        model = Inception_v3(attack_layer_idx=attack_layer_idx)
        model_ori = torchvision.models.inception_v3(
            pretrained=True).cuda().eval()
    else:
        raise ValueError('Invalid attack model type.')

    model_name = model.get_name()
    '''
    params['output_dir_path'] = os.path.join('/home/yantao/datasets/imagenet_100image', 'DR_' + params['attack_type'] + '_' + model_name + '_layer_{0}'.format(attack_layer_idx) + '_steps_{0}_{1:01d}'.format(params['attack_num_steps'], params['step_size']))
    if not os.path.exists(params['output_dir_path']):
        os.mkdir(params['output_dir_path'])
    '''
    params['output_dir_path'] = 'images_adv'

    if params['attack_type'] == 'ori':
        adversary = DispersionAttack_gpu(model,
                                         epsilon=16 / 255.,
                                         step_size=params['step_size'] / 255.,
                                         steps=params['attack_num_steps'])
    elif params['attack_type'] == 'opt':
        adversary = DispersionAttack_opt_gpu(
            model,
            epsilon=16 / 255.,
            learning_rate=params['learning_rate'],
            steps=params['attack_num_steps'])
    elif params['attack_type'] == 'ori_trans':
        adversary = transform_DR_attack(model,
                                        epsilon=16 / 255.,
                                        step_size=params['step_size'] / 255.,
                                        steps=params['attack_num_steps'],
                                        prob=1.0,
                                        image_resize=330)
    else:
        raise ValueError('Invalid attack type.')

    images_t, file_name_list = load_images(
        dir_path=params['input_dir_path'],
        size=[params['IMAGE_SIZE'], params['IMAGE_SIZE']],
        order='channel_first',
        zero_one_bound=True,
        to_tensor=True)

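    # process the images in fixed-size batches, with a smaller final batch for the remainder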
    idx = 0
    pbar = tqdm(total=int(len(images_t) / params['batch_size']) + 1)
    while idx * params['batch_size'] < len(images_t):
        if (idx + 1) * params['batch_size'] <= len(images_t):
            temp_images_t = images_t[idx * params['batch_size']:(idx + 1) *
                                     params['batch_size']]
            temp_file_name_list = file_name_list[idx *
                                                 params['batch_size']:(idx +
                                                                       1) *
                                                 params['batch_size']]
        else:
            temp_images_t = images_t[idx * params['batch_size']:]
            temp_file_name_list = file_name_list[idx * params['batch_size']:]

        advs_var = adversary(temp_images_t.cuda())
        advs_t = advs_var.cpu()
        save_images(advs_t,
                    dir_path=params['output_dir_path'],
                    file_name_list=temp_file_name_list)
        pbar.update()
        idx += 1
    pbar.close()
Example 11
def main(args=None):
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)
    args_dic = vars(args)

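    # labels.txt is assumed to hold a Python dict literal mapping ImageNet class ids
    # to names; ast.literal_eval would be a safer way to parse it than eval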
    with open('utils/labels.txt', 'r') as inf:
        args_dic['imagenet_dict'] = eval(inf.read())

    args_dic['input_dir'] = os.path.join(args.dataset_dir, 'ori')

    target_model = None
    internal = None
    attack = None
    attack_layer_idx = None
    if args.adv_method == 'dr':
        loss_mtd = args.loss_method
        if args.target_model == 'vgg16':
            assert args.vgg16_attacklayer != -1
            target_model = Vgg16()
            internal = [i for i in range(29)]
            attack_layer_idx = [args.vgg16_attacklayer]  # 12, 14
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'resnet152':
            assert args.res152_attacklayer != -1
            target_model = Resnet152()
            internal = [i for i in range(9)]
            attack_layer_idx = [args.res152_attacklayer]  # #[4, 5, 6, 7]
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'inception_v3':
            assert args.inc3_attacklayer != -1
            target_model = Inception_v3()
            internal = [i for i in range(14)]
            attack_layer_idx = [args.inc3_attacklayer]  # [3, 4, 7, 8, 12]
            args_dic['image_size'] = (299, 299)
        else:
            raise ValueError('Invalid target model.')

        attack = DispersionAttack_gpu(target_model,
                                      epsilon=args.epsilon / 255.,
                                      step_size=args.step_size / 255.,
                                      steps=args.steps,
                                      loss_mtd=loss_mtd)

    elif args.adv_method in ('tidim', 'dim', 'mifgsm', 'pgd'):
        attack_layer_idx = [0]
        internal = [0]
        loss_mtd = ''

        if args.target_model == 'vgg16':
            target_model = torchvision.models.vgg16(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'resnet152':
            target_model = torchvision.models.resnet152(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (224, 224)
        elif args.target_model == 'inception_v3':
            target_model = torchvision.models.inception_v3(
                pretrained=True).cuda().eval()
            args_dic['image_size'] = (299, 299)
        else:
            raise ValueError('Invalid adv_method.')

        if args.adv_method == 'dim':
            attack = DIM_Attack(target_model,
                                decay_factor=1,
                                prob=0.5,
                                epsilon=args.epsilon / 255.,
                                step_size=args.step_size / 255.,
                                steps=args.steps,
                                image_resize=330)
        elif args.adv_method == 'mifgsm':
            attack = MomentumIteratorAttack(target_model,
                                            decay_factor=0.5,
                                            epsilon=args.epsilon / 255.,
                                            step_size=args.step_size / 255.,
                                            steps=args.steps,
                                            random_start=False)
        elif args.adv_method == 'pgd':
            attack = LinfPGDAttack(target_model,
                                   epsilon=args.epsilon / 255.,
                                   a=args.step_size / 255.,
                                   k=args.steps,
                                   random_start=False)
        elif args.adv_method == 'tidim':
            attack = TIDIM_Attack(target_model,
                                  decay_factor=1,
                                  prob=0.5,
                                  epsilon=args.epsilon / 255.,
                                  step_size=args.step_size / 255.,
                                  steps=args.steps,
                                  image_resize=330)

    else:
        raise ValueError('Invalid adv_method.')
    assert target_model is not None and internal is not None and attack is not None and attack_layer_idx is not None
    attack_layer_idx_str = '_'.join(str(layer_idx) for layer_idx in attack_layer_idx)

    if not DEBUG:
        args_dic['output_dir'] = os.path.join(
            args.dataset_dir,
            '{0}_{1}_layerAt_{2}_eps_{3}_stepsize_{4}_steps_{5}_lossmtd_{6}'.
            format(args.adv_method, args.target_model, attack_layer_idx_str,
                   args.epsilon, args.step_size, args.steps, loss_mtd))

        if os.path.exists(args.output_dir):
            raise ValueError('Output folder already exists.')
        os.mkdir(args.output_dir)

    count = 0
    images_list = []
    names_list = []
    total_images = len(os.listdir(args.input_dir))
    assert args.batch_size > 0
    for image_count, image_name in enumerate(tqdm(os.listdir(args.input_dir))):
        image_path = os.path.join(args.input_dir, image_name)
        image_np = load_image(shape=args.image_size,
                              data_format='channels_first',
                              abs_path=True,
                              fpath=image_path)
        images_list.append(image_np)
        names_list.append(image_name)
        count += 1
        if count < args.batch_size and image_count != total_images - 1:
            continue

        images_np = np.array(images_list)
        count = 0
        images_list = []

        images_var = numpy_to_variable(images_np)
        if args.adv_method == 'dr':
            advs = attack(images_var, attack_layer_idx, internal)
        else:
            assert args.batch_size == 1, 'Baselines are not tested for batch input.'
            target_model.eval()
            logits_nat = target_model(images_var)
            y_var = logits_nat.argmax().long().unsqueeze(0)
            advs = attack(images_var.cpu(), y_var.cpu())

        if not DEBUG:
            advs_np = variable_to_numpy(advs)
            for idx, adv_np in enumerate(advs_np):
                image_pil = Image.fromarray(
                    np.transpose((adv_np * 255).astype(np.uint8), (1, 2, 0)))
                image_pil.save(
                    os.path.join(args.output_dir,
                                 os.path.splitext(names_list[idx])[0] +
                                 '.png'))
        names_list = []
Example 12
    def __init__(self, args):
        super(SDCNet2D, self).__init__()

        self.rgb_max = 255
        self.sequence_length = args.sequence_length

        factor = 2
        # 3 RGB channels per frame plus 2 flow channels per frame pair
        input_channels = self.sequence_length * 3 + (self.sequence_length - 1) * 2

        self.conv1 = conv2d(input_channels, 64 // factor, kernel_size=7, stride=2)
        self.conv2 = conv2d(64 // factor, 128 // factor, kernel_size=5, stride=2)
        self.conv3 = conv2d(128 // factor, 256 // factor, kernel_size=5, stride=2)
        self.conv3_1 = conv2d(256 // factor, 256 // factor)
        self.conv4 = conv2d(256 // factor, 512 // factor, stride=2)
        self.conv4_1 = conv2d(512 // factor, 512 // factor)
        self.conv5 = conv2d(512 // factor, 512 // factor, stride=2)
        self.conv5_1 = conv2d(512 // factor, 512 // factor)
        self.conv6 = conv2d(512 // factor, 1024 // factor, stride=2)
        self.conv6_1 = conv2d(1024 // factor, 1024 // factor)

        self.deconv5 = deconv2d(1024 // factor, 512 // factor)
        self.deconv4 = deconv2d(1024 // factor, 256 // factor)
        self.deconv3 = deconv2d(768 // factor, 128 // factor)
        self.deconv2 = deconv2d(384 // factor, 64 // factor)
        self.deconv1 = deconv2d(192 // factor, 32 // factor)
        self.deconv0 = deconv2d(96 // factor, 16 // factor)

        self.final_flow = nn.Conv2d(input_channels + 16 // factor, 2, kernel_size=3, stride=1, padding=1, bias=True)

        # initialize parameters: Xavier-uniform weights and uniform biases
        # for Conv2d and ConvTranspose3d layers
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.ConvTranspose3d)):
                if m.bias is not None:
                    init.uniform_(m.bias)
                init.xavier_uniform_(m.weight)
        self.vgg = Vgg16().type(torch.cuda.FloatTensor)
        self.flownet2 = FlowNet2(args, batchNorm=False)
        assert os.path.exists(args.flownet2_checkpoint), "flownet2 checkpoint must be provided."
        flownet2_checkpoint = torch.load(args.flownet2_checkpoint)
        self.flownet2.load_state_dict(flownet2_checkpoint['state_dict'], strict=False)

        for param in self.flownet2.parameters():
            param.requires_grad = False

        for param in self.vgg.parameters():
            param.requires_grad = False

        self.warp_nn = Resample2d(bilinear=False)
        self.warp_bilinear = Resample2d(bilinear=True)

        self.L1Loss = nn.L1Loss()

        flow_mean = torch.FloatTensor([-0.94427323, -1.23077035]).view(1, 2, 1, 1)
        flow_std = torch.FloatTensor([13.77204132, 7.47463894]).view(1, 2, 1, 1)
        rgb_mean = torch.FloatTensor([106.7747911, 96.13649598, 76.61428884]).view(1, 3, 1, 1)

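        # buffers move with .cuda()/.to() and are saved in state_dict, but are not trained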
        self.register_buffer('flow_mean', flow_mean)
        self.register_buffer('flow_std', flow_std)
        self.register_buffer('rgb_mean', rgb_mean)

        self.ignore_keys = ['flownet2', 'vgg']
        return
Example 13
def main():

  # load option
  parser = TestOptions()
  opts = parser.parse()
  if len(opts.gpu_ids) > 1:
    raise Exception('only one GPU for testing!')

  # create result folder
  result_dir = os.path.join(opts.result_dir, opts.name)
  if not os.path.exists(result_dir):
    os.mkdir(result_dir)

  # data loader
  print('\n--- load {} dataset ---'.format(opts.phase))
  dataset = AlignedDataset(opts)
  loader = torch.utils.data.DataLoader(dataset, batch_size=opts.batch_size, shuffle=False, num_workers=opts.nThreads)

  # load model
  opts.load = os.path.join(opts.output_dir, 'model', opts.load)
  print('\n--- load ArtEditing model from {} ---'.format(opts.load))
  model = Model(opts)
  model.cuda()
  _, _ = model.load(opts.load)

  # perceptual model
  vgg = Vgg16(requires_grad=False)
  vgg.cuda()
  vgg.eval()

  # load regularizer
  print('\n--- AdaIN optimizer ---')
  adain_optimize = []
  for i in range(opts.n_stage - 1):
    opts.reg_load[i] = os.path.join(opts.output_dir, 'model', opts.reg_load[i])
    print('  load {}-th stage from {}'.format(i, opts.reg_load[i]))
    adain_optimize.append(AdaINOptimize(opts, model.bicycle[i], vgg, opts.input_dim[i + 1]))
    adain_optimize[i].cuda()
    adain_optimize[i].load(opts.reg_load[i])

  # test
  print('\n--- Testing ---')
  for idx, (imgs, imgname) in enumerate(loader):
    outs = []
    with torch.no_grad():
      imgs = [img.cuda() for img in imgs]

      # store ground-truth img at each stage
      outs += imgs
      names = ['gt_{}'.format(i) for i in range(opts.n_stage)]

      # workflow inference
      cs = model.test_forward_backward(imgs[opts.n_stage - 1])
      outs += cs
      names += ['infer_{}'.format(i) for i in range(len(cs))]

    # artwork generation + adain optimization for each stage
    zs = []
    for i in range(opts.n_stage - 1):

      # get input and reference output image
      inp = cs[i] if i == 0 else outs[-1]
      out = imgs[i + 1] if i == opts.n_stage - 2 else cs[i + 1]

      # adain optimize
      z, imgs_rec = adain_optimize[i].forward(inp, out)
      zs.append(z)

      # store reconstructed images
      outs += imgs_rec
      names += ['rec_{}_{:04d}'.format(i + 1, step * (opts.n_refine // 5)) for step in range(len(imgs_rec))]

    # get editing results
    with torch.no_grad():

      # re-sample latent representations at each stage
      for idx_stage in range(opts.n_stage - 1):
        for idx_vary in range(opts.num):
          imgs_edit = model.test_forward(cs[0], zs, vary_stage=idx_stage)
          for i, img_edit in enumerate(imgs_edit):
            outs.append(img_edit)
            names.append('edit_{}_{}_{}'.format(idx_stage + 1 + i, idx_stage + 1, idx_vary))

      # save
      save_imgs(outs, names, os.path.join(result_dir, imgname[0]))

    print('{}/{}'.format(idx + 1, len(loader)))

  return
Example 14
def run(train_loader, val_loader, epochs, lr, momentum, log_interval, log_dir):

    model = Vgg16()

    writer = create_summary_writer(model, train_loader, log_dir)

    device = 'cpu'
    if torch.cuda.is_available():
        device = 'cuda'

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=momentum,
                          weight_decay=0.001)

    lr_scheduler = ExponentialLR(optimizer, gamma=0.975)

    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        F.nll_loss,
                                        device=device)

    evaluator = create_supervised_evaluator(model,
                                            metrics={
                                                'accuracy': Accuracy(),
                                                'nll': Loss(F.nll_loss)
                                            },
                                            device=device)

    trainer.add_event_handler(Events.EPOCH_COMPLETED,
                              lambda engine: lr_scheduler.step())

    trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())

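    # `default_score_fn` is assumed to be defined elsewhere in the module (as in the
    # earlier training example), returning engine.state.metrics['accuracy']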
    # store the best model
    best_model_handler = ModelCheckpoint(dirname=log_dir,
                                         filename_prefix="best",
                                         n_saved=3,
                                         score_name="test_acc",
                                         score_function=default_score_fn)
    evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {
        'model': model,
    })

    # add early stopping
    es_patience = 5
    es_handler = EarlyStopping(patience=es_patience,
                               score_function=default_score_fn,
                               trainer=trainer)
    evaluator.add_event_handler(Events.COMPLETED, es_handler)

    def empty_cuda_cache(engine):
        torch.cuda.empty_cache()
        import gc
        gc.collect()

    trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
    evaluator.add_event_handler(Events.COMPLETED, empty_cuda_cache)

    @trainer.on(Events.ITERATION_COMPLETED(every=log_interval))
    def log_training_loss(engine):
        print("Epoch[{}] Iteration[{}/{}] Loss: {:.2f}"
              "".format(engine.state.epoch, engine.state.iteration,
                        len(train_loader), engine.state.output))
        writer.add_scalar("training/loss", engine.state.output,
                          engine.state.iteration)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_training_results(engine):
        evaluator.run(train_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        print(
            "Training Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))
        writer.add_scalar("training/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("training/avg_accuracy", avg_accuracy,
                          engine.state.epoch)

    @trainer.on(Events.EPOCH_COMPLETED)
    def log_validation_results(engine):
        evaluator.run(val_loader)
        metrics = evaluator.state.metrics
        avg_accuracy = metrics['accuracy']
        avg_nll = metrics['nll']
        print(
            "Validation Results - Epoch: {}  Avg accuracy: {:.2f} Avg loss: {:.2f}"
            .format(engine.state.epoch, avg_accuracy, avg_nll))
        writer.add_scalar("valdation/avg_loss", avg_nll, engine.state.epoch)
        writer.add_scalar("valdation/avg_accuracy", avg_accuracy,
                          engine.state.epoch)

    # kick everything off
    trainer.run(train_loader, max_epochs=epochs)

    writer.close()