Example #1
def iterative_grad_attack(inception_model,
                          source_tensor,
                          target_tensor,
                          n_steps=200,
                          lr=0.01):
    with torch.no_grad():
        target_rep = inception_model(target_tensor).detach()

    perturbed_tensor = source_tensor.clone()
    for step in range(1, n_steps + 1):
        grad, similarity = cal_source_grad(inception_model, perturbed_tensor,
                                           target_rep)
        perturbed_tensor = perturbed_tensor + lr * grad
        perturbed_tensor = torch.clamp(perturbed_tensor, -1.0, 1.0).detach_()
        if similarity > 0.99:
            break

    adv_rep = inception_model(perturbed_tensor)
    rep_dist = (target_rep * adv_rep).sum(dim=1).mean().cpu().item()

    adv_img = tensor_to_image(
        fixed_image_standardization_inverse(perturbed_tensor.cpu()).squeeze(0))
    tgt_img = tensor_to_image(
        fixed_image_standardization_inverse(target_tensor.cpu()).squeeze(0))

    pixel_dist = compute_dist(np.asarray(adv_img), np.asarray(tgt_img))
    return adv_img, pixel_dist, rep_dist
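Examples #1 and #6 call a cal_source_grad helper that this page does not include. A minimal sketch of what it plausibly computes, assuming the model returns L2-normalized embeddings (which makes the dot product used above for rep_dist a cosine similarity):

import torch

def cal_source_grad(model, perturbed, target_rep):
    # Hypothetical helper: gradient of the embedding similarity with
    # respect to the input image (the caller ascends it via lr * grad).
    perturbed = perturbed.clone().requires_grad_(True)
    rep = model(perturbed)
    similarity = (rep * target_rep).sum(dim=1).mean()
    similarity.backward()
    return perturbed.grad.detach(), similarity.detach()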
Example #2
def validation_loop(dataloader, model, loss_fn, epoch, rgb_map: dict,
                    batch_size: int) -> float:
    """
    Validate the neural network with a given loss_fn and optimizer

    :param dataloader: a dataloader class that returns random batched image and mask tensor pairs
    :param model: the torch nn model to be trained
    :param loss_fn: the loss function used as to calculate the 'error' between X and y
    :param epoch: number of times we have gone through the loop
    :param rgb_map: rgb_map used in original image
    :param batch_size: number of batches used to train on
    :return: avg_loss
    """
    size = len(dataloader)
    validation_loss, correct, jaccard_loss = [], [], []
    with torch.no_grad():
        for i_batch, sample_batched in enumerate(dataloader):
            X = sample_batched['image'].to(device)
            y = sample_batched['mask'].to(device)
            pred = model(X)

            validation_loss.append(loss_fn(pred, y).item())
            correct.append(
                (pred.argmax(1) == y).type(torch.float).sum().item() /
                (y.shape[1] * y.shape[2]))
            # jaccard_loss.append(smp.utils.functional.iou(pred, y).item())

            # writer.add_histogram("accuracy-distribution/train", (pred.argmax(1) == y).type(torch.float).sum().item()
            #                      / (y.shape[1] * y.shape[2]), i_batch)

            if i_batch % 20 == 1:
                pred_fig = tensor_to_image(pred, rgb_map, True)
                y_fig = tensor_to_image(y, rgb_map, False)
                X_fig = tensor_image_to_image(X)
                writer.add_figure('prediction/' + str(i_batch),
                                  pred_fig,
                                  global_step=epoch)
                writer.add_figure('truth/' + str(i_batch),
                                  y_fig,
                                  global_step=epoch)
                writer.add_figure('input/' + str(i_batch),
                                  X_fig,
                                  global_step=epoch)

    validation_mean = get_sample_mean(validation_loss)
    correct_mean = get_sample_mean(correct)
    # jaccard_mean = get_sample_mean(jaccard_loss)
    print(
        f"Validation Error: \n Accuracy: {(100 * correct_mean / batch_size):>0.1f}% Avg loss: "
        f"{validation_mean / batch_size:>8f} \n")
    writer.add_scalar("val/avg accuracy", correct_mean / batch_size, epoch)
    writer.add_scalar("val/avg loss", validation_mean / batch_size, epoch)
    # writer.add_scalar("val/avg IoU", jarrard_mean, epoch)
    # TODO: Jacquard-loss is doing something weird, IoU doesn't look correct
    return validation_mean
Example #3
def disrupt_stimuli(save_folder, inputs_and_targets, encoder, roi_mask,
                    target_roi, loss_method):
    roi_mask = torch.from_numpy(roi_mask.astype(bool))

    towards_target = loss_method == 'towards'
    if not target_roi:
        loss_func = roi_loss_func(None, towards_target)
    else:
        loss_func = roi_loss_func(roi_mask, towards_target)

    for input_and_target in tqdm(inputs_and_targets):
        orig_image = input_and_target['stimulus_path']
        orig_image = utils.image_to_tensor(orig_image)

        target = input_and_target['target_voxels']
        target = torch.from_numpy(target)
        if not target_roi:
            target = target[roi_mask]
        else:
            with torch.no_grad():
                orig_voxels = encoder(orig_image.unsqueeze(0)).squeeze(0)
            target[~roi_mask] = orig_voxels[~roi_mask]

        disrupted_image = deepdream(orig_image, target, encoder, loss_func)
        metrics = loss_metrics(orig_image, disrupted_image, target, encoder,
                               roi_mask if target_roi else None)

        disrupted_image = utils.tensor_to_image(disrupted_image)
        save_disrupted_image(save_folder, input_and_target, disrupted_image,
                             metrics)
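roi_loss_func is defined elsewhere in that project; a plausible sketch consistent with how it is called here (an optional mask plus a towards/away flag), purely illustrative:

import torch

def roi_loss_func(roi_mask=None, towards_target=True):
    # Hypothetical reconstruction: a loss over the encoder's voxel
    # predictions, optionally restricted to an ROI, with the sign
    # flipped when optimization should move away from the target.
    def loss(pred_voxels, target_voxels):
        if roi_mask is not None:
            pred_voxels = pred_voxels[roi_mask]
            target_voxels = target_voxels[roi_mask]
        dist = torch.mean((pred_voxels - target_voxels) ** 2)
        return dist if towards_target else -dist
    return loss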
Example #4
async def on_reaction_add(reaction: discord.Reaction,
                          user: discord.User) -> None:
    """Handle user reactions to select attributes."""
    # The reaction should not come from the bot and
    # should be on the bots message
    if user.id != bot.id and reaction.message.id == bot.sent_panel.id:
        await bot.sent_panel.remove_reaction(reaction, user)
        emoji = str(reaction)

        if emoji in bot.label_emojis:
            label_idx = bot.label_emojis.index(emoji)
            # Change the label to its opposite
            bot.labels[0][label_idx] = float(not bot.labels[0][label_idx])
            # Edit panel
            await bot.sent_panel.edit(embed=get_info_panel())

        if emoji == bot.go_emoji:
            # Generate image
            generated_data = bot.generator(bot.image_data, bot.labels)
            generated_image = tensor_to_image(generated_data, bot.config)

            # To upload the image, convert to bytes
            # https://stackoverflow.com/questions/59868527
            with io.BytesIO() as image_binary:
                generated_image.save(image_binary, 'png')
                image_binary.seek(0)
                image_file = discord.File(fp=image_binary,
                                          filename='gen_img.png')
                ctx = reaction.message.channel
                bot.sent_image = await ctx.send(file=image_file)
Example #5
def run_test(args):
    it_network = ImageTransformNet(
        input_shape=hparams['test_size'],
        residual_layers=hparams['residual_layers'],
        residual_filters=hparams['residual_filters'])
    ckpt_dir = os.path.join(args.name, 'pretrained')
    ckpt = tf.train.Checkpoint(network=it_network, step=tf.Variable(0))
    ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
    print('\n###################################################')
    print('Perceptual Losses for Real-Time Style Transfer Test')
    print('###################################################\n')
    print('Restored {} step: {}\n'.format(args.name, str(ckpt.step.numpy())))

    dir_size = 'step_{}_{}x{}'.format(str(ckpt.step.numpy()),
                                      str(hparams['test_size'][0]),
                                      str(hparams['test_size'][1]))
    dir_model = 'output_img_{}'.format(args.name)
    out_dir = os.path.join(args.output_path, dir_model, dir_size)

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    content_img_list = os.listdir(args.test_content_img)

    for c_file in content_img_list:
        content = convert(os.path.join(args.test_content_img, c_file),
                          hparams['test_size'][:2])[tf.newaxis, :]
        output = it_network(content, training=False)
        image = tensor_to_image(output)
        c_name = '{}_{}'.format(args.name, os.path.splitext(c_file)[0])
        save_path = os.path.join(out_dir, c_name)
        image.save(save_path + '.jpeg')
        print('Image: {}.jpeg saved'.format(save_path))
Example #6
def iterative_grad_attack(inception_model,
                          source_img,
                          target_img,
                          n_steps=50,
                          lr=0.01):
    with torch.no_grad():
        target_rep = inception_model(target_img).detach()

    perturbed_img = source_img.clone()
    for step in range(1, n_steps + 1):
        grad, similarity = cal_source_grad(inception_model, perturbed_img,
                                           target_rep)
        # print('grad range ', grad.max(), grad.min())
        perturbed_img = perturbed_img + lr * grad
        perturbed_img = torch.clamp(perturbed_img, -1.0, 1.0).detach_()
        if similarity > 0.99:
            break
        # if step % 50 == 1:
        #     print('step {}, similarity: {:.4f}'.format(step, similarity.item()))
    adv_rep = inception_model(perturbed_img)
    rep_dist = (target_rep * adv_rep).sum(dim=1).mean().cpu().item()

    adv_imgs = [
        tensor_to_image(img.squeeze(0))
        for img in fixed_image_standardization_inverse(
            perturbed_img.cpu()).split(split_size=1, dim=0)
    ]
    target_imgs = [
        tensor_to_image(img.squeeze(0))
        for img in fixed_image_standardization_inverse(target_img.cpu()).split(
            split_size=1, dim=0)
    ]

    dist_list = [
        compute_dist(np.asarray(adv), np.asarray(tgt))
        for (adv, tgt) in zip(adv_imgs, target_imgs)
    ]
    dist = np.mean(dist_list)
    return adv_imgs, dist, rep_dist
Example #7
def train(optimizer, model, target, content_features, style_weights,
          style_grams, content_weight, style_weight, steps=2000,
          show_every=200):

    for ii in tqdm(range(1, steps+1)):
        
        # get the features from your target image
        target_features = get_features(target, model)

        # the content loss
        content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)
        
        # the style loss
        # initialize the style loss to 0
        style_loss = 0
        # then add to it for each layer's gram matrix loss
        for layer in style_weights:
            # get the "target" style representation for the layer
            target_feature = target_features[layer]
            target_gram = gram_matrix(target_feature)
            _, d, h, w = target_feature.shape
            # get the "style" style representation
            style_gram = style_grams[layer]
            # the style loss for one layer, weighted appropriately
            layer_style_loss = style_weights[layer] * torch.mean((target_gram - style_gram)**2)
            # add to the style loss
            style_loss += layer_style_loss / (d * h * w)
            
        # calculate the *total* loss
        total_loss = content_weight * content_loss + style_weight * style_loss
        
        # update your target image
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        
        # display intermediate images and print the loss
        if ii % show_every == 0:
            print('Total loss: ', total_loss.item())
            plt.imshow(tensor_to_image(target))
            plt.show()
    return target
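Example #7 follows the familiar VGG19 style-transfer notebook. The get_features and gram_matrix helpers it relies on are not shown on this page; they are commonly defined as below (an assumption, with the standard VGG19 layer mapping):

import torch

def get_features(image, model, layers=None):
    # Map VGG19 module indices to the layer names used above.
    if layers is None:
        layers = {'0': 'conv1_1', '5': 'conv2_1', '10': 'conv3_1',
                  '19': 'conv4_1', '21': 'conv4_2', '28': 'conv5_1'}
    features = {}
    x = image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features

def gram_matrix(tensor):
    # Correlations between feature maps: flatten the spatial dims
    # and take the (d x d) product.
    _, d, h, w = tensor.size()
    tensor = tensor.view(d, h * w)
    return torch.mm(tensor, tensor.t())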
Example #8
# sample grids: one source column plus __SAMPLE_NUM generated columns
a2b_sample_array = np.zeros((__EVAL_NUM * im_h, (__SAMPLE_NUM + 1) * im_w, 3),
                            dtype=np.uint8)
b2a_sample_array = np.zeros((__EVAL_NUM * im_h, (__SAMPLE_NUM + 1) * im_w, 3),
                            dtype=np.uint8)

pbar = tqdm(total=__EVAL_NUM)

with torch.no_grad():

    for i, (image_a, image_b, _) in enumerate(dataloader):
        image_a = image_a.cuda()
        image_b = image_b.cuda()

        pbar.update(1)
        if i == __EVAL_NUM: break

        a2b_sample_array[i*im_h:(i+1)*im_h, 0:im_w, :] = \
                tensor_to_image(image_a)

        b2a_sample_array[i*im_h:(i+1)*im_h, 0:im_w, :] = \
                tensor_to_image(image_b)

        for j in range(__SAMPLE_NUM):
            output_images = trainer.sample(image_a, image_b, training=False)
            im_array_a2b = tensor_to_image(output_images[3])
            im_array_b2a = tensor_to_image(output_images[-1])

            a2b_sample_array[i*im_h:(i+1)*im_h, (j+1)*im_w:(j+2)*im_w, :] = \
                    im_array_a2b

            b2a_sample_array[i*im_h:(i+1)*im_h, (j+1)*im_w:(j+2)*im_w, :] = \
                    im_array_b2a
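Example #8 writes tensor_to_image's output straight into a uint8 NumPy canvas, so here it must return an HxWx3 uint8 array. A sketch under the assumption that the translator's outputs lie in [-1, 1]:

def tensor_to_image(tensor):
    # 1xCxHxW tensor in [-1, 1] -> HxWx3 uint8, ready for slicing above.
    img = tensor.detach().cpu().squeeze(0)
    img = (img + 1.0) / 2.0          # [-1, 1] -> [0, 1]
    img = img.clamp(0.0, 1.0).mul(255).byte()
    return img.permute(1, 2, 0).numpy()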
Example #9
results = extractor(tf.constant(content_image))

print('Styles:')
print_stats_of_layers(results['style'].items())

print("Contents:")
print_stats_of_layers(results['content'].items())

style_targets = extractor(style_image)['style']
content_targets = extractor(content_image)['content']
image = tf.Variable(content_image)
opt = tf.optimizers.Adam(learning_rate=0.02, beta_1=0.99, epsilon=1e-1)

targs_style = style_targets, style_weight, num_style_layers
targs_content = content_targets, content_weight, num_content_layers

start = time.time()
step = 0
for n in range(epochs):
    for m in range(steps_per_epoch):
        step += 1
        train_step(image, extractor, opt, targs_style, targs_content)
        print(".", end='')
    if save_progress:
        tensor_to_image(image).save(output_file[:-4] + str(n) + '.png')
    print("Train step: {}".format(step))

end = time.time()
print("Total time: {:.1f}".format(end - start))
tensor_to_image(image).save(output_file)
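Examples #9 and #11 mirror the TensorFlow style-transfer tutorial, where tensor_to_image converts a float tensor in [0, 1] to a PIL image for .save(). The usual definition (assumed here, since this page does not include it):

import numpy as np
import PIL.Image

def tensor_to_image(tensor):
    # Scale to 0-255, drop the batch dimension, and wrap in PIL.
    tensor = tensor * 255
    tensor = np.array(tensor, dtype=np.uint8)
    if np.ndim(tensor) > 3:
        assert tensor.shape[0] == 1
        tensor = tensor[0]
    return PIL.Image.fromarray(tensor)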
Example #10
    for step in progress:
        optimizer.zero_grad()
        net_output = net(net_input)
        net_output_ds = ds_net(net_output)

        loss = MSE(net_output_ds, target)
        loss.backward()
        optimizer.step()

        train_loss = loss.item()

        if step % 100 == 0:
            if use_cuda:
                net_output = net_output.cpu()
            filename = None if args.output is None else '%s/%04d.png' % (
                args.output, step)
            img = tensor_to_image(net_output.data, filename)
            psnr = compare_psnr(truth, np.array(img))
            max_psnr = max(max_psnr, psnr)

        progress.set_description(
            'Loss: %.6f | PSNR: %.2f dB | Max PSNR: %.2f dB' %
            (train_loss, psnr, max_psnr))

        if use_cuda:
            noise_new = torch.cuda.FloatTensor(noise.shape).normal_(std=sigma)
        else:
            noise_new = torch.FloatTensor(noise.shape).normal_(std=sigma)

        net_input.data += noise_new
Example #11
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--content', type=str,
                        dest='content_path',
                        help='content image path',
                        required=True)
    parser.add_argument('--style', type=str,
                        dest='style_path',
                        help='style image path',
                        required=True)
    parser.add_argument('--output', type=str,
                        dest='dest',
                        help='output path',
                        required=True)
    parser.add_argument('--content-weight', type=float,
                        dest='content_weight',
                        help='content weight',
                        default=CONTENT_WEIGHT)
    parser.add_argument('--style-weight', type=float,
                        dest='style_weight',
                        help='style weight',
                        default=STYLE_WEIGHT)
    parser.add_argument('--tv-weight', type=float,
                        dest='tv_weight',
                        help='total variation weight',
                        default=TV_WEIGHT)
    parser.add_argument('--learning-rate', type=float,
                        dest='learning_rate',
                        help='learning rate',
                        default=LEARNING_RATE)
    parser.add_argument('--epochs', type=int,
                        dest='epochs',
                        help='number of epochs',
                        default=NUM_EPOCHS)
    parser.add_argument('--len-epoch', type=int,
                        dest='len_epoch',
                        help='steps per epoch',
                        default=STEPS_PER_EPOCH)

    args = parser.parse_args()

    content_image = utils.load_image(args.content_path)
    style_image = utils.load_image(args.style_path)

    content_layers = ['block5_conv2']

    style_layers = ['block1_conv1',
                    'block2_conv1',
                    'block3_conv1',
                    'block4_conv1',
                    'block5_conv1']

    model = StyleTransfer(content_layers, style_layers)

    optimizer = tf.optimizers.Adam(learning_rate=args.learning_rate,
                                   beta_1=0.99, epsilon=1e-1)

    trainer = Trainer(model, style_content_loss, optimizer, content_image,
                      style_image, args.content_weight, args.style_weight,
                      args.tv_weight, args.epochs, args.len_epoch)

    image = tf.Variable(content_image)
    trainer.train(image)

    utils.tensor_to_image(image).save(args.dest)
Example #12
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imwrite('images_transformed/' + str(i) + '.jpg', frame)
    i += 1

cap.release()
cv2.destroyAllWindows()

imgs = sorted(os.listdir('images_dissected/'))

for img in imgs:
    content_image = utils.load_img('images_dissected/' + img)
    stylized_image = hub_module(tf.constant(content_image),
                                tf.constant(style_image))[0]
    copy = utils.tensor_to_image(stylized_image)
    copy.save('images_transformed/transformed_' + img)

imgs_transformed = sorted(os.listdir('images_transformed/'))

img_array = []
for img in imgs_transformed:
    image = cv2.imread('images_transformed/' + img)
    height, width, layers = image.shape
    size = (width, height)
    img_array.append(image)

out = cv2.VideoWriter('project.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, size)

# Write the file
for i in range(len(img_array)):
    out.write(img_array[i])

out.release()
Example #13
    parser.add_argument('--save_folder', required=True, type=str, help='folder to save disrupted images')
    parser.add_argument('--encoder_file', required=True, type=str, help='name of the encoder file')
    parser.add_argument('--targets_folder', default=None, type=str,
                        help='folder containing voxel targets (if not provided, activation will be maximized)')
    args = parser.parse_args()

    shutil.rmtree(args.save_folder, ignore_errors=True)
    os.mkdir(args.save_folder)

    encoder = torch.load(os.path.join('saved_models', args.encoder_file))

    if args.targets_folder is not None:
        print('Generating targeted stimuli')
        targets = os.listdir(args.targets_folder)
        targets = [t for t in targets if t != '.DS_Store']
        targets = [t for t in targets if '.target.pth' in t]
        for target_name in tqdm(targets):
            target = torch.load(os.path.join(args.targets_folder, target_name))
            generated, metrics = generate_stimulus(target, encoder, towards_target=True)
            utils.tensor_to_image(generated).save(os.path.join(args.save_folder, target_name.split('.')[0] + '.png'))
            with open(os.path.join(args.save_folder, target_name.split('.')[0] + '_metrics.json'), 'w') as f:
                f.write(json.dumps(metrics, indent=2))
    else:
        print('Generating untargeted stimuli')
        for i in tqdm(range(20)):
            target = torch.zeros(encoder.regressor.linear.out_features)
            generated, metrics = generate_stimulus(target, encoder, towards_target=False)
            utils.tensor_to_image(generated).save(os.path.join(args.save_folder, '{:05d}.png'.format(i)))
            with open(os.path.join(args.save_folder, '{:05d}_metrics.json'.format(i)), 'w') as f:
                f.write(json.dumps(metrics, indent=2))
Example #14
    stimuli = os.listdir(args.stimuli_folder)
    stimuli = [s for s in stimuli if s != '.DS_Store']

    roi_mask = utils.get_roi_mask(args.roi, args.encoder_file)

    encoder = torch.load(os.path.join('saved_models', args.encoder_file))

    print('Generating disruption examples')
    for stimulus_name in tqdm(stimuli):
        stimulus = utils.image_to_tensor(
            os.path.join(args.stimuli_folder, stimulus_name), resolution)
        target = torch.load(
            os.path.join(args.targets_folder, stimulus_name + '.target.pth'))
        disrupted, metrics = disrupt_stimulus(stimulus, target, encoder,
                                              roi_mask, args.towards_target,
                                              args.random)

        shutil.copyfile(
            os.path.join(args.stimuli_folder, stimulus_name),
            os.path.join(args.save_folder,
                         stimulus_name.replace('.', '_original.')))
        utils.tensor_to_image(disrupted).save(
            os.path.join(args.save_folder,
                         stimulus_name.replace('.', '_disrupted.')))
        with open(
                os.path.join(args.save_folder,
                             stimulus_name.split('.')[0] + '_metrics.json'),
                'w') as f:
            f.write(json.dumps(metrics, indent=2))
Example #15
def main(config):
    logger = config.get_logger('test')

    # setup data_loader instances
    data_loader = getattr(module_data, config['data_loader']['type'])(
        config['data_loader']['args']['data_dir'],
        batch_size=8,
        shuffle=False,
        validation_split=0.0,
        training=False,
        num_workers=2)

    # build model architecture
    model = config.init_obj('arch', module_arch)
    logger.info(model)

    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    logger.info('Loading checkpoint: {} ...'.format(config.resume))
    checkpoint = torch.load(config.resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    MAX_IMAGES = 20
    current = 0
    with torch.no_grad():
        for i, (data, target) in enumerate(tqdm(data_loader)):
            data, target = data.to(device), target.to(device)
            output = model(data)

            for j in range(data.shape[0]):
                if current < MAX_IMAGES:
                    current += 1

                    pred = torch.sigmoid(output.squeeze(dim=1)[j, :, :]) > 0.5
                    pred = tensor_to_image(pred)

                    true = tensor_to_image(target[j, :, :])

                    pred.save(f'images/{current}-pred.png')
                    true.save(f'images/{current}-true.png')

            # computing loss, metrics on test set
            loss = loss_fn(output, target)
            batch_size = data.shape[0]
            total_loss += loss.item() * batch_size
            for i, metric in enumerate(metric_fns):
                total_metrics[i] += metric(output, target) * batch_size

    n_samples = len(data_loader.sampler)
    log = {'loss': total_loss / n_samples}
    log.update({
        met.__name__: total_metrics[i].item() / n_samples
        for i, met in enumerate(metric_fns)
    })
    logger.info(log)
Example #16
                    type=int,
                    help='iterations to update image')
opt = parser.parse_args()

# fall back to CPU when CUDA is not requested or not available
device = torch.device(
    "cuda" if opt.use_cuda and torch.cuda.is_available() else "cpu")

# load in content and style image
content = image_loader(opt.content_path, shape=(512, 512)).to(device)
style = image_loader(opt.style_path, shape=(512, 512)).to(device)

# display images
if opt.show_img:
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
    ax1.imshow(tensor_to_image(content))
    ax2.imshow(tensor_to_image(style))

# weights for each style layer
# weighting earlier layers more will result in *larger* style artifacts
# notice we are excluding `conv4_2` our content representation
style_weights = {
    'conv1_1': 1.,
    'conv2_1': 0.75,
    'conv3_1': 0.2,
    'conv4_1': 0.2,
    'conv5_1': 0.2
}

content_weight = opt.content_weight  # alpha
style_weight = opt.style_weight  # beta
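In Example #16 the result goes to plt.imshow, so tensor_to_image must undo the loader's normalization and return an HxWxC float array. A sketch assuming image_loader applied the standard ImageNet statistics:

import numpy as np

def tensor_to_image(tensor):
    # 1xCxHxW normalized tensor -> HxWxC float array in [0, 1] for imshow.
    image = tensor.detach().cpu().squeeze(0).permute(1, 2, 0).numpy()
    image = image * np.array([0.229, 0.224, 0.225]) \
            + np.array([0.485, 0.456, 0.406])
    return image.clip(0.0, 1.0)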
Example #17
import matplotlib.pyplot as plt
import torch
from torchvision import transforms
from pycocotools.coco import COCO

train_path = '/home/user/Data/coco2014/train2014'
train_ann_file = '/home/user/Data/coco2014/annotations/instances_train2014.json'
val_path = '/home/user/Data/coco2014/val2014'
val_ann_file = '/home/user/Data/coco2014/annotations/instances_val2014.json'
coco = COCO(train_ann_file)

num_labels = 80

transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])


train_dataset = CocoDataset(train_path, train_ann_file, transform, num_labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=2, shuffle=True)
images, labels = next(iter(train_loader))

print(f'images.shape: {images.shape}')
print(f'labels.shape: {labels.shape}')
img = tensor_to_image(images[0])
class_names = get_classes_from_labels(labels[0])
print(class_names)

plt.imshow(img)
plt.show()