Example #1
    def __getitem__(self, index):
        img_name = self.imgs[index]
        img = jpg_loader(os.path.join(self.root_img, img_name))

        if not self.is_predict:
            label = self.labels[index]
            img_mask_name = get_image_mask_name(self.root_img_mask, img_name)
            img_mask = gif_loader(os.path.join(self.root_img_mask, img_mask_name))
            if self.transform_img:
                for tran in self.transform_img:
                    img, img_mask = tran(img, img_mask)

            img = img.resize((1024, 512), Image.BILINEAR)
            img_arr = np.asarray(img) / 255.0
            img_mask = img_mask.resize((1024, 512), Image.BILINEAR)

            img_mask_arr = np.asarray(img_mask)

            if self.transform:
                for tran in self.transform:
                    img_arr, img_mask_arr = tran(img_arr, img_mask_arr)
            img_mask_tensor = label_to_tensor(img_mask_arr)
            img_tensor = image_to_tensor(img_arr)
            return img_tensor, label, img_mask_tensor

        else:
            img = img.resize((1024, 512), Image.BILINEAR)
            img_arr = np.asarray(img) / 255.0
            if self.transform:
                for tran in self.transform:
                    img_arr = tran(img_arr)
            img_tensor = image_to_tensor(img_arr)
            return img_tensor, img_name
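The helpers image_to_tensor and label_to_tensor are not defined on this page. A minimal sketch of what they might look like for this dataset class, assuming a float HWC array in [0, 1] and an integer class-index mask (names and layouts are assumptions, not the original code):

import numpy as np
import torch

def image_to_tensor(img_arr):
    # assumed: HWC float array in [0, 1] -> CHW float32 tensor
    return torch.from_numpy(np.ascontiguousarray(img_arr.transpose(2, 0, 1))).float()

def label_to_tensor(mask_arr):
    # assumed: integer class-index mask -> long tensor for cross-entropy-style losses
    return torch.from_numpy(np.ascontiguousarray(mask_arr)).long()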
Example #2
def style_transfer_image(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image = Image.open(args.content_target)
    size = image.size
    content_target = image_to_tensor(image, size).to(device)
    style_targets = [
        image_to_tensor(Image.open(path), size).to(device)
        for path in args.style_targets
    ]
    n = len(style_targets)
    style_weights = (np.ones(n) / n
                     if args.style_weights is None else args.style_weights)
    input_image = content_target.clone().to(device)

    neural_style = NeuralStyle(content_layers=CONTENT_LAYERS,
                               style_layers=STYLE_LAYERS)
    neural_style.content_target = content_target
    neural_style.set_style_targets(style_targets, style_weights)

    output_image, _ = neural_style.transfer(
        input_image=input_image,
        epochs=args.epochs,
        style_weight=args.style_weight,
        content_weight=args.content_weight,
        verbose=args.verbose,
    )

    to_image(output_image, size=size).save(args.output)
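In this example image_to_tensor also takes a target size. A plausible sketch under that assumption, using torchvision (the actual helper may resize or normalize differently):

import torchvision.transforms as transforms

def image_to_tensor(image, size):
    # resize the PIL image, convert to a CHW tensor in [0, 1], add a batch axis
    # caution: the caller passes image.size, which is (width, height),
    # while transforms.Resize interprets a tuple as (height, width)
    preprocess = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor(),
    ])
    return preprocess(image).unsqueeze(0)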
Example #3
 def detect(self,
            filename,
            answer_text,
            verbose=False,
            layer_index=-1,
            mode="local"):
     if mode == "local":
         input_tensor = image_to_tensor(filename, self.img_height,
                                        self.img_width)
     else:
         try:
             input_tensor = image_to_tensor(filename, self.img_height,
                                            self.img_width)
         except Exception:
             raise ValueError("This URL is not supported! Use another one.")
     detection = self.model.predict(input_tensor)[0]
     a = np.array(detection)
     detect_label = self.labels[a.argmax(0)]
     if verbose:
         print("Result .... " + str(answer_text[detect_label]))
         img1 = vis_utils.load_img(filename,
                                   target_size=(self.img_height,
                                                self.img_width))
         # Swap softmax with linear
         layer_idx = vis_utils.find_layer_idx(self.model, 'predictions')
         self.model.layers[layer_idx].activation = activations.linear
         vis_model = vis_utils.apply_modifications(self.model)
         filter_index = a.argmax(0)
         grads = visualize_cam(
             vis_model,
             layer_idx,
             filter_index,  # class index
             img1[:, :, :],
             backprop_modifier='guided')
         fig = plt.figure(figsize=(10, 5))
         ax1 = fig.add_subplot(1, 2, 1)
         ax1.tick_params(labelbottom=False, bottom=False)
         ax1.grid(False)
         ax1.tick_params(labelleft=False, left=False)
         plt.yticks(color="None")
         ax1.set_xticklabels([])
         ax1.imshow(img1)
         ax1.imshow(grads, cmap='jet', alpha=0.6)
         ax1.set_title("Heat Map")
         sns.set(style="white", context="talk")
         f, ax1 = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
         sns.barplot(x=self.labels, y=detection, palette="PiYG", ax=ax1)
         ax1.set_ylabel("Value")
         plt.tick_params(length=0)
         plt.grid(False)
         plt.show()
     else:
         print(detect_label)
         print(detection)
     print("detectメソッドが完了しました.")
Example #4
 def on_epoch_end(self, epoch, logs={}):
     clear_output(wait=True)
     index = 1
     print('------------------------------------------------------------------------------------------------')
     print('------------------------------------------------------------------------------------------------')
     if self.verbose:
         for image in self.test_img_list:
             if self.color_mode == "rgb":
                 input_tensor = image_to_tensor(image, self.img_height, self.img_width)
             elif self.color_mode == "grayscale":
                 input_tensor = image_to_tensor(image, self.img_height, self.img_width, color_mode="grayscale")
             detection = self.model.predict(input_tensor)[0]
             layer_idx = utils.find_layer_idx(self.model, 'predictions')
             test_label = image.split("/")[-2]
             filter_index = self.labels.index(test_label)
             print(filter_index)
             img1 = utils.load_img(image, target_size=(self.img_height, self.img_width))
             grads = visualize_cam(
                 self.model,
                 layer_idx,
                 filter_indices=filter_index,
                 seed_input=img1,
                 backprop_modifier='guided'
             )
             print('\nIndex ' + str(index))
             print(detection)
             a = np.array(detection)
             print('Estimation: ' + self.labels[a.argmax(0)])
             fig = plt.figure(figsize=(10, 5))
             ax1 = fig.add_subplot(1, 2, 1)
             ax1.tick_params(labelbottom=False, bottom=False)
             ax1.tick_params(labelleft=False, left=False)
             ax1.set_xticklabels([]) 
             ax1.imshow(overlay(grads, img1))
             ax2 = fig.add_subplot(1, 2, 2)
             ax2.tick_params(labelbottom=False, bottom=False)
             ax2.tick_params(labelleft=False, left=False)
             ax2.set_xticklabels([])
             ax2.imshow(Image.open(image))
             plt.show()
             sns.set(style="white", context="talk")
             f, ax1 = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
             sns.barplot(x=self.labels, y=detection, palette="PiYG", ax=ax1)
             ax1.set_ylabel("Value")
             plt.show()
             index = index + 1
     if self.now_epoch % 5 == 0 or self.now_epoch == 1:
         _index = str(self.now_epoch).zfill(2)  # zero-pad to two digits
         self.model.save("./models/" + self.task_name + "/epoch_" + _index + ".hdf5")
     self.now_epoch = self.now_epoch + 1
Example #5
 def on_epoch_end(self, epoch, logs={}):
     if self.color_mode == "rgb":
         input_tensor = image_to_tensor(self.test_img_path, self.img_height,
                                        self.img_width)
     elif self.color_mode == "grayscale":
         input_tensor = image_to_tensor(self.test_img_path,
                                        self.img_height,
                                        self.img_width,
                                        color_mode="grayscale")
     detection = self.model.predict(input_tensor)[0]
     a = np.array(detection)
     sns.set(style="white", context="talk")
     f, ax1 = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
     gif_img = sns.barplot(x=self.labels, y=detection, palette="PiYG", ax=ax1)
     self.gif_images.append([gif_img])
Example #6
async def face(ctx: discord.ext.commands.Context) -> None:
    """Create the reaction panel for attribute selection."""
    try:
        # Fetch the image attached to the message
        url = ctx.message.attachments[0].url
        image = Image.open(requests.get(url, stream=True).raw)
    except Exception:
        # The image could not be fetched or decoded
        await ctx.send(error_message)
        return

    # A PNG may carry an alpha channel (4 channels);
    # the model expects 3 channels, so convert to RGB
    image = image.convert('RGB')

    bot.image_data = image_to_tensor(image, bot.config)

    # Guess labels with discriminator
    _, labels = bot.discriminator(bot.image_data)

    # Round labels to 0 and 1
    bot.labels = (labels > 0.5).float()

    bot.sent_panel = await ctx.send(embed=get_info_panel())

    # Add reactions
    for emoji in bot.label_emojis:
        await bot.sent_panel.add_reaction(emoji)

    await bot.sent_panel.add_reaction(bot.go_emoji)
Example #7
def disrupt_stimuli(save_folder, inputs_and_targets, encoder, roi_mask,
                    target_roi, loss_method):
    roi_mask = torch.from_numpy(roi_mask.astype(np.uint8))

    towards_target = loss_method == 'towards'
    if not target_roi:
        loss_func = roi_loss_func(None, towards_target)
    else:
        loss_func = roi_loss_func(roi_mask, towards_target)

    for input_and_target in tqdm(inputs_and_targets):
        orig_image = input_and_target['stimulus_path']
        orig_image = utils.image_to_tensor(orig_image)

        target = input_and_target['target_voxels']
        target = torch.from_numpy(target)
        if not target_roi:
            target = target[roi_mask]
        else:
            with torch.no_grad():
                orig_voxels = encoder(orig_image.unsqueeze(0)).squeeze(0)
            off_roi = roi_mask == 0  # positions outside the target ROI
            target[off_roi] = orig_voxels[off_roi]

        disrupted_image = deepdream(orig_image, target, encoder, loss_func)
        metrics = loss_metrics(orig_image, disrupted_image, target, encoder,
                               roi_mask if target_roi else None)

        disrupted_image = utils.tensor_to_image(disrupted_image)
        save_disrupted_image(save_folder, input_and_target, disrupted_image,
                             metrics)
Example #8
 def detect(self, filename):
     input_tensor = image_to_tensor(filename, self.img_height,
                                    self.img_width)
     detection = self.model.predict(input_tensor)[0]
     a = np.array(detection)
     detect_label = self.labels[a.argmax(0)]
     print(detect_label)
     print(detection)
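The Keras-based examples (the detect methods and on_epoch_end callbacks above) all call image_to_tensor(filename, height, width[, color_mode]) and feed the result to model.predict. A sketch of such a helper, assuming keras.preprocessing.image and scaling to [0, 1] (a reconstruction, not the original code):

import numpy as np
from keras.preprocessing import image as keras_image

def image_to_tensor(filename, img_height, img_width, color_mode="rgb"):
    # load and resize the file, scale to [0, 1], and add a batch axis
    img = keras_image.load_img(filename,
                               target_size=(img_height, img_width),
                               color_mode=color_mode)
    return np.expand_dims(keras_image.img_to_array(img) / 255.0, axis=0)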
Example #9
def style_transfer_video(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    loader = transforms.ToPILImage()
    reader = imageio.get_reader(args.content_target)

    frames = [
        image_to_tensor(loader(reader.get_data(i)), (512, 512))
        for i in range(reader.count_frames())
    ]
    style_targets = [
        image_to_tensor(Image.open(path), (512, 512))
        for path in args.style_targets
    ]
    style_weights = np.linspace(0, 1, num=len(frames))

    neural_style = NeuralStyle(content_layers=CONTENT_LAYERS,
                               style_layers=STYLE_LAYERS).to(device)
    input_image = frames[0].to(device)
    outputs = []
    for i in trange(len(frames)):
        neural_style.content_target = frames[i].to(device)
        neural_style.set_style_targets(
            style_targets, [1 - style_weights[i], style_weights[i]])
        output_image, _ = neural_style.transfer(
            input_image=input_image,
            epochs=args.epochs,
            style_weight=args.style_weight,
            content_weight=args.content_weight,
            verbose=args.verbose,
        )
        input_image = output_image.clone().to(device)
        outputs.append(output_image.to("cpu"))
        del output_image

    writer = imageio.get_writer("output.mp4",
                                fps=reader.get_meta_data()["fps"])
    shape = reader.get_data(0).shape[:2]
    outputs = [to_image(output, (shape[1], shape[0])) for output in outputs]

    for output in outputs:
        writer.append_data(np.asarray(output))
    writer.close()
Example #10
    def on_epoch_end(self, epoch, logs={}):
        index = 1
        for image in self.test_img_list:
            if self.color_mode == "rgb":
                input_tensor = image_to_tensor(image, self.img_height, self.img_width)
            elif self.color_mode == "grayscale":
                input_tensor = image_to_tensor(image, self.img_height, self.img_width, color_mode="grayscale")
            detection = self.model.predict(input_tensor)[0]
            print('\nIndex %d' % index)
            print(detection)
            a = np.array(detection)
            print('Prediction: %s' % self.labels[a.argmax(0)])
            display(Image(filename=image, width=300, height=300))
            sns.set(style="white", context="talk")
            f, ax1 = plt.subplots(1, 1, figsize=(8, 6), sharex=True)
            sns.barplot(x=self.labels, y=detection, palette="PiYG", ax=ax1)
            ax1.set_ylabel("Value")
            plt.show()

            index = index + 1
        self.now_epoch = self.now_epoch + 1
Example #11
def condition_features(stimuli, model):
    print('Extracting features')
    condition_features = []
    batch_size = 32
    for i in tqdm(range(0, len(stimuli), batch_size)):
        batch_names = stimuli[i:i + batch_size]
        batch = [utils.image_to_tensor(s, resolution=256) for s in batch_names]
        batch = torch.stack(batch)
        if torch.cuda.is_available():
            batch = batch.cuda()
        with torch.no_grad():
            batch_feats = model(batch).cpu().numpy()
        condition_features.append(batch_feats)
    condition_features = np.concatenate(condition_features)
    return condition_features
Example #12
def stimulus_predictions(stimuli_folder, model):
    print('Extracting features')
    stimuli = os.listdir(stimuli_folder)
    condition_features = {}
    batch_size = 32
    for i in tqdm(range(0, len(stimuli), batch_size)):
        batch_names = stimuli[i:i + batch_size]
        batch = [utils.image_to_tensor(os.path.join(stimuli_folder, n), resolution=resolution)
                 for n in batch_names]
        batch = torch.stack(batch)
        if torch.cuda.is_available():
            batch = batch.cuda()
        with torch.no_grad():
            batch_feats = model(batch).cpu().numpy()
        for name, feats in zip(batch_names, batch_feats):
            condition_features[name] = feats
    return condition_features
Example #13
def main():
    parser = ArgumentParser(description='Validate model on .png files')
    parser.add_argument('-m',
                        type=str,
                        required=False,
                        default=None,
                        help='Model to validate')
    parser.add_argument('input_dir', type=str, help='Input directory')
    parser.add_argument('target_dir', type=str, help='Target directory')
    args = parser.parse_args()

    device = get_device()
    model, _, _ = load_model(load_config().model if args.m is None else args.m,
                             device=device)
    loss_f = BCELoss()
    count, loss_sum, acc_sum = 0, 0, 0
    with torch.no_grad():
        for fn in listdir(args.input_dir):
            input_path = join(args.input_dir, fn)
            target_path = join(args.target_dir, fn)
            if fn.endswith('.png') and isfile(input_path) and isfile(
                    target_path):
                print('Validating %s with target %s' %
                      (input_path, target_path))
                data = image_to_tensor(imread(input_path), device=device)
                target = image_to_probs(imread(target_path), device=device)
                data = model(data).squeeze(0)
                loss = loss_f(data, target).item()
                acc = check_accuracy(data, target)
                print('Loss %f, acc %s' % (loss, acc_to_str(acc)))
                count += 1
                acc_sum += acc
                loss_sum += loss
        print('\nSUMMARY\nLoss %f, acc %s' %
              (loss_sum / count, acc_to_str(acc_sum)))
        print(acc_to_details(acc_sum))
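Here image_to_tensor receives an already-decoded numpy image (from imread) plus a device keyword. A sketch consistent with that call site, assuming an 8-bit HWC array (illustrative only):

import numpy as np
import torch

def image_to_tensor(img, device=None):
    # assumed: uint8 HWC image -> batched CHW float tensor in [0, 1] on the device
    tensor = torch.from_numpy(img.astype(np.float32) / 255.0)
    tensor = tensor.permute(2, 0, 1).unsqueeze(0)
    return tensor.to(device) if device is not None else tensor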
Example #14
data_root = '/home/eelmozn1/datasets/adversarial_tms'
datasets = ['scenecats']
feature_names = ['conv_3']
rois = ['LOC', 'PPA', 'RANDOM']
resolution = 256

for feat_name in feature_names:
    for roi in rois:
        encoder = torch.load(
            'saved_models/study=bold5000_featextractor=alexnet_featname={}_rois={}.pth'
            .format(feat_name, roi),
            map_location=lambda storage, loc: storage)
        for dataset in datasets:
            data_dir = os.path.join(data_root, dataset)
            target_dir = os.path.join(
                data_root, 'targets_bold5000',
                '{}_roi={}_feat={}'.format(dataset, roi, feat_name))
            os.mkdir(target_dir)
            folder_names = os.listdir(data_dir)
            folder_names = [f for f in folder_names if f != '.DS_Store']
            for name in folder_names:
                folder = os.path.join(data_dir, name)
                image_paths = utils.listdir(folder)
                images = torch.stack([
                    image_to_tensor(path, resolution=resolution)
                    for path in image_paths
                ])
                target = encoder(images).mean(dim=0)
                torch.save(target, os.path.join(target_dir, name + '.pth'))
Example #15
 def _resized_S(self, size):
     return utils.image_to_tensor(self.source_image,
                                  [transforms.Resize(size)])
Example #16
 def run(self):
     S = utils.image_to_tensor(self.source_image)
     R = utils.image_to_tensor(self.style_image)
     self._color_transfer(S, R)
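In Examples #15 and #16, utils.image_to_tensor takes a source image and, optionally, a list of torchvision transforms to apply first. A sketch consistent with both call sites, assuming the input is already a PIL image (a guess at the utils code, not the original):

import torchvision.transforms as T

def image_to_tensor(image, extra_transforms=None):
    # apply any extra transforms (e.g. Resize), then convert the PIL image to CHW
    ops = list(extra_transforms) if extra_transforms else []
    ops.append(T.ToTensor())
    return T.Compose(ops)(image)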
Example #17
image_path = '/home/eelmozn1/datasets/adversarial_tms/scenecats/3-3-japaneseroom/3-3-japaneseroom_9.jpg'
save_folder = '/home/eelmozn1/Desktop/parameter_sweep'
encoder_file = 'study=bold5000_featextractor=alexnet_featname=conv_3_rois=PPA.pth'
generator = DeePSiM()
encoder = torch.load(os.path.join('saved_models', encoder_file),
                     map_location=lambda storage, loc: storage)
if torch.cuda.is_available():
    encoder.cuda()
    generator.cuda()

shutil.rmtree(save_folder, ignore_errors=True)
os.mkdir(save_folder)

shutil.copyfile(image_path, os.path.join(save_folder, 'original.jpg'))

image = image_to_tensor(image_path, resolution=256)
with torch.no_grad():
    if torch.cuda.is_available():
        target = encoder(image.unsqueeze(0).cuda()).squeeze(0).cpu()
    else:
        target = encoder(image.unsqueeze(0)).squeeze(0)

loss_func = roi_loss_func(roi_mask=None, towards_target=True)

gen_images = []
fig, axs = plt.subplots(len(alphas),
                        len(decays),
                        squeeze=False,
                        figsize=(len(decays) * 10, len(alphas) * 5))
for i, alpha in tqdm(enumerate(alphas)):
    for j, decay in enumerate(decays):
Example #18
    disrupted_stimuli = os.listdir(args.disrupted_folder)
    disrupted_stimuli = [d for d in disrupted_stimuli if '_disrupted.' in d]
    original_stimuli = [
        d.replace('_disrupted.', '_original.') for d in disrupted_stimuli
    ]

    roi_mask = torch.from_numpy(
        utils.get_roi_mask(args.roi, args.encoder_file).astype(np.uint8))

    encoder = torch.load(os.path.join('saved_models', args.encoder_file))

    on_roi_distances, off_roi_distances = [], []
    for original, disrupted in tqdm(
            list(zip(original_stimuli, disrupted_stimuli))):
        original = utils.image_to_tensor(os.path.join(args.disrupted_folder,
                                                      original),
                                         resolution=resolution)
        disrupted = utils.image_to_tensor(os.path.join(args.disrupted_folder,
                                                       disrupted),
                                          resolution=resolution)

        with torch.no_grad():
            orig_voxels = encoder(original.unsqueeze(0)).squeeze(0)

        metrics = loss_metrics(original, disrupted, orig_voxels, encoder,
                               roi_mask)
        on_roi_distances.append(metrics['Disrupted to Target (ON ROI)'])
        off_roi_distances.append(metrics['Disrupted to Target (OFF ROI)'])

    plot_compare_dists(
        on_roi_distances, off_roi_distances, args.roi + ' disruption',
Example #19
    parser.add_argument('--save_folder', required=True, type=str, help='folder to save generated images')
    parser.add_argument('--image_folder', required=True, type=str, help='images to generate for')
    parser.add_argument('--model', default='deepsim', type=str, choices=['deepsim', 'biggan'],
                        help='which generator model to use for optimizing images')
    args = parser.parse_args()

    shutil.rmtree(args.save_folder, ignore_errors=True)
    os.mkdir(args.save_folder)

    encoder = alexnet(pretrained=True)
    encoder.classifier = encoder.classifier[:-1]
    encoder.eval()
    if args.model == 'deepsim':
        generator = DeePSiM()
    elif args.model == 'biggan':
        generator = BigGAN.from_pretrained('biggan-deep-256')
    if torch.cuda.is_available():
        encoder.cuda()
        generator.cuda()

    for image_file in tqdm(os.listdir(args.image_folder)):
        image = image_to_tensor(os.path.join(args.image_folder, image_file), resolution=256)
        with torch.no_grad():
            target = encoder(image.unsqueeze(0)).squeeze(0)
            target_mean_square = (target ** 2).mean().item()
        generated_image, _, lowest_loss, _ = optimize(generator, encoder, target, F.mse_loss)
        generated_image = to_pil_image(generated_image)
        generated_image.save(os.path.join(args.save_folder, image_file))
        print('Lowest loss for {}:\t{}\nMean square of target for {}:\t{}\n'
              .format(image_file, lowest_loss, image_file, target_mean_square))
Example #20
    parser.add_argument('--input',
                        default='images/LowResolution.png',
                        type=str,
                        help='path to input')
    parser.add_argument('--input-gt',
                        default='images/SR_GT.png',
                        type=str,
                        help='path to input ground truth')
    parser.add_argument('--no-cuda', action='store_true', help='disable cuda')
    parser.add_argument('--output', type=str, help='directory to output')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    truth = np.array(Image.open(args.input_gt))
    target = image_to_tensor(args.input)
    input_depth = 32
    input_size = torch.Size([1, input_depth] +
                            (np.asarray(target.shape[2:]) * 4).tolist())
    noise = torch.FloatTensor(input_size).uniform_(0, 0.1)  # U(0, 0.1)
    sigma = 1. / 30
    if use_cuda:
        target = target.cuda()
        noise = noise.cuda()

    net = SkipHourglass(
        input_depth,
        3,
        #down_channels=[8, 16, 32, 64, 128],
        #up_channels=[8, 16, 32, 64, 128],
        #skip_channels=[0, 0, 0, 4, 4],
Example #21
from models import SkipHourglass

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Image Denoising')
    parser.add_argument('--step', default=2400, type=int, help='number of steps')
    parser.add_argument('--lr', default=1e-2, type=float, help='learning rate')
    parser.add_argument('--input', default='images/bonus/2.png', type=str, help='path to input')
    parser.add_argument('--mask', default='images/bonus/2_mask.png', type=str, help='path to mask')
    parser.add_argument('--no-cuda', action='store_true', help='disable cuda')
    parser.add_argument('--output', type=str, help='directory to output')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    #mask = np.array(Image.open(args.mask))
    mask = image_to_tensor(args.mask)
    target_np = np.array(Image.open(args.input))
    target = image_to_tensor(args.input)
    input_depth = 32
    input_size = torch.Size([1, input_depth] + list(target.shape[2:]))
    noise = torch.FloatTensor(input_size).uniform_(0, 0.1) # U(0, 0.1)
    sigma = 1./30
    if use_cuda:
        mask = mask.cuda()
        target = target.cuda()
        noise = noise.cuda()

    net = SkipHourglass(input_depth, 3,
                        down_channels=[128, 128, 128, 128, 128],
                        up_channels=[128, 128, 128, 128, 128],
                        skip_channels=[4, 4, 4, 4, 4],
Example #22
def map_to_grid(images):
    # arrange the batch on a square grid with sample_pad padding
    return make_grid(images, int(math.sqrt(len(images))), sample_pad)


files = os.listdir(dir)
files = [f for f in files if '.jpg' in f]

model = AlexNet(feature_name='pool')

images = [
    Image.open(os.path.join(dir, f)).convert('L').convert('RGB') for f in files
]
image_tensors = torch.stack(
    [image_to_tensor(img, resolution=224) for img in images])
with torch.no_grad():
    features = model(image_tensors).numpy()

pcs = PCA(n_components=n_pcs).fit_transform(features)
sorted_indices = np.argsort(pcs, axis=0)

low_pcs = [[images[j] for j in sorted_indices[:n_exemplars, i]]
           for i in range(n_pcs)]
high_pcs = [[images[j] for j in sorted_indices[-n_exemplars:, i]]
            for i in range(n_pcs)]
for pc in range(n_pcs):
    os.mkdir(os.path.join(save_dir, 'pc_{}'.format(pc)))
    for i, img in enumerate(low_pcs[pc]):
        img.save(
            os.path.join(save_dir, 'pc_{}'.format(pc), 'neg_{}.jpg'.format(i)))
Example #23
        feature_extractor = VGG16()
    else:
        raise ValueError('Unimplemented feature extractor: {}'.format(
            args.model))

    feature_extractor.conv1.register_forward_hook(get_activation('conv1'))
    feature_extractor.conv2.register_forward_hook(get_activation('conv2'))
    feature_extractor.conv3.register_forward_hook(get_activation('conv3'))
    feature_extractor.conv4.register_forward_hook(get_activation('conv4'))
    feature_extractor.conv5.register_forward_hook(get_activation('conv5'))
    feature_extractor.fc6.register_forward_hook(get_activation('fc6'))
    feature_extractor.fc7.register_forward_hook(get_activation('fc7'))

    conditions = listdir('data/image/' + args.dataset)
    for c in tqdm(conditions):
        stimuli = listdir(c)
        c_name = c.split('/')[-1]
        os.mkdir('processed/feature/' + args.dataset + '/' + args.model + '/' +
                 c_name)
        stimuli_tensor = [
            image_to_tensor(s, resolution=args.resolution) for s in stimuli
        ]
        for name, tensor in zip(stimuli, stimuli_tensor):
            activation = {}
            output = feature_extractor(tensor.unsqueeze(0))
            file = name.split('/')[-1] + '.pth'
            torch.save(
                activation,
                os.path.join('processed/feature', args.dataset, args.model,
                             c_name, file))
Example #24
def main(show=True, images_dir='images', image_fmt='%.3d.png'):
    with open('config.json', 'r') as f:
        config = json.load(f)
    enc_cfg = config['video_enc']
    parser = ArgumentParser(description='Process .svo files')
    parser.add_argument('input', type=str, help='Input directory or file')
    parser.add_argument('output',
                        type=str,
                        nargs='?',
                        default=None,
                        help='Output directory or file')
    parser.add_argument('-m',
                        type=str,
                        required=False,
                        help='Model to process with')
    parser.add_argument('-f',
                        type=int,
                        required=False,
                        default=1,
                        help='Reduce factor')
    parser.add_argument('-v',
                        action='store_true',
                        required=False,
                        default=False,
                        help='Mix processed with input')
    parser.add_argument('-e',
                        action='store_true',
                        required=False,
                        default=False,
                        help='Estimate motion')
    parser.add_argument('-r',
                        action='store_true',
                        required=False,
                        default=False,
                        help='Read right image')
    parser.add_argument('-p',
                        type=str,
                        required=False,
                        default=enc_cfg.get('default'),
                        help='Select parameters preset for FFMPEG')
    args = parser.parse_args()
    enc_cfg = enc_cfg[args.p]

    model = None if args.m is None else load_model(args.m, device=device)[0]
    view = sl.VIEW.VIEW_RIGHT if args.r else sl.VIEW.VIEW_LEFT
    svo_path = config['svo_path']

    if isdir(args.input):
        files = sorted([
            join(args.input, fn) for fn in listdir(args.input)
            if isfile(join(args.input, fn)) and fn.endswith('.svo')
        ],
                       key=file_idx)
    elif isfile(args.input):
        files = [args.input]
    else:
        try:
            files = decode_name(args.input)
        except ValueError:
            print('No such file or directory: %s' % args.input)
            return

    pause = False
    save_idx = get_save_idx(images_dir, image_fmt)

    writer = None
    out_path = args.output
    if out_path == 'auto':
        model_name = args.m
        if model_name is not None and args.v:
            model_name += '-v'
        out_path = config['mp4_path'].replace('{model}', model_name or '')
        if not isdir(out_path):
            mkdir(out_path)

    to_dir = False if out_path is None else isdir(out_path)
    out_height, out_width = None, None
    estimator = MotionEstimator('fly1', with_gui=out_path is None)
    with torch.no_grad():
        for dn in files:
            if isinstance(dn, str):
                fn = dn
                out_name = basename(fn)[:-4]
            else:
                fn, out_name, view = dn
                fn = join(svo_path, fn)
            out_name = None if out_path is None else join(
                out_path, out_name + '.mp4')

            if not isfile(fn):
                print('File %s not found!' % fn)
                continue
            print('Processing %s' % fn)
            for frame_idx, source in enumerate(read_svo(fn, view)):

                # Processing:
                if model is not None:
                    data = image_to_tensor(source, device=device)
                    data = model(data).squeeze(0)  # [:, :2]
                    if args.e:
                        mask = (data[4:6] < 0.5).all(0)
                        estimator.add(source,
                                      mask.cpu().numpy(), out_name, frame_idx)

                    result = probs_to_image(data, mask=True)
                    output = visualize(source, result) if args.v else result
                else:
                    result = None
                    output = source

                if out_height is None:
                    out_height, out_width = map(lambda s: s // args.f,
                                                output.shape[:2])
                if args.f != 1:
                    output = cv2.resize(output, (out_width, out_height))

                # Open writer:
                if writer is None and out_path is not None:
                    dst = out_name if to_dir else out_path
                    print('Write to %s' % dst)
                    writer = open_ffmpeg(dst, (out_width, out_height),
                                         params=enc_cfg)

                # Return output:
                if writer is not None:
                    write_ffmpeg(writer, output)
                else:
                    cv2.imshow('output', output)
                    while True:
                        key = cv2.waitKey(1)
                        if key == ord('p'):
                            pause = not pause
                        elif key == ord('s'):
                            cv2.imwrite(
                                join('images', 'saved-in',
                                     image_fmt % save_idx), source)
                            if result is not None:
                                cv2.imwrite(
                                    join('images', 'saved-out',
                                         image_fmt % save_idx), result)
                            save_idx += 1
                        elif key == ord('q'):
                            return
                        elif key == ord('m'):
                            cv2.imshow('output', result)
                        elif key == ord('i'):
                            cv2.imshow('output', source)
                        if not pause:
                            break
            if to_dir and writer is not None:
                close_ffmpeg(writer)
                writer = None
        if writer is not None:
            close_ffmpeg(writer)
Example #25
                content_mask_tensor[i, :, :]
            ],
                        dim=0), 'content_mask_' + str(i))

    # Using GPU or CPU
    device = torch.device(config.device0)

    style_img = utils.load_image(style_image_path, None)
    content_img = utils.load_image(content_image_path, None)
    width_s, height_s = style_img.size
    width_c, height_c = content_img.size

    # print(height_s, width_s)
    # print(height_c, width_c)

    style_img = utils.image_to_tensor(style_img).unsqueeze(0)
    content_img = utils.image_to_tensor(content_img).unsqueeze(0)

    style_img = style_img.to(device, torch.float)
    content_img = content_img.to(device, torch.float)

    # print('content_img size: ', content_img.size())
    # utils.show_pic(style_img, 'style image')
    # utils.show_pic(content_img, 'content image')

    # -------------------------
    # eval() puts the network in inference mode (fixes dropout/batch-norm);
    # it does not by itself freeze the parameters.
    cnn = models.vgg19(pretrained=True).features.to(config.device0).eval()

    cnn_normalization_mean = torch.tensor([0.485, 0.456,
                                           0.406]).to(config.device0)
Example #26
                        help='whether to use an encoder with random weights')
    args = parser.parse_args()

    shutil.rmtree(args.save_folder, ignore_errors=True)
    os.mkdir(args.save_folder)

    stimuli = os.listdir(args.stimuli_folder)
    stimuli = [s for s in stimuli if s != '.DS_Store']

    roi_mask = utils.get_roi_mask(args.roi, args.encoder_file)

    encoder = torch.load(os.path.join('saved_models', args.encoder_file))

    print('Generating disruption examples')
    for stimulus_name in tqdm(stimuli):
        stimulus = utils.image_to_tensor(
            os.path.join(args.stimuli_folder, stimulus_name), resolution)
        target = torch.load(
            os.path.join(args.targets_folder, stimulus_name + '.target.pth'))
        disrupted, metrics = disrupt_stimulus(stimulus, target, encoder,
                                              roi_mask, args.towards_target,
                                              args.random)

        shutil.copyfile(
            os.path.join(args.stimuli_folder, stimulus_name),
            os.path.join(args.save_folder,
                         stimulus_name.replace('.', '_original.')))
        utils.tensor_to_image(disrupted).save(
            os.path.join(args.save_folder,
                         stimulus_name.replace('.', '_disrupted.')))
        with open(
                os.path.join(args.save_folder,