def disrupt_stimuli(save_folder, inputs_and_targets, encoder, roi_mask, target_roi, loss_method):
    """Disrupt every stimulus toward/away from its target voxel pattern and save the results.

    Args:
        save_folder: directory the disrupted images and metrics are written to.
        inputs_and_targets: iterable of dicts with 'stimulus_path' and 'target_voxels' keys.
        encoder: model mapping a (1, C, H, W) image batch to voxel activations.
        roi_mask: numpy 0/1 (or boolean) array selecting the ROI voxels.
        target_roi: if truthy, optimize all voxels but pin non-ROI voxels to the
            encoder's prediction for the original image; otherwise restrict the
            target (and the loss) to ROI voxels only.
        loss_method: 'towards' drives activations toward the target; anything else away.
    """
    # Keep the uint8 tensor for the project helpers (roi_loss_func / loss_metrics
    # receive it unchanged), but index with a boolean copy: indexing with a uint8
    # mask is deprecated in modern PyTorch, and the old `1 - mask` complement
    # idiom is not valid on boolean tensors — use `~mask` instead.
    roi_mask = torch.from_numpy(roi_mask.astype(np.uint8))
    roi_bool = roi_mask.bool()
    towards_target = loss_method == 'towards'  # was `True if ... else False`
    if not target_roi:
        loss_func = roi_loss_func(None, towards_target)
    else:
        loss_func = roi_loss_func(roi_mask, towards_target)
    for input_and_target in tqdm(inputs_and_targets):
        orig_image = utils.image_to_tensor(input_and_target['stimulus_path'])
        target = torch.from_numpy(input_and_target['target_voxels'])
        if not target_roi:
            # Only the ROI voxels participate: drop everything outside the ROI.
            target = target[roi_bool]
        else:
            # Pin non-ROI voxels to the prediction for the original image so
            # that only ROI voxels are effectively disrupted.
            with torch.no_grad():
                orig_voxels = encoder(orig_image.unsqueeze(0)).squeeze(0)
            target[~roi_bool] = orig_voxels[~roi_bool]
        disrupted_image = deepdream(orig_image, target, encoder, loss_func)
        metrics = loss_metrics(orig_image, disrupted_image, target, encoder,
                               roi_mask if target_roi else None)
        disrupted_image = utils.tensor_to_image(disrupted_image)
        save_disrupted_image(save_folder, input_and_target, disrupted_image, metrics)
def disrupt_stimulus(stimulus, target, encoder, roi_mask, towards_target, random):
    """Disrupt a single stimulus tensor and return (disrupted_image, metrics).

    Args:
        stimulus: image tensor (C, H, W) to be disrupted.
        target: voxel-activation tensor the optimization steers toward/away from.
        encoder: model mapping a (1, C, H, W) image batch to voxel activations.
        roi_mask: numpy 0/1 (or boolean) array selecting the ROI voxels.
        towards_target: True to move activations toward `target`, False to move away.
        random: if True, swap in randomly re-initialized encoder weights; combined
            with `towards_target=False` the original prediction itself becomes
            the repulsion target.
    """
    # uint8 tensor kept for roi_loss_func/loss_metrics; boolean copy for indexing,
    # since uint8 mask indexing is deprecated and `1 - mask` does not complement
    # a boolean tensor — `~mask` does.
    roi_mask = torch.from_numpy(roi_mask.astype(np.uint8))
    roi_bool = roi_mask.bool()
    loss_func = roi_loss_func(roi_mask, towards_target)
    with torch.no_grad():
        if random:
            encoder = encoder.random_weights()
        orig_voxels = encoder(stimulus.unsqueeze(0)).squeeze(0)
    if random and not towards_target:
        target = orig_voxels
    else:
        # Pin non-ROI voxels to the original prediction so only ROI voxels move.
        target[~roi_bool] = orig_voxels[~roi_bool]
    # Tiny symmetric jitter keeps the loss from starting at an exact stationary point.
    noise = (torch.rand_like(target) - 0.5) * 1e-5
    disrupted = deepdream(stimulus, target + noise, encoder, loss_func)
    metrics = loss_metrics(stimulus, disrupted, target, encoder, roi_mask)
    return disrupted, metrics
def generate_stimulus(target, encoder, towards_target):
    """Synthesize an image from ImageNet-statistics noise whose encoding is
    optimized toward (or away from) `target`; returns (generated, metrics)."""
    objective = roi_loss_func(roi_mask=None, towards_target=towards_target)
    seed_image = utils.sample_imagenet_noise(resolution=256)
    generated = deepvisualize(seed_image, target, encoder, objective)
    metrics = loss_metrics(seed_image, generated, target, encoder, roi_mask=None)
    return generated, metrics
# Script body: encode a reference image once, then grid-search generator
# hyperparameters (alpha x decay) to reconstruct it through the encoder.
# NOTE(review): relies on names defined elsewhere in the file (encoder, generator,
# save_folder, image_path, alphas, decays, optimize, image_to_tensor, plt, ...),
# and fig/axs are created but not yet used — the loop presumably continues past
# this chunk; confirm against the full file.
# NOTE(review): .cuda() is called unconditionally here although the encoding step
# below guards on torch.cuda.is_available() — this would raise on a CPU-only box;
# confirm intent.
encoder.cuda()
generator.cuda()
# Start from a clean output directory and keep a copy of the source image.
shutil.rmtree(save_folder, ignore_errors=True)
os.mkdir(save_folder)
shutil.copyfile(image_path, os.path.join(save_folder, 'original.jpg'))
image = image_to_tensor(image_path, resolution=256)
# Encode the original image once; this is the fixed optimization target.
with torch.no_grad():
    if torch.cuda.is_available():
        target = encoder(image.unsqueeze(0).cuda()).squeeze(0).cpu()
    else:
        target = encoder(image.unsqueeze(0)).squeeze(0)
loss_func = roi_loss_func(roi_mask=None, towards_target=True)
gen_images = []
# One subplot per (alpha, decay) combination; squeeze=False keeps axs 2-D even
# for a single row/column.
fig, axs = plt.subplots(len(alphas), len(decays), squeeze=False,
                        figsize=(len(decays) * 10, len(alphas) * 5))
for i, alpha in tqdm(enumerate(alphas)):
    for j, decay in enumerate(decays):
        gen_image, _, loss, losses = optimize(generator, encoder, target, loss_func,
                                              alpha=alpha, decay=decay)
        gen_images.append(to_pil_image(gen_image))