Example #1
    def deepdream(self, img_path, layer, filter_idx, lr=.1, num_iter=20,
                  figsize=(4, 4), title='DeepDream', return_output=False):
        """Creates DeepDream.

        It runs the optimization on the image provided. The image is loaded
        and converted into a torch.Tensor that is compatible as input to the
        network.

        Read the original blog post by Google for more information on
        `DeepDream <https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html>`_.

        Args:
            img_path (str): A path to the image you want to apply DeepDream on
            layer (torch.nn.modules.conv.Conv2d): The target Conv2d layer
                from which the filter is chosen, based on `filter_idx`.
            filter_idx (int): The index of the target filter.
            lr (float, optional, default=.1): The step size of the
                optimization.
            num_iter (int, optional, default=20): The number of iterations
                for the gradient ascent operation.
            figsize (tuple, optional, default=(4, 4)): The size of the plot.
            title (str, optional, default='DeepDream'): The title of the
                plot.
            return_output (bool, optional, default=False): Returns the
                output(s) of optimization if set to True.

        Returns:
            output (list of torch.Tensor): With dimensions
                :math:`(num_iter, C, H, W)`. The size of each image is
                determined by the `img_size` attribute, which defaults to 224.

        """ # noqa

        input_ = apply_transforms(load_image(img_path), self.img_size)

        self._lr = lr
        output = self.optimize(layer, filter_idx, input_, num_iter=num_iter)

        plt.figure(figsize=figsize)
        plt.axis('off')
        plt.title(title)

        plt.imshow(format_for_plotting(
            standardize_and_clip(output[-1],
                                 saturation=0.15,
                                 brightness=0.7))); # noqa

        if return_output:
            return output
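
This method lives on flashtorch's GradientAscent class (flashtorch.activmax). A minimal usage sketch, assuming a pretrained VGG16; the image path and filter index are illustrative, not taken from the example above:

import torchvision.models as models
from flashtorch.activmax import GradientAscent

model = models.vgg16(pretrained=True)
g_ascent = GradientAscent(model.features)

conv5_1 = model.features[24]  # a Conv2d layer in VGG16's feature extractor
# Run DeepDream on filter 45 of that layer and keep the per-iteration outputs
output = g_ascent.deepdream('images/great_grey_owl.jpg', conv5_1, 45,
                            return_output=True)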
Example #2
def visualise_cnn(model_name, image, class_label, model_path=None, title=None):
    model = get_model(model_name)
    if model_path is not None:
        model = load_model(model, model_path)
    model.eval()
    backprop = Backprop(model)
    # Transform the input image to a tensor
    img = apply_transforms(image)
    # Look up the target class index for the given ImageNet label
    imagenet = ImageNetIndex()
    target_class = imagenet[class_label]
    # Ready to roll!
    backprop.visualize(img, target_class, guided=True, title=title)
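
A hypothetical call, assuming get_model resolves a torchvision architecture by name and image is a PIL image; the path and label are illustrative:

from flashtorch.utils import load_image

image = load_image('images/great_grey_owl.jpg')
visualise_cnn('vgg16', image, 'great grey owl', title='Guided backprop')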
Example #3
def get_type(path='./owl.jpg'):
    init_model()
    global error
    if error:
        return 'unknown'

    try:
        image = load_image(path)

        input_ = apply_transforms(image)
        outputs = model(input_)
        # Index of the highest-scoring class
        result = int(torch.max(outputs.data, 1)[1])
        return tag[result]
    except Exception:
        return 'unknown'
Example #4
def saliency():
    """### 1. Load an image"""

    buho = 'images/great_grey_owl.jpg'
    image = load_image(buho)
    """### 2. Load a pre-trained Model"""

    model = models.alexnet(pretrained=True)
    # model = torch.load('/content/alexnet-frames-Finetuned:False-1di-tempMaxPool-OnPlateau.tar')

    """### 3. Create an instance of Backprop with the model"""

    backprop = Backprop(model)

    """### 4. Visualize saliency maps"""

    # Transform the input image to a tensor

    owl = apply_transforms(image)  # tensor of shape (1, 3, 224, 224)

    # Set a target class from ImageNet task: 24 in case of great gray owl

    target_class = 24

    # Ready to roll!

    backprop.visualize(owl, target_class=target_class, guided=True, use_gpu=True)
Example #5
def test_denormalize_tensor(image):
    transformed = apply_transforms(image)
    denormalized = denormalize(transformed)

    assert denormalized.shape == transformed.shape
    assert denormalized.min() >= 0.0 and denormalized.max() <= 1.0
Example #6
def test_crop_to_custom_size(image):
    transformed = apply_transforms(image, 299)

    assert transformed.shape == (1, 3, 299, 299)
Example #7
def test_crop_to_224_by_default(image):
    transformed = apply_transforms(image)

    assert transformed.shape == (1, 3, 224, 224)
Example #8
def test_transform_image_to_tensor(image):
    transformed = apply_transforms(image)

    assert isinstance(transformed, torch.Tensor)
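
Examples #5 through #8 rely on a pytest fixture named image. A minimal sketch of such a fixture, assuming a local test asset (the path is hypothetical):

import pytest
from flashtorch.utils import load_image

@pytest.fixture
def image():
    # Provide a PIL image for the apply_transforms tests
    return load_image('tests/resources/test_image.jpg')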
Example #9
import matplotlib.pyplot as plt
import pandas as pd
import torch

from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop

# CXR_BASE and MobileNet are defined elsewhere in the author's project
NIH_CXR_BASE = CXR_BASE.joinpath("nih/v1").resolve()
test_df = pd.read_csv("~/cxr-jingyi/Age/NIH_test_2500.csv")

path1 = test_df.iloc[0]['path']
path1 = NIH_CXR_BASE.joinpath(path1).resolve()

image = load_image(str(path1))
plt.imshow(image)

model = MobileNet(16)
checkpoint = torch.load(
    '/home/jingyi/cxr-jingyi/Age/result/supervised/model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])

backprop = Backprop(model)
# Transform the input image to a tensor
input_ = apply_transforms(image)

# Set the target class index for this model
target_class = 16

# Ready to roll!
backprop.visualize(input_, target_class, guided=True)
Example #10
import io

import matplotlib.pyplot as plt
import torch
import torchvision.models as models
from PIL import Image

from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image
from CNN.loadPretrainedCNN2 import VGG16_NoSoftmax_OneChannel

# preprossingPipeline is assumed to come from the project's own module
C = preprossingPipeline(
    BC_datapath=r"/Users/villadsstokbro/Dokumenter/DTU/KID/3. semester/Fagprojekt/BrainCapture/dataEEG",
    mac=True)
path1 = r'/Volumes/B/spectograms_rgb'
N = 2
windows, labels, filenames, window_idx_full = C.make_label_cnn(
    make_from_filenames=None, quality=None, is_usable=None, max_files=N,
    max_windows=10, path=path1, seed=0, ch_to_include=range(1))

img = load_image('Ricardo_rip.jpg')
img = apply_transforms(img, size=224)
img.requires_grad_(requires_grad=True)  # make sure gradients reach the input

def visualize_helper(model_module, tensor=img, k=854):
    model = model_module(pretrained=True).float()
    backprop = Backprop(model)
    backprop.visualize(tensor, k, guided=True)


model = models.vgg16(pretrained=True)
model.eval()
print(torch.argmax(model(img)))  # inspect the model's predicted class
visualize_helper(models.vgg16, tensor=img)
plt.show()
Example #11
import matplotlib.pyplot as plt
import torchvision.models as models
from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image

image = load_image(r"G:\EEGNet/test/test.jpg")

plt.imshow(image)

net = models.vgg16(pretrained=True)

backprop = Backprop(net)

input_ = apply_transforms(image)

target_class = 24  # ImageNet index for great grey owl

backprop.visualize(input_, target_class, guided=True)
Example #12
    pass
else:
    spec = C.get_spectrogram(file)
    j = 0
    for window_value in spec.keys():
        if window_value == 'annotations':
            break
        channel_imgs = []
        for channel in spec[window_value].keys():
            # Render this spectrogram channel to an in-memory JPEG, then
            # load it back as a PIL image for flashtorch's transforms
            a = np.array(spec[window_value][channel])
            buf = io.BytesIO()
            plt.imsave(buf, a, format='jpg')
            buf.seek(0)
            image = load_image(buf)
            channel_imgs.append(apply_transforms(image))
            buf.close()
        imgs = torch.cat(channel_imgs, dim=0)
        if j == 0:
            window_values = imgs.reshape(1, 14, 3, 224, 224)
        else:
Example #13
import os  # NOQA: E402
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # NOQA: E402
import matplotlib.pyplot as plt
import torchvision.models as models

from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop

### 1. Load a pre-trained Model
# model = models.alexnet(pretrained=True)
model = models.vgg16(pretrained=True)
### 2. Create an instance of Backprop with the model
backprop = Backprop(model)
path = './figure'
path_names = os.listdir(path)
for name in path_names:
    input_ = apply_transforms(load_image(os.path.join(path, name)))
    backprop.visualize(input_, None, guided=True, use_gpu=True)
    plt.show()