Example #1
 def spectrogramMake(self,
                     EEGseries=None,
                     t0=0,
                     tWindow=120,
                     resized=False):
     # Not debugged
     edfFs = EEGseries.info["sfreq"]
     # NOTE: mne's get_data() takes sample indices; t0 and tWindow are passed
     # through as-is, so they are assumed to be in samples here (multiply by
     # edfFs first if they are in seconds).
     chWindows = EEGseries.get_data(start=int(t0), stop=int(t0 + tWindow))
     ch_dict = defaultdict()
     for i, ch in enumerate(EEGseries.ch_names):
         if resized:
             fTemp, tTemp, Sxx = signal.spectrogram(chWindows[i], fs=edfFs)
             #ch_dict[ch]=torch.tensor(image_resized)
             buf = io.BytesIO()
             plt.imsave(buf,
                        np.log(Sxx + np.finfo(float).eps)[0:90],
                        format='png')
             buf.seek(0)
             image = load_image(buf)
             img = apply_transforms_new(image)
             buf.close()
             ch_dict[ch] = img
         else:
             fTemp, tTemp, Sxx = signal.spectrogram(chWindows[i], fs=edfFs)
             ch_dict[ch] = torch.tensor(
                 np.log(Sxx +
                        np.finfo(float).eps))  # for np del torch.tensor
     return ch_dict
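For reference, the core transform above is just scipy's `signal.spectrogram` followed by log scaling. A minimal self-contained sketch on synthetic data (the sampling rate and signal here are made up):

import numpy as np
from scipy import signal

fs = 250.0                           # assumed sampling rate in Hz
x = np.random.randn(int(fs * 120))   # 120 s of synthetic single-channel data
f, t, Sxx = signal.spectrogram(x, fs=fs)
logSxx = np.log(Sxx + np.finfo(float).eps)  # eps avoids log(0)
print(logSxx.shape)                  # (frequency bins, time bins)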
Example #2
    def deepdream(self,
                  img_path,
                  layer,
                  filter_idx,
                  lr=.1,
                  num_iter=20,
                  figsize=(4, 4),
                  title='DeepDream',
                  return_output=False):
        """Creates DeepDream.

        It applies the optimization on the image provided. The image is loaded
        and made into a torch.Tensor that is compatible as the input to the
        network.

        Read the original blog post by Google for more information on
        `DeepDream <https://ai.googleblog.com/2015/06/inceptionism-going-deeper-into-neural.html>`_.

        Args:
            img_path (str): A path to the image you want to apply DeepDream on
            layer (torch.nn.modules.conv.Conv2d): The target Conv2d layer
                from which the filter is chosen, based on `filter_idx`.
            filter_idx (int): The index of the target filter.
            lr (float, optional, default=.1): The step size of optimization.
            num_iter (int, optional, default=20): The number of iterations
                for the gradient ascent operation.
            figsize (tuple, optional, default=(4, 4)): The size of the plot.
            title (str, optional, default='DeepDream'): The title of the
                plot.
            return_output (bool, optional, default=False): Returns the
                output(s) of optimization if set to True.

        Returns:
            output (list of torch.Tensor): With dimensions
                :math:`(num_iter, C, H, W)`. The size of the image is
                determined by the `img_size` attribute, which defaults to 224.

        """ # noqa

        input_ = apply_transforms(load_image(img_path), self.img_size)

        self._lr = lr
        output = self.optimize(layer, filter_idx, input_, num_iter=num_iter)

        plt.figure(figsize=figsize)
        plt.axis('off')
        plt.title(title)

        plt.imshow(
            format_for_plotting(
                standardize_and_clip(output[-1],
                                     saturation=0.15,
                                     brightness=0.7)))

        if return_output:
            return output
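For context: this method appears to be flashtorch's GradientAscent.deepdream. A minimal usage sketch, assuming that API (the image path is hypothetical; the Conv2d layer and filter index mirror the commented-out lines in Example #13):

import torchvision.models as models
from flashtorch.activmax import GradientAscent

model = models.vgg16(pretrained=True)
g_ascent = GradientAscent(model.features)
conv5_1 = model.features[24]  # a Conv2d layer of vgg16
g_ascent.deepdream('images/great_grey_owl.jpg', conv5_1, 45, num_iter=20)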
Example #3
def get_type(path='./owl.jpg'):
    init_model()
    global error
    if error:
        return 'unknown'

    try:
        image = load_image(path)

        o = apply_transforms(image)
        outputs = model(o)
        # Index of the highest-scoring class
        result = int(torch.max(outputs.data, 1)[1])
        # Class probabilities (computed but not used here)
        sftm = torch.nn.functional.softmax(outputs[0], dim=0)
        return tag[result]
    except Exception:  # a bare except would also swallow KeyboardInterrupt
        return 'unknown'
Example #4
def saliency():
    """### 1. Load an image"""

    buho = 'images/great_grey_owl.jpg'
    di1 = 'images/1.png'
    image = load_image(buho)

    # plt.imshow(image)
    # plt.title('Original image'+str(type(image)))
    # plt.axis('off');
    # plt.show()
    """### 2. Load a pre-trained Model"""

    model = models.alexnet(pretrained=True)
    # model = torch.load('/content/alexnet-frames-Finetuned:False-1di-tempMaxPool-OnPlateau.tar')

    """### 3. Create an instance of Backprop with the model"""

    backprop = Backprop(model)

    """### 4. Visualize saliency maps"""

    # Transform the input image to a tensor

    owl = apply_transforms(image)
    # print(owl.size())  # torch.Size([1, 3, 224, 224])
    # input_size = 224
    # data_transforms = createTransforms(input_size)
    # owl = data_transforms['test'](image)
    # owl = owl.unsqueeze(dim=0)
    # owl = owl.unsqueeze(dim=0)
    # owl = owl.permute(1, 0, 2, 3, 4)
    # print(owl.size())

    # Set a target class from ImageNet task: 24 in case of great gray owl

    target_class = 24

    # Ready to roll!

    backprop.visualize(owl, target_class=target_class, guided=True, use_gpu=True)
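If the raw saliency tensor is needed rather than the rendered plot, flashtorch's Backprop also exposes calculate_gradients, which visualize uses internally. A minimal sketch, assuming that API and the same owl/target as above:

gradients = backprop.calculate_gradients(owl,
                                         target_class=target_class,
                                         guided=True)
print(gradients.shape)  # saliency tensor with the input's 224x224 spatial size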
Example #5
def image():
    image_path = path.join(path.dirname(__file__),
                           'resources',
                           'test_image.jpg')

    return load_image(image_path)
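This reads like a pytest fixture whose decorator was stripped when the snippet was extracted. A sketch of how it would typically be declared and consumed, assuming pytest and that load_image returns a PIL image:

import pytest
from os import path
from PIL import Image
from flashtorch.utils import load_image

@pytest.fixture
def image():
    image_path = path.join(path.dirname(__file__),
                           'resources',
                           'test_image.jpg')
    return load_image(image_path)

def test_load_image_returns_pil_image(image):
    assert isinstance(image, Image.Image)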
Example #6
h = HeatmapGenerator(pathModel, transCrop)
h.generate(pathInputImage, pathOutputImage, transCrop)

from pathlib import Path
import pandas as pd
import torch

CXR_BASE = Path("/home/jingyi/cxr-jingyi/data").resolve()
NIH_CXR_BASE = CXR_BASE.joinpath("nih/v1").resolve()
test_df = pd.read_csv("~/cxr-jingyi/Age/NIH_test_2500.csv")

path1 = test_df.iloc[0]['path']
path1 = NIH_CXR_BASE.joinpath(path1).resolve()

import matplotlib.pyplot as plt
from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop

image = load_image(str(path1))
plt.imshow(image)

model = MobileNet(16)
checkpoint = torch.load(
    '/home/jingyi/cxr-jingyi/Age/result/supervised/model_best.pth.tar')
model.load_state_dict(checkpoint['state_dict'])

backprop = Backprop(model)
# Transform the input image to a tensor

owl = apply_transforms(image)

# Set the target class for this model's output (16 here; the "great gray
# owl = 24" comment copied from the ImageNet examples does not apply)

target_class = 16
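The snippet ends before any visualization; following the pattern of the surrounding examples, the presumable next step would be:

backprop.visualize(owl, target_class, guided=True)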
Example #7
import torch
from flashtorch.utils import apply_transforms, load_image, format_for_plotting, denormalize
import torchvision.models as models
from flashtorch.saliency import Backprop
from CNN.loadPretrainedCNN2 import VGG16_NoSoftmax_OneChannel
import matplotlib.pyplot as plt
import io
from PIL import Image


C = preprossingPipeline(BC_datapath=r"/Users/villadsstokbro/Dokumenter/DTU/KID/3. semester/Fagprojekt/BrainCapture/dataEEG", mac=True)
path1 = r'/Volumes/B/spectograms_rgb'
N = 2
windows, labels, filenames, window_idx_full = C.make_label_cnn(
    make_from_filenames=None, quality=None, is_usable=None, max_files=N,
    max_windows=10, path=path1, seed=0, ch_to_include=range(1))
img = windows[0].unsqueeze(0)
img = load_image('Ricardo_rip.jpg')  # overwrites the window loaded above
img = apply_transforms(img, size=224)
# detach() returns a copy, so reassign to keep the grad-enabled tensor
img = img.detach().requires_grad_(requires_grad=True)

#window = torch.load('/Volumes/B/spectograms_rgb/sbs2data_2018_08_30_19_39_35_288 part 2.edf.pt')
#img=window.detach().requires_grad_(requires_grad=True)[0,0,:,:].unsqueeze(0)

def visualize_helper(model_module, tensor=img, k=854):
    model = model_module(pretrained=True).float()
    backprop = Backprop(model)
    backprop.visualize(tensor, k, guided=True)

model = models.vgg16(pretrained=True)
model.eval()
torch.argmax(model(img))  # top-1 class index for the transformed image
visualize_helper(models.vgg16, tensor=img)
Example #8
import matplotlib.pyplot as plt
import torchvision.models as models
from flashtorch.utils import load_image
from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms
image = load_image("G:\EEGNet/test/test.jpg")

plt.imshow(image)

net = models.vgg16(pretrained=True)

backprop = Backprop(net)

input_ = apply_transforms(image)

target_class = 24

backprop.visualize(input_, target_class, guided=True)
Example #9
 if not os.path.exists(wdir + r'/spectograms_rgb/' + file + ".pt"):
     spec = C.get_spectrogram(file)
     j = 0
     for window_value in spec.keys():
         if window_value == 'annotations':
             break
         i = 0
         for channel in spec[window_value].keys():
             if i == 0:
                 a = np.array(spec[window_value][channel])
                 buf = io.BytesIO()
                 plt.imsave(buf, a, format='jpg')
                 buf.seek(0)
                 image = load_image(buf)
                 img = apply_transforms(image)
                 imgs = img
                 buf.close()
             else:
                 a = np.array(spec[window_value][channel])
                 buf = io.BytesIO()
                 plt.imsave(buf, a, format='jpg')
                 buf.seek(0)
                 image = load_image(buf)
                 img = apply_transforms(image)
                 imgs = torch.cat((imgs, img), axis=0)
                 buf.close()
             i += 1
         if j == 0:
             # stack the per-channel images into one (1, 14, 3, 224, 224) tensor
             window_values = imgs.reshape(1, 14, 3, 224, 224)
Example #10
def show_image(image_address):
    image = load_image(image_address)
    plt.imshow(image)
    plt.title('Original image')
    plt.axis('off')
    return image
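Since show_image returns the loaded PIL image, it chains directly into the transform step of the other examples; e.g. (path hypothetical):

image = show_image('images/great_grey_owl.jpg')
input_ = apply_transforms(image)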
Example #11
# pip install flashtorch

import matplotlib.pyplot as plt

import torch
import torchvision.models as models

from flashtorch.utils import (apply_transforms, denormalize,
                              format_for_plotting, load_image, visualize)

from flashtorch.utils import ImageNetIndex

from flashtorch.saliency import Backprop

image = load_image(
    "D:/Projects Machine Learning/Data/Some from ImageNet/tabby cat.jpg")

plt.imshow(image)
plt.title('Original image')
plt.axis('off')

# the following call is needed, otherwise no image window pops up
plt.waitforbuttonpress()

model = models.alexnet(pretrained=True)
backprop = Backprop(model)

imagenet = ImageNetIndex()
target_class = imagenet['tabby cat']

input_ = apply_transforms(image)
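This example also stops right before the visualization call; the standard flashtorch follow-up, as in the other examples, would be:

backprop.visualize(input_, target_class, guided=True)
plt.show()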
Example #12
import os  # NOQA: E402
os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # NOQA: E402
import matplotlib.pyplot as plt
import torchvision.models as models

from flashtorch.utils import apply_transforms, load_image
from flashtorch.saliency import Backprop

### 1. Load a pre-trained Model
# model = models.alexnet(pretrained=True)
model = models.vgg16(pretrained=True)
### 2. Create an instance of Backprop with the model
backprop = Backprop(model)
path = './figure'
path_names = os.listdir(path)
for name in path_names:
    peacock = apply_transforms(load_image(os.path.join(path, name)))
    backprop.visualize(peacock, None, guided=True, use_gpu=True)
    plt.show()
Example #13
import os
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.config import cfg
from flashtorch.saliency import Backprop
from flashtorch.utils import apply_transforms, load_image

cfg.merge_from_file('./maskrcnn-benchmark/configs/e2e_faster_rcnn_R_50_FPN_1x.yaml')
model = build_detection_model(cfg)  # builds the detector architecture from the config
model = model.backbone              # use only the backbone for saliency

path='/home/wangfa/Workspace/jupiter/maskrcnn-benchmark/datasets/coco/train2014/'

for root,dirs,files in os.walk(path):
    for file in tqdm(files):
        backprop = Backprop(model)
        image = load_image(os.path.join(root,file))
        input_ = apply_transforms(image)

        target_class = 0
        backprop.visualize(input_,target_class,guided=True)


        # model = models.vgg16(pretrained=True)
        # g_ascent= GradientAscent(model.features)
        # conv5_1=model.features[24]
        # conv5_1_filters= [45,271,363,409]
        #
        # g_ascent.visualize(conv5_1,conv5_1_filters,title='vgg16: conv5_1')
        # plt.savefig('/home/wangfa/Desktop/output/'+file)
        plt.show()
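One design note on this loop: Backprop registers its hooks on the model at construction, so re-creating it for every file keeps stacking hooks on the same model. A sketch of the same loop with the instance hoisted out (assuming flashtorch's hook registration happens in the constructor):

backprop = Backprop(model)  # construct once; hooks are registered here
for root, dirs, files in os.walk(path):
    for file in tqdm(files):
        input_ = apply_transforms(load_image(os.path.join(root, file)))
        backprop.visualize(input_, 0, guided=True)
        plt.show()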