Example No. 1
def main():
    criterion = nn.CrossEntropyLoss()
    criterion = criterion.cuda()
    model = Network(args.init_channels, 10, args.layers, criterion, spaces_dict[args.search_space]).cuda()
    model.load_state_dict(torch.load("weights-3.pt"))
    model.eval()
    
    #for module in model.named_modules():
    #    print (module[0])

    # Open image
    raw_image = cv2.imread(args.img_path)

    tens = np.load(args.tens_path, allow_pickle=True)
    image = torch.from_numpy(tens).unsqueeze(0)#.unsqueeze(0)
    image = image.cuda()
    print (image.size())
    pred = model(image)
    print (pred)

    # GCAM
    gcam = GradCAM(model=model)
    predictions = gcam.forward(image)
    top_idx = predictions[0][1]
    print(predictions, len(predictions), top_idx)
    target_layer = "cells.19"
    gcam.backward(idx=top_idx)
    region = gcam.generate(target_layer=target_layer)
    cmap = cm.jet_r(region)[..., :3] * 255.0
    cmap = cv2.resize(cmap, (32, 32))
    blend = np.uint8((cmap + raw_image) / 2)
    cv2.imwrite("blend_4.png", blend)
    print (region.shape, cmap.shape, raw_image.shape)
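
If `gcam.generate` ever returns an unnormalized activation map, a minimal sketch (assumption: `region` is a 2-D NumPy array) for rescaling it to [0, 1] before applying `cm.jet_r` would be:

# Sketch only: cm.jet_r maps floats in [0, 1] to RGBA, so rescale first.
region = region - region.min()
if region.max() > 0:
    region = region / region.max()
heatmap = cm.jet_r(region)[..., :3] * 255.0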
Example No. 2
def get_data_of_gradcam(gcam: Tensor,
                        raw_image: Tensor,
                        paper_cmap: bool = False) -> np.ndarray:
    r"""Returns Grad-CAM data.

    Args:
        gcam (Tensor): Grad-CAM data.
        raw_image (Tensor): raw image data.
        paper_cmap (bool, optional): apply the paper-style alpha blend. Defaults to False.

    Returns:
        np.ndarray: blended Grad-CAM visualization as a uint8 array.
    """
    np_gcam = gcam.cpu().numpy()
    del gcam
    cmap = cm.jet_r(np_gcam)[..., :3] * 255.0  # type: ignore
    if paper_cmap:
        alpha = np_gcam[..., None]
        np_gcam = alpha * cmap + (1 - alpha) * raw_image.cpu().numpy()
    else:
        np_gcam = (cmap.astype(float) +
                   raw_image.clone().cpu().numpy().astype(float)) / 2

    np_gcam = np.uint8(np_gcam)  # type: ignore
    return np_gcam
Example No. 3
def save_gradcam(gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    #print("raw_image shape: ", raw_image.shape)
    # extract dog from colormap
    temp = cmap.astype(float)
    """TODO: tune the below parameters"""
    rgb_lower1 = np.array([0, 0, 35], dtype='uint8')
    rgb_upper1 = np.array([255, 255, 255], dtype='uint8')
    mask1 = cv2.inRange(temp, rgb_lower1, rgb_upper1)
    contours1, _ = cv2.findContours(mask1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    contour1 = sorted(contours1, key=cv2.contourArea, reverse=True)[0]
    x, y, w, h = cv2.boundingRect(contour1)
    dog = [x,y,w,h]
    # dog = raw_image[:, y:y+h, x:x+w]
    # dog = torch.unsqueeze(dog, 0)
    # dog = F.upsample(dog, (224, 224), mode="bilinear", align_corners=False)

    rgb_lower2 = np.array([120, 240, 100], dtype='uint8')
    rgb_upper2 = np.array([140, 255, 130], dtype='uint8')
    mask2 = cv2.inRange(temp, rgb_lower2, rgb_upper2)
    # cv2.inRange never returns None; guard on the contour list instead so
    # contour2 cannot be used while undefined.
    dog_face = None
    contours2, _ = cv2.findContours(mask2.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours2:
        contour2 = sorted(contours2, key=cv2.contourArea, reverse=True)[0]
        x, y, w, h = cv2.boundingRect(contour2)
        dog_face = [x, y, w, h]
    #dog_face = torch.tensor(dog_face)
    ####
    return dog, dog_face
Example No. 4
def main(config, model_path, cuda, crf, camera_id):
    # Configuration
    CONFIG = Dict(yaml.load(open(config), Loader=yaml.SafeLoader))

    cuda = cuda and torch.cuda.is_available()
    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on', torch.cuda.get_device_name(current_device))

    # Label list
    with open(CONFIG.LABELS) as f:
        classes = {}
        for label in f:
            label = label.rstrip().split('\t')
            classes[int(label[0])] = label[1].split(',')[0]

    # Load a model
    state_dict = torch.load(model_path)

    # Model
    model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.N_CLASSES)
    model.load_state_dict(state_dict)
    model.eval()
    if cuda:
        model.cuda()

    image_size = (CONFIG.IMAGE.SIZE.TEST, ) * 2

    cap = cv2.VideoCapture(camera_id)
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUYV'))

    while True:
        # Image preprocessing
        ret, frame = cap.read()
        image = cv2.resize(frame.astype(float), image_size)
        raw_image = image.astype(np.uint8)
        image -= np.array([
            float(CONFIG.IMAGE.MEAN.B),
            float(CONFIG.IMAGE.MEAN.G),
            float(CONFIG.IMAGE.MEAN.R),
        ])
        image = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
        image = image.cuda() if cuda else image

        # Inference
        output = model(Variable(image, volatile=True))
        output = F.upsample(output, size=image_size, mode='bilinear')
        output = F.softmax(output, dim=1)
        output = output.data.cpu().numpy()[0]

        if crf:
            output = dense_crf(raw_image, output)
        labelmap = np.argmax(output.transpose(1, 2, 0), axis=2)

        labelmap = labelmap.astype(float) / CONFIG.N_CLASSES
        labelmap = cm.jet_r(labelmap)[..., :-1] * 255.0
        cv2.addWeighted(np.uint8(labelmap), 0.5, raw_image, 0.5, 0.0,
                        raw_image)
        cv2.imshow('DeepLabV2', raw_image)
        cv2.waitKey(50)
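
`Variable(image, volatile=True)` and `F.upsample` are pre-0.4 PyTorch idioms; a minimal sketch of the equivalent inference step on current PyTorch (assuming the rest of the loop stays unchanged) is:

# Sketch for PyTorch >= 0.4: volatile Variables are gone, so disable autograd explicitly.
with torch.no_grad():
    output = model(image)
    output = F.interpolate(output, size=image_size, mode='bilinear', align_corners=False)
    output = F.softmax(output, dim=1)
output = output.cpu().numpy()[0]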
Example No. 5
def massflux_ss(init, end):
    """draw th in average"""
    ##### retrieve variables #####
    xx, zz = np.meshgrid(x, z)
    dt = netCDF4.Dataset(dy_files[0])
    t = int(np.array(dt['Time']))
    density = np.loadtxt('../density.txt')[:len(z)]
    dwdzss = np.zeros((end - init, len(z)))
    for tidx in range(init, end):
        td = netCDF4.Dataset(td_files[tidx])
        progressbar(now=tidx - init,
                    length=end - init,
                    text='mass flux in snapshot')

        dy = netCDF4.Dataset(dy_files[tidx])
        w = np.mean(dy['w'][0, :len(z)] * area, axis=(1, 2))
        dwdzss[tidx - init, :] = w * density

    figure()
    for tidx in range(init, end):
        plot(dwdzss[tidx - init, :],
             z,
             c=cm.jet_r((tidx - init) / (end - init)),
             label='%s' % tidx,
             alpha=0.5)
    plot(np.mean(dwdzss, axis=0), z, color='black', label='average')
    legend(fontsize=5)
    title(r'Convective Mass Flux $\rho w$')
    savefig('dwdzss_ct.jpg', dpi=300)
    clf()
Example No. 6
def save_gradcam(filename, gcam, paper_cmap=False):
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap
    else:
        gcam = cmap.astype(float)
    cv2.imwrite(filename, np.uint8(gcam))
Example No. 7
def save_gradcam_over_image(filename, gcam, raw_image, paper_cmap=False):
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
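
A minimal usage sketch (assumptions: the attention map is a 2-D NumPy array already normalized to [0, 1], and the raw image is a BGR uint8 array of the same height and width; the path below is a placeholder):

img = cv2.imread("input.jpg")                       # placeholder path
heat = np.random.rand(img.shape[0], img.shape[1])   # placeholder attention map in [0, 1]
save_gradcam_over_image("overlay.png", heat, img, paper_cmap=True)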
Example No. 8
def getCAM(feature_conv, weight_fc, discard):
    _, nc, h, w = feature_conv.shape
    cam = weight_fc[discard].dot(feature_conv.reshape((nc, h*w)))
    cam = cam.reshape(h, w)
    cam = cam - np.min(cam)
    cam_img = cam / np.max(cam)
    cam_img = cm.jet_r(cam_img)[..., :3] * 255.0
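    # 'pixel_intensity' is assumed to be a module-level scaling factor defined elsewhere in the original script.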
    cam_img *= pixel_intensity
    return cam_img
Example No. 9
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
def get_gradcam_image(gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        gcam = (cmap.astype(float) + raw_image.astype(float)) / 2
    return np.uint8(gcam)
Example No. 11
    def colorize(self, labelmap):
        #print(labelmap.shape)
        # Assign a unique color to each label
        labelmap = labelmap.astype(np.float32) / self.CONFIG.DATASET.N_CLASSES
        if self.autumn:
            colormap = cm.autumn(labelmap)[..., :-1] * 255.0
        else:
            colormap = cm.jet_r(labelmap)[..., :-1] * 255.0

        return np.uint8(colormap)
Example No. 12
def generate_gcam2d(attention_map, raw_input):
    assert (len(attention_map.shape) == 2)  # No batch dim
    assert (isinstance(attention_map, np.ndarray))  # Not a tensor
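    # 'overlay', '_resize_attention_map', and MIN_SHAPE are assumed to be defined elsewhere in this module.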

    if raw_input is not None:
        attention_map = overlay(raw_input, attention_map)
    else:
        attention_map = _resize_attention_map(attention_map, MIN_SHAPE)
        attention_map = cm.jet_r(attention_map)[..., :3] * 255.0
    return np.uint8(attention_map)
Example No. 13
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        gcam = (cmap.astype(float) +
                255 * raw_image[..., ::-1].astype(float)) / 2
    cv2.imwrite(filename, np.uint8(np.clip(gcam, 0, 255)))
Example No. 14
def save_gradcam(filename, gcam, raw_image, save_as_file=False):
    print(f"\t Generating Image : {filename}")
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    gcam = (cmap.astype(float) + raw_image.astype(float)) / 2

    if save_as_file:
        cv2.imwrite(filename, np.uint8(gcam))
        return None
    return np.uint8(gcam)
Example No. 15
def overlay(raw_input, attention_map):
    if np.max(raw_input) > 1:
        raw_input = raw_input.astype(float)
        raw_input /= 255
    attention_map = cv2.resize(attention_map,
                               tuple(np.flip(raw_input.shape[:2])))
    attention_map = cm.jet_r(attention_map)[..., :3]
    attention_map = (attention_map.astype(float) +
                     raw_input.astype(float)) / 2
    attention_map *= 255
    return attention_map
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        #gcam = (cmap.astype(np.float) + raw_image.astype(np.float)) / 2
        gcam = (cmap.astype(float) + raw_image.transpose(0, 1).transpose(
            1, 2).cpu().detach().numpy()) / 2

    cv2.imwrite(filename, np.uint8(gcam))
Example No. 17
    def _visualize(self, img, res):
        for obj in res.objects:
            tl_x = obj.region.x_offset
            tl_y = obj.region.y_offset
            br_x = tl_x + obj.region.width
            br_y = tl_y + obj.region.height
            color = np.array(cm.jet_r(obj.score)[0:3]) * 255
            cv2.rectangle(img, (tl_x, tl_y), (br_x, br_y), color, 3)
            cv2.putText(img, obj.class_name, (tl_x, tl_y - 2), cv2.FONT_HERSHEY_COMPLEX, 1.0, color, 2)

        cv2.imshow("color", img)
        cv2.waitKey(10)
Example No. 18
def makePreview(array, filename):
	import numpy as np
	import matplotlib
	import matplotlib.mlab as mlab
	import matplotlib.pyplot as plt
	import matplotlib.cm as cm
	from PIL import Image
	import cv2
	import scipy.ndimage
	x=array.shape[0]
	y=array.shape[1]
	min= 99999
	max=0

	for i in range (0,x):
		for ii in range (0,y):
			if (array[i][ii]!=0 and array[i][ii]<min):
				min=array[i][ii]
			if (array[i][ii]!=0 and array[i][ii]>max):
				max=array[i][ii]
	print('%s %s %s %s' %(min,max, x, y))

	

	mask=np.empty([x, y], dtype='float64')
	for i in range (0,x):
		for ii in range (0,y):
			if (array[i][ii]!=0):
				mask[i][ii]=255
			if (array[i][ii]==0):
				mask[i][ii]=0
	
	
	if (min!=max and max!=0):
		array=(1-(array-min)/(max-min))
		array = cv2.resize(array, (0,0), fx=5, fy=5) 
		mask = cv2.resize(mask, (0,0), fx=5, fy=5) 
		array= scipy.ndimage.median_filter(array, 4)	
		values = Image.fromarray(np.uint8(cm.jet_r(array)*255)).convert('RGB')
		
		
		
			
		hs_array = Image.fromarray(np.uint8(hillshade(array*255,45, 315, 0.5))).convert('RGB')
		new_img = Image.blend(values, hs_array, 0.5).convert('RGBA')
		mask = Image.fromarray(np.uint8(mask)).convert('L')
		new_img.putalpha(mask)
		new_img.save(pathTif+filename+'prev.png')
		img = Image.open(pathTif+filename+'prev.png')
		img.show() 
	else:
		print('error in reading image')
def colorize_depth(depth_map):
    # scale everything to [0, 255]
    sorted_depth = np.unique(np.sort(depth_map.flatten()))
    min_depth = sorted_depth[0]
    max_depth = sorted_depth[len(sorted_depth) - 1]

    # Normalize to [0, 1]; use vectorized arithmetic (map() returns an iterator
    # on Python 3, which breaks np.asarray here).
    depth_map = (depth_map - min_depth) * 1.0 / (max_depth - min_depth)

    # Apply jet colormap to it
    depth_map = np.uint8(cm.jet_r(depth_map) * 255)
    return depth_map[:, :, 0:3]
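
A quick usage sketch (assumptions: NumPy is imported as np and the depth values are arbitrary positive floats; the array below is a placeholder):

depth = np.random.uniform(0.5, 10.0, size=(240, 320)).astype(np.float32)  # placeholder depth map
colored = colorize_depth(depth)  # -> (240, 320, 3) uint8, jet_r-colored RGB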
Example No. 20
def overlay(raw_input, attention_map):
    if isinstance(raw_input, torch.Tensor):
        raw_input = raw_input.detach().cpu().numpy()
        if raw_input.shape[0] == 1 or raw_input.shape[0] == 3:
            raw_input = raw_input.transpose(1, 2, 0)
    if np.max(raw_input) > 1:
        raw_input = raw_input.astype(float)
        raw_input /= 255
    attention_map = cv2.resize(attention_map, tuple(np.flip(raw_input.shape[:2])))
    attention_map = cm.jet_r(attention_map)[..., :3]
    attention_map = (attention_map.astype(float) + raw_input.astype(float)) / 2
    attention_map *= 255
    return attention_map
Example No. 21
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        #gcam = (cmap.astype(np.float) + raw_image.astype(np.float)) / 2
        gcam = cmap.astype(float)
    gcam = gcam.astype(np.uint8)
    cv2.imwrite(filename, gcam)
    #cv2.imwrite(filename, np.uint8(gcam))
    return gcam
Example No. 22
	def makePreview(self,array, hillshade_mat, filename):
		import matplotlib
		import matplotlib.mlab as mlab
		import matplotlib.pyplot as plt
		import matplotlib.cm as cm
		from PIL import Image
		import cv2
		#import scipy.ndimage
		x=array.shape[0]
		y=array.shape[1]
		min= 99999
		max=0
		if self.stdClip_check.isChecked():
				array=self.stdClip(array, self.stdClip_slider.value()/float(100))
		for i in range (0,x):
			for ii in range (0,y):
				if (array[i][ii]!=0 and array[i][ii]<min):
					min=array[i][ii]
				if (array[i][ii]!=0 and array[i][ii]>max):
					max=array[i][ii]
		print('min: %.2f max: %.2f \nsize: %s x %s' %(min,max, x, y))
	

		mask=np.empty([x, y], dtype='float64')
		for i in range (0,x):
			for ii in range (0,y):
				if (array[i][ii]!=0):
					mask[i][ii]=255
				if (array[i][ii]==0):
					mask[i][ii]=0

		if (min!=max and max!=0):
			
			array=(1-(array-min)/(max-min))
			array = cv2.resize(array, (0,0), fx=5, fy=5) 
			hillshade_mat = cv2.resize(hillshade_mat, (0,0), fx=5, fy=5) 
			mask = cv2.resize(mask, (0,0), fx=5, fy=5,  interpolation=cv2.INTER_NEAREST) 
			#array= scipy.ndimage.median_filter(array, 4)	
			values = Image.fromarray(np.uint8(cm.jet_r(array)*255)).convert('RGB')
			 
			
			
				
			hs_array = Image.fromarray(np.uint8(hillshade_mat)).convert('RGB')
			new_img = Image.blend(values, hs_array, 0.3).convert('RGBA')
			mask = Image.fromarray(np.uint8(mask)).convert('L')
			new_img.putalpha(mask)
			new_img.save(str(self.pathOutput+filename)+'preview.png')
			# self.displayPreview()
		else:
			print('error in reading image')
Example No. 23
def saveGCam(gc, raw_image, opath, paper_cmap=False):
    """
    Save the grad-cam overlaid on the image 
    """
    gc = gc.cpu().numpy()
    cmap = cm.jet_r(gc)[..., :3] * 255.0
    if paper_cmap:
        alpha = gc[..., None]
        gc = alpha * cmap + (1 - alpha) * raw_image
    else:
        gc = (cmap.astype(float) + raw_image.astype(float)) / 2
    cv2.imwrite(opath, np.uint8(gc))

    return
Example No. 24
def save_gradcam_overlay(filename, gcam_img, input_img, gcam_alpha=0.2):
    ''' gcam_img and input_img should be two tensors with the same shape
    '''
    gcam_img = gcam_img.numpy()
    input_img = input_img.numpy()

    assert np.shape(gcam_img) == np.shape(input_img),\
        f"gcam_img shape: {np.shape(gcam_img)}; input_img shape: {np.shape(input_img)}. "\
        "gcam_img and input_img should have the same shape!"

    gcam_img = cm.jet_r(gcam_img)[..., :3] * 255.0
    input_img = cm.gray(input_img)[..., :3] * 255.0

    gcam_overlay = gcam_alpha*gcam_img.astype(float) + \
            (1-gcam_alpha)*input_img.astype(float)

    cv2.imwrite(filename, gcam_overlay.astype(np.uint8))
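
A minimal usage sketch (assumptions: both inputs are 2-D CPU tensors of the same size with values in [0, 1]; the tensors below are placeholders):

gcam_map = torch.rand(224, 224)   # placeholder Grad-CAM map
gray_img = torch.rand(224, 224)   # placeholder grayscale input
save_gradcam_overlay("overlay.png", gcam_map, gray_img, gcam_alpha=0.3)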
Example No. 25
    def refresh(self):
        if not self.IsShown():
            return

        self.colPlotPan.draw()

        self.lFluorSpecies.DeleteAllItems()

        for key in self.pipeline.colour_mapper.species_ratios.keys():
            ind = self.lFluorSpecies.InsertStringItem(UI_MAXSIZE, key)

            ratio = self.pipeline.colour_mapper.species_ratios[key]
            self.lFluorSpecies.SetStringItem(ind, 1, '%3.3f' % ratio)
            self.lFluorSpecies.SetItemTextColour(
                ind, wx.Colour(*((128 * numpy.array(cm.jet_r(ratio)))[:3])))

            num_dyes = sum(self.pipeline.colourFilter._index(key))

            self.lFluorSpecies.SetStringItem(ind, 2, '%d' % num_dyes)
Example No. 26
    def __init__(self, res, vc, f, ap):
        mi = 'minimum'
        ma = 'maksimum'
        sr = 'srednje'
        arr = 'array'

        nplt = 500

        vplt = {mi: vc.min(), ma: vc.max(), sr: vc.mean()}
        vplt[arr] = np.linspace(vplt[mi], vplt[ma], nplt)

        fplt = {mi: f.min(), ma: f.max(), sr: f.mean()}
        fplt[arr] = np.linspace(fplt[mi], fplt[ma], nplt)

        applt = {mi: ap.min(), ma: ap.max(), sr: ap.mean()}
        applt[arr] = np.linspace(applt[mi], applt[ma], nplt)

        t = PostojanostAlata()

        self.d3 = plt.figure(figsize=(22, 11), dpi=300)
        ax = self.d3.add_subplot(111, projection='3d')
        ax.set_title('Postojanost alata pri razlicitim dubinama rezanja',
                     fntdict)
        ax.set_xlabel(r'Brzina rezanja $[\frac{m}{min}]$', fntdict)
        ax.set_ylabel(r'Posmak $[\frac{mm}{okr}]$', fntdict)
        ax.set_zlabel(r'Postojanost alata $[min]$', fntdict)
        V, F = np.meshgrid(vplt[arr], fplt[arr])
        Ap = np.empty(V.shape)
        Postmax = np.array([])
        for i in applt[mi], applt[sr], applt[ma]:
            Ap[:] = i
            Post = t(res.x, V, F, Ap)
            Postmax = np.append(Postmax, Post.max())
            N = Post / Postmax.max()
            ax.plot_surface(V,
                            F,
                            Post,
                            linewidth=0,
                            facecolors=cm.jet_r(N),
                            antialiased=False,
                            shade=False)
Example No. 27
    def preview(self, dataset):
        kwargs = {"nrow": 4, "padding": 40}
        for i, (_, images, labels) in enumerate(dataset):
            if i == 0:
                image = make_grid(images, pad_value=-1, **kwargs).numpy()
                image = np.transpose(image, (1, 2, 0))
                mask = np.zeros(image.shape[:2])
                mask[(image != -1)[..., 0]] = 255
                image = np.dstack((image, mask)).astype(np.uint8)

                labels = labels[:, np.newaxis, ...]
                label = make_grid(labels, pad_value=255, **kwargs).numpy()
                label_ = np.transpose(label, (1, 2, 0))[..., 0].astype(np.float32)
                label = cm.jet_r(label_ / 3.0) * 255
                label[..., 3][(label_ == 255)] = 0
                label = label.astype(np.uint8)

                tiled_images = np.hstack((image, label))
                # cv2.imwrite("./docs/datasets/voc12.png", tiled_images)
                plt.figure(figsize=(40, 20))
                plt.imshow(np.dstack((tiled_images[..., 2::-1], tiled_images[..., 3])), aspect='auto')
                plt.show()
                return
Example No. 28
    loader = data.DataLoader(dataset, batch_size=batch_size)

    for i, (image_ids, images,
            labels) in tqdm(enumerate(loader),
                            total=np.ceil(len(dataset) / batch_size),
                            leave=False):
        if i == 0:
            mean = torch.tensor((104.008, 116.669, 122.675))[None, :, None,
                                                             None]
            images += mean.expand_as(images)
            image = make_grid(images, pad_value=-1, **kwargs).numpy()
            image = np.transpose(image, (1, 2, 0))
            mask = np.zeros(image.shape[:2])
            mask[(image != -1)[..., 0]] = 255
            image = np.dstack((image, mask)).astype(np.uint8)

            labels = labels[:, np.newaxis, ...]
            label = make_grid(labels, pad_value=255, **kwargs).numpy()
            label_ = np.transpose(label, (1, 2, 0))[..., 0].astype(np.float32)
            label = cm.jet_r(label_ / 21.0) * 255
            mask = np.zeros(label.shape[:2])
            label[..., 3][(label_ == 255)] = 0
            label = label.astype(np.uint8)

            tiled_images = np.hstack((image, label))
            # cv2.imwrite("./docs/datasets/voc12.png", tiled_images)
            plt.imshow(
                np.dstack((tiled_images[..., 2::-1], tiled_images[..., 3])))
            plt.show()
            break
Example No. 29
cb.set_label('Battery Percentage')
cb.ax.tick_params(labelsize=8)

plt.savefig(os.path.join(dbdir, 'batyear.png'), dpi=300)

# yearview
#################################################
from matplotlib.colors import ListedColormap as LC

fig = plt.figure(figsize=(10,5), dpi=300)
ax = fig.add_subplot(111)

#c=ax.scatter(DOY, TOD, c=numpy.array(ssids), edgecolor='none', marker='s', 
#             s=1, cmap=cm.rainbow)

mp = cm.jet_r(numpy.linspace(0, 1, nSSID))
ssidcols = LC(mp, 'ssidcols')
cm.register_cmap(cmap = ssidcols)

ssidtick = AWAKE.ssids.unique()
ssidtick.sort()
ssidtick = ssidtick * (ssidtick.max() / float(nSSID))
ssidtick = ssidtick + ssidtick[1]/2.

c = ax.imshow(ssidgrid, interpolation='none', aspect='auto', cmap=ssidcols)

ax.set_xlabel('Day of Year')
ax.set_ylabel('Time of Day')

#ax.set_xlim((0, 366))
#ax.set_ylim((-288, 0))
Example No. 30
def get_normed_colormap(inarray):
    norm = colors.Normalize(inarray.min(), inarray.max())
    jj = jet_r(norm(inarray))

    cl = [colors.to_hex(c) for c in jj]
    return cl
import matplotlib as mpl

if __name__ == '__main__':
    files = list(glob.glob('evaluate-depths/predicted-*.png'))
    files.extend(list(glob.glob('evaluate-depths/orig-depth-*.png')))
    files.extend(list(glob.glob('evaluate-depths/gt-depth-*.png')))
    for file in files:
        im = np.array(Image.open(file))
        colored_im = np.copy(im)
        # if 'orig-' in file:
        #     im[im == 0] = 255
        if 'gt-' in file:
            colored_im[colored_im == 5] = 255
        elif 'predicted-' in file:
            colored_im[colored_im < 4] = 255
        colored_im = Image.fromarray(np.uint8(cm.jet_r(colored_im) * 255.0))
        colored_im.save('evaluate-depths/colored-' + os.path.basename(file) +
                        '.png')

    fig = plt.figure(figsize=(1, 3))
    ax1 = fig.add_axes([0, 0.05, 0.2, 0.9])
    cmap = mpl.cm.jet_r
    norm = mpl.colors.Normalize(vmin=50, vmax=0)
    cb1 = mpl.colorbar.ColorbarBase(ax1,
                                    cmap=cmap,
                                    norm=norm,
                                    orientation='vertical')
    cb1.set_label('distance [m]')
    plt.savefig('evaluate-depths/colorbar-cropped.png')

    fig = plt.figure(figsize=(1, 3))
Example No. 32
def colorize(labelmap):
    # Assign a unique color to each label
    labelmap = labelmap.astype(np.float32) / CONFIG.DATASET.N_CLASSES
    colormap = cm.jet_r(labelmap)[..., :-1] * 255.0
    return np.uint8(colormap)
Example No. 33
def colors(number=None):
    if number is None:
        return ["r", "coral", "gold", "limegreen", "seagreen", "aqua",
                "royalblue", "b", "navy"]
    return [cm.jet_r((1.0 * i) / (1.0 * number)) for i in range(number)]
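
For example, a small usage sketch (assumptions: numpy as np and matplotlib.pyplot as plt are already imported):

# Draw five curves, each with a color sampled evenly from the reversed jet colormap.
xs = np.linspace(0, 2 * np.pi, 200)
for i, c in enumerate(colors(5)):
    plt.plot(xs, np.sin(xs + i), color=c)
plt.show()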
Example No. 34
        fig, ax = plt.subplots()

        # Plot HIV
        for ih in range(len(S_bins) - 1):
            ax.plot(binsc, hists[ih] / hists[ih, 0] * hists[-1, 0], lw=2, marker='o',
                    label='HIV, $S_{type M} \in ['+str(S_bins[ih])+', '+str(S_bins[ih + 1])+']$',
                    color=cm.jet(1.0 * ih / hists.shape[0]))

        # Plot theory
        al = hists[1, 0]
        if add_bsc:
            for (N, alpha) in sfs_bc:
                ax.plot(sfs_bc[(N, alpha)], sfs_bsc[(N, alpha)],
                        lw=2, ls = '-',
                        color=cm.jet_r(1.0 * (alpha - 1)),
                        label = 'Beta coalescent, $\\alpha = '+str(alpha)+'$')
        else:
            ax.plot(binsc[:8], al*binsc[0]**2/binsc[:8]**2, lw=2, c='r')
        ax.plot(binsc, al*binsc[0]/binsc, lw=2, c='b', label = 'Neutral, $\\alpha = 2$')

        ax.set_xlabel('derived allele frequency')
        ax.set_ylabel('SFS [density = counts / sum / binsize]')
        ax.set_xlim(10**(-3.1), 1.0 - 10**(-3.1))
        ax.set_ylim([3e1,3e7])
        ax.set_xscale('logit')
        ax.set_yscale('log')
        if len(fragments) == 6:
            ax.set_title('All patients, all fragments')
        else:
            ax.set_title('All patients, fragments '+str(fragments))
Example No. 35
def test(target_layer_name):
    model = Res101().to(device)
    model.eval()
    model.load_state_dict(
        torch.load('res101.pt', map_location=torch.device(device)))

    xs, ts, paths = data_load('../Dataset/test/images/')

    target_layer = None

    for name, module in model.named_modules():
        if target_layer_name == name:
            print('target:', name)
            target_layer = module

    if target_layer is None:
        for name, module in model.named_modules():
            print(name)
        raise Exception('invalid target layer name >>', target_layer_name)

    if type(target_layer) is torch.nn.Sequential:
        target_layer = target_layer[-1]

    print(target_layer)

    fmap_pool = OrderedDict()
    grad_pool = OrderedDict()

    def forward_hook(key):
        def forward_hook_(module, input, output):
            # Save featuremaps
            fmap_pool[key] = output.detach()

        return forward_hook_

    def backward_hook(key):
        def backward_hook_(module, grad_in, grad_out):
            # Save the gradients corresponding to the feature maps
            grad_pool[key] = grad_out[0].detach()

        return backward_hook_

    # Hooks are registered on all layers; only the target layer's feature maps and gradients are used later.
    for name, module in model.named_modules():
        module.register_forward_hook(forward_hook(name))
        module.register_backward_hook(backward_hook(name))

    for i in range(len(paths)):
        _x = xs[i]
        t = ts[i]
        path = paths[i]

        x = np.expand_dims(_x, axis=0)
        x = torch.tensor(x, dtype=torch.float).to(device)

        # forward network
        logit = model(x)
        pred = F.softmax(logit, dim=1).detach().cpu().numpy()

        raw_image = (_x).transpose(1, 2, 0)

        plt.subplot(1, Class_N + 1, 1)
        plt.imshow(raw_image)
        plt.title('input')

        for ci, class_label in enumerate(Class_label):
            # set one-hot class activity
            class_index = torch.zeros(pred.shape).to(device)

            _index = Class_label.index(class_label)
            class_index[:, _index] = 1

            logit.backward(gradient=class_index, retain_graph=True)

            #target_layer_output = target_layer.forward(x)
            fmaps = fmap_pool[target_layer_name]
            grads = grad_pool[target_layer_name]
            weights = F.adaptive_avg_pool2d(grads, 1)

            gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
            gcam = F.relu(gcam)

            gcam = F.interpolate(gcam, [img_height, img_width],
                                 mode="bilinear",
                                 align_corners=False)

            B, C, H, W = gcam.shape
            gcam = gcam.view(B, -1)
            gcam -= gcam.min(dim=1, keepdim=True)[0]
            gcam /= gcam.max(dim=1, keepdim=True)[0]
            gcam = gcam.view(B, C, H, W)

            gcam = gcam.cpu().numpy()[0, 0]
            cmap = cm.jet_r(gcam)[..., :3]
            gcam = (cmap.astype(float) + raw_image.astype(float)) / 2

            plt.subplot(1, Class_N + 1, ci + 2)
            plt.imshow(gcam)
            plt.title('{}: {:.2f}'.format(class_label, pred[0, ci]))

        plt.show()

        print("in {}, predicted probabilities >> {}".format(path, pred))