Example #1
def ton_and_color_corrections():
    # Tone and color correction
    image=data.astronaut()
    h1=color.rgb2hsv(image)
    h2=h1.copy()
    h1[:,:,1]=h1[:,:,1]*0.5
    image1=color.hsv2rgb(h1)
    h2[:,:,1]=h2[:,:,1]*0.5+0.5
    image2=color.hsv2rgb(h2)
    io.imshow(image)
    io.imsave('astronaut.png',image)
    io.imshow(image1)
    io.imsave('astronautlight.png',image1)
    io.imshow(image2)
    io.imsave('astronautdark.png',image2)
    
    imagered=image.copy()
    imagered[:,:,0]=image[:,:,0]*127.0/255+128
    io.imsave('astronautred.png',imagered)
    imageblue=image.copy()
    imageblue[:,:,2]=image[:,:,2]*127.0/255+128
    io.imsave('astronautblue.png',imageblue)
    imageyellow=image.copy()
    imageyellow[:,:,0]=image[:,:,0]*127.0/255+128
    imageyellow[:,:,1]=image[:,:,1]*127.0/255+128
    io.imsave('astronautyellow.png',imageyellow)
    io.imshow(imageyellow)
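The excerpt above omits its imports. A minimal, self-contained sketch of the same kind of saturation tweak, assuming only that scikit-image is installed:

from skimage import data, color, io, img_as_ubyte

img = data.astronaut()
hsv = color.rgb2hsv(img)
hsv[:, :, 1] *= 0.5  # halve the saturation for a washed-out look
io.imsave('astronaut_desaturated.png', img_as_ubyte(color.hsv2rgb(hsv)))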
Example #2
def classify_and_show(km, img_dir):
    '''Use a km object to display both the original and the altered version of the
    image at img_dir; the file must be a PNG.'''
    img = rgb2hsv(mpimg.imread(img_dir)[...,:3])
    s = img.shape
    data = img.reshape((s[0] * s[1], s[2]))
    labels = km.classify(data)

    plt.imshow(hsv2rgb(img))
    plt.figure()
    plt.imshow(hsv2rgb(km.means[labels].reshape(s)))
Example #3
def gaborbank_orientation_vis(d, method='mean', legend=True):
    """ Visualise the orientation for each point in the image.

    Method 'mean' uses the mean resultant vector over
    all orientation filters. Method 'max' takes the orientation
    at each pixel to be that of the filter with maximum energy
    at that pixel.

    Adapted from http://nbviewer.ipython.org/github/gestaltrevision\
    /python_for_visres/blob/master/Part7/Part7_Image_Statistics.ipynb

    Args:
        d: the dict output by gaborbank_convolve.
    """

    res = d['res']
    e = res.real**2 + res.imag**2  # energy
    e = e.sum(axis=3).sum(axis=2)  # sum energy over scales and orientations.

    if method == 'mean':
        ori = gaborbank_mean_orientation(d)
    elif method == 'max':
        ori = gaborbank_max_orientation(d)
    else:
        raise ValueError('Unknown method!')

    # output values range 0--pi; adjust hues accordingly:
    H = ori / np.pi
    S = np.ones_like(H)
    V = (e - e.min()) / e.max()
    HSV = np.dstack((H, S, V))
    RGB = hsv2rgb(HSV)

    if legend is True:
        # Render a hue circle
        sz = int(e.shape[0] * 0.1)
        r, a = pu.image.axes_polar(sz)
        a[a < 0] += np.pi
        a /= np.pi
        # a = (a - a.min()) / a.max()
        a = 1 - a  # not sure why I have to flip this, but
        # otherwise diagonals are reversed.
        mask = (r < 0.9) & (r > 0.3)
        hsv_legend = np.dstack((a,
                                np.ones_like(a, dtype='float'),
                                mask.astype('float')))
        rgb_legend = hsv2rgb(hsv_legend)
        RGB[:sz, :sz, :] = rgb_legend[::-1, ::]

    return RGB
Example #4
File: pic_an.py Project: varnivey/darfi
    def get_merged_pic(self, nuclei_color = 0.66, foci_color = 0.33, seeds = False):
        '''Return merged pic with foci and nuclei'''

        x_max, y_max = self.nuclei.shape

        active_cells = self.active_cells()

        cell_number = len(active_cells)

        if cell_number == 0:

            return np.zeros((x_max, y_max, 3), dtype = np.uint8)

        merged_pic_peaces = []

        nuclei_rgb_koef = hsv2rgb(np.array([nuclei_color, 1., 1.]).reshape((1,1,3))).reshape(3)

        foci_rgb_koef = hsv2rgb(np.array([foci_color, 1., 1.]).reshape((1,1,3))).reshape(3)

        for cur_cell in active_cells:

            if seeds:
                foci = cur_cell.foci_seeds
            else:
                foci = cur_cell.foci_binary

            nucleus_only = cur_cell.nucleus - foci

            pic_foci_enhanced = 255 - np.floor((255 - cur_cell.pic_foci)*0.6)

            pic_foci_enhanced = foci*pic_foci_enhanced

            pic_nucleus_only = cur_cell.pic_nucleus*nucleus_only

            pic_foci_enhanced_3d = np.dstack((pic_foci_enhanced, pic_foci_enhanced, pic_foci_enhanced))

            pic_nucleus_only_3d = np.dstack((pic_nucleus_only, pic_nucleus_only, pic_nucleus_only))

            pic_foci_rgb = pic_foci_enhanced_3d*foci_rgb_koef

            pic_nucleus_only_rgb = pic_nucleus_only_3d*nuclei_rgb_koef

            pic_merged_rgb = np.floor(pic_foci_rgb + pic_nucleus_only_rgb).astype(np.uint8)

            merged_pic_peaces.append(peace(pic_merged_rgb, cur_cell.coords))

        merged_pic = join_peaces_3d(merged_pic_peaces, x_max, y_max, dtype = np.uint8)

        return merged_pic
Example #5
File: RDMcolormap.py Project: ilogue/pyrsa
def RDMcolormap(nCols=256):

    # blue-cyan-gray-red-yellow with increasing V (BCGRYincV)
    anchorCols = np.array([
        [0, 0, 1],
        [0, 1, 1],
        [.5, .5, .5],
        [1, 0, 0],
        [1, 1, 0],
    ])

    # skimage rgb2hsv is intended for 3d images (RGB)
    # here we add a new axis to our 2d anchorCols to satisfy skimage, and then squeeze
    anchorCols_hsv = rgb2hsv(anchorCols[np.newaxis, :]).squeeze()

    incVweight = 1
    anchorCols_hsv[:, 2] = (1-incVweight)*anchorCols_hsv[:, 2] + \
        incVweight*np.linspace(0.5, 1, anchorCols.shape[0]).T

    # anchorCols = brightness(anchorCols)
    anchorCols = hsv2rgb(anchorCols_hsv[np.newaxis, :]).squeeze()

    cols = colorScale(nCols, anchorCols)

    return ListedColormap(cols)
Example #6
File: __init__.py Project: Tinrry/anna
def color_augment_image(data):
    image = data.transpose(1, 2, 0)
    hsv = color.rgb2hsv(image)

    # Contrast 2
    s_factor1 = numpy.random.uniform(0.25, 4)
    s_factor2 = numpy.random.uniform(0.7, 1.4)
    s_factor3 = numpy.random.uniform(-0.1, 0.1)

    hsv[:, :, 1] = (hsv[:, :, 1] ** s_factor1) * s_factor2 + s_factor3

    v_factor1 = numpy.random.uniform(0.25, 4)
    v_factor2 = numpy.random.uniform(0.7, 1.4)
    v_factor3 = numpy.random.uniform(-0.1, 0.1)

    hsv[:, :, 2] = (hsv[:, :, 2] ** v_factor1) * v_factor2 + v_factor3

    # Color
    h_factor = numpy.random.uniform(-0.1, 0.1)
    hsv[:, :, 0] = hsv[:, :, 0] + h_factor

    hsv[hsv < 0] = 0.0
    hsv[hsv > 1] = 1.0

    rgb = color.hsv2rgb(hsv)

    data_out = rgb.transpose(2, 0, 1)
    return data_out
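A hedged usage sketch for the augmentation above; the transposes in the function imply a channels-first float image in [0, 1], which is assumed here:

import numpy
from skimage import color, data

chw = data.astronaut().astype('float64').transpose(2, 0, 1) / 255.0
augmented = color_augment_image(chw)  # channels-first float array, same shape as the input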
Example #7
File: basic.py Project: jason2506/imeffect
 def _process(self, img):
     hsv = rgb2hsv(img)
     h = hsv[:, :, 0] + self._adjust
     h[h > 1] -= 1
     hsv[:, :, 0] = h
     img[:, :, :] = hsv2rgb(hsv)
     return img
Example #8
 def test_hsv2rgb_conversion(self):
     rgb = self.img_rgb.astype("float32")[::16, ::16]
     # create HSV image with colorsys
     hsv = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2]) for pt in rgb.reshape(-1, 3)]).reshape(rgb.shape)
     # convert back to RGB and compare with original.
     # relative precision for RGB -> HSV roundtrip is about 1e-6
     assert_almost_equal(rgb, hsv2rgb(hsv), decimal=4)
Example #9
def stretchImageHue(imrgb):
	# Image must be stored as 0-1 bound float. If it's 0-255 int, convert
	if( imrgb.max() > 1 ):
		imrgb = imrgb*1./255

	# Transform to HSV
	imhsv = rgb2hsv(imrgb)

	# Find 2-98 percentiles of H histogram (except de-saturated pixels)
	plt.figure()
	plt.hist(imhsv[imhsv[:,:,1]>0.1,0].flatten(), bins=360)
	p2, p98 = np.percentile(imhsv[imhsv[:,:,1]>0.1,0], (2, 98))
	print(p2, p98)

	imhsv[:,:,0] = doStretch(imhsv[:,:,0], p2, p98, 0.6, 0.99)
	plt.figure()
	plt.hist(imhsv[imhsv[:,:,1]>0.1,0].flatten(), bins=360)	

	imrgb_stretched = hsv2rgb(imhsv)
	plt.figure()
	plt.imshow(imrgb)
	plt.figure()
	plt.imshow(imrgb_stretched)

	plt.show()
Example #10
def hsi_equalize_hist():
    image=data.astronaut()
    h=color.rgb2hsv(image)
    h[:,:,2]=exposure.equalize_hist(h[:,:,2])
    image_equal=color.hsv2rgb(h)
    io.imshow(image_equal)
    io.imsave('astronautequal.png',image_equal)
Example #11
def saturate(im, amount=1.1):
  hsvim = skcolor.rgb2hsv(im)
  hue = np.take(hsvim, 0, axis=2)
  sat = np.take(hsvim, 1, axis=2)
  val = np.take(hsvim, 2, axis=2)
#   sat = sat * amount
  newhsv = np.dstack((hue, sat, val))
  return skcolor.hsv2rgb(newhsv)
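Note that the `sat = sat * amount` line above is commented out, so saturate() currently returns the image essentially unchanged. A sketch of how the boost would typically be applied, with clipping to keep saturation in [0, 1] (this variant is not part of the original):

import numpy as np
from skimage import color as skcolor

def saturate_clipped(im, amount=1.1):
    hsvim = skcolor.rgb2hsv(im)
    hsvim[:, :, 1] = np.clip(hsvim[:, :, 1] * amount, 0.0, 1.0)  # boost saturation, clipped
    return skcolor.hsv2rgb(hsvim)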
Example #12
def main():
    # read the images
    image_from = io.imread(name_from) / 256
    image_to = io.imread(name_to) / 256

    # change to hsv domain (if requested)
    if args.use_hsv:
        image_from[:] = rgb2hsv(image_from)
        image_to[:] = rgb2hsv(image_to)

    # get shapes
    shape_from = image_from.shape
    shape_to = image_to.shape

    # flatten
    X_from = im2mat(image_from)
    X_to = im2mat(image_to)

    # number of pixels
    n_pixels_from = X_from.shape[0]
    n_pixels_to = X_to.shape[0]

    # subsample
    X_from_ss = X_from[np.random.randint(0, n_pixels_from-1, n_pixels),:]
    X_to_ss = X_to[np.random.randint(0, n_pixels_to-1, n_pixels),:]

    if save_col_distribution:
        import matplotlib.pyplot as plt
        import seaborn as sns
        sns.set_style('white')

        fig, axes = plt.subplots(nrows=2, figsize=(5, 10))
        for ax, X in zip(axes, [X_from_ss, X_to_ss]):
            ax.scatter(X[:,0], X[:,1], color=X)
            if args.use_hsv:
                ax.set_xlabel('hue')
                ax.set_ylabel('value')
            else:
                ax.set_xlabel('red')
                ax.set_ylabel('green')
        axes[0].set_title('distr. from')
        axes[1].set_title('distr. to')
        fig.tight_layout()
        fig.savefig('color_distributions.png')

    # optimal transportation
    ot_color = OptimalTransport(X_to_ss, X_from_ss, lam=lam,
                                    distance_metric=distance_metric)

    # model transfer
    transfer_model = KNeighborsRegressor(n_neighbors=n_neighbors)
    transfer_model.fit(X_to_ss, n_pixels * ot_color.P @ X_from_ss)
    X_transfered = transfer_model.predict(X_to)

    image_transferd = minmax(mat2im(X_transfered, shape_to))
    if args.use_hsv:
        image_transferd[:] = hsv2rgb(image_transferd)
    io.imsave(name_out, image_transferd)
Example #13
 def _rotate_Scale_fired(self):
     """" _rotate_Scale_fired(self): rotates scale and re-displays image when button is pressed """
     max = 255. # this will only work with certain image types...
     hsvimage = rgb2hsv([x/max for x in self.image])
     hsvimage[:,:,1] = [np.mod(x+0.5,1) for x in hsvimage[:,:,1]]
     hsvimage = [np.uint8(x*max) for x in hsv2rgb(hsvimage)]
     self.image = hsvimage
     self.ax.imshow(hsvimage)
     self.figure.canvas.draw()
Example #14
def colorize(image, hue, saturation=1):
    """ Add color of the given hue to an RGB image.

    By default, set the saturation to 1 so that the colors pop!
    """
    hsv = color.rgb2hsv(image)
    hsv[:, :, 1] = saturation
    hsv[:, :, 0] = hue
    return color.hsv2rgb(hsv)
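An example call for colorize(), assuming scikit-image is available; tinting a grayscale image promoted to RGB makes the effect obvious:

from skimage import data, color

gray_rgb = color.gray2rgb(color.rgb2gray(data.astronaut()))
red_tinted = colorize(gray_rgb, hue=0.0)   # hue 0.0 -> red
cyan_tinted = colorize(gray_rgb, hue=0.5)  # hue 0.5 -> cyan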
Example #15
 def _rotate_Hue_fired(self):
     """" _rotate_Hue_fired(self): rotates hue and re-displays image when button is pressed """
     max = 255. 
     hsvimage = rgb2hsv([x/max for x in self.image])
     hsvimage[:,:,0] = [np.mod(x+0.5,1) for x in hsvimage[:,:,0]]
     hsvimage = [np.uint8(x*max) for x in hsv2rgb(hsvimage)]
     self.image = hsvimage
     self.ax.imshow(hsvimage)
     self.figure.canvas.draw()
Example #16
def adjust_saturation_hue(image, saturation_factor, hue_delta):
    """Adjust saturation and hue of an RGB image.
    Converts to HSV, scales the saturation channel, offsets the hue channel,
    and converts back to RGB.
    """
    img_hsv = color.rgb2hsv(image)
    # Adjust saturation and hue channels.
    img_hsv[:, :, 1] = np.clip(img_hsv[:, :, 1] * np.abs(saturation_factor), 0.0, 1.0)
    img_hsv[:, :, 0] = np.mod(img_hsv[:, :, 0] + 1. + hue_delta, 1.0)
    # Back to RGB.
    return color.hsv2rgb(img_hsv)
Example #17
def generate(image, width=4096, height=4096):
  image_width, image_height, image_bands = image.shape
  left = center(image_width, width)
  top = center(image_height, height)
  result = np.zeros((width, height, image_bands), dtype=image.dtype)
  result[left:left+image_width, top:top+image_height] = image

  result = rgb2hsv(result)
  flattened = result.view(hsv_dtype)
  flattened.shape = -1 # Throws an exception if a copy must be made
  flattened[np.argsort(flattened, order=['H', 'S', 'V'])] = get_hsv_spectrum()

  return hsv2rgb(result).view(image.dtype)
Example #18
def match(img, ref, bins):
    hsv = color.rgb2hsv(img)

    vals = hsv[:, :, 2].flatten()
    vhist, vbins = numpy.histogram(vals, bins, density=True)
    vcdf = vhist.cumsum()

    a = numpy.interp(vals, vbins[:-1], vcdf)

    mapped = ref(a).reshape(hsv[:, :, 2].shape)
    hsv[:, :, 2] = mapped
    ret_img = color.hsv2rgb(hsv)
    return ret_img
Example #19
 def call(self, image, saliency_image):
     img_resize = resize(image, saliency_image.shape)
     saliency_range = max(0.15, saliency_image.max() - saliency_image.min())
     saliency_norm = (saliency_image - saliency_image.min()) / saliency_range
     saliency_gamma = adjust_gamma(saliency_norm, gamma=self.gamma)
     cmap = matplotlib.cm.get_cmap('viridis')
     cmap_hsv = rgb2hsv(cmap(saliency_gamma)[:, :, :3])
     hsv = np.stack([
         cmap_hsv[:, :, 0],
         saliency_gamma,
         img_resize
     ], axis=-1)
     return hsv2rgb(hsv)
Example #20
def make_hsv(magnitude, angle, img=None, alpha=0.5):
    """Convert the result of ``directionality_filter`` to an HSV
    image, then convert to RGB."""
    magnitude = scale(magnitude)
    angle = scale(angle)
    h = angle
    s = magnitude
    v = np.ones(h.shape)
    hsv = hsv2rgb(np.dstack([h, s, v]))
    if img is None:
        return hsv
    img = scale(crop_to(img, angle.shape))
    result = hsv + gray2rgb(img)
    return img_as_float(result)
Example #21
File: life.py Project: cwhy/AntWorld
 def updateColorAll(self):
     _Vs = list()
     for signalType in self.signalColors.keys():
         _v = self.signal[signalType][:,:,0]
         _v[_v > 100] = 100
         _Vs.append(_v/100)
     V = sum(_Vs)
     S = np.ones(V.shape)
     S[V == 0] = 0
     H = np.ones(V.shape)
     H[V == 0] = 0.5
     V = 1 - V
     HSV = np.dstack((H, S, V))
     self.color = 255*hsv2rgb(HSV).astype(np.uint8, copy=False)
Example #22
File: HueBar.py Project: colbrydi/VideoBar
def makeVideoBar(indir='/Volumes/Documents/colbrydi/Documents/DirksWork/chamview/ChamB/'):
    sz=''
    br=''
    for root, dirs, filenames in os.walk(indir):
        filenames.sort()
        for f in filenames:
            if fnmatch.fnmatch(f,'0*.jpeg'):
                (ib, sz) = HueBar(os.path.join(root,f),sz);
                if br == '':
                    br = ib
                else:
                    br = np.append(br, ib, axis=1)
    bar = color.hsv2rgb(br)
    bar = bar * 255
    return bar
Example #23
File: wavelet4.py Project: KWMalik/tau
 def embed(self, img, payload, k):
     if len(payload) > self.max_payload:
         raise ValueError("payload too long")
     payload = bytearray(payload) + b"\x00" * (self.max_payload - len(payload))
     encoded = self.rscodec.encode(payload)
     
     if len(img.shape) == 2:
         return self._embed(img, encoded, k)
     elif len(img.shape) == 3:
         hsv = rgb2hsv(img)
         i = 0
         hsv[:,:,i] = self._embed(hsv[:,:,i], encoded, k)
         return hsv2rgb(hsv)
     else:
         raise TypeError("img must be a 2d or 3d array")
Example #24
def preprocess_img(img):
    IMG_SIZE = 32
    # Histogram normalization in v channel
    hsv = color.rgb2hsv(img)
    hsv[:, :, 2] = exposure.equalize_hist(hsv[:, :, 2])
    img = color.hsv2rgb(hsv)

    # central square crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2,
              centre[1]-min_side//2:centre[1]+min_side//2,
              :]

    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))
    return img
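A quick sanity check of the pipeline above on a bundled image, assuming the skimage modules it uses are imported as color, exposure and transform:

from skimage import data, color, exposure, transform

small = preprocess_img(data.astronaut())
print(small.shape)  # (32, 32, 3)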
Example #25
def hsv_value(image_filter, image, *args, **kwargs):
    """Return color image by applying `image_filter` on HSV-value of `image`.

    Note that this function is intended for use with `adapt_rgb`.

    Parameters
    ----------
    image_filter : function
        Function that filters a gray-scale image.
    image : array
        Input image. Note that RGBA images are treated as RGB.
    """
    # Slice the first three channels so that we remove any alpha channels.
    hsv = color.rgb2hsv(image[:, :, :3])
    value = hsv[:, :, 2].copy()
    value = image_filter(value, *args, **kwargs)
    hsv[:, :, 2] = convert(value, hsv.dtype)
    return color.hsv2rgb(hsv)
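This is essentially the handler used with scikit-image's adapt_rgb decorator. A typical use, mirroring the scikit-image docs (the Sobel filter is just an illustrative choice):

from skimage import data, filters
from skimage.color.adapt_rgb import adapt_rgb, hsv_value

@adapt_rgb(hsv_value)
def sobel_hsv(image):
    return filters.sobel(image)

edges = sobel_hsv(data.astronaut())  # Sobel applied to the V channel only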
Example #26
File: script.py Project: gyngyn1234/blog
def create_image(model, params, size):
    x_dim, y_dim = size
    X = np.concatenate(np.array(params), axis=1)

    pred = model.predict(X)

    img = []
    channels = pred.shape[1]
    for channel in range(channels):
        yp = pred[:, channel]
        yp = (yp - yp.min()) / (yp.max()-yp.min())
        img.append(yp.reshape(y_dim, x_dim))
    img = np.dstack(img)

    if channels == 3: img = hsv2rgb(img)
    img = (img * 255).astype(np.uint8)

    return img
Example #27
def masked(img, gt, mask, alpha=1):
    """Returns image with GT lung field outlined with red, predicted lung field
    filled with blue."""
    rows, cols = img.shape
    color_mask = np.zeros((rows, cols, 3))
    boundary = morphology.dilation(gt, morphology.disk(3)) - gt
    color_mask[mask == 1] = [0, 0, 1]
    color_mask[boundary == 1] = [1, 0, 0]
    img_color = np.dstack((img, img, img))

    img_hsv = color.rgb2hsv(img_color)
    color_mask_hsv = color.rgb2hsv(color_mask)

    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

    img_masked = color.hsv2rgb(img_hsv)
    return img_masked
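An illustrative call with toy masks; the disks below are purely synthetic stand-ins for real lung-field annotations, and numpy/scikit-image imports are assumed:

import numpy as np
from skimage import data, color, morphology

img = color.rgb2gray(data.astronaut())
rows, cols = img.shape
yy, xx = np.mgrid[:rows, :cols]
gt = (((yy - 250) ** 2 + (xx - 250) ** 2) < 80 ** 2).astype(int)    # "ground truth" disk
pred = (((yy - 260) ** 2 + (xx - 240) ** 2) < 85 ** 2).astype(int)  # "predicted" disk
overlay = masked(img, gt, pred)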
Example #28
def array2img(array, colorspace, classification=False):
    """ 
    INPUT:
            array: Lab layers in an array of shape=(3, image_x, image_y)
            colorspace: the colorspace that the array is in;
                        ''CIELab' for CIELab colorspace
                        'CIEL*a*b*' for the mapped CIELab colorspace (by function remap_CIELab in NNPreprocessor)
                        'RGB' for rgb mapped between [0 and 1]
                        'YCbCr' for YCbCr
                        'HSV' for HSV
    OUTPUT:
            Image
    """
    # Check if colorspace is properly defined
    assert_colorspace(colorspace)

    # Convert the image to shape=(image_x, image_y, 3)
    image = np.transpose(array,[1,2,0]).astype('float64')

    if (colorspace == 'CIEL*a*b*'):
        # Convert to CIELab:
        image = unmap_CIELab(image)
    
    if classification == True:
        # remap L layer 
        image[:,:,0] *= 1

    if ( (colorspace == 'CIELab') or (colorspace == 'CIEL*a*b*') ):
        # Convert to rgb:
        image = color.lab2rgb(image)
    
    if (colorspace == 'HSV'):
        image = color.hsv2rgb(image)

    # YCbCr is supported by the PIL Image pkg. so just change the mode that is passed
    if colorspace == 'YCbCr':
        # Convert to a PIL Image (Should be replaced by the scikit-image equivalent color.rgb2YCbCr in the future)
        im = Image.fromarray( np.uint8(image*255.), mode='YCbCr' )
        im = im.convert('RGB')
        # Put back into a numpy array
        image = np.array(im)/255.

    # Now the image is definitely in a supported colorspace
    return Image.fromarray(np.uint8(image*255.),mode='RGB')
Example #29
def circle_colony(colony_geometry, colony_score, image_shape, score_min=0.0, score_max=255.0):
    # color of circle
    hue_start, hue_stop = [0.0, 1/3.0]  # from 0 to 120 degrees hue colors
    #score_min, score_max = [1.0, 3.0]  # colony scores from 1 till 3
    color_coeff = ((hue_stop - hue_start)/(score_max - score_min))*(colony_score - score_min)
    
    # create three layers of hsv image (black background) and binary mask
    x_max = image_shape[0]
    y_max = image_shape[1]
    mask_saturation = np.array([[0.0 for i in range(y_max)] for i in range(x_max)])
    mask_light = np.array([[0.0 for i in range(y_max)] for i in range(x_max)])
    mask_hue = np.array([[0.0 for i in range(y_max)] for i in range(x_max)])
    mask_binary = np.array([[False for i in range(y_max)] for i in range(x_max)])
    
    # find circle
    x, y, r = colony_geometry
    r = r*2
    rr, cc = circle_perimeter(x, y, np.round(r).astype(int))
    rr_new, cc_new = [], []
    
    for x_c,y_c in zip(rr,cc):
        if (x_c >= 0) and (x_c < x_max) and (y_c >= 0) and (y_c < y_max):
            rr_new.append(x_c)
            cc_new.append(y_c)
    
    # create binary mask with circle
    mask_binary[rr_new, cc_new] = True
    
    struct = generate_binary_structure(2, 1)
    for i in range(8):
        mask_binary = binary_dilation(mask_binary, struct)
    
    # paint circle in hsv layers
    mask_saturation[mask_binary] = 1.0
    mask_light[mask_binary] = 0.75
    mask_hue[mask_binary] = color_coeff
    
    hsv_image = np.dstack([mask_hue, mask_saturation, mask_light])
    
    rgb_image = img_as_ubyte(hsv2rgb(hsv_image))
    
    return rgb_image, mask_binary
Example #30
def preprocess_img(img):
    # Histogram normalization in y
    hsv = color.rgb2hsv(img)
    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])
    img = color.hsv2rgb(hsv)

    # central crop
    min_side = min(img.shape[:-1])
    centre = img.shape[0]//2, img.shape[1]//2
    img = img[centre[0]-min_side//2:centre[0]+min_side//2,
              centre[1]-min_side//2:centre[1]+min_side//2,
              :]

    # rescale to standard size
    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))

    # roll color axis to axis 0
    img = np.rollaxis(img,-1)

    return img
Example #31
 def get_preprocessed_state(self):
     frame = self.get_state()
     if frame is None:
         return self.terminal
     # Blur, crop, resize
     frame = cv2.GaussianBlur(frame, (39, 39), 0, 0)
     frame = tf.image.central_crop(frame, 0.5)
     frame = tf.image.resize(
         frame,
         self.resolution,
         align_corners=True,
         method=tf.image.ResizeMethod.NEAREST_NEIGHBOR).numpy()
     # Kmeans clustering
     frame = rgb2hsv(frame)
     kmeans = KMeans(n_clusters=4).fit(frame.reshape((-1, 3)))
     frame = kmeans.cluster_centers_[kmeans.labels_].reshape(frame.shape)
     frame = hsv2rgb(frame).astype(np.float32)
     # Greyscale
     if self.num_channels == 1:
         frame = tf.image.rgb_to_grayscale(frame).numpy()
         #plt.imshow(frame.reshape((frame.shape[0], frame.shape[1])), cmap="gray")
         #plt.show()
     return frame
Example #32
def BeautifyLips(MouthImage, Choice):
    '''
    Return the mouth image after processing it according to the chosen color.

    This will be implemented after Yi Liu finishes his MATLAB code,

    which will then be ported to Python.
    '''
    hsv_image = color.rgb2hsv(MouthImage)
    ratio = 0.25
    hsv_image[:, :, 0] = (1 - ratio) * hsv_image[:, :, 0] + ratio * (
        cmap[Choice][0] - hsv_image[:, :, 0])
    hsv_image[:, :, 1] = (1 - ratio) * hsv_image[:, :, 1] + ratio * (
        cmap[Choice][1] - hsv_image[:, :, 1])
    hsv_image[:, :, 2] = (1 - ratio) * hsv_image[:, :, 2] + ratio * (
        cmap[Choice][2] - hsv_image[:, :, 2])

    Mouth = color.hsv2rgb(hsv_image)
    #print(Mouth);
    print('dalong log : after beautify')

    #cv2.imwrite('/home/yuanxl/after_beautify.jpg',255*hsv_image[:,:,::-1])
    return np.array(Mouth * 255, dtype=np.uint8)
Example #33
def RGB2HSV_shift_LAB(I,shift): # shift value0 ~1
    # Get Original L in LAB, shift H in HSV

    # Get Original LAB
    lab_original = color.rgb2lab(I)
    l_original = (lab_original[:, :, 0] / 100.0)
    
    # Shift HSV
    hsv = color.rgb2hsv(I)
    h = ((hsv[:, :, 0] + shift))
    s = (hsv[:, :, 1])
    v = (hsv[:, :, 2])
    hsv2 = color.hsv2rgb(np.dstack([h, s, v]).astype(np.float64))

    # Merge (Original LAB, Shifted HSV)
    lab = color.rgb2lab(hsv2)
    l = l_original
    #l = (lab[:, :, 0] / 100.0)
    a = (lab[:, :, 1] + 86.1830297444) / (98.2330538631 + 86.1830297444) #* 255.0         # a component ranges from -127 to 127
    b = (lab[:, :, 2] + 107.857300207) / (94.4781222765 + 107.857300207) #* 255.0         # b component ranges from -127 to 127


    return np.dstack([l, a, b])
Example #34
def get_raw_data(f, seg, saturation_factor):

    serum = quantile_normalize(read_image(f, 'serum_IgG'))
    marker = quantile_normalize(read_image(f, 'marker'))
    nuclei = quantile_normalize(read_image(f, 'nuclei'))
    bg_mask = seg == 0

    def subtract_bg(im):
        bg = np.median(im[bg_mask])
        im -= bg
        return im

    serum = subtract_bg(serum)
    marker = subtract_bg(marker)
    nuclei = subtract_bg(nuclei)

    raw = np.concatenate([marker[..., None], serum[..., None], nuclei[..., None]], axis=-1)
    if saturation_factor > 1:
        raw = skc.rgb2hsv(raw)
        raw[..., 1] *= saturation_factor
        raw = skc.hsv2rgb(raw).clip(0, 1)

    return raw, marker
Example #35
    def transform(X: np.ndarray):
        """ Randomly manipulate Hue(H), Saturation(S), Luminescence(L) of image. See change above to check how individually them impact the image 
		"""

        # pdb.set_trace()
        X = X.astype(np.float32)
        hue = np.random.uniform(-0.2, 0.6, 1).round(2)[0]
        saturation = np.random.uniform(-0.2, 0.2, 1).round(2)[0]
        luminescence = np.random.randint(-20, 90)

        # print('hue = {}, saturation= {},  luminescence = '.format(hue , saturation, luminescence))

        hsv = color.rgb2hsv(X)
        hsv[:, :, 0] += hue
        hsv[:, :, 1] += saturation
        hsv[:, :, 2] += luminescence

        hsv[:, :, 0:2] = hsv[:, :, 0:2].clip(min=0, max=1)
        hsv[:, :, 2] = hsv[:, :, 2].clip(min=60, max=250)

        X = color.hsv2rgb(hsv)
        X = X.astype(np.uint8)
        return X
Example #36
    def __call__(self, image):
        power = np.random.uniform(self.power[0], self.power[1])
        factor = np.random.uniform(self.factor[0], self.factor[1])
        addition = np.random.uniform(self.addition[0], self.addition[1])

        im_np = np.asarray(image)
        #im_np = im_np.transpose(1,0,2)    # when we go from PIL to a numpy array the W and H dimensions are swapped
        im_hsv = color.rgb2hsv(im_np)

        im_hsv_t_h = [x for x in im_hsv[:, :, 0]]
        im_hsv_t_s = [(((x**power) * factor) + addition)
                      for x in im_hsv[:, :, 1]]
        im_hsv_t_v = [(((x**power) * factor) + addition)
                      for x in im_hsv[:, :, 2]]

        im_hsv_t = np.stack((im_hsv_t_h, im_hsv_t_s, im_hsv_t_v), axis=2)
        im_rgb = color.hsv2rgb(im_hsv_t)

        # Postprocessing to avoid saturating pixels
        im_end = im_rgb - im_rgb.min()
        im_end = im_end / im_end.max()

        return Image.fromarray(np.uint8(im_end * 255))
Example #37
    def overlay_image_with_labels(self, image_data, predicted_labels):
        color_mask = np.zeros(
            (predicted_labels.shape[0], predicted_labels.shape[1], 3))
        color_mask[np.where(predicted_labels == 0)] = [128, 128, 128]
        color_mask[np.where(predicted_labels == 1)] = [128, 0, 0]
        color_mask[np.where(predicted_labels == 2)] = [128, 128, 0]
        color_mask[np.where(predicted_labels == 3)] = [0, 0, 192]
        color_mask[np.where(predicted_labels == 4)] = [128, 64, 128]
        color_mask[np.where(predicted_labels == 5)] = [64, 0, 128]

        alpha = 0.8
        # Convert the input image and color mask to Hue Saturation Value (HSV)
        # colorspace
        img_hsv = color.rgb2hsv(image_data)
        color_mask_hsv = color.rgb2hsv(color_mask)

        # Replace the hue and saturation of the original image
        # with that of the color mask
        img_hsv[..., 0] = color_mask_hsv[..., 0]
        img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

        img_masked = color.hsv2rgb(img_hsv)
        return img_masked
Example #38
def sobel_edge_detection(img, size):
    '''Performs Sobel Edge detection and returns the magnitude and direction of gradients.'''
    # Gaussian Smooth
    image = gaussian_smooth(img, size, 5)

    # Sobel Operator
    gx, gy = sobel_operator(image)

    # Magnitude
    mag = np.sqrt(gx**2 + gy**2)

    # Direction
    m, n = image.shape
    hsv = np.zeros((m, n, 3))

    hsv[:, :,
        0] = (np.arctan2(gy, gx) + np.pi) / (2 * np.pi)  # Hue: Gradient angle
    hsv[:, :, 1] = np.ones((m, n))  # Saturation: Maximum, which is 1
    hsv[:, :, 2] = (mag - mag.min()) / (mag.max() - mag.min()
                                        )  # Value: Gradient magnitude
    dir_ = color.hsv2rgb(hsv)

    return [mag, dir_]
Example #39
def hsv_decomposition(image, channel='H'):
    """Decomposes the image into HSV and only returns the channel specified.

    Args:
        image: numpy array of shape(image_height, image_width, 3).
        channel: str specifying the channel. Can be either "H", "S" or "V".

    Returns:
        out: numpy array of shape(image_height, image_width).
    """

    hsv = color.rgb2hsv(image)
    out = None

    ### YOUR CODE HERE
    lis = ['H', 'S', 'V']
    dic = {x: i for i, x in enumerate(lis)}
    mat = np.eye(3)
    onl = hsv * mat[dic[channel]]
    out = color.hsv2rgb(onl)
    ### END YOUR CODE

    return out
Example #40
def create_chromakey_image(img_read, green_low_factor, green_high_factor,
                           saturation, brightness):
    hsv = rgb2hsv(img_read)

    for pixel_row in hsv:
        for pixel_col in pixel_row:

            if green_low_factor < pixel_col[0] <= green_high_factor \
                    and pixel_col[1] > saturation \
                    and pixel_col[2] > brightness:

                pixel_col[0] = 0
                pixel_col[1] = 0
                pixel_col[2] = 0
            else:

                pixel_col[0] = 1
                pixel_col[1] = 1
                pixel_col[2] = 1

    img_ndarray = np.array(hsv)
    img_rgb = hsv2rgb(img_ndarray)

    img_gray = rgb2gray(img_rgb)
    img_gray_copy = np.copy(img_gray)

    x, y = img_gray_copy.shape

    for i in range(0, x):
        for j in range(0, y):
            pixel = img_gray_copy[i, j]
            if pixel > 0:
                img_gray_copy[i, j] = 1
            else:
                img_gray_copy[i, j] = 0

    return img_gray_copy
Example #41
def reconstruction(imagePath, outputPath):
    image = io.imread(imagePath)

    if (isGrayscale(imagePath)):
        image = gray2rgb(image)
    elif (isNotHsv(imagePath)):
        image = rgb2hsv(image)
    img_hsv = image
    img_hsv_copy = np.copy(img_hsv)

    imgSize = (image.shape)
    w = min(imgSize[0], 200)
    h = min(imgSize[1], 160)
    # flood function returns a mask of flooded pixels
    mask = flood(img_hsv[..., 0], (w - 1, h - 1), tolerance=0.016)
    # Set pixels of mask to new value for hue channel
    img_hsv[mask, 0] = 0.5
    # Post-processing in order to improve the result
    # Remove white pixels from flag, using saturation channel
    mask_postprocessed = np.logical_and(mask, img_hsv_copy[..., 1] > 0.4)
    # Remove thin structures with binary opening
    # mask_postprocessed = binary_opening(mask_postprocessed,
    #                                                np.ones((3, 3)))
    # Fill small holes with binary closing
    mask_postprocessed = binary_opening(mask_postprocessed, disk(20))
    img_hsv_copy[mask_postprocessed, 0] = 0.5
    # img_hsv_copy_uint8 = img_as_ubyte(img_hsv_copy)
    #is_hsv
    if (len(img_hsv_copy.shape) == 3 and img_hsv_copy.shape[2] == 3):
        output = hsv2rgb(img_hsv_copy)
    else:
        output = img_hsv_copy
    output_uint8 = img_as_ubyte(output)
    imsave('' + outputPath, output_uint8)
    # imsave('/home/marekk/workspace/tmp/1234.png', output_uint8)

    output_uint8
Example #42
def convert_warp_to_color(uu: np.ndarray,
                          vv: np.ndarray,
                          min_vel_mag: float = MIN_VEL_MAG,
                          max_vel_mag: float = MAX_VEL_MAG) -> np.ndarray:
    """ Convert a warp matrix into a color matrix

    :param ndarray uu:
        The x-vectors for the flow image
    :param ndarray vv:
        The y-vectors for the flow image
    :param float min_vel_mag:
        Minimum vector magnitude to plot
    :param float max_vel_mag:
        Maximum vector magnitude to plot
    :returns:
        A color image where angle corresponds to hue and magnitude corresponds
        to value on the HSV color scale
    """
    # Convert to hsv
    mag = np.sqrt(uu**2 + vv**2)
    ang = np.arctan2(vv, uu)

    hue = (ang % np.pi) / np.pi  # hue between 0-1
    sat = np.ones_like(hue)

    # If no bounds passed, clamp by percentile
    if min_vel_mag is None:
        min_vel_mag = np.percentile(mag, 5)
    if max_vel_mag is None:
        max_vel_mag = np.percentile(mag, 95)

    # Rescale magnitude to between 0 and 1
    value = (mag - min_vel_mag) / (max_vel_mag - min_vel_mag)
    value[value < 0] = 0
    value[value > 1] = 1

    return hsv2rgb(np.stack((hue, sat, value), axis=2))
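An illustrative call on a synthetic flow field; MIN_VEL_MAG and MAX_VEL_MAG are module constants in the original, so explicit bounds are passed here instead:

import numpy as np
from skimage.color import hsv2rgb

yy, xx = np.mgrid[-1:1:64j, -1:1:64j]
rgb = convert_warp_to_color(xx, yy, min_vel_mag=0.0, max_vel_mag=1.0)
print(rgb.shape)  # (64, 64, 3)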
Example #43
def deprecated_overlayImg_(img,
                           mask,
                           print_color=[5, 119, 72],
                           linewidth=1,
                           alpha=0.618):
    #img = img_as_float(data.camera())
    rows, cols = img.shape[0:2]
    # Construct a colour image to superimpose
    color_mask = np.zeros((rows, cols, 3))
    color_mask[mask == 1] = print_color
    color_mask[mask == 0] = img[mask == 0]
    # imshow(color_mask)

    if len(img.shape) == 2:
        img_color = np.dstack((img, img, img))
    else:
        img_color = img

    img_hsv = color.rgb2hsv(img_color)
    color_mask_hsv = color.rgb2hsv(color_mask)

    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

    img_masked = color.hsv2rgb(img_hsv)
    # Display the output
    #f, (ax0, ax1, ax2) = plt.subplots(1, 3,
    #                                  subplot_kw={'xticks': [], 'yticks': []})
    #ax0.imshow(img, cmap=plt.cm.gray)
    #ax1.imshow(color_mask)
    #ax2.imshow(img_masked)
    #plt.show()

    img_masked = np.asarray((img_masked / np.max(img_masked)) * 255,
                            dtype=np.uint8)

    return img_masked
Example #44
def alter_color(img, predominant_color, min_color, max_color, method=1):
    """ alters the color of the image to approximate it to the predominant_color parameter
        
        inputs:
            img: downscaled tile image.
            predominant_color: the predominant color of a given section of the canvas image (obtained using the mean).
            min_color: the minimum color value of a given section of the canvas image.
            max_color: the maximum color value of a given section of the canvas image.
            method: which color representation method to use in the color processing
                1 - RGB: applies a color balancing in the rgb model, so the tile image color moves to the desired color
                2 - HSV: use the hue and saturation of the desired color to replace these values in the tile image 
                In both methods, a normalization using the max and min intensities of the original image is required, 
                so it can accurately represent the color intensity of the original image.
    """

    out_img = np.array(img, copy=True)

    if method == 1:
        # changes the color channels proportions to the predominant color proportion
        out_img[:, :, 0] = out_img[:, :, 0] * predominant_color[0]  # R
        out_img[:, :, 1] = out_img[:, :, 1] * predominant_color[1]  # G
        out_img[:, :, 2] = out_img[:, :, 2] * predominant_color[2]  # B

    elif method == 2:
        # transforms to the HSV color representation
        out_img = rgb2hsv(out_img)
        predominant_color = rgb2hsv(np.reshape(predominant_color, (1, 1, 3)))

        out_img[:, :, 0] = (predominant_color[:, :, 0])  # alters hue
        out_img[:, :, 1] = (predominant_color[:, :, 1])  # alters saturation

        out_img = hsv2rgb(out_img)

    # returns to original color interval in rgb to accurately represent the intensity (black/white)
    out_img = normalize_image(out_img, new_min=min_color, new_max=max_color)

    return np.clip(out_img, 0, 255)
Example #45
    def gen_bra(self, image):
        pantie = np.array(image)

        # pickup colors
        front = pantie[20:100, 30:80, :3] / 255.0
        front_shade = pantie[130:150, 0:40, :3] / 255.0
        front_color = self.pick_color(front)
        front_shade_color = self.pick_color(front_shade)
        front_shade_color = rgb2hsv(front_shade_color[None, None])
        front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3
        if front_shade_color[0, 0, 1] > 0.7:
            front_shade_color[0, 0, 1] *= 0.7
        front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4
        front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)
        ribbon = pantie[24:32, 15:27, :3] / 255.0
        ribbon_color = self.pick_color(ribbon)

        # making a center texture
        center = pantie[20:170, -200:-15, :3][:, ::-1]
        center = resize(center, [2.42, 2.42])

        bra_center = np.copy(self.bra_center)
        bra_center[225:225 + center.shape[0],
                   25:25 + center.shape[1], :3] = center * np.float32(
                       bra_center[225:225 + center.shape[0],
                                  25:25 + center.shape[1], :3] > 0)
        bra = self.bra[:, :, :3] * front_color
        bra_shade = (self.bra_shade[:, :, -1])[:, :, None] * front_shade_color
        bra_component = self.bra_component[:, :, :3] * ribbon_color

        # overlaying layers
        bra = alpha_brend(bra_center[:, :, :3], bra[:, :, :3],
                          bra_center[:, :, 0] > 0.1)
        bra = alpha_brend(bra_component, bra, self.bra_component_mask)
        bra = alpha_brend(bra_shade, bra, self.bra_shade_alpha)
        bra = np.dstack((bra, self.bra[:, :, 0] > 0.8))
        return Image.fromarray(np.uint8(np.clip(bra, 0, 1) * 255))
Example #46
def quantizeImage(model, rgb_image, foreground_label_image):
    # Convert image to HSV space because cluster centers are in HSV space
    hsv_image = color.rgb2hsv(rgb_image)

    # Make a background mask
    in_foreground = foreground_label_image != 0
    foreground_pixels = hsv_image[in_foreground, :]

    # Assign each foreground pixel to the nearest cluster center
    cluster_labels = model.predict(foreground_pixels)
    quantized_hsv = model.cluster_centers_[cluster_labels]

    # Make an image displaying the segmentation
    cluster_label_image = np.zeros(foreground_label_image.shape, dtype=int)
    cluster_label_image[in_foreground] = cluster_labels + 1

    # Make a quantized image
    quantized_hsv_image = np.zeros_like(hsv_image)
    quantized_hsv_image[in_foreground] = quantized_hsv

    # Convert from HSV to RGB
    quantized_rgb_image = color.hsv2rgb(quantized_hsv_image)

    return quantized_rgb_image, cluster_label_image
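A hedged usage sketch: fit a small scikit-learn KMeans model in HSV space and quantize every pixel. The all-ones foreground mask and the cluster count are assumptions of this sketch:

import numpy as np
from sklearn.cluster import KMeans
from skimage import data, color

rgb = data.astronaut()
foreground = np.ones(rgb.shape[:2], dtype=int)  # treat every pixel as foreground
hsv_pixels = color.rgb2hsv(rgb).reshape(-1, 3)
model = KMeans(n_clusters=8, n_init=10).fit(hsv_pixels)
quantized_rgb, cluster_label_image = quantizeImage(model, rgb, foreground)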
Example #47
def overlay_grey_and_color(Xgrey, Xcolor):
    """"
    overlay a grayscale image with some color (e.g. segmentation masks)

    note that this IGNORES the magnitude of the color image, essentially it becomes a MASK
    :param Xgrey:
    :param Xcolor:
    :return:
    """
    from skimage import color, io, img_as_float
    import numpy as np
    import matplotlib.pyplot as plt

    alpha = 0.6

    img = img_as_float(Xgrey)
    rows, cols = img.shape

    color_mask = Xcolor

    # Construct RGB version of grey-level image
    img_color = np.dstack((img, img, img))

    # Convert the input image and color mask to Hue Saturation Value (HSV)
    # colorspace
    img_hsv = color.rgb2hsv(img_color)
    color_mask_hsv = color.rgb2hsv(color_mask)

    # Replace the hue and saturation of the original image
    # with that of the color mask
    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

    img_masked = color.hsv2rgb(img_hsv)

    return img_masked
Example #48
def overlay_on_gray(oimg,
                    overlayGREEN=None,
                    overlayRED=None,
                    overlayBLUE=None,
                    alpha=0.6):
    rows, cols = oimg.shape
    color_mask = np.zeros((rows, cols, 3))
    if overlayGREEN is not None:
        color_mask[:, :, 1] = overlayGREEN
    if overlayRED is not None:
        color_mask[:, :, 2] = overlayRED
    if overlayBLUE is not None:
        color_mask[:, :, 0] = overlayBLUE

    img_color = np.dstack((oimg, oimg, oimg))

    img_hsv = color.rgb2hsv(img_color)
    color_mask_hsv = color.rgb2hsv(color_mask)

    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
    img_masked = color.hsv2rgb(img_hsv)

    return img_masked, color_mask
Example #49
def match_background_hsv(foreground_img, background_img):
    """use hsv histogram matching to match the foreground contrast more closely to background

    :param foreground_img: ndimage 4 channel array
    :param background_img: ndimage
    :return: PIL RGBA image
    """

    foreground_img = as_ndarray(foreground_img)
    background_img = as_ndarray(background_img)
    n_bins = 255

    foreground_img_hsv = color.rgb2hsv(foreground_img[:, :, :3])
    background_img_hsv = color.rgb2hsv(background_img[:, :, :3])
    foreground_alpha = np.expand_dims(foreground_img[:, :, 3], 2)
    foreground_img_hsva = np.concatenate(
        (foreground_img_hsv, foreground_alpha), axis=2)

    matched = match_channels(foreground_img_hsva, background_img_hsv, n_bins)
    matched_rgb = color.hsv2rgb(matched[:, :, :3]) * 255
    matched[:, :, :3] = matched_rgb[:, :, :3]
    matched = Image.fromarray(matched.astype('uint8'))

    return matched
Example #50
def create_image(model, x, y, r, z):
    '''
    create an image for the given latent vector z 
    '''
    # create input vector
    Z = np.repeat(z, x.shape[0]).reshape((-1,x.shape[0]))
    X = np.concatenate([x, y, r, Z.T], axis=1)

    pred = model.predict(X)
    
    img = []
    for k in range(pred.shape[1]):
        yp = pred[:, k]
#        if k == pred.shape[1]-1:
#            yp = np.sin(yp)
        yp = (yp - yp.min()) / (yp.max()-yp.min())
        img.append(yp.reshape(y_dim, x_dim))
        
    img = np.dstack(img)
    if img.shape[-1] == 3:
        from skimage.color import hsv2rgb
        img = hsv2rgb(img)
        
    return (img*255).astype(np.uint8)
Example #51
    def hsv_method(img, n_clusters, n_colors):
        img = rgb2hsv(img)

        # make img array for kmeans
        X = img.reshape(-1, 3)

        n_clusters = n_clusters if n_clusters > n_colors else n_colors
        km = MiniBatchKMeans(n_clusters=n_clusters,
                             init='k-means++',
                             n_init=3,
                             random_state=0)
        labels = km.fit_predict(X)
        cluster_centers = km.cluster_centers_
        bincount = np.bincount(labels)

        # find index of data point closest to each center
        closest, _ = pairwise_distances_argmin_min(cluster_centers, X)

        # sort colors by frequency
        dominants = 1. * X[closest][np.argsort(bincount,
                                               axis=0)[::-1]][:n_colors]

        colors = [hsv2rgb([[x]])[0][0] for x in dominants]
        return colors
Example #52
def Filter_syn(I, mode, w, x, y, z, start_time):

    # Preprocessing
    this_time = time.time()
    elapsed_time = this_time - start_time

    if mode == "random":
        w = ((elapsed_time * 1000) % 1000) * (1.5) / 1000  # above 1
        if w > 1.3:
            w = 1.3
        if w < 0.4:
            w = 0.4

    # Change Saturation
    hsv = color.rgb2hsv(I)
    h = (hsv[:, :, 0] / 360.0) * 255.0
    s = (hsv[:, :, 1] / 100.0) * 255.0 * (w)
    v = (hsv[:, :, 2] / 100.0) * 255.0

    r = (h / 255.0) * 360.0
    g = (s / 255.0) * 100.0
    b = (v / 255.0) * 100.0

    rgb = color.hsv2rgb(numpy.dstack([r, g, b]).astype(numpy.float64))

    # Change Color
    rgb[:, :, 0] = rgb[:, :, 0] * x
    rgb[:, :, 1] = rgb[:, :, 1] * y
    rgb[:, :, 2] = rgb[:, :, 2] * z

    # Post-processing
    rgb[:, :, 0] = numpy.clip(rgb[:, :, 0], 0, 1)
    rgb[:, :, 1] = numpy.clip(rgb[:, :, 1], 0, 1)
    rgb[:, :, 2] = numpy.clip(rgb[:, :, 2], 0, 1)

    return rgb
Example #53
def satura(s, i):
    img = io.imread(i)
    hsv = color.rgb2hsv(img)
    rows, cols, dim = hsv.shape
    if s > 1:
        s = 1
    if s < 0:
        s = 0
    for i in range(rows):
        for j in range(cols):
            if s >= 0.5:
                hsv[i, j, 1] += (1 - hsv[i, j, 1]) * (s - 0.5) / 0.5
            else:
                hsv[i, j, 1] = hsv[i, j, 1] * s / 0.5
    new_img = color.hsv2rgb(hsv)
    plt.figure(5)
    plt.title('a' + '')
    plt.imshow(img)
    plt.axis('off')

    plt.figure(6)
    plt.title('b Change the satura of "a" to %f' % s)
    plt.imshow(new_img)
    plt.axis('off')
Example #54
 def add_class_colour(self, image, pred, label):
     # load the image and add a mask in the colour of the corresponding predicted label, with transparency alpha.
     # then resize the image to make it an appropriate plot point size and append it to the image list.
     img = img_as_float(image)[0]
     rows, cols = img.shape
     color_mask = np.zeros((rows, cols, 3))
     color_mask[0:rows, 0:cols] = Visualization.colours[pred]
     img_color = np.dstack((img, img, img))
     img_hsv = color.rgb2hsv(img_color)
     color_mask_hsv = color.rgb2hsv(color_mask)
     img_hsv[..., 0] = color_mask_hsv[..., 0]
     img_hsv[..., 1] = color_mask_hsv[..., 1] * Visualization.alpha
     img_masked = color.hsv2rgb(img_hsv)
     img_small = resize(img_masked, (40, 40),
                        anti_aliasing=True,
                        mode='constant')
     #img_small.clip(min=0, out=img_small)
     if pred != label:
         img_small[:2, :] = [0.7, 0, 0]
         img_small[38:, :] = [0.7, 0, 0]
         img_small[:, :2] = [0.7, 0, 0]
         img_small[:, 38:] = [0.7, 0, 0]
     img_small.clip(min=0, max=1, out=img_small)
     Visualization.plot_image.append(img_small)
Example #55
    def gen_components(self, image):
        pantie = np.array(image)
        # pickup colors
        front = pantie[20:100, 30:80, :3] / 255.0
        front_shade = pantie[130:150, 0:40, :3] / 255.0
        front_color = self.pick_color(front)
        front_shade_color = self.pick_color(front_shade)
        front_shade_color = rgb2hsv(front_shade_color[None, None])
        front_shade_color[0, 0, 1] *= front_shade_color[0, 0, 2] / 0.3
        if front_shade_color[0, 0, 1] > 0.7:
            front_shade_color[0, 0, 1] *= 0.7
        front_shade_color[0, 0, 2] *= front_shade_color[0, 0, 2] / 0.4
        front_shade_color = np.clip(hsv2rgb(front_shade_color)[0, 0], 0, 1)

        components = np.copy(self.components)
        components = self.components[:, :, :3] * front_color
        components_shade = (
            self.components_shade[:, :, -1])[:, :, None] * front_shade_color

        # overlaying layers
        components = alpha_brend(components_shade, components,
                                 self.components_shade[:, :, -1])
        components = np.dstack((components, self.components[:, :, -1]))
        return Image.fromarray(np.uint8(np.clip(components, 0, 1) * 255))
Example #56
def make_overlay(lac, label, void=0, saturation=0.7, blend=0.6, colors=None):
    """ Make overlay image of lac and label """
    if not isinstance(void, (list, tuple)):
        void = [void]

    rgb_colors = [hex2rgb(h) for h in colors]

    img = normalize_ndarray(lac)
    # Construct a colour image for the labels
    color_mask = np.dstack([img] * 3)
    # replace labels with color
    labelindecies = [i for i in range(np.max(label) + 1) if i not in void]
    for i in labelindecies:
        color_mask[label == i] = rgb_colors[i]

    # Convert the color mask to Hue Saturation Value (HSV)
    color_mask_hsv = color.rgb2hsv(color_mask)

    # decrease the saturation by saturation
    # blend the value with LAC
    color_mask_hsv[..., 1] *= saturation
    color_mask_hsv[..., 2] = (1.0 - blend) * img + color_mask_hsv[..., 2] * blend

    return color.hsv2rgb(color_mask_hsv)
Example #57
def flow_to_color(flow, mask=None, max_flow=None):
    """Converts flow to 3-channel color image.
    Args:
        flow: tensor of shape [num_batch, 2, height, width].
        mask: flow validity mask of shape [num_batch, 1, height, width].
    """

    n = 8
    B, _, H, W = flow.size()
    mask = torch.ones(B, 1, H, W, dtype=flow.dtype, device=flow.device) \
        if mask is None else mask

    flow_u, flow_v = torch.split(flow, 1, dim=1)

    if max_flow is not None:
        max_flow = torch.max(torch.tensor(max_flow), torch.tensor(1.0))
    else:
        max_flow = torch.max(torch.abs(flow * mask))

    mag = torch.pow(torch.sum(torch.pow(flow, 2), dim=1, keepdim=True), 0.5)
    angle = torch.atan2(flow_v, flow_u)

    im_h = torch.fmod(angle / (2 * np.pi) + 1.0, 1.0)
    im_s = torch.clamp(mag * n / max_flow, 0., 1.)
    im_v = torch.clamp(n - im_s, 0., 1.)

    im_hsv = torch.cat((im_h, im_s, im_v), dim=1)

    im_hsv = im_hsv.permute(0, 2, 3, 1)

    im_rgb = np.empty((B, H, W, 3))

    for i in range(B):
        im_rgb[i, :, :, :] = hsv2rgb(im_hsv[i, :, :, :].cpu().numpy())

    return torch.tensor(im_rgb, dtype=im_hsv.dtype).permute(0, 3, 1, 2)
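An illustrative call, assuming PyTorch; shapes follow the docstring:

import torch

flow = torch.randn(2, 2, 64, 64)  # [num_batch, 2, height, width]
rgb = flow_to_color(flow)         # [num_batch, 3, height, width], values in [0, 1]
print(rgb.shape)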
Example #58
    def preprocess_img(self, img):
        # save non processed image as local variable
        self.nonprocessed_image = img
        # Histogram normalization in y
        hsv = color.rgb2hsv(img)
        hsv[:, :, 2] = exposure.equalize_hist(
            hsv[:, :, 2]
        )  # equalize_hist(image, nbins=256, mask=None) : return image array as HSV : (Hue (degrees), saturation (%), value (%))
        img = color.hsv2rgb(hsv)

        if (self.debug):
            io.imsave("1_image_after_equalizehist.ppm", img)

        # central crop
        min_side = min(img.shape[:-1])
        centre = img.shape[0] // 2, img.shape[1] // 2
        img = img[centre[0] - min_side // 2:centre[0] + min_side // 2,
                  centre[1] - min_side // 2:centre[1] + min_side // 2, :]

        if (self.debug):
            io.imsave("2_image_after_centralscrop.ppm", img)

        # rescale to standard size
        img = transform.resize(img, (IMG_SIZE, IMG_SIZE))

        if (self.debug):
            io.imsave("3_image_after_resize.ppm", img)

        # roll color axis (RGB) to axis 0 : NEW IMAGE : (RGB, X, Y) instead of (X, Y, RGB)
        img = np.rollaxis(img, -1)
        # add one "id" axis at the beginning of the image. Not useful with only one image but required in the trained model
        img = np.expand_dims(img, axis=0)
        # save processed image in local variable
        self.processed_img = img
        return img
        pass
Example #59
def overlay_cam(img, cam, alpha, show=True, outFile=None):
    if len(cam) > 0:
        cam_heatmap = get_heatmap(cam)

        img = img.astype(float)
        img = img - np.min(img)
        img = img / np.max(img)

        # Construct RGB version of grey-level image
        img_color = np.dstack((img, img, img))

        # Convert the input image and color mask to Hue Saturation Value (HSV)
        # colorspace
        img_hsv = color.rgb2hsv(img_color)
        color_mask_hsv = color.rgb2hsv(cam_heatmap)

        # Replace the hue and saturation of the original image
        # with that of the color mask
        img_hsv[..., 0] = color_mask_hsv[..., 0]
        img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

        img_masked = color.hsv2rgb(img_hsv)

        if show:
            fig = plt.figure(figsize=(12, 16))
            ax = fig.add_subplot(222)
            plt.imshow(img_masked)
            ax.axis('off')
            ax.set_title('Grad-CAM')

            plt.show()

            if outFile:
                plt.savefig(outFile)

        return img_masked
Example #60
File: utils.py Project: bottydim/keras-vis
def get_overlayed_image(x, c, gray_factor_bg=0.3,alpha = 0.5):
    '''
    For an image x and a relevance vector c, overlay the image with the
    relevance vector to visualise the influence of the image pixels.

    From: https://github.com/lmzintgraf/DeepVis-PredDiff/blob/master/utils_visualise.py
    '''
    imDim = x.shape[0]

    if np.ndim(c) == 1:
        c = c.reshape((imDim, imDim))
    if np.ndim(x) == 2:  # this happens with the MNIST Data
        x = 1 - np.dstack((x, x, x)) * gray_factor_bg  # make it a bit grayish
    if np.ndim(x) == 3:  # this is what happens with cifar data
        x = color.rgb2gray(x)
        x = 1 - (1 - x) * 0.5
        x = np.dstack((x, x, x))



    # Construct a colour image to superimpose
    im = plt.imshow(c, cmap=cm.seismic, vmin=-np.max(np.abs(c)), vmax=np.max(np.abs(c)), interpolation='nearest')
    color_mask = im.to_rgba(c)[:, :, [0, 1, 2]]

    # Convert the input image and color mask to Hue Saturation Value (HSV) colorspace
    img_hsv = color.rgb2hsv(x)
    color_mask_hsv = color.rgb2hsv(color_mask)

    # Replace the hue and saturation of the original image
    # with that of the color mask
    img_hsv[..., 0] = color_mask_hsv[..., 0]
    img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha

    img_masked = color.hsv2rgb(img_hsv)

    return img_masked
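An example call with a toy relevance map, assuming matplotlib and scikit-image are available (the function itself relies on plt, cm and color already being imported in its module):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from skimage import color, data

x = data.astronaut()[:224, :224] / 255.0
c = np.random.randn(224, 224)  # toy relevance map
overlay = get_overlayed_image(x, c)
plt.imshow(overlay); plt.axis('off'); plt.show()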