Example #1
 def generate_color_range(self):
     # color and marker range:
     self.colorrange = []
     self.markerrange = []
     mr2 = []
     # first color range:
     cc0 = plt.cm.gist_rainbow(np.linspace(0.0, 1.0, 8))  # num must be an integer
     # shuffle it:
     for k in range((len(cc0) + 1) // 2):
         self.colorrange.extend(cc0[k::(len(cc0) + 1) // 2])
     self.markerrange.extend(len(cc0) * 'o')
     mr2.extend(len(cc0) * 'v')
     # second darker color range:
     cc1 = plt.cm.gist_rainbow(np.linspace(0.33 / 7.0, 1.0, 7))
     # darken: rgb_to_hsv expects a trailing dimension of 3, so transform only the RGB channels
     cc1[:, :3] = mc.hsv_to_rgb(mc.rgb_to_hsv(cc1[:, :3]) * np.array([1.0, 0.9, 0.7]))
     cc1[:, 3] = 1.0
     # shuffle it:
     for k in range((len(cc1) + 1) // 2):
         self.colorrange.extend(cc1[k::(len(cc1) + 1) // 2])
     self.markerrange.extend(len(cc1) * '^')
     mr2.extend(len(cc1) * '*')
     # third lighter color range:
     cc2 = plt.cm.gist_rainbow(np.linspace(0.67 / 6.0, 1.0, 6))
     # lighten: again convert only the RGB channels to HSV and back
     cc2[:, :3] = mc.hsv_to_rgb(mc.rgb_to_hsv(cc2[:, :3]) * np.array([1.0, 0.5, 1.0]))
     cc2[:, 3] = 1.0
     # shuffle it:
     for k in range((len(cc2) + 1) // 2):
         self.colorrange.extend(cc2[k::(len(cc2) + 1) // 2])
     self.markerrange.extend(len(cc2) * 'D')
     mr2.extend(len(cc2) * 'x')
     self.markerrange.extend(mr2)
Example #2
def test_rgb_hsv_round_trip():
    for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
        np.random.seed(0)
        tt = np.random.random(a_shape)
        assert_array_almost_equal(tt,
            mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
        assert_array_almost_equal(tt,
            mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
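A minimal check of the same round-trip property on a single (3,) triple (the values below are arbitrary, not taken from the test suite):

import numpy as np
import matplotlib.colors as mcolors

rgb = np.array([0.2, 0.4, 0.6])
hsv = mcolors.rgb_to_hsv(rgb)
print(np.allclose(rgb, mcolors.hsv_to_rgb(hsv)))  # expected: True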
Example #3
    def image(self, img):
        """Colorize an image. Input can either be a single image or a stack of images.
        In either case, the first dimension must be the quantity to be used for colorizing.

        Parameters
        ----------
        img : array
            The image to colorize. Must be of shape (c, x, y, z) or (c, x, y), where
            c is the dimension containing the information for colorizing.

        Returns
        -------
        out : array
            Color assignments for images, either (x, y, z, 3) or (x, y, 3)
        """

        d = shape(img)
        self.checkargs(d[0])

        if img.ndim > 4 or img.ndim < 3:
            raise Exception("image data must have 3 or 4 dimensions, first is for coloring, remainder are xy(z)")

        if (self.totype == 'rgb') or (self.totype == 'hsv'):
            out = abs(img) * self.scale
            if img.ndim == 4:
                out = transpose(out, (1, 2, 3, 0))
            if img.ndim == 3:
                out = transpose(out, (1, 2, 0))

        elif self.totype == 'polar':
            theta = ((arctan2(-img[0], -img[1]) + pi/2) % (pi*2)) / (2 * pi)
            rho = sqrt(img[0]**2 + img[1]**2)
            if img.ndim == 4:
                saturation = ones((d[1],d[2]))
                out = zeros((d[1], d[2], d[3], 3))
                for i in range(0, d[3]):
                    out[:, :, i, :] = colors.hsv_to_rgb(dstack((theta[:, :, i], saturation, self.scale*rho[:, :, i])))
            if img.ndim == 3:
                saturation = ones((d[1], d[2]))
                out = colors.hsv_to_rgb(dstack((theta, saturation, self.scale*rho)))

        else:
            out = cm.get_cmap(self.totype, 256)(img[0] * self.scale)
            if img.ndim == 4:
                out = out[:, :, :, 0:3]
            if img.ndim == 3:
                out = out[:, :, 0:3]

        return clip(out, 0, 1)
Example #4
def random_cmap(ncolors=256, random_state=None):
    """
    Generate a matplotlib colormap consisting of random (muted) colors.

    A random colormap is very useful for plotting segmentation images.

    Parameters
    ----------
    ncolors : int, optional
        The number of colors in the colormap.  The default is 256.

    random_state : int or `~numpy.random.RandomState`, optional
        The pseudo-random number generator state used for random
        sampling.  Separate function calls with the same
        ``random_state`` will generate the same colormap.

    Returns
    -------
    cmap : `matplotlib.colors.Colormap`
        The matplotlib colormap with random colors.
    """

    from matplotlib import colors

    prng = check_random_state(random_state)
    h = prng.uniform(low=0.0, high=1.0, size=ncolors)
    s = prng.uniform(low=0.2, high=0.7, size=ncolors)
    v = prng.uniform(low=0.5, high=1.0, size=ncolors)
    hsv = np.dstack((h, s, v))
    rgb = np.squeeze(colors.hsv_to_rgb(hsv))

    return colors.ListedColormap(rgb)
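A possible usage sketch for random_cmap, assuming the function above (and its check_random_state helper) is importable; the segmentation array here is synthetic:

import numpy as np
import matplotlib.pyplot as plt

labels = np.random.randint(0, 10, size=(64, 64))   # hypothetical segmentation labels
cmap = random_cmap(ncolors=10, random_state=12345)
plt.imshow(labels, cmap=cmap, interpolation='nearest')
plt.colorbar()
plt.show()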
Example #5
def chord_idx_to_colors(idx, hue_offset=0, max_idx=156,
                        no_chord_idx=156, x_chord_idx=-1):
    """Transform a chord class index to an (R, G, B) color.

    Parameters
    ----------
    idx : array_like
        Chord class index.
    hue_offset : int, default=0
        Offset added to the index before it is mapped to a hue.
    max_idx : int, default=156
        Maximum index, for color scaling purposes.
    no_chord_idx : int, default=156
        Index of the no-chord class.
    x_chord_idx : int, default=-1
        Index of the X-chord class (ignored).

    Returns
    -------
    colors : np.ndarray, shape=(1, len(idx), 3)
        Matrix of color values.
    """
    hue = ((idx + hue_offset) % 12) / 12.0
    value = 0.9 - 0.7*((idx.astype(int) // 12) / (max_idx / 12.0))  # floor division: which block of 12 the index falls in
    hsv = np.array([hue, (hue*0) + 0.6, value]).T

    hsv[idx == no_chord_idx, :] = np.array([0, 0.8, 0.0])
    hsv[idx == x_chord_idx, :] = np.array([0.0, 0.0, 0.5])
    return hsv_to_rgb(hsv.reshape(1, -1, 3))
Example #6
def compute_color_map():
    """Compute a default QM colormap which can be used as mayavi/vtk lookup table.
    """
    k = linspace(-pi, pi, 256, endpoint=True)
    hsv_colors = ones((1, k.shape[0], 3))
    hsv_colors[:, :, 0] = 0.5 * fmod(k + 2 * pi, 2 * pi) / pi
    return 255 * squeeze(hsv_to_rgb(hsv_colors))
Example #7
def draw_3d(verts, ymin, ymax, line_at_zero=True, colors=True):
    '''Given verts as a list of plots, each plot being a list
       of (x, y) vertices, generate a 3-d figure where each plot
       is shown as a translucent polygon.
       If line_at_zero, a line will be drawn through the zero point
       of each plot, otherwise the baseline will be at the bottom of
       the plot regardless of where the zero line is.
    '''
    # add_collection3d() wants a collection of closed polygons;
    # each polygon needs a base and won't generate it automatically.
    # So for each subplot, add a base at ymin.
    if line_at_zero:
        zeroline = 0
    else:
        zeroline = ymin
    for p in verts:
        p.insert(0, (p[0][0], zeroline))
        p.append((p[-1][0], zeroline))

    if colors:
        # All the matplotlib color sampling examples I can find,
        # like cm.rainbow/linspace, make adjacent colors similar,
        # the exact opposite of what most people would want.
        # So cycle hue manually.
        hue = 0
        huejump = .27
        facecolors = []
        edgecolors = []
        for v in verts:
            hue = (hue + huejump) % 1
            c = mcolors.hsv_to_rgb([hue, 1, 1])
                                    # random.uniform(.8, 1),
                                    # random.uniform(.7, 1)])
            edgecolors.append(c)
            # Make the facecolor translucent:
            facecolors.append(mcolors.to_rgba(c, alpha=.7))
    else:
        facecolors = (1, 1, 1, .8)
        edgecolors = (0, 0, 1, 1)

    poly = PolyCollection(verts,
                          facecolors=facecolors, edgecolors=edgecolors)

    zs = range(len(data))
    # zs = range(len(data)-1, -1, -1)

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1, projection='3d')

    plt.tight_layout(pad=2.0, w_pad=10.0, h_pad=3.0)

    ax.add_collection3d(poly, zs=zs, zdir='y')

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')

    ax.set_xlim3d(0, len(data[1]))
    ax.set_ylim3d(-1, len(data))
    ax.set_zlim3d(ymin, ymax)
Example #8
 def color_from_internal_spec(self, feat, label, hue, sat, val):
     geology_code = feat.GetField("LEGEND_ID")
     geology_hue = feat.GetField(hue)/256
     geology_sat = 0.25*feat.GetField(sat)/256
     geology_val = feat.GetField(val)/256
     mapcolor = colors.hsv_to_rgb([geology_hue, geology_sat, geology_val])
     return mapcolor
Example #9
 def color_from_external_spec(self, feat):
     field_name = self.config['map']['field_name']
     field_value = feat.GetField(field_name)
     hsv = self.config['map'][field_value].split(",")
     hsv = [float(num) for num in hsv]
     mapcolor = colors.hsv_to_rgb(hsv) 
     return mapcolor
Example #10
def plot_clusters(ax, x, y, labels=None):
    ax.scatter(x, y, s=50,
               c='b' if labels is None else [hsv_to_rgb((l/(max(labels)+1), 1, 0.9)) for l in labels])
    ax.set_xticks([])
    ax.set_yticks([])
    ax.set_xlim(-3, 10)
    ax.set_ylim(-3, 10)
Example #11
def hsv_hist(filename):
    hsv = pl.hist_hsv(filename)

    fig, ax = plt.subplots(4, 1, figsize=(10, 5))

    types = ['hue', 'saturation', 'value']
    cl = ['r', 'g', 'b']

    V, H = np.mgrid[0.45:0.55:10j, 0:1:300j]
    S = np.ones_like(V)
    HSV = np.dstack((H, S, V))
    RGB = hsv_to_rgb(HSV)

    ax[0].imshow(RGB, origin="lower", extent=[0, 360, 0, 1], aspect=10)
    ax[0].xaxis.set_major_locator(plt.NullLocator())
    ax[0].yaxis.set_major_locator(plt.NullLocator())

    idx_array = np.arange(0, 256, 1)

    for i, t in enumerate(types):
        ax[i+1].fill_between(idx_array, 0, hsv[i, :], color=cl[i], label=t)
        ax[i+1].set_xlim(0, 255)
        ax[i+1].legend()

    plt.show()
Example #12
def plot(data, scatter=False, sample_axis=False, hsv=None, **args):
    data = np.array(data)
    if len(data.shape) > 1:
        dimensions = data.shape[1]
    else:
        dimensions = 1
        sample_axis = True
    if sample_axis:
        data = np.column_stack((range(0, len(data)), data))
        dimensions += 1        
    if dimensions == 3:
        _3d()
    f = ax.plot if not scatter else ax.scatter
    if hsv is not None:
        args['c'] = colors.hsv_to_rgb(hsv[:3])
        if len(hsv) == 4:
            args['alpha'] = hsv[3]
    if 'label' in args:
        global legend
        legend = True
        args['label'] = args['label'].upper()
    if dimensions == 2:
        f(data[:,0], data[:,1], **args)
    if dimensions == 3:
        f(data[:,0], data[:,1], data[:,2], **args)
Example #13
def shadow_filter(image, dpi):
    """This filter creates a metallic look on patches.

    image : the image of the patch
    dpi   : the resolution of the patch"""
    # Get the shape of the image
    nx, ny, depth = image.shape
    # Create a mesh grid
    xx, yy = np.mgrid[0:nx, 0:ny]
    # Draw a circular "shadow"
    circle = (xx + nx * 4) ** 2 + (yy + ny) ** 2
    # Normalize
    circle -= circle.min()
    circle = circle / circle.max()
    # Steepness
    value = circle.clip(0.3, 0.6) + 0.4
    saturation = 1 - circle.clip(0.7, 0.8)
    # Normalize
    saturation -= saturation.min() - 0.1
    saturation = saturation / saturation.max()
    # Convert the rgb part (without alpha) to hsv
    hsv = mc.rgb_to_hsv(image[:, :, :3])
    # Multiply the value of hsv image with the shadow
    hsv[:, :, 2] = hsv[:, :, 2] * value
    # Highlights with saturation
    hsv[:, :, 1] = hsv[:, :, 1] * saturation
    # Copy the hsv back into the image (we haven't touched alpha)
    image[:, :, :3] = mc.hsv_to_rgb(hsv)
    # the return values are: new_image, offset_x, offset_y
    return image, 0, 0
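A filter with this (image, dpi) -> (image, dx, dy) signature can be attached to an artist through matplotlib's Artist.set_agg_filter; a small sketch, assuming shadow_filter above is in scope (the circle and figure are illustrative, and an Agg-based backend is required for agg filters to take effect):

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
patch = plt.Circle((0.5, 0.5), 0.3, color='tab:blue')
patch.set_agg_filter(shadow_filter)   # apply the metallic/shadow look at draw time
ax.add_patch(patch)
ax.set_aspect('equal')
plt.show()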
Example #14
    def plot_candidates(self):
        """Plot a representation of candidate periodicity

        Size gives the periodicity strength, 
        color the order of preference
        """

        fig, ax = pl.subplots(2, sharex=True)

        hues = np.arange(self.ncand)/float(self.ncand)
        hsv = np.swapaxes(np.atleast_3d([[hues, np.ones(len(hues)),
                                          np.ones(len(hues))]]), 1, 2)
        cols = hsv_to_rgb(hsv).squeeze()

        for per in self.periods:
            nc = len(per.cand_period)

            ax[0].scatter(per.time*np.ones(nc), per.cand_period,
                          s=per.cand_strength*100,
                          c=cols[0:nc], alpha=.5)

        ax[0].plot(*zip(*[[per.time, float(per.get_preferred_period())]
                        for per in self.periods]), color='k')

        ax[1].plot(self.get_times(), self.get_strength())
Example #15
def plotDataPoints(X, idx, K):
    V = H = np.linspace(0, 1, K).reshape((-1, 1))
    S = np.ones_like(V)
    HSV = np.hstack((H, S, V))
    RGB = hsv_to_rgb(HSV)
    colors = np.array([RGB[int(i[0])] for i in idx])
    scatter(X[:, 0], X[:, 1], s=np.pi * 5 ** 2, alpha=0.1, c=colors)
Example #16
def flow_visualize(flow, mode='Y'):
    if mode == 'Y':
        # YCbCr color wheel
        img = fl.flow_to_image(flow)
        plt.imshow(img)
        plt.show()
    elif mode == 'RGB':
        (h, w) = flow.shape[0:2]
        du = flow[:, :, 0]
        dv = flow[:, :, 1]
        valid = flow[:, :, 2]
        max_flow = max(np.max(du), np.max(dv))
        img = np.zeros((h, w, 3), dtype=np.float64)
        # angle layer
        img[:, :, 0] = np.arctan2(dv, du) / (2 * np.pi)
        # magnitude layer, normalized to 1
        img[:, :, 1] = np.sqrt(du * du + dv * dv) * 8 / max_flow
        # phase layer
        img[:, :, 2] = 8 - img[:, :, 1]
        # clip to [0,1]
        small_idx = img[:, :, 0:3] < 0
        large_idx = img[:, :, 0:3] > 1
        img[small_idx] = 0
        img[large_idx] = 1
        # convert to rgb
        img = cl.hsv_to_rgb(img)
        # remove invalid point
        img[:, :, 0] = img[:, :, 0] * valid
        img[:, :, 1] = img[:, :, 1] * valid
        img[:, :, 2] = img[:, :, 2] * valid
        # show
        plt.imshow(img)
        plt.show()

    return
Example #17
def imgcolor(arr, normal=True,
             # spectrum=True,
             BW=False,
             alpha=False,
             color=[1, 0, 0]
             ):
    acopy = array(arr, dtype=float)  # float copy so the in-place normalisation below also works for integer input
    if normal:
        acopy -= np.min(acopy)
        acopy /= np.max(acopy)
    

        
    if BW:
        cell = array([1,1,1])
        a3d = array(acopy[:,:,newaxis] * cell)
        return a3d
    elif alpha:
        cell = array([0,0,0,1])
        a4d = array(acopy[:,:,newaxis] * cell)
        cell2 = concatenate((color,[0]))
        a4d = a4d + cell2
        return a4d
    else:
        #Assume spectrum....
        a3d = array([[array([j,1,1]) for j in i] for i in acopy])
        rgb = mplcolors.hsv_to_rgb(a3d)
        return rgb
Example #18
def plot_data_points(X, centroid_indices, num_centroids):
    """ Plots input data with colors according to current cluster assignments.

    Args:
      X: Matrix of data features.
      centroid_indices: Vector where each entry contains index of closest 
                        centroid to corresponding example.
      num_centroids: Number of centroids.

    Returns:
      None.

    Raises:
      An error occurs if the number of data examples is 0.
    """
    num_data = X.shape[0]
    if num_data == 0:
        raise ValueError('num_data == 0')
    palette = numpy.zeros((num_centroids+1, 3))
    for centroid_idx in range(0, num_centroids+1):
        hsv_h = centroid_idx/(num_centroids+1)
        hsv_s = 1
        hsv_v = 1
        palette[centroid_idx, :] = colors.hsv_to_rgb(numpy.r_[hsv_h, hsv_s,
                                                              hsv_v])
    curr_colors = numpy.zeros((num_data, 3))
    for data_idx in range(0, num_data):
        curr_centroid_idx = centroid_indices[data_idx].astype(int)
        curr_colors[curr_centroid_idx, 0] = palette[curr_centroid_idx, 0]
        curr_colors[curr_centroid_idx, 1] = palette[curr_centroid_idx, 1]
        curr_colors[curr_centroid_idx, 2] = palette[curr_centroid_idx, 2]
        pyplot.scatter(X[data_idx, 0], X[data_idx, 1], s=80, marker='o',
                       facecolors='none',
                       edgecolors=curr_colors[curr_centroid_idx, :])
    return None
Example #19
def colormap(x, col0=None, col1=None):
    """Colorize a 2D grayscale array.
    
    Arguments: 
      * x:an NxM array with values in [0,1].
      * col0=None: a tuple (H, S, V) corresponding to color 0. By default, a
        rainbow color gradient is used.
      * col1=None: a tuple (H, S, V) corresponding to color 1.
    
    Returns:
      * y: an NxMx3 array with a rainbow color palette.
    
    """
    x = np.clip(x, 0., 1.)
    
    shape = x.shape
    
    if col0 is None:
        col0 = (.67, .91, .65)
    if col1 is None:
        col1 = (0., 1., 1.)
    
    col0 = np.array(col0).reshape((1, 1, -1))
    col1 = np.array(col1).reshape((1, 1, -1))
    
    col0 = np.tile(col0, x.shape + (1,))
    col1 = np.tile(col1, x.shape + (1,))
    
    x = np.tile(x.reshape(shape + (1,)), (1, 1, 3))
    
    return hsv_to_rgb(col0 + (col1 - col0) * x)
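A usage sketch for colormap above on a synthetic horizontal ramp (the array shape and figure are illustrative; hsv_to_rgb is assumed to come from matplotlib.colors as in the other examples):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb

x = np.linspace(0., 1., 256)[None, :] * np.ones((32, 1))   # 32x256 ramp in [0, 1]
plt.imshow(colormap(x), aspect='auto')
plt.axis('off')
plt.show()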
Example #20
File: _color.py Project: ablot/phy
def _random_color(h_range=(0., 1.),
                  s_range=(.5, 1.),
                  v_range=(.5, 1.),
                  ):
    """Generate a random RGB color."""
    h, s, v = uniform(*h_range), uniform(*s_range), uniform(*v_range)
    r, g, b = hsv_to_rgb(np.array([h, s, v])).flat
    return r, g, b
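A quick sketch of calling _random_color, assuming the module-level uniform is random.uniform and hsv_to_rgb is matplotlib's (as the function body suggests):

import random

random.seed(0)                 # only pins the result if uniform is random.uniform
r, g, b = _random_color()
print(r, g, b)                 # three floats in [0, 1]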
Example #21
def N_colors(N, Srange=(.5, 1), Vrange=(.5, 1)):
    """returns N unique rgb colors for plotting,
    chosen via maximal hue distance in HSV space"""
    H = np.linspace(0, 1-1./N, N)
    S = np.random.rand(N)*(Srange[1]-Srange[0]) + Srange[0]
    V = np.random.rand(N)*(Vrange[1]-Vrange[0]) + Vrange[0]
    HSV = np.dstack((H,S,V))
    return hsv_to_rgb(HSV)
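A usage sketch; note that np.dstack gives the return value shape (1, N, 3), so the leading axis is dropped before passing the rows as scatter colours (the data here is illustrative):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb

cols = N_colors(6)[0]          # (6, 3) array of RGB rows
x = np.arange(6)
plt.scatter(x, x, c=cols, s=80)
plt.show()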
Example #22
def hsv_to_rgb_tuple(hsv_tuple):
    """
    Convert 3 tuple that represents a HSV color 
    to a 3 tuple in RGB color space (values between 0..1).
    If you have an array of color values use: ``matplotlib.colors.hsv_to_rgb``.
    """ 
    colarr = hsv_to_rgb(np.array([[hsv_tuple]]))
    return tuple(colarr[0, 0, :])
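For instance (a minimal sketch, assuming the helper above is in scope; the input values are arbitrary):

print(hsv_to_rgb_tuple((0.0, 1.0, 1.0)))    # pure red   -> (1.0, 0.0, 0.0)
print(hsv_to_rgb_tuple((1/3, 1.0, 0.5)))    # dark green -> (0.0, 0.5, 0.0)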
Example #23
def angle2color(angle):
    """
    Convert angle in degree [0:360] (float) to color in RGB.
    """
    from matplotlib import colors
    sat = 1.0
    val = 0.8
    hue = angle/360
    return colors.hsv_to_rgb((hue,sat,val))
def hsv(n=63):
    """
    adapted from Octave's (NOT matlab's) hsv(N) function
      which is equivalent to matlab's hsv2rgb([(0:N-1)'/N, ones(N,2)])
      from: http://octave.sourceforge.net/octave/function/hsv.html
            http://stackoverflow.com/q/23945764/583834
    """

    # return colors.hsv_to_rgb( np.column_stack([ np.array(range(n+1)).T / float(n), np.ones( ((n+1), 2) ) ]) )
    return colors.hsv_to_rgb( np.column_stack([ np.linspace(0, 1, n+1)            , np.ones( ((n+1), 2) ) ]) )
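A small sketch using hsv() as a discrete colour cycle, assuming the same numpy / matplotlib.colors imports the function above relies on (the line count is arbitrary):

import numpy as np
import matplotlib.pyplot as plt

palette = hsv(5)               # (6, 3) array: fully saturated hues spanning [0, 1]
for k, c in enumerate(palette):
    plt.plot(np.arange(10), np.arange(10) + k, color=c)
plt.show()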
Example #25
	def get_colour(self):

		hsv_colors = numpy.empty((1, 1, 3))

		hsv_colors[:, :, 0] = self.get_normalized_weight()
		hsv_colors[:, :, 1] = 1.0
		hsv_colors[:, :, 2] = 0.75

		(rgb_colors,) = hsv_to_rgb(hsv_colors)

		return rgb_colors[0]
Example #26
File: _color.py Project: ablot/phy
def _apply_color_masks(color, masks=None, alpha=None):
    alpha = alpha or .5
    hsv = rgb_to_hsv(color[:, :3])
    # Change the saturation and value as a function of the mask.
    if masks is not None:
        hsv[:, 1] *= masks
        hsv[:, 2] *= .5 * (1. + masks)
    color = hsv_to_rgb(hsv)
    n = color.shape[0]
    color = np.c_[color, alpha * np.ones((n, 1))]
    return color
Example #27
 def create_brightness_colormap(self,principal_rgb_color, scale_size):
     '''
     Create brightness colormap based on one principal RGB color
     '''
     if np.any(principal_rgb_color > 1):
         raise Exception('principal_rgb_color values should  be in range [0,1]')
     hsv_color = colors.rgb_to_hsv(principal_rgb_color)
     hsv_colormap = np.concatenate((np.tile(hsv_color[:-1][None, :], (scale_size, 1))[:],
                                    np.linspace(0, 1, scale_size)[:, None]),
                                   axis=1)
     self.colormap=self.array2cmap(colors.hsv_to_rgb(hsv_colormap))
Example #28
def _domain_map(z, satu, mapType=0):
    """domain color the array `z`, with the mapping
    type `mapType`, using saturation `satu`. Currently
    there is only one domain coloring type
    """
    h = _hue(z)
    s = satu * _np.ones_like(h, dtype=float)  # use the builtin float dtype; the np.float alias no longer exists
    v = _absolute_map(_np.absolute(z))
    hsv_map = _np.dstack((h, s, v))
    rgb_map = _mplc.hsv_to_rgb(hsv_map)
    return rgb_map
Example #29
def get_color_scheme(base_color, num=4, spread=1.):
    """ Distributes num colors around the color wheel starting with a base
    color and converting the fraction `spread` of the circle """
    base_rgb = mclr.colorConverter.to_rgb(base_color)
    base_rgb = np.reshape(np.array(base_rgb), (1, 1, 3))
    base_hsv = mclr.rgb_to_hsv(base_rgb)[0, 0]
    res_hsv = np.array([[
        ((base_hsv[0] + dh) % 1., base_hsv[1], base_hsv[2])
        for dh in np.linspace(-0.5*spread, 0.5*spread, num, endpoint=False)
    ]])
    return mclr.hsv_to_rgb(res_hsv)[0]
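A usage sketch for get_color_scheme, assuming the module imports matplotlib.colors as mclr as the function does; the base colour and the plot are arbitrary:

import matplotlib.pyplot as plt

scheme = get_color_scheme('tab:blue', num=4, spread=1.)
for k, c in enumerate(scheme):            # scheme has shape (4, 3)
    plt.axvspan(k, k + 1, color=c)
plt.show()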
def desaturate(image):
    """
    Desaturate, or Greyscale a color image
    :param image: 3D ndarray
    :return: 2D array
    """
    greyscale = colors.rgb_to_hsv(image)
    greyscale[:, :, 1] = 0  # Desaturate the image
    greyscale = colors.hsv_to_rgb(greyscale)
    greyscale = greyscale[:, :, 0]  # 3D array to 2D
    return greyscale
Example #31
def get_random_data(annotation_line,
                    input_shape,
                    random=True,
                    max_boxes=20,
                    jitter=.3,
                    hue=.1,
                    sat=1.5,
                    val=1.5,
                    proc_img=True):
    '''random preprocessing for real-time data augmentation
    '''
    """
    将annotation_line按空格分割为line列表;
    使用PIL读取图片image;
    图片的宽和高,iw和ih;
    输入尺寸的高和宽,h和w;
    图片中的标注框box,box是5维,4个点和1个类别;
    """
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    box = np.array(
        [np.array(list(map(int, box.split(',')))) for box in line[1:]])

    if not random:
        """
        如果是非随机,即if not random
        将图片等比例转换为416x416的图片,其余用灰色填充,即(128, 128, 128),
        同时颜色值转换为0~1之间,即每个颜色值除以255;
        将边界框box等比例缩小,再加上填充的偏移量dx和dy,
        因为新的图片部分用灰色填充,影响box的坐标系,box最多有max_boxes个,即20个。
        """
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # resize image
    """
    如果是随机:
    通过jitter参数,随机计算new_ar和scale,生成新的nh和nw,
    将原始图像随机转换为nw和nh尺寸的图像,即非等比例变换图像
    """
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(
        1 - jitter, 1 + jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    # paste the transformed image into a 416x416 canvas, filling the rest with grey
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    # randomly flip the image left-right (FLIP_LEFT_RIGHT) depending on the flip flag
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    """
    在HSV坐标域中,改变图片的颜色范围,hue值相加,sat和vat相乘,
    先由RGB转为HSV,再由HSV转为RGB,添加若干错误判断,避免范围过大。
    """
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    """
    将所有的图片变换,增加至检测框中,
    并且包含若干异常处理,避免变换之后的值过大或过小,去除异常的box
    """
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box
    """
    返回图像数据image_data和边框数据box_data。
    box的4个值是(xmin, ymin, xmax, ymax),
    第5位不变,是标注框的类别,如0~n
    """
    return image_data, box_data
Example #32
def data_visualisation( nb_clust, mask_type, dct_patch, IMG_SIZE, LOD, img_id, terminaison='', namefolder='./',suffixe='', nClassest=2 ):


    # print('Total time in seconds:', interval)

    term_score = {}
    class_color = 0
    fig, ax = plt.subplots()

    mask_type = load_annotations("./annoWeird.csv", "./anno.csv", img_id, nb_clust, ax)

    pred = []
    real = []
    classesorder = []
    for key, ptch in dct_patch.items():

        try:
            colour = ptch.colour
            if colour != -1 :
                sh = shapely.geometry.Polygon(
                    [(ptch.column * ptch.size * LOD, IMG_SIZE[1] * LOD - ptch.row * ptch.size * LOD),
                     (ptch.column * ptch.size * LOD + ptch.size * LOD, IMG_SIZE[1] * LOD - ptch.row * ptch.size * LOD),
                     (ptch.column * ptch.size * LOD + ptch.size * LOD,
                      IMG_SIZE[1] * LOD - ptch.row * ptch.size * LOD + ptch.size * LOD),
                     (ptch.column * ptch.size * LOD, IMG_SIZE[1] * LOD - ptch.row * ptch.size * LOD + ptch.size * LOD)])
                for i, j in mask_type.items():
                    if j["WKT"].contains(sh):  # j["WKT"].intersects(sh): #
                        if j["Term"] not in term_score:
                            term_score[j["Term"]] = {}
                            term_score[j["Term"]]["Predicted"] = []
                            term_score[j["Term"]]["Patches"] = []
                            term_score[j["Term"]]["Real"] = class_color
                            classesorder.append(j["Term"])
                            class_color += 1
                        ax.add_patch(PolygonPatch(sh, color=hsv_to_rgb( [colour/ nb_clust, 1, 1])))
                        j["Clust"][colour].append(colour)
                        term_score[j["Term"]]["Predicted"].append(colour)
                        term_score[j["Term"]]["Patches"].append(key)
                        pred.append(colour)
                        real.append(term_score[j["Term"]]["Real"])
        except KeyError:
            pass

    # just show the
    fig.set_size_inches(30, 15)
    plt.title("coloration des zones déjà annotées" + terminaison)
    plt.savefig(namefolder + "coloration_zones_annotées_" + terminaison + suffixe + ".png", format="png")
    # ax.cla()

    print("Pour", nb_clust, "clusters \nARI=", adjusted_mutual_info_score(real, pred), "\nNMI=",
          normalized_mutual_info_score(real, pred), "\nhomogenitiy=", )
    predt=[]
    rt=[]
    for i in range(len(pred)):
        if real[i] == term_score["tumor"]["Real"]:
            predt.append(pred[i])
            rt.append(term_score["tumor"]["Real"])
    #True positive False_Positive for tumor


    maxs=[]

    for i in range(nb_clust):
        if i in pred:
            q = predt.count(i) / pred.count(i)
            if q > 0.5:
                maxs.append(i)

    fsc, sens, spec = 0, 0, 0
    if maxs:
        cptf=0
        cptt=0
        for i in maxs:
            cptt += predt.count(i)
            cptf += pred.count(i)
        print(maxs)
        print(cptt)
        print(len(predt))
        vp = cptt
        fp = cptf - cptt
        vn = len(pred) - len(predt) - cptf + cptt
        fn = len(predt) - cptt

        spec = vn / (vn + fp)
        sens = vp / (vp + fn)
        fsc =vp/(vp+(fp+fn)/2)

        print("vrai positif:", vp)
        print("faux positif:", fp )
        print("vrai négatif:", vn )
        print("faux négatif:",fn)
        print("sensibilité:", sens)
        print("spécificité:", spec)
        print("fscore:", fsc)





    classesorder ={}
    for k, i in term_score.items():
        classesorder[len(i["Predicted"])] = k
        print(k, len(i["Predicted"]))




    fig, ax = plt.subplots()
    fig.set_size_inches(20, 20)
    confusion_matrix( pred, real, class_color, nb_clust, ax, list(classesorder.values()))

    plt.title("Matrice de confusion" + terminaison)
    plt.savefig(namefolder + "Matrice_confusion_" + terminaison +suffixe+ ".png", format="png")
    ax.cla()

    nb_square = 100
    a = [(j["Term"], j) for i, j in mask_type.items()]

    rms = []
    for i, j in a:
        rm = True
        for b in j["Clust"]:
            if b:
                rm = False
                break
        if rm:
            rms.append((i, j))

    for r in rms:
        a.remove(r)
    a = sample(a, k=min(nb_square, len(a)))
    a = sorted(a, key=lambda x: x[0])

    nb_lines = 5

    x = 1
    fig, ax = plt.subplots()
    for i, j in a:
        ax = plt.subplot(nb_lines, nb_square // nb_lines, x)
        ax.set_title(j["Term"])
        draw_square_xy(j["Clust"], ax)
        ax.axis([0, 10, 0, 10])
        ax.axis("off")
        x += 1
    fig.set_size_inches(30, 15)
    plt.savefig(namefolder + "coloration_carrés_annotées" + terminaison + suffixe+ ".png", format="png")
    plt.close('all')
    return adjusted_mutual_info_score(real, pred), fsc
Example #33
import numpy as np
from matplotlib.colors import hsv_to_rgb

# colors
Ivory = hsv_to_rgb((47 / 360, 0.06, 1.00))
Red = hsv_to_rgb((14 / 360, 1.00, 0.85))
LightGreyGreen = hsv_to_rgb((150 / 360, 0.24, 0.66))
DarkerGreen = hsv_to_rgb((170 / 360, 0.84, 0.38))
DarkGreyBlue = hsv_to_rgb((193 / 360, 0.87, 0.21))


# system of ODEs and their solution
def f(t, z):
    """
    RHS of equation dz/dt = f(t,z)
    """
    # this makes an array of length 2, each element of which is zero
    dzdt = np.zeros(2)

    dzdt[0] = 2.0 * np.pi * z[1]
    dzdt[1] = -2.0 * np.pi * z[0]
    return dzdt


def sol(t):
    # this allows us to generate solution if x is a scalar
    # could probably do it better with some kind of spread function
    t = np.atleast_1d(t)
    z = np.zeros(shape=(2, t.size))
    z[0] = np.sin(2.0 * np.pi * t)
    z[1] = np.cos(2.0 * np.pi * t)
    return z
Example #34
File: HSSA.py Project: HSIYJND/HSSA
    def png(self, title=False, labels=False, segments=False):
        # Join FLR-s
        union = self.heterogenous + self.homogenous

        # Generate title if not provided.
        if not title:
            title = 'hssa_i%i_t%.0f.png' % (self.iteration,
                                            1000 * self.threshold)

        # Establish base resolution on an iterated power of 2.
        base = pow(2, self.iteration)
        img = np.ones((base, base, 3))

        if len(union):
            # Scale intensivity according to values in FLR-s.
            minN = min(union, key=attrgetter('intensity')).intensity
            maxN = max(union, key=attrgetter('intensity')).intensity

            # Iterate every frame
            for frame in union:
                amount = pow(2, frame.fold)
                length = base / amount
                intensity = (frame.intensity - minN) / (maxN - minN)
                hue = 0
                if frame.isHomo:
                    if labels:
                        hue = frame.label / float(self.hs.maxlabel)
                    else:
                        hue = frame.segment / 17.
                        if self.segments > 17:
                            hue = frame.segment / float(self.segments)
                x = frame.location % amount
                y = frame.location / amount
                for i in xrange(length):
                    for j in xrange(length):
                        if frame.isHomo:
                            # Homogeneous, labelled
                            if labels:
                                img[length * x + i, length * y + j] = \
                                    colors.hsv_to_rgb([hue, .75, .75])
                            # Homogeneous, unlabelled
                            else:
                                if segments:
                                    # Homogeneous, with segments
                                    img[length * x + i, length * y + j] = \
                                        colors.hsv_to_rgb([hue, 1, .5 + intensity / 4])

                                else:
                                    # Homogeneous, plain
                                    img[length * x + i, length * y + j] = \
                                        colors.hsv_to_rgb([0, 0, .1 + intensity / 4])
                        else:
                            # Heterogeneous
                            img[length * x + i, length * y + j] = \
                                colors.hsv_to_rgb([0, 0, .75 + intensity/4])

        # Plot
        plt.imshow(img, interpolation="nearest")
        plt.axis('off')
        #'''
        plt.title(
            '%s image, iteration %i, t = %.3f\n%i homo / %i hetero / %i segments'
            % (self.hs.name, self.iteration, self.threshold,
               len(self.homogenous), len(self.heterogenous), self.segments))
        #'''
        plt.savefig(title)
Example #35
    def get_random_data(self,
                        annotation_line,
                        input_shape,
                        jitter=.1,
                        hue=.1,
                        sat=1.1,
                        val=1.1):
        '''Random preprocessing for real-time data augmentation'''
        line = annotation_line.split()
        image = Image.open(line[0])
        iw, ih = image.size
        h, w = input_shape
        box = np.array(
            [np.array(list(map(int, box.split(',')))) for box in line[1:]])

        # resize image
        new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(
            1 - jitter, 1 + jitter)
        scale = rand(.25, 2)
        if new_ar < 1:
            nh = int(scale * h)
            nw = int(nh * new_ar)
        else:
            nw = int(scale * w)
            nh = int(nw / new_ar)
        image = image.resize((nw, nh), Image.BICUBIC)

        # place image
        dx = int(rand(0, w - nw))
        dy = int(rand(0, h - nh))
        new_image = Image.new('RGB', (w, h), (128, 128, 128))
        new_image.paste(image, (dx, dy))
        image = new_image

        # flip image or not
        flip = rand() < .5
        if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

        # distort image
        hue = rand(-hue, hue)
        sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
        val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
        x = rgb_to_hsv(np.array(image) / 255.)
        x[..., 0] += hue
        x[..., 0][x[..., 0] > 1] -= 1
        x[..., 0][x[..., 0] < 0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x > 1] = 1
        x[x < 0] = 0
        image_data = hsv_to_rgb(x) * 255  # numpy array, 0 to 255

        # correct boxes
        box_data = np.zeros((len(box), 5))
        if len(box) > 0:
            np.random.shuffle(box)
            box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
            box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
            if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
            box[:, 0:2][box[:, 0:2] < 0] = 0
            box[:, 2][box[:, 2] > w] = w
            box[:, 3][box[:, 3] > h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w > 1,
                                     box_h > 1)]  # discard invalid box
            box_data = np.zeros((len(box), 5))
            box_data[:len(box)] = box
        if len(box) == 0:
            return image_data, []

        if (box_data[:, :4] > 0).any():
            return image_data, box_data
        else:
            return image_data, []
Example #36
def hsv2rgb(cols):
    cols = cols[:,np.newaxis]
    cols = np.concatenate((cols, np.ones_like(cols), np.ones_like(cols)), axis=-1)
    cols = (255 * hsv_to_rgb(cols)).astype(np.uint8)
    return cols
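A sketch of feeding hsv2rgb a 1-D array of hues (values arbitrary); the function above returns one fully saturated uint8 RGB row per hue:

import numpy as np

hues = np.linspace(0.0, 0.8, 5)
print(hsv2rgb(hues))           # (5, 3) uint8 array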
Example #37
def banas_hsv_cm(a,b,c,d,N=100):
	norm = Normalize(vmin=a,vmax=d,clip=False)
	cdict = {'red':[],'green':[],'blue':[]}
	if N >= 100:
		n = N
	else:
		n = 100
	aa = norm(a) # 0.0
	bb = norm(b)
	cc = norm(c)
	yy = 0.5*(bb+cc) # yellow is half way between blue and red
	dd = norm(d) # 1.0
	center_value = 0.87
	end_value = 0.65
	tail_end_value = 0.3
	
	blue_hue = 0.55
	yellow_hue = 1./6.
	red_hue = 0.04
	green_hue = 1./3.
	
	gg = ((green_hue - blue_hue)/(yellow_hue - blue_hue))*(yy-bb) + bb
	green_desaturation_width = 0.67
	green_desaturation_amount = 0.5
	
	ii = np.linspace(0.,1.,n)
	hue = np.zeros(ii.shape)
	sat = np.ones(ii.shape)
	val = np.zeros(ii.shape)
	hsv = np.zeros((1,n,3))
	
	val_scaler = -(center_value - end_value)/((cc-yy)*(cc-yy))
	hue_scaler = -(blue_hue - yellow_hue)/((yy-bb)*(yy-bb))
	
	for i in range(len(ii)):
		if ii[i] < bb: # if true then aa is less than bb
			#hue[i] = blue_hue
			hsv[0,i,0] = blue_hue
			#val[i] = tail_end_value*(1 - (ii[i]-aa)/(bb-aa) ) + end_value*( (ii[i]-aa)/(bb-aa) )
			hsv[0,i,2] = tail_end_value*(1 - (ii[i]-aa)/(bb-aa) ) + end_value*( (ii[i]-aa)/(bb-aa) )
		elif ii[i] <= yy:
			#hsv[0,i,0] = blue_hue*(1 - (ii[i]-bb)/(yy-bb) ) + yellow_hue*( (ii[i]-bb)/(yy-bb) )
			hsv[0,i,0] = hue_scaler*(ii[i] -2*bb + yy)*(ii[i] - yy)+yellow_hue
			hsv[0,i,2] = end_value*(1 - (ii[i]-bb)/(yy-bb) ) + center_value*( (ii[i]-bb)/(yy-bb) )
		elif ii[i] <= cc:
			hsv[0,i,0] = yellow_hue*(1 - (ii[i]-yy)/(cc-yy) ) + red_hue*( (ii[i]-yy)/(cc-yy) )
			#hsv[0,i,2] = center_value*(1 - (ii[i]-yy)/(cc-yy) ) + end_value*( (ii[i]-yy)/(cc-yy) )
			hsv[0,i,2] = val_scaler*(ii[i] -2*yy + cc)*(ii[i] - cc)+end_value
		elif ii[i] <= dd:
			hsv[0,i,0] = red_hue
			hsv[0,i,2] = end_value*(1 - (ii[i]-cc)/(dd-cc) ) + tail_end_value*( (ii[i]-cc)/(dd-cc) )
		hsv[0,i,1] = 1.0 - green_desaturation_amount * np.exp(-np.power(3.0*(ii[i]-gg)/((cc-bb)*green_desaturation_width),2.0))
	

#	plt.plot(np.linspace(a,d,n),hsv[0,:,0],'r',np.linspace(a,d,n),hsv[0,:,1],'g',np.linspace(a,d,n),hsv[0,:,2],'b')
#	plt.show()
	
	
	rgb = hsv_to_rgb(hsv)
	cdict['red'].append((0.,0.,rgb[0,0,0]))
	cdict['green'].append((0.,0.,rgb[0,0,1]))
	cdict['blue'].append((0.,0.,rgb[0,0,2]))
	
	for j in range(len(ii)-2):
		i = j+1
		cdict['red'].append((ii[i],rgb[0,i,0],rgb[0,i+1,0]))
		cdict['green'].append((ii[i],rgb[0,i,1],rgb[0,i+1,1]))
		cdict['blue'].append((ii[i],rgb[0,i,2],rgb[0,i+1,2]))

	cdict['red'].append((1.0,rgb[0,-1,0],rgb[0,-1,0]))
	cdict['green'].append((1.0,rgb[0,-1,1],rgb[0,-1,1]))
	cdict['blue'].append((1.0,rgb[0,-1,2],rgb[0,-1,2]))
	
	return LinearSegmentedColormap('banas_cm',cdict,N=N)
Example #38
def get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5,
                    proc_img=True):
    """random preprocessing for real-time data augmentation"""
    line = annotation_line.split()
    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])

    if not random:
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box)] = box

        return image_data, box_data

    # resize image
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)
    scale = rand(.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip: box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box)] = box

    return image_data, box_data

def RiemannSphere(z):
    #mapping to Riemann Sphere via stereographic projection
    t = 1 + z.real**2 + z.imag**2
    return 2 * z.real / t, 2 * z.imag / t, 2 / t - 1


def Mobius(z):
    #distort the result image by a mobius transformation
    return (z - 20) / (3 * z + 1j)


x, y = np.ogrid[-5:5:800j, -5:5:800j]
z = x + y * 1j
z = RiemannSphere(Klein(Mobius(Klein(z))))

H = np.sin(z[0] * np.pi)**2
S = np.cos(z[1] * np.pi)**2
V = abs(np.sin(z[2] * np.pi) * np.cos(z[2] * np.pi))**0.2

HSV = np.dstack((H, S, V))
RGBImage = hsv_to_rgb(HSV)

fig = plt.figure(figsize=(4, 4))
ax = fig.add_axes([0, 0, 1, 1], aspect=1)
ax.axis('off')
plt.imshow(RGBImage)
#plt.show()
plt.savefig('Icosa_Symmetry.png')
Example #40
def get_random_data(annotation_line,
                    input_shape,
                    random=True,
                    max_boxes=80,
                    jitter=.1,
                    hue=.1,
                    sat=1.5,
                    val=1.5,
                    proc_img=True):
    '''random preprocessing for real-time data augmentation'''
    #print(annotation_line)
    line = annotation_line
    #line = annotation_line.split()

    #for element in range(1, len(line)):
    #    # TODO: add a check for the case where the file contains more vertices than MAX_VERTICES
    #    for symbol in range(line[element].count(',') - 4, MAX_VERTICES * 2, 2):
    #        line[element] = line[element] + ',0,0'

    image = Image.open(line[0])
    iw, ih = image.size
    h, w = input_shape
    # split each line of the label file first into boxes, then into the individual box values by comma
    box = np.array(
        [np.array(list(map(float, box.split(',')))) for box in line[1:]])

    if not random:
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new('RGB', (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.

        # correct boxes
        #box_data = np.zeros((max_boxes, 5))
        box_data = np.zeros((max_boxes, 5 + NUM_ANGLES3))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes: box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[:len(box), 0:5] = box[:, 0:5]

            for b in range(0, len(box)):
                for i in range(5, MAX_VERTICES * 2, 2):
                    if box[b, i] == 0 and box[b, i + 1] == 0:
                        continue
                    box[b, i] = box[b, i] * scale + dx
                    box[b, i + 1] = box[b, i + 1] * scale + dy

            box_data[:, 5:NUM_ANGLES3 + 5] = 0  # zero out the distance/angle/confidence columns
            #for i in range(5, NUM_ANGLES3 + 5, 3):
            #    box_data[:, i] = 0  # distance
            #    box_data[:, i + 1] = 0  # angle
            #    box_data[:, i + 2] = 0  # confidence

            for i in range(0, len(box)):
                boxes_xy = (box[i, 0:2] + box[i, 2:4]) // 2
                boxes_wh = (box[i, 2:4] - box[i, 0:2])

                for ver in range(
                        5, MAX_VERTICES * 2, 2
                ):  # step of 2 here, because we are processing data from the annotation line, not yet expanded
                    if box[i, ver] == 0 and box[i, ver + 1] == 0:
                        break
                    dist_x = boxes_xy[0] - box[
                        i,
                        ver]  # x distance of the polygon vertex from the box centre (absolute)
                    dist_y = boxes_xy[1] - box[
                        i, ver +
                        1]  # y distance of the polygon vertex from the box centre (absolute)
                    dist = np.sqrt(np.power(dist_x, 2) +
                                   np.power(dist_y, 2))  # the distance
                    if (dist < 1): dist = 1

                    angle = np.degrees(np.arctan2(dist_y, dist_x))
                    if (angle < 0): angle += 360
                    iangle = int(angle) // ANGLE_STEP
                    relative_angle = (angle -
                                      (iangle * int(ANGLE_STEP))) / ANGLE_STEP

                    if dist > box_data[
                            i, 5 + iangle *
                            3]:  # if an entry already exists, keep the more distant vertex
                        box_data[i, 5 + iangle * 3] = dist
                        box_data[i, 5 + iangle * 3 + 1] = relative_angle
                        box_data[i, 5 + iangle * 3 + 2] = 1
        return image_data, box_data

    # resize image
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(
        1 - jitter, 1 + jitter)
    #new_ar = 1.0
    scale = rand(.6, 1.8)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    nwiw = nw / iw
    nhih = nh / ih

    # place image
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new('RGB', (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < .5
    if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < .5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # correct boxes
    box_data = np.zeros((max_boxes, 5 + NUM_ANGLES3))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nwiw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nhih + dy
        if flip: box[:, [0, 2]] = (w - 1) - box[:, [2, 0]]

        for b in range(0, len(box)):
            for i in range(5, MAX_VERTICES * 2, 2):
                if box[b, i] == 0 and box[b, i + 1] == 0:
                    continue
                box[b, i] = np.clip(box[b, i] * nwiw + dx, 0, w - 1)
                box[b, i + 1] = np.clip(box[b, i + 1] * nhih + dy, 0, h - 1)
                if flip:
                    box[b, i] = (w - 1) - box[b, i]

        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] >= w] = w - 1
        box[:, 3][box[:, 3] >= h] = h - 1
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes: box = box[:max_boxes]
        box_data[:len(box), 0:5] = box[:, 0:5]

        box_data[:, 5:NUM_ANGLES3 + 5] = 0  # zero out the distance/angle/confidence columns

        for i in range(0, len(box)):
            boxes_xy = (box[i, 0:2] + box[i, 2:4]) // 2

            for ver in range(
                    5, MAX_VERTICES * 2, 2
            ):  # step of 2 here, because we are processing data from the annotation line, not yet expanded
                if box[i, ver] == 0 and box[i, ver + 1] == 0:
                    break
                dist_x = boxes_xy[0] - box[
                    i,
                    ver]  # x distance of the polygon vertex from the box centre (absolute)
                dist_y = boxes_xy[1] - box[
                    i, ver +
                    1]  # y distance of the polygon vertex from the box centre (absolute)
                dist = np.sqrt(np.power(dist_x, 2) +
                               np.power(dist_y, 2))  # the distance
                if (dist < 1): dist = 1

                angle = np.degrees(np.arctan2(dist_y, dist_x))
                if (angle < 0): angle += 360
                iangle = int(angle) // ANGLE_STEP
                if iangle == NUM_ANGLES: iangle = 0

                if dist > box_data[
                        i, 5 + iangle *
                        3]:  # if an entry already exists, keep the more distant vertex
                    box_data[i, 5 + iangle * 3] = dist
                    box_data[i, 5 + iangle * 3 +
                             1] = (angle - (iangle * int(ANGLE_STEP))
                                   ) / ANGLE_STEP  #relative angle
                    box_data[i, 5 + iangle * 3 + 2] = 1

    return image_data, box_data
Example #41
def get_random_data(annotation_line, input_shape, random=True, hue=.1, sat=1.5, val=1.5, proc_img=True):
    '''random preprocessing for real-time data augmentation'''
    h, w = input_shape
    min_offset_x = 0.4
    min_offset_y = 0.4
    scale_low = 1-min(min_offset_x,min_offset_y)
    scale_high = scale_low+0.2

    image_datas = [] 
    box_datas = []
    index = 0

    place_x = [0,0,int(w*min_offset_x),int(w*min_offset_x)]
    place_y = [0, int(h*min_offset_y), int(h*min_offset_y), 0]  # y offsets use the image height h
    for line in annotation_line:
        # split each line
        line_content = line.split()
        # open the image
        image = Image.open(line_content[0])
        image = image.convert("RGB") 
        # image size
        iw, ih = image.size
        # save the box positions
        box = np.array([np.array(list(map(int,box.split(',')))) for box in line_content[1:]])
        
        # image.save(str(index)+".jpg")
        # whether to flip the image
        flip = rand()<.5
        if flip and len(box)>0:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            box[:, [0,2]] = iw - box[:, [2,0]]

        # resize the input image
        new_ar = w/h
        scale = rand(scale_low, scale_high)
        if new_ar < 1:
            nh = int(scale*h)
            nw = int(nh*new_ar)
        else:
            nw = int(scale*w)
            nh = int(nw/new_ar)
        image = image.resize((nw,nh), Image.BICUBIC)

        # HSV colour distortion
        hue = rand(-hue, hue)
        sat = rand(1, sat) if rand()<.5 else 1/rand(1, sat)
        val = rand(1, val) if rand()<.5 else 1/rand(1, val)
        x = rgb_to_hsv(np.array(image)/255.)
        x[..., 0] += hue
        x[..., 0][x[..., 0]>1] -= 1
        x[..., 0][x[..., 0]<0] += 1
        x[..., 1] *= sat
        x[..., 2] *= val
        x[x>1] = 1
        x[x<0] = 0
        image = hsv_to_rgb(x)

        image = Image.fromarray((image*255).astype(np.uint8))
        # place the image at the position of the corresponding mosaic quadrant
        dx = place_x[index]
        dy = place_y[index]
        new_image = Image.new('RGB', (w,h), (128,128,128))
        new_image.paste(image, (dx, dy))
        image_data = np.array(new_image)/255

        # Image.fromarray((image_data*255).astype(np.uint8)).save(str(index)+"distort.jpg")
        
        index = index + 1
        box_data = []
        # re-process the boxes
        if len(box)>0:
            np.random.shuffle(box)
            box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
            box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
            box[:, 0:2][box[:, 0:2]<0] = 0
            box[:, 2][box[:, 2]>w] = w
            box[:, 3][box[:, 3]>h] = h
            box_w = box[:, 2] - box[:, 0]
            box_h = box[:, 3] - box[:, 1]
            box = box[np.logical_and(box_w>1, box_h>1)]
            box_data = np.zeros((len(box),5))
            box_data[:len(box)] = box
        
        image_datas.append(image_data)
        box_datas.append(box_data)

        img = Image.fromarray((image_data*255).astype(np.uint8))
        for j in range(len(box_data)):
            thickness = 3
            left, top, right, bottom  = box_data[j][0:4]
            draw = ImageDraw.Draw(img)
            for i in range(thickness):
                draw.rectangle([left + i, top + i, right - i, bottom - i],outline=(255,255,255))
        img.show()

    
    # cut the four images and stitch them together
    cutx = np.random.randint(int(w*min_offset_x), int(w*(1 - min_offset_x)))
    cuty = np.random.randint(int(h*min_offset_y), int(h*(1 - min_offset_y)))

    new_image = np.zeros([h,w,3])
    new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
    new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
    new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
    new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]

    # further processing of the merged boxes
    new_boxes = merge_bboxes(box_datas, cutx, cuty)

    return new_image, new_boxes
Example #42
def get_mask_plot_colors(nr_colors):
    """Get nr_colors uniformly spaced hues to plot mask values."""
    hsv_colors = np.ones((nr_colors, 3), dtype=np.float32)
    hsv_colors[:, 0] = np.linspace(0, 1, nr_colors, endpoint=False)
    color_conv = hsv_to_rgb(hsv_colors)
    return color_conv
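A usage sketch, assuming hsv_to_rgb is imported from matplotlib.colors as in the other examples; the number of masks and the plotted curves are arbitrary:

import numpy as np
import matplotlib.pyplot as plt

palette = get_mask_plot_colors(4)          # (4, 3) array, one RGB row per mask
for k, c in enumerate(palette):
    plt.plot(np.arange(10), (k + 1) * np.arange(10), color=c, label='mask %d' % k)
plt.legend()
plt.show()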
Example #43
def bootstrap2(filein, fileout, subsampling_factor, angles, low, recon,
               recon_args, n_resamps=None, sdn=None, viz=None):
    """
    plots thrice the average of bootstrapped errors in reconstruction

    Plots and saves thrice the average of the differences between
    a reconstruction of the original image in filein and n_resamps bootstrap
    reconstructions via recon applied to the k-space subsamplings specified
    by angles fed into radialines and by other masks generated similarly
    (retaining each radial "line" with probability subsampling_factor, then
    adding all frequencies between -low to low in both directions), corrupting
    the k-space values with independent and identically distributed centered
    complex Gaussian noise whose standard deviation is sdn*sqrt(2)
    (sdn=0 if not provided explicitly).
    Setting viz to be True yields colorized visualizations, too, including
    the error estimates overlaid over the reconstruction, the error estimates
    blurred overlaid over the reconstruction, the error estimates blurred,
    the error estimates subtracted from the reconstruction, the error estimates
    saturating the reconstruction in hue-saturation-value (HSV) color space,
    and the error estimates interpolating the reconstruction in HSV space.

    The calling sequence of recon must be  (m, n, f, mask, **recon_args),
    where filein contains an m x n image, f is the image in k-space subsampled
    to the mask, mask is the return from calls to radialines (with angles),
    supplemented by all frequencies between -low and low in both directions, and
    **recon_args is the unpacking of recon_args. The function recon must return
    a torch.Tensor (the reconstruction) and a float (the corresponding loss).

    Parameters
    ----------
    filein : str
        path to the file containing the image to be processed (the path may be
        relative or absolute)
    fileout : str
        path to the file to which the plots will be saved (the path may be
        relative or absolute)
    subsampling_factor : float
        probability of retaining a radial "line" in the subsampling masks
    angles : list of float
        angles of the radial "lines" in the mask that radialines will construct
    low : int
        bandwidth of low frequencies included in mask (between -low and low
        in both the horizontal and vertical directions)
    recon : function
        returns the reconstructed image
    recon_args : dict
        keyword arguments for recon
    n_resamps : int, optional
        number of bootstrap resampled reconstructions (defaults to 100)
    sdn : float, optional
        standard deviation of the noise to add (defaults to 0)
    viz : bool, optional
        indicator of whether to generate colorized visualizations
        (defaults to False)

    Returns
    -------
    float
        loss for the reconstruction using the original angles
    list of float
        losses for the reconstructions using other, randomly generated masks
    float
        square root of the sum of the squares of the estimated errors
    float
        square root of the sum of the squares of the blurred estimated errors
    """
    # Set default parameters.
    if n_resamps is None:
        n_resamps = 100
    if sdn is None:
        sdn = 0
    if viz is None:
        viz = False
    # Read the image from disk.
    with Image.open(filein) as img:
        f_orig = np.array(img).astype(np.float64) / 255.
    m = f_orig.shape[0]
    n = f_orig.shape[1]
    # Fourier transform the image.
    ff_orig = np.fft.fft2(f_orig) / np.sqrt(m * n)
    # Add noise.
    ff_noisy = ff_orig.copy()
    ff_noisy += sdn * (np.random.randn(m, n) + 1j * np.random.randn(m, n))
    # Select which frequencies to retain.
    mask = radialines.randradialines(m, n, angles)
    # Include all low frequencies.
    for km in range(low):
        for kn in range(low):
            mask[km, kn] = True
            mask[m - 1 - km, kn] = True
            mask[km, n - 1 - kn] = True
            mask[m - 1 - km, n - 1 - kn] = True
    # Subsample the noisy Fourier transform of the original image.
    f = ctorch.from_numpy(ff_noisy[mask]).cuda()
    logging.info(
        'computing bootstrap2 resamplings -- all {}'.format(n_resamps))
    # Perform the reconstruction using the mask.
    reconf, lossf = recon(m, n, f, mask, **recon_args)
    reconf = reconf.cpu().numpy()
    # Fourier transform the reconstruction.
    freconf = np.fft.fft2(reconf) / np.sqrt(m * n)
    # Perform the reconstruction resampling new masks and samples in k-space.
    recons = np.ndarray((n_resamps, m, n))
    loss = []
    for k in range(n_resamps):
        # Select which frequencies to retain.
        angles1 = np.random.uniform(
            low=0, high=(2 * np.pi),
            size=round(2 * (m + n) * subsampling_factor))
        mask1 = radialines.randradialines(m, n, angles1)
        # Include all low frequencies.
        for km in range(low):
            for kn in range(low):
                mask1[km, kn] = True
                mask1[m - 1 - km, kn] = True
                mask1[km, n - 1 - kn] = True
                mask1[m - 1 - km, n - 1 - kn] = True
        # Subsample the Fourier transform of the reconstruction.
        f1 = ctorch.from_numpy(freconf[mask1]).cuda()
        # Reconstruct the image from the subsampled data.
        recon1, loss1 = recon(m, n, f1, mask1, **recon_args)
        recon1 = recon1.cpu().numpy()
        # Record the results.
        recons[k, :, :] = recon1
        loss.append(loss1)
    # Calculate the sum of the bootstrap differences.
    sumboo = np.sum(recons - reconf, axis=0)
    scaled = sumboo * 3 / n_resamps
    # Blur the error estimates.
    sigma = 1
    blurred = skimage.filters.gaussian(scaled, sigma=sigma)
    rsse_estimated = np.linalg.norm(scaled, ord='fro')
    rsse_blurred = np.linalg.norm(blurred, ord='fro')

    # Plot errors.
    # Remove the ticks and spines on the axes.
    matplotlib.rcParams['xtick.top'] = False
    matplotlib.rcParams['xtick.bottom'] = False
    matplotlib.rcParams['ytick.left'] = False
    matplotlib.rcParams['ytick.right'] = False
    matplotlib.rcParams['xtick.labeltop'] = False
    matplotlib.rcParams['xtick.labelbottom'] = False
    matplotlib.rcParams['ytick.labelleft'] = False
    matplotlib.rcParams['ytick.labelright'] = False
    matplotlib.rcParams['axes.spines.top'] = False
    matplotlib.rcParams['axes.spines.bottom'] = False
    matplotlib.rcParams['axes.spines.left'] = False
    matplotlib.rcParams['axes.spines.right'] = False
    # Configure the colormaps.
    kwargs01 = dict(cmap='gray',
                    norm=matplotlib.colors.Normalize(vmin=0, vmax=1))
    kwargs11 = dict(cmap='gray',
                    norm=matplotlib.colors.Normalize(vmin=-1, vmax=1))
    # Separate the suffix (filetype) from the rest of the filename.
    suffix = '.' + fileout.split('.')[-1]
    rest = fileout[:-len(suffix)]
    assert fileout == rest + suffix
    # Plot the original.
    plt.figure(figsize=(5.5, 5.5))
    plt.title('Original')
    plt.imshow(f_orig, **kwargs01)
    plt.savefig(rest + '_original' + suffix, bbox_inches='tight')
    # Plot the reconstruction from the original mask provided.
    plt.figure(figsize=(5.5, 5.5))
    plt.title('Reconstruction')
    plt.imshow(reconf, **kwargs01)
    plt.savefig(rest + '_recon' + suffix, bbox_inches='tight')
    # Plot the difference from the original.
    plt.figure(figsize=(5.5, 5.5))
    plt.title('Error of Reconstruction')
    plt.imshow(reconf - f_orig, **kwargs11)
    plt.savefig(rest + '_error' + suffix, bbox_inches='tight')
    # Plot thrice the average of the bootstrap differences.
    plt.figure(figsize=(5.5, 5.5))
    plt.title('Bootstrap')
    plt.imshow(scaled, **kwargs11)
    plt.savefig(rest + '_bootstrap' + suffix, bbox_inches='tight')

    if viz:
        # Plot the reconstruction minus the bootstrap difference.
        plt.figure(figsize=(5.5, 5.5))
        plt.title('Reconstruction \u2013 Bootstrap')
        plt.imshow(reconf - scaled, **kwargs01)
        plt.savefig(rest + '_corrected' + suffix, bbox_inches='tight')
        # Overlay the error estimates on the reconstruction.
        plt.figure(figsize=(5.5, 5.5))
        threshold = np.abs(scaled).flatten()
        threshold = np.sort(threshold)
        maxthresh = threshold[-1]
        threshold = threshold[round(0.99 * threshold.size)]
        hue = 2. / 3 + (scaled / maxthresh) / 4 * 2 / 3
        saturation = np.abs(scaled) > threshold
        value = reconf * (1 - saturation) + saturation
        hsv = np.dstack((hue, saturation, value))
        rgb = hsv_to_rgb(hsv)
        plt.title('Errors Over a Threshold Overlaid')
        plt.imshow(rgb)
        plt.savefig(rest + '_overlaid' + suffix, bbox_inches='tight')
        # Overlay the blurred error estimates on the reconstruction.
        plt.figure(figsize=(5.5, 5.5))
        threshold = np.abs(blurred).flatten()
        threshold = np.sort(threshold)
        maxthresh = threshold[-1]
        threshold = threshold[round(0.99 * threshold.size)]
        hue = 2. / 3 + (blurred / maxthresh) / 4 * 2 / 3
        saturation = np.abs(blurred) > threshold
        value = reconf * (1 - saturation) + saturation
        hsv = np.dstack((hue, saturation, value))
        rgb = hsv_to_rgb(hsv)
        plt.title('Blurred Errors Over a Threshold Overlaid')
        plt.imshow(rgb)
        plt.savefig(rest + '_blurred_overlaid' + suffix, bbox_inches='tight')
        # Plot a bootstrap-saturated reconstruction.
        plt.figure(figsize=(5.5, 5.5))
        hue = (1 - np.sign(scaled)) / 4 * 2 / 3
        saturation = np.abs(scaled)
        saturation = saturation / np.max(saturation)
        value = np.clip(reconf, 0, 1)
        hsv = np.dstack((hue, saturation, value))
        rgb = hsv_to_rgb(hsv)
        plt.title('Bootstrap-Saturated Reconstruction')
        plt.imshow(rgb)
        plt.savefig(rest + '_saturated' + suffix, bbox_inches='tight')
        # Plot a bootstrap-interpolated reconstruction.
        plt.figure(figsize=(5.5, 5.5))
        hue = 7. / 12 + np.sign(scaled) * 3 / 12
        saturation = np.abs(scaled)
        saturation = saturation / np.max(saturation)
        value = np.clip(reconf, 0, 1)
        hsv = np.dstack((hue, saturation, value))
        rgb = hsv_to_rgb(hsv)
        plt.title('Bootstrap-Interpolated Reconstruction')
        plt.imshow(rgb)
        plt.savefig(rest + '_interpolated' + suffix, bbox_inches='tight')
        # Plot the blurred bootstrap.
        plt.figure(figsize=(5.5, 5.5))
        plt.title('Blurred Bootstrap')
        plt.imshow(blurred, **kwargs11)
        plt.savefig(rest + '_blurred' + suffix, bbox_inches='tight')

    return lossf, loss, rsse_estimated, rsse_blurred
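
A hedged sketch of how bootstrap2 might be invoked; my_recon and its lam keyword are hypothetical placeholders for the user-supplied reconstruction routine described in the docstring, and the file names are illustrative.

angles = list(np.random.uniform(0, 2 * np.pi, size=60))
lossf, losses, rsse, rsse_blur = bootstrap2(
    'image.png', 'errors.png', subsampling_factor=0.25, angles=angles,
    low=8, recon=my_recon, recon_args={'lam': 1e-3},
    n_resamps=50, sdn=0.01, viz=True)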
Esempio n. 44
0
wo.orientation_3D_colorwheel(wheelsize=256, circ_size=50, interp_belt=20/180*np.pi, sat_factor=1)
plt.show()

### Render 3D orientation with 2 channels (in-plane orientation and out-of-plane tilt)
# in-plane orientation
from matplotlib.colors import hsv_to_rgb


ret_min_color = 0
ret_max_color = 1.5


I_hsv = np.transpose(np.array([(azimuth[0])%np.pi/np.pi, \
                               np.ones_like(retardance_pr_nm[0]), \
                               (np.clip(np.abs(retardance_pr_nm[0]),ret_min_color,ret_max_color)-ret_min_color)/(ret_max_color-ret_min_color)]), (1,2,0))
in_plane_orientation = hsv_to_rgb(I_hsv.copy())

plt.figure(figsize=(5,5))
plt.imshow(in_plane_orientation, origin='lower')
plt.figure(figsize=(3,3))
wo.orientation_2D_colorwheel()
plt.show()

# out-of-plane tilt

threshold_inc = np.pi/90

I_hsv = np.transpose(np.array([(-np.maximum(0,np.abs(theta[0]-np.pi/2)-threshold_inc)+np.pi/2+threshold_inc)/np.pi, \
                               np.ones_like(retardance_pr_nm[0]), \
                               (np.clip(np.abs(retardance_pr_nm[0]),ret_min_color,ret_max_color)-ret_min_color)/(ret_max_color-ret_min_color)]), (1,2,0))
out_of_plane_tilt = hsv_to_rgb(I_hsv.copy())
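
For symmetry with the in-plane figure above, the tilt map can presumably be displayed the same way (a sketch mirroring the earlier imshow call):

plt.figure(figsize=(5,5))
plt.imshow(out_of_plane_tilt, origin='lower')
plt.show()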
Esempio n. 45
0
def plot_loo_pit(
    ax,
    figsize,
    ecdf,
    loo_pit,
    loo_pit_ecdf,
    unif_ecdf,
    p975,
    p025,
    fill_kwargs,
    ecdf_fill,
    use_hdi,
    x_vals,
    hdi_kwargs,
    hdi_odds,
    n_unif,
    unif,
    plot_unif_kwargs,
    loo_pit_kde,
    legend,  # pylint: disable=unused-argument
    y_hat,
    y,
    color,
    textsize,
    credible_interval,
    plot_kwargs,
    backend_kwargs,
    show,
):
    """Bokeh loo pit plot."""
    if backend_kwargs is None:
        backend_kwargs = {}

    backend_kwargs = {
        **backend_kwarg_defaults(),
        **backend_kwargs,
    }

    (figsize, *_, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)

    if ax is None:
        backend_kwargs.setdefault("x_range", (0, 1))
        ax = create_axes_grid(
            1,
            figsize=figsize,
            squeeze=True,
            backend_kwargs=backend_kwargs,
        )

    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    plot_kwargs.setdefault("color", to_hex(color))
    plot_kwargs.setdefault("linewidth", linewidth * 1.4)
    if isinstance(y, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
    elif isinstance(y, DataArray) and y.name is not None:
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
    elif isinstance(y_hat, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
    elif isinstance(y_hat, DataArray) and y_hat.name is not None:
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(
            y_hat.name)
    else:
        label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"

    plot_kwargs.setdefault("legend_label", label)

    plot_unif_kwargs = {} if plot_unif_kwargs is None else plot_unif_kwargs
    light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
    light_color[1] /= 2  # pylint: disable=unsupported-assignment-operation
    light_color[2] += (1 - light_color[2]) / 2  # pylint: disable=unsupported-assignment-operation
    plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
    plot_unif_kwargs.setdefault("alpha", 0.5)
    plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)

    if ecdf:
        n_data_points = loo_pit.size
        plot_kwargs.setdefault(
            "drawstyle", "steps-mid" if n_data_points < 100 else "default")
        plot_unif_kwargs.setdefault(
            "drawstyle", "steps-mid" if n_data_points < 100 else "default")

        if ecdf_fill:
            if fill_kwargs is None:
                fill_kwargs = {}
            fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
            fill_kwargs.setdefault("alpha", 0.5)
            fill_kwargs.setdefault(
                "step",
                "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None)
            fill_kwargs.setdefault(
                "legend_label",
                "{:.3g}% credible interval".format(credible_interval))
    elif use_hdi:
        if hdi_kwargs is None:
            hdi_kwargs = {}
        hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
        hdi_kwargs.setdefault("alpha", 0.35)

    if ecdf:
        if plot_kwargs.get("drawstyle") == "steps-mid":
            ax.step(
                np.hstack((0, loo_pit, 1)),
                np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
                line_color=plot_kwargs.get("color", "black"),
                line_alpha=plot_kwargs.get("alpha", 1.0),
                line_width=plot_kwargs.get("linewidth", 3.0),
                mode="center",
            )
        else:
            ax.line(
                np.hstack((0, loo_pit, 1)),
                np.hstack((0, loo_pit - loo_pit_ecdf, 0)),
                line_color=plot_kwargs.get("color", "black"),
                line_alpha=plot_kwargs.get("alpha", 1.0),
                line_width=plot_kwargs.get("linewidth", 3.0),
            )

        if ecdf_fill:
            if fill_kwargs.get("drawstyle") == "steps-mid":
                # TODO: switch to a step patch once a way to draw one is found
                ax.patch(
                    np.concatenate((unif_ecdf, unif_ecdf[::-1])),
                    np.concatenate(
                        (p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
                    fill_color=fill_kwargs.get("color"),
                    fill_alpha=fill_kwargs.get("alpha", 1.0),
                )
            else:
                ax.patch(
                    np.concatenate((unif_ecdf, unif_ecdf[::-1])),
                    np.concatenate(
                        (p975 - unif_ecdf, (p025 - unif_ecdf)[::-1])),
                    fill_color=fill_kwargs.get("color"),
                    fill_alpha=fill_kwargs.get("alpha", 1.0),
                )
        else:
            if fill_kwargs is not None and fill_kwargs.get(
                    "drawstyle") == "steps-mid":
                ax.step(
                    unif_ecdf,
                    p975 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_kwargs.get("linewidth", 1.0),
                    mode="center",
                )
                ax.step(
                    unif_ecdf,
                    p025 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                    mode="center",
                )
            else:
                ax.line(
                    unif_ecdf,
                    p975 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
                ax.line(
                    unif_ecdf,
                    p025 - unif_ecdf,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 1.0),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
    else:
        if use_hdi:
            patch = BoxAnnotation(bottom=hdi_odds[1],
                                  top=hdi_odds[0],
                                  fill_alpha=hdi_kwargs.pop("alpha"),
                                  fill_color=hdi_kwargs.pop("color"),
                                  **hdi_kwargs)
            patch.level = "underlay"
            ax.add_layout(patch)

            # Adds horizontal reference line
            ax.line([0, 1], [1, 1], line_color="white", line_width=1.5)
        else:
            for idx in range(n_unif):
                x_s, unif_density = kde(unif[idx, :])
                ax.line(
                    x_s,
                    unif_density,
                    line_color=plot_unif_kwargs.get("color", "black"),
                    line_alpha=plot_unif_kwargs.get("alpha", 0.1),
                    line_width=plot_unif_kwargs.get("linewidth", 1.0),
                )
        ax.line(
            x_vals,
            loo_pit_kde,
            line_color=plot_kwargs.get("color", "black"),
            line_alpha=plot_kwargs.get("alpha", 1.0),
            line_width=plot_kwargs.get("linewidth", 3.0),
        )

    # Sets xlim(0, 1)
    ax.line(0, 0)
    ax.line(1, 0)
    show_layout(ax, show)

    return ax
Esempio n. 46
0
    myfig.Line(p0, x=[0, 25, 50, 100], y=interbout_interval_as_function_of_coherence_wt_mean, yerr=interbout_interval_as_function_of_coherence_wt_sem, lc="black", zorder=1)
    myfig.Scatter(p0, x=[0, 25, 50, 100], y=interbout_interval_as_function_of_coherence_wt_mean, lc='black', pt='o', lw=0.5, ps=9.8, pc='white', zorder=2)

    myfig.Line(p0, x=[0, 25, 50, 100], y=interbout_interval_as_function_of_coherence_mutant_mean, yerr=interbout_interval_as_function_of_coherence_mutant_sem, lc=basecolor, zorder=1)
    myfig.Scatter(p0, x=[0, 25, 50, 100], y=interbout_interval_as_function_of_coherence_mutant_mean, lc=basecolor, pt='o', lw=0.5, ps=9.8, pc='white', zorder=2)

    ####
    # Correctness over time as function of coherence
    p0 = myfig.Plot(fig, num='b3', xpos=13, ypos=ypos - 1.5, plot_height=2.5, title = "Wildtype",
                                       plot_width=2.2, errorbar_area=False, lw=1,
                                       xl="Time (s)", xmin=-2, xmax=14, xticks=[0, 5, 10], hlines=[50],
                                       yl="Probability correct (%)", ymin=44, ymax=91, yticks=[50, 70, 90], vspans=[[0, 10, "#cccccc", 0.6]])

    for i in range(4):
        c = colors.hsv_to_rgb([0, 0, (i + 2) / 8])

        myfig.Line(p0, x=binned_correctness_wt_mean.loc[i, :].index[1:] - 10, y=binned_correctness_wt_mean.loc[i, :].values[1:][:,0], yerr=binned_correctness_wt_sem.loc[i, :].values[1:][:,0], lc=c, zorder=1)
        myfig.Scatter(p0, x=binned_correctness_wt_mean.loc[i, :].index[1:] - 10, y=binned_correctness_wt_mean.loc[i, :].values[1:][:,0], lc=c, pt='o', lw=0.5, ps=9.8, pc='white', zorder=2)


    # Correctness over time as function of coherence
    p0 = myfig.Plot(fig, num='', xpos=16, ypos=ypos - 1.5, plot_height=2.5, title = "Mutant",
                                       plot_width=2.2, errorbar_area=False, lw=1,
                                       xl="Time (s)", xmin=-2, xmax=14, xticks=[0, 5, 10], hlines=[50],
                                       ymin=44, ymax=91, yticks=[50, 70, 90], yticklabels=[""]*3, vspans=[[0, 10, "#cccccc", 0.6]])


    h, s, v = colors.rgb_to_hsv(colors.to_rgb(basecolor))

    for i in range(4):
Esempio n. 47
0
def use_original_color(original, result):
    result_hsv = rgb_to_hsv(result)
    orig_hsv = rgb_to_hsv(original)
    oh, os, ov = np.split(orig_hsv, axis=-1, indices_or_sections=3)
    rh, rs, rv = np.split(result_hsv, axis=-1, indices_or_sections=3)
    return hsv_to_rgb(np.concatenate([oh, os, rv], axis=-1))
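
A small usage sketch (the random arrays are stand-ins; rgb_to_hsv, hsv_to_rgb and numpy are assumed imported as in the snippet above): keep the hue and saturation of the original image while taking the brightness from the processed result.

import numpy as np

original = np.random.rand(64, 64, 3)   # stand-in RGB image in [0, 1]
result = np.random.rand(64, 64, 3)     # stand-in processed image
recolored = use_original_color(original, result)
print(recolored.shape)                 # (64, 64, 3)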
Esempio n. 48
0
def gray_out(color, s_factor=0.5, v_factor=1):
    hsv_color = colors.rgb_to_hsv(colors.to_rgb(color)) * np.array(
        [1, s_factor, v_factor])
    return colors.hsv_to_rgb(hsv_color)
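
A brief usage sketch, assuming matplotlib.colors is imported as colors (as in the function itself): desaturate a named matplotlib color for use as a muted variant.

muted = gray_out('tab:blue')                           # half saturation, unchanged value
lighter = gray_out('red', s_factor=0.3, v_factor=0.9)
print(muted, lighter)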
Esempio n. 49
0
    print(list(dataset_dict.keys()))
    quit()


print("Loading Corpus ")
D, X, Y = dataset_dict[dataset_name]()

results = []
std_kmeans = []
representations = torch.load(os.path.join(args.file,"embeddings.t7"))[0]

kmeans = kmh.PoincareKMeans(n_gaussian)
kmeans.fit(representations)
gt_colors = []
pr_colors = []

import matplotlib.pyplot as plt
import matplotlib.colors as plt_colors
import numpy as np
unique_label = np.unique(sum([ y for k, y in D.Y.items()],[]))

prediction = kmeans.predict(representations)

for i in range(len(D.Y)):
    gt_colors.append(plt_colors.hsv_to_rgb([D.Y[i][0]/(len(unique_label)),0.5,0.8]))
    pr_colors.append(plt_colors.hsv_to_rgb([prediction[i].item()/(len(unique_label)),0.5,0.8]))

plot_tools.kmean_plot(representations, kmeans.centroids, gt_colors, pr_colors, args.file, prefix=dataset_name)


Esempio n. 50
0
def get_random_data(
    annotation_line,
    input_shape,
    random=True,
    max_boxes=20,
    jitter=0.3,
    hue=0.1,
    sat=1.5,
    val=1.5,
    proc_img=True,
):
    """random preprocessing for real-time data augmentation"""

    # This type of splitting makes sure that it is compatible with spaces in folder names
    # We split at the first space that is followed by a number
    tmp_split = re.split(r"( \d)", annotation_line, maxsplit=1)
    if len(tmp_split) > 2:
        line = tmp_split[0], tmp_split[1] + tmp_split[2]
    else:
        line = tmp_split
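    # e.g. re.split(r"( \d)", "my folder/img.jpg 10,20,110,220,0", maxsplit=1)
    # yields ["my folder/img.jpg", " 1", "0,20,110,220,0"], so line becomes
    # ("my folder/img.jpg", " 10,20,110,220,0")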
    # line[0] contains the filename
    image = Image.open(line[0])
    # The rest of the line includes bounding boxes
    line = line[1].split(" ")
    iw, ih = image.size
    h, w = input_shape
    box = np.array([np.array(list(map(int, box.split(",")))) for box in line[1:]])

    if not random:
        # resize image
        scale = min(w / iw, h / ih)
        nw = int(iw * scale)
        nh = int(ih * scale)
        dx = (w - nw) // 2
        dy = (h - nh) // 2
        image_data = 0
        if proc_img:
            image = image.resize((nw, nh), Image.BICUBIC)
            new_image = Image.new("RGB", (w, h), (128, 128, 128))
            new_image.paste(image, (dx, dy))
            image_data = np.array(new_image) / 255.0

        # correct boxes
        box_data = np.zeros((max_boxes, 5))
        if len(box) > 0:
            np.random.shuffle(box)
            if len(box) > max_boxes:
                box = box[:max_boxes]
            box[:, [0, 2]] = box[:, [0, 2]] * scale + dx
            box[:, [1, 3]] = box[:, [1, 3]] * scale + dy
            box_data[: len(box)] = box

        return image_data, box_data

    # resize image
    new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)
    scale = rand(0.25, 2)
    if new_ar < 1:
        nh = int(scale * h)
        nw = int(nh * new_ar)
    else:
        nw = int(scale * w)
        nh = int(nw / new_ar)
    image = image.resize((nw, nh), Image.BICUBIC)

    # place image
    dx = int(rand(0, w - nw))
    dy = int(rand(0, h - nh))
    new_image = Image.new("RGB", (w, h), (128, 128, 128))
    new_image.paste(image, (dx, dy))
    image = new_image

    # flip image or not
    flip = rand() < 0.5
    if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)

    # distort image
    hue = rand(-hue, hue)
    sat = rand(1, sat) if rand() < 0.5 else 1 / rand(1, sat)
    val = rand(1, val) if rand() < 0.5 else 1 / rand(1, val)
    x = rgb_to_hsv(np.array(image) / 255.0)
    x[..., 0] += hue
    x[..., 0][x[..., 0] > 1] -= 1
    x[..., 0][x[..., 0] < 0] += 1
    x[..., 1] *= sat
    x[..., 2] *= val
    x[x > 1] = 1
    x[x < 0] = 0
    image_data = hsv_to_rgb(x)  # numpy array, 0 to 1

    # make gray
    gray = rand() < 0.2
    if gray:
        image_gray = np.dot(image_data, [0.299, 0.587, 0.114])
        # a gray RGB image is GGG
        image_data = np.moveaxis(np.stack([image_gray, image_gray, image_gray]), 0, -1)

    # invert colors
    invert = rand() < 0.1
    if invert:
        image_data = 1.0 - image_data

    # correct boxes
    box_data = np.zeros((max_boxes, 5))
    if len(box) > 0:
        np.random.shuffle(box)
        box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx
        box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy
        if flip:
            box[:, [0, 2]] = w - box[:, [2, 0]]
        box[:, 0:2][box[:, 0:2] < 0] = 0
        box[:, 2][box[:, 2] > w] = w
        box[:, 3][box[:, 3] > h] = h
        box_w = box[:, 2] - box[:, 0]
        box_h = box[:, 3] - box[:, 1]
        box = box[np.logical_and(box_w > 1, box_h > 1)]  # discard invalid box
        if len(box) > max_boxes:
            box = box[:max_boxes]
        box_data[: len(box)] = box

    return image_data, box_data
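
A hedged usage sketch; the annotation file name and its line format ("path x_min,y_min,x_max,y_max,class ...") are assumptions inferred from the parsing above.

with open('train_annotations.txt') as f:            # hypothetical annotation file
    line = f.readline().strip()
image_data, box_data = get_random_data(line, input_shape=(416, 416))
print(image_data.shape, box_data.shape)             # (416, 416, 3) (20, 5)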
Esempio n. 51
0
# normalize polar angles to have values in circle between 0 and 1
polar_ang_norm = (polar_angle + np.pi) / (np.pi * 2.0)

# use "resto da divisão" so that 1 == 0 (because they overlapp in circle)
# why have an offset?
angle_offset = 0.1
polar_ang_norm = np.fmod(polar_ang_norm + angle_offset, 1.0)

# convert angles to colors, using correlations as weights
hsv = np.zeros(list(polar_ang_norm.shape) + [3])
hsv[..., 0] = polar_ang_norm  # different hue value for each angle
hsv[..., 1] = np.ones_like(rsq)  # full saturation everywhere (same shape as rsq)
hsv[..., 2] = np.ones_like(rsq)  # full value everywhere (same shape as rsq)

# convert hsv values of np array to rgb values (values assumed to be in range [0, 1])
rgb = colors.hsv_to_rgb(hsv)

# define alpha channel - which specifies the opacity for a color
# 0 = transparent = values with rsq below thresh and 1 = opaque = values above thresh
alpha_mask = (rsq <= rsq_threshold).T  # transposed, presumably to match the orientation of the pycortex volume
alpha = np.ones(alpha_mask.shape)
alpha[alpha_mask] = 0

#create volumes

#contains RGBA colors for each voxel in a volumetric dataset
# volume for polar angles
vrgba = cortex.VolumeRGB(red=rgb[..., 0].T,
                         green=rgb[..., 1].T,
                         blue=rgb[..., 2].T,
Esempio n. 52
0
def hsv_to_pic(pixels):
    pixels = plc.hsv_to_rgb(pixels)
    plt.imshow(pixels)  # display the image
    plt.show()
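
A quick usage sketch (assuming matplotlib.colors is imported as plc, as in the function): build a hue gradient in HSV and display it.

import numpy as np

h, w = 50, 200
pixels = np.zeros((h, w, 3))
pixels[..., 0] = np.linspace(0, 1, w)   # hue ramp across the width
pixels[..., 1] = 1.0                    # full saturation
pixels[..., 2] = 1.0                    # full value
hsv_to_pic(pixels)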
Esempio n. 53
0
# plt.subplot(122)
# plt.scatter(z_test[:, 0], z_test[:, 1], marker='.')

# plt.show()
zs = np.random.randn(1000, latent_dim)

zs_2d = np.zeros((1000, 2))
zs_2d[:, 0] = zs[:, 0]
zs_2d[:, 1] = zs[:, latent_dim - 1]

# Get a polar color scheme
hues = np.clip((np.arctan2(zs_2d[:, 0], zs_2d[:, 1]) + np.pi) / (2 * np.pi), 0,
               1)
values = np.clip(np.sqrt(zs_2d[:, 0]**2 + zs_2d[:, 1]**2) / 2, 0, 1)
colors = hsv_to_rgb(np.array([hues, values, np.ones(hues.shape)]).T)

# Plot input space
ax = plt.subplot(1, 2, 1)
ax.set_aspect(1)
# plt.scatter(x_train[:, 0], x_train[:, 1], marker='.')
plt.hexbin(x_train[:, 0], x_train[:, 1], gridsize=30, extent=(-2, 2, -2, 2))

# Plot Z space
# plt.subplot(1, 2, 2)
# plt.scatter(zs_2d[:, 0], zs_2d[:, 1], c=colors, marker='.')

# Compute average density across models
density = np.zeros((1000, n_models, input_dim))
iters = 1
for iv, vae in enumerate(vaes):
Esempio n. 54
0
def plot_loo_pit(
    idata=None,
    y=None,
    y_hat=None,
    log_weights=None,
    ecdf=False,
    ecdf_fill=True,
    n_unif=100,
    use_hpd=False,
    credible_interval=0.94,
    figsize=None,
    textsize=None,
    color="C0",
    legend=True,
    ax=None,
    plot_kwargs=None,
    plot_unif_kwargs=None,
    hpd_kwargs=None,
    fill_kwargs=None,
    backend=None,
    show=True,
):
    """Plot Leave-One-Out (LOO) probability integral transformation (PIT) predictive checks.

    Parameters
    ----------
    idata : InferenceData
        InferenceData object.
    y : array, DataArray or str
        Observed data. If str, idata must be present and contain the observed data group
    y_hat : array, DataArray or str
        Posterior predictive samples for ``y``. It must have the same shape as y plus an
        extra dimension at the end of size n_samples (chains and draws stacked). If str or
        None, idata must contain the posterior predictive group. If None, y_hat is taken
        equal to y, thus, y must be str too.
    log_weights : array or DataArray
        Smoothed log_weights. It must have the same shape as ``y_hat``
    ecdf : bool, optional
        Plot the difference between the LOO-PIT Empirical Cumulative Distribution Function
        (ECDF) and the uniform CDF instead of LOO-PIT kde.
        In this case, instead of overlaying uniform distributions, the beta ``credible_interval``
        interval around the theoretical uniform CDF is shown. This approximation only holds
        for large S and ECDF values not very close to 0 or 1. For more information, see
        `Vehtari et al. (2019)`, `Appendix G <https://avehtari.github.io/rhat_ess/rhat_ess.html>`_.
    ecdf_fill : bool, optional
        Use fill_between to mark the area inside the credible interval. Otherwise, plot the
        border lines.
    n_unif : int, optional
        Number of datasets to simulate and overlay from the uniform distribution.
    use_hpd : bool, optional
        Use plot_hpd to fill between hpd values instead of overlaying the uniform distributions.
    credible_interval : float, optional
        Credible interval of the hpd or of the ECDF theoretical credible interval
    figsize : figure size tuple, optional
        If None, size is (8 + numvars, 8 + numvars)
    textsize: int, optional
        Text size for labels. If None it will be autoscaled based on figsize.
    color : str or array_like, optional
        Color of the LOO-PIT estimated pdf plot. If ``plot_unif_kwargs`` has no "color" key,
        a slightly lighter color than this argument will be used for the uniform kde lines.
        This will ensure that LOO-PIT kde and uniform kde have different default colors.
    legend : bool, optional
        Show the legend of the figure.
    ax : axes, optional
        Matplotlib axes
    plot_kwargs : dict, optional
        Additional keywords passed to ax.plot for LOO-PIT line (kde or ECDF)
    plot_unif_kwargs : dict, optional
        Additional keywords passed to ax.plot for overlaid uniform distributions or
        for beta credible interval lines if ``ecdf=True``
    hpd_kwargs : dict, optional
        Additional keywords passed to az.plot_hpd
    fill_kwargs : dict, optional
        Additional kwargs passed to ax.fill_between

    Returns
    -------
    axes : axes
        Matplotlib axes

    References
    ----------
    * Gabry et al. (2017) see https://arxiv.org/abs/1709.01449
    * https://mc-stan.org/bayesplot/reference/PPC-loo.html
    * Gelman et al. BDA (2014) Section 6.3

    Examples
    --------
    Plot LOO-PIT predictive checks overlaying the KDE of the LOO-PIT values to several
    realizations of uniform variable sampling with the same number of observations.

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> idata = az.load_arviz_data("centered_eight")
        >>> az.plot_loo_pit(idata=idata, y="obs")

    Fill the area containing the 94% credible interval of the difference between uniform
    variables empirical CDF and the real uniform CDF. A LOO-PIT ECDF clearly outside of these
    theoretical boundaries indicates that the observations and the posterior predictive
    samples do not follow the same distribution.

    .. plot::
        :context: close-figs

        >>> az.plot_loo_pit(idata=idata, y="obs", ecdf=True)

    """
    if ecdf and use_hpd:
        raise ValueError("use_hpd is incompatible with ecdf plot")

    (figsize, _, _, xt_labelsize, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)

    loo_pit = _loo_pit(idata=idata, y=y, y_hat=y_hat, log_weights=log_weights)
    loo_pit = loo_pit.flatten() if isinstance(loo_pit, np.ndarray) else loo_pit.values.flatten()

    if plot_kwargs is None:
        plot_kwargs = {}
    plot_kwargs["color"] = to_hex(color)
    plot_kwargs.setdefault("linewidth", linewidth * 1.4)
    if isinstance(y, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
    elif isinstance(y, DataArray):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
    elif isinstance(y_hat, str):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
    elif isinstance(y_hat, DataArray):
        label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
    else:
        label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"

    plot_kwargs.setdefault("label", label)
    plot_kwargs.setdefault("zorder", 5)

    if plot_unif_kwargs is None:
        plot_unif_kwargs = {}
    light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
    light_color[1] /= 2  # pylint: disable=unsupported-assignment-operation
    light_color[2] += (1 - light_color[2]) / 2  # pylint: disable=unsupported-assignment-operation
    plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
    plot_unif_kwargs.setdefault("alpha", 0.5)
    plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)

    loo_pit_ecdf = None
    unif_ecdf = None
    p975 = None
    p025 = None
    loo_pit_kde = None
    unif = None
    unif_densities = None
    x_vals = None

    if ecdf:
        loo_pit.sort()
        n_data_points = loo_pit.size
        loo_pit_ecdf = np.arange(n_data_points) / n_data_points
        # ideal unnormalized ECDF of uniform distribution with n_data_points points
        # it is used interchangeably as x or p(u<x) because for u~U(0,1) they are equal
        unif_ecdf = np.arange(n_data_points + 1)
        p975 = stats.beta.ppf(
            0.5 + credible_interval / 2, unif_ecdf + 1, n_data_points - unif_ecdf + 1
        )
        p025 = stats.beta.ppf(
            0.5 - credible_interval / 2, unif_ecdf + 1, n_data_points - unif_ecdf + 1
        )
        unif_ecdf = unif_ecdf / n_data_points

        plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
        plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")

        if ecdf_fill:
            if fill_kwargs is None:
                fill_kwargs = {}
            fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
            fill_kwargs.setdefault("alpha", 0.5)
            fill_kwargs.setdefault(
                "step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
            )
            fill_kwargs.setdefault("label", "{:.3g}% credible interval".format(credible_interval))
    else:
        loo_pit_kde, _, _ = _fast_kde(loo_pit, xmin=0, xmax=1)

        unif = np.random.uniform(size=(n_unif, loo_pit.size))
        x_vals = np.linspace(0, 1, len(loo_pit_kde))
        if use_hpd:
            if hpd_kwargs is None:
                hpd_kwargs = {}
            hpd_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
            hpd_fill_kwargs = hpd_kwargs.pop("fill_kwargs", {})
            hpd_fill_kwargs.setdefault("label", "Uniform HPD")
            hpd_kwargs["fill_kwargs"] = hpd_fill_kwargs
            hpd_kwargs["credible_interval"] = credible_interval

            unif_densities = np.empty((n_unif, len(loo_pit_kde)))

    loo_pit_kwargs = dict(
        ax=ax,
        figsize=figsize,
        ecdf=ecdf,
        loo_pit=loo_pit,
        loo_pit_ecdf=loo_pit_ecdf,
        unif_ecdf=unif_ecdf,
        p975=p975,
        p025=p025,
        fill_kwargs=fill_kwargs,
        ecdf_fill=ecdf_fill,
        use_hpd=use_hpd,
        x_vals=x_vals,
        unif_densities=unif_densities,
        hpd_kwargs=hpd_kwargs,
        n_unif=n_unif,
        unif=unif,
        plot_unif_kwargs=plot_unif_kwargs,
        loo_pit_kde=loo_pit_kde,
        xt_labelsize=xt_labelsize,
        legend=legend,
        credible_interval=credible_interval,
        plot_kwargs=plot_kwargs,
    )

    if backend == "bokeh":
        from .backends.bokeh.bokeh_loopitplot import _plot_loo_pit

        if (
            loo_pit_kwargs["hpd_kwargs"] is not None
            and "fill_kwargs" in loo_pit_kwargs["hpd_kwargs"]
            and loo_pit_kwargs["hpd_kwargs"]["fill_kwargs"] is not None
            and "label" in loo_pit_kwargs["hpd_kwargs"]["fill_kwargs"]
        ):
            loo_pit_kwargs["hpd_kwargs"]["fill_kwargs"].pop("label")
        loo_pit_kwargs.pop("legend")
        loo_pit_kwargs.pop("xt_labelsize")
        loo_pit_kwargs.pop("credible_interval")
        loo_pit_kwargs["show"] = show
        ax = _plot_loo_pit(**loo_pit_kwargs)  #  pylint: disable=unexpected-keyword-arg
    else:
        from .backends.matplotlib.mpl_loopitplot import _plot_loo_pit

        ax = _plot_loo_pit(**loo_pit_kwargs)

    return ax
Esempio n. 55
0
def decide_split_colours(split):
	return hsv_to_rgb((float(dataset_splits_dict[split]//2)/6, 1.0, 1.0))
Esempio n. 56
0
def visualize_phi(phi):
    phi = phi[:, :, np.newaxis]
    h, w = phi.shape[:2]
    hsv = np.concatenate([phi, np.ones([h, w, 1]), np.ones([h, w, 1])], axis=2)
    rgb = hsv_to_rgb(hsv)
    return rgb
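
A short usage sketch; phi is assumed to be a 2-D array of hue values in [0, 1] (for instance a normalized angle field), and the random data below is a stand-in.

import numpy as np
import matplotlib.pyplot as plt

phi = np.random.rand(128, 128)      # stand-in hue field in [0, 1]
plt.imshow(visualize_phi(phi))
plt.show()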
Esempio n. 57
0
def make_ori_map(xc):
    return colors.hsv_to_rgb(
        np.minimum(
            1,
            np.stack((np.angle(xc) / np.pi / 2 % 1, abs(xc), abs(xc)),
                     axis=-1)))
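
A usage sketch under the assumption that xc is a complex-valued array whose phase encodes orientation and whose magnitude (clipped to 1 by np.minimum) encodes strength.

import numpy as np
import matplotlib.colors as colors
import matplotlib.pyplot as plt

xc = 0.5 * (np.random.randn(64, 64) + 1j * np.random.randn(64, 64))  # stand-in field
plt.imshow(make_ori_map(xc))
plt.show()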
Esempio n. 58
0
                sat = 1
                value = 1
                if 0 < i < 499 and 0 < j < 499:
                    odl = 20  # 75.37
                    pra = np.array([odl, 0, -dane[i][j] + dane[i][j + 1]])
                    lew = np.array([-odl, 0, -dane[i][j] + dane[i][j - 1]])
                    gor = np.array([0, odl, -dane[i][j] + dane[i - 1][j]])
                    dol = np.array([0, -odl, -dane[i][j] + dane[i + 1][j]])
                    nor = np.cross(lew, gor) + np.cross(gor, pra) + np.cross(
                        pra, dol) + np.cross(dol, lew)
                    vecSlo = np.array([0, -0.447214, -0.894427])
                    kat = toCos(nor, vecSlo)

                    q = 0.5
                    if kat < 0.93:

                        value = kat**1.5
                    else:

                        s = q
                        sat = (1 - kat) / 0.07
                colorMap[i].append(colors.hsv_to_rgb((hue, sat, value)))
            else:
                colorMap[i].append((0, 0, 0))
            j += 1
        i += 1

    fig, ax = plt.subplots()
    plt.imshow(colorMap)
    plt.show()
Esempio n. 59
0
def hsv2Rgba(h, s, v, alpha):
    return np.append(hsv_to_rgb((h, s, v)) * 255.9, [1]) * alpha
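
A small usage sketch; note the function returns a premultiplied RGBA 4-vector scaled to roughly 0-255 rather than an 8-bit pixel.

rgba = hsv2Rgba(0.6, 0.8, 1.0, alpha=0.5)   # premultiplied RGBA, ~0-255 range
print(rgba)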
Esempio n. 60
0
def disp_flow(flow):
    flow_ang, flow_mag = radial_flow(flow)
    flow_ang, flow_mag = normalize_flow(flow_ang, flow_mag)
    hsv = np.stack((flow_ang, flow_mag, np.ones_like(flow_ang)), 2)
    rgb = hsv_to_rgb(hsv)  # In range [0, 1]
    return rgb
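
A hedged usage sketch; radial_flow and normalize_flow are helpers from the original project (not shown here), assumed to map an (H, W, 2) flow field to angle and magnitude arrays in [0, 1].

import numpy as np
import matplotlib.pyplot as plt

flow = np.random.randn(128, 128, 2)   # stand-in optical-flow field
plt.imshow(disp_flow(flow))
plt.show()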