Example #1
def run_color(image, image_out):
    caffe.set_mode_cpu()
    net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)

    (H_in,W_in) = net.blobs['data_l'].data.shape[2:] # get input shape
    (H_out,W_out) = net.blobs['class8_ab'].data.shape[2:] # get output shape
    net.blobs['Trecip'].data[...] = 6/np.log(10) # 1/T, set annealing temperature
    
    img_rgb = caffe.io.load_image(image)
    img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
    img_l = img_lab[:,:,0] # pull out L channel
    (H_orig,W_orig) = img_rgb.shape[:2] # original image size

    # resize image to network input size
    img_rs = caffe.io.resize_image(img_rgb,(H_in,W_in)) # resize image to network input size
    img_lab_rs = color.rgb2lab(img_rs)
    img_l_rs = img_lab_rs[:,:,0]

    net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
    net.forward() # run network

    ab_dec = net.blobs['class8_ab'].data[0,:,:,:].transpose((1,2,0)) # this is our result
    ab_dec_us = sni.zoom(ab_dec,(1.*H_orig/H_out,1.*W_orig/W_out,1)) # upsample ab to the original image size
    img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with the original L channel
    img_rgb_out = np.clip(color.lab2rgb(img_lab_out),0,1) # convert back to rgb

    scipy.misc.imsave(image_out, img_rgb_out)
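The tail of this example is a reusable pattern: upsample the predicted ab channels, rejoin them with the original-resolution L channel, and convert back to RGB. A standalone sketch of just that recombination step (the function name is ours, not the project's):

import numpy as np
import scipy.ndimage as sni
from skimage import color

def recombine_lab(img_l, ab_lowres):
    # img_l: original-resolution L channel; ab_lowres: predicted ab at network resolution
    (H, W) = img_l.shape
    (h, w) = ab_lowres.shape[:2]
    ab_us = sni.zoom(ab_lowres, (1. * H / h, 1. * W / w, 1))  # upsample ab
    img_lab = np.concatenate((img_l[:, :, np.newaxis], ab_us), axis=2)
    return np.clip(color.lab2rgb(img_lab), 0, 1)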
Example #2
def applyNailPolish(x , y , r = Rg, g = Gg, b = Bg):
	val = color.rgb2lab((im[x, y]/255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
	L, A, B = mean(val[:,0]), mean(val[:,1]), mean(val[:,2])
	L1, A1, B1 = color.rgb2lab(np.array((r/255., g/255., b/255.)).reshape(1, 1, 3)).reshape(3,)
	ll, aa, bb = L1 - L, A1 - A, B1 - B
	val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
	val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
	val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
	im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3)*255
Example #3
def convertLAB(img):
    "convert an RGB img into LAB color space"
    if img.shape[2]==4:
        return rgb2lab(img[:,:,0:3])
    else:
        if img.shape[2]==3:
            return rgb2lab(img)
        else:
            print ("Image format not supported")
Example #4
def process_pair(ref, recons):
    ref_lab = color.rgb2lab(decode_y4m_buffer(ref))
    recons_lab = color.rgb2lab(decode_y4m_buffer(recons))
    # "Color Image Quality Assessment Based on CIEDE2000"
    # Yang Yang, Jun Ming and Nenghai Yu, 2012
    # http://dx.doi.org/10.1155/2012/273723
    dE = color.deltaE_ciede2000(ref_lab, recons_lab, kL=0.65, kC=1.0, kH=4.0)
    scores.append(45. - 20. * np.log10(dE.mean()))
    print('%08d: %2.4f' % (ref.count, scores[-1]))
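The last two lines turn the mean CIEDE2000 distance into a PSNR-style score. Factored out as a self-contained helper using only the calls already shown (a sketch; ciede2000_score is our name):

import numpy as np
from skimage import color

def ciede2000_score(ref_rgb, recons_rgb):
    # Both inputs: float RGB arrays in [0, 1] with identical shapes.
    ref_lab = color.rgb2lab(ref_rgb)
    recons_lab = color.rgb2lab(recons_rgb)
    # kL/kC/kH weights follow Yang, Ming and Yu (2012), as cited above.
    dE = color.deltaE_ciede2000(ref_lab, recons_lab, kL=0.65, kC=1.0, kH=4.0)
    return 45. - 20. * np.log10(dE.mean())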
Example #5
    def merge_images(self, img_a, img_b):
        i_a = skic.rgb2lab(img_a)
        i_b = skic.rgb2lab(img_b)

        norm_lum = np.max(np.asarray([i_a[..., 0], i_b[..., 0]]), axis=0)

        res_img = i_a.copy()
        res_img[..., 0] = norm_lum

        return skic.lab2rgb(res_img)
Example #6
def applyBlushColor(r = Rg, g = Gg, b = Bg):
 global im
 val = color.rgb2lab((im/255.)).reshape(width*height, 3)
 L, A, B = mean(val[:,0]), mean(val[:,1]), mean(val[:,2])
 L1, A1, B1 = color.rgb2lab(np.array((r/255., g/255., b/255.)).reshape(1, 1, 3)).reshape(3,)
 ll, aa, bb = (L1 - L)*intensity, (A1 - A)*intensity, (B1 - B)*intensity
 val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
 val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
 val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
 im = color.lab2rgb(val.reshape(height, width, 3))*255
Example #7
def get_train_data(img_file):
    image = img_to_array(load_img(img_file))
    image_shape = image.shape
    image = np.array(image, dtype=float)
    x = rgb2lab(1.0 / 255 * image)[:, :, 0]
    y = rgb2lab(1.0 / 255 * image)[:, :, 1:]
    y /= 128
    x = x.reshape(1, image_shape[0], image_shape[1], 1)
    y = y.reshape(1, image_shape[0], image_shape[1], 2)
    return x, y, image_shape
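Note the scaling: x is the L channel in [0, 100] and y is the ab pair divided by 128 so it lands roughly in [-1, 1]. A sketch that inverts the pair back to an RGB image, assuming exactly the shapes returned above (the helper name is ours):

import numpy as np
from skimage.color import lab2rgb

def train_pair_to_rgb(x, y):
    lab = np.concatenate([x[0], y[0] * 128], axis=-1)  # (H, W, 3) Lab image
    return lab2rgb(lab)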
Example #8
def apply_texture(x, y):
    xmin, ymin = amin(x), amin(y)
    X = (x - xmin).astype(int)
    Y = (y - ymin).astype(int)
    val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
    val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
    val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
    val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
    val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
    im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
Example #9
File: linearize.py Project: axil/fusion
def b():
    from skimage import io, color
    print('imported')
    rgb = io.imread('window_exp_1_1.jpg')
    print('opened')
    lab = color.rgb2lab(rgb)
    print(lab[0,0])
Example #10
def compute_saliency(img):
    """
        Computes Boolean Map Saliency (BMS).
    """

    img_lab = rgb2lab(img)
    img_lab -= img_lab.min()
    img_lab /= img_lab.max()
    thresholds = np.arange(0, 1, 1.0 / N_THRESHOLDS)[1:]

    # compute boolean maps
    bool_maps = []
    for thresh in thresholds:
        img_lab_T = img_lab.transpose(2, 0, 1)
        img_thresh = (img_lab_T > thresh)
        bool_maps.extend(list(img_thresh))

    # compute mean attention map
    attn_map = np.zeros(img_lab.shape[:2], dtype=float)
    for bool_map in bool_maps:
        attn_map += activate_boolean_map(bool_map)
    attn_map /= N_THRESHOLDS

    # gaussian smoothing
    attn_map = cv2.GaussianBlur(attn_map, (0, 0), 3)

    # perform normalization
    norm = np.sqrt((attn_map**2).sum())
    attn_map /= norm
    attn_map /= attn_map.max() / 255

    return attn_map.astype(np.uint8)
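activate_boolean_map is not shown in this snippet. In Boolean Map Saliency, a map is "activated" by keeping only surrounded regions, i.e. connected components that do not touch the image border. A plausible stand-in under that reading (an assumption, not this project's actual code):

import numpy as np
from scipy import ndimage as ndi

def activate_boolean_map(bool_map):
    labeled, _ = ndi.label(bool_map)
    # labels of components touching any image border
    border_labels = np.unique(np.concatenate([
        labeled[0, :], labeled[-1, :], labeled[:, 0], labeled[:, -1]]))
    # keep foreground pixels whose component never touches the border
    attn = np.isin(labeled, border_labels, invert=True) & bool_map
    return attn.astype(float)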
Example #11
def dominant_colors(image, num_colors, mask=None):
    """Reduce image colors to a representative set of a given size.

    Args:
        image (ndarray): BGR image of shape n x m x 3.
        num_colors (int): Number of colors to reduce to.
        mask (array_like, optional): Foreground mask. Defaults to None.

    Returns:
        list: The list of Color objects representing the most dominant colors in the image.

    """
    image = rgb2lab(image / 255.0)

    if mask is not None:
        data = image[mask > 250]
    else:
        data = np.reshape(image, (-1, 3))

    # kmeans algorithm has inherent randomness - result will not be exactly the same
    # every time. Fairly consistent with >= 30 iterations
    centroids, labels = kmeans2(data, num_colors, iter=30)
    counts = np.histogram(labels, bins=range(0, num_colors + 1), density=True)[0]

    centroids_RGB = lab2rgb(centroids.reshape(-1, 1, 3))[:, 0, :] * 255.0
    colors = [Color(centroid, count) for centroid, count in zip(centroids_RGB, counts)]
    colors.sort(key=lambda color: np.mean(color.BGR))

    return colors
Example #12
    def __init__(self, image_path):
        rgb = io.imread(image_path)
        self.lab = color.rgb2lab(rgb)
        self.im_shp = self.lab.shape
        self.r_image = np.zeros((self.im_shp[0], self.im_shp[1]-1))

Example #13
def snap_ab(input_l, input_rgb, return_type='rgb'):
    ''' given an input lightness and rgb, snap the color into a region where l,a,b is in-gamut
    '''
    T = 20
    warnings.filterwarnings("ignore")
    input_lab = rgb2lab_1d(np.array(input_rgb))  # convert input to lab
    conv_lab = input_lab.copy()  # keep ab from input
    for t in range(T):
        conv_lab[0] = input_l  # overwrite the lightness with input_l; ab stays from the input
        old_lab = conv_lab
        tmp_rgb = color.lab2rgb(conv_lab[np.newaxis, np.newaxis, :]).flatten()
        tmp_rgb = np.clip(tmp_rgb, 0, 1)
        conv_lab = color.rgb2lab(tmp_rgb[np.newaxis, np.newaxis, :]).flatten()
        dif_lab = np.sum(np.abs(conv_lab-old_lab))
        if dif_lab < 1:
            break
        # print(conv_lab)

    conv_rgb_ingamut = lab2rgb_1d(conv_lab, clip=True, dtype='uint8')
    if (return_type == 'rgb'):
        return conv_rgb_ingamut

    elif(return_type == 'lab'):
        conv_lab_ingamut = rgb2lab_1d(conv_rgb_ingamut)
        return conv_lab_ingamut
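The 1-D helpers rgb2lab_1d and lab2rgb_1d are not included in this snippet; minimal versions consistent with how they are called above (assumptions, not the project's exact code):

import numpy as np
from skimage import color

def rgb2lab_1d(in_rgb):
    # in_rgb: length-3 array of floats in [0, 1]
    return color.rgb2lab(in_rgb[np.newaxis, np.newaxis, :]).flatten()

def lab2rgb_1d(in_lab, clip=True, dtype='uint8'):
    tmp_rgb = color.lab2rgb(in_lab[np.newaxis, np.newaxis, :]).flatten()
    if clip:
        tmp_rgb = np.clip(tmp_rgb, 0, 1)
    if dtype == 'uint8':
        tmp_rgb = np.round(tmp_rgb * 255).astype('uint8')
    return tmp_rgb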
Example #14
def call_color_sig(path_to_image):

    size = 360
    nclusters = 15

    image_array = io.imread(path_to_image)
    image_array = transform.resize(image_array, (size,size),
                                   mode='edge')  # 'edge' replaces mode='nearest', which skimage's resize does not accept
    image_array = color.rgb2lab(image_array)
    image_array = image_array.reshape(-1,3)

    k_means = cluster.MiniBatchKMeans(nclusters,init='k-means++',n_init=1,\
              max_iter=300,tol=0.01,random_state=451792)
    k_means.fit(image_array)
    centers = k_means.cluster_centers_.squeeze()
    labels = k_means.labels_

    pixels_tot = 0
    pixels_loc = np.empty((nclusters,1),dtype=int)
    for index in np.arange(0,nclusters):
        pixels_loc[index] = np.sum((labels == index).astype('int'))
        pixels_tot += pixels_loc[index]

    weights = pixels_loc.astype('float')/pixels_tot

    #print "Total number of pixels ", pixels_tot
    signature = \
    np.concatenate((weights,centers),axis=1).T.flatten()

    return signature
Example #15
    def _convert_colorspace(self,image,colorspace,blur=False, sigma=3):
        """ 
        INPUT:
                image: The image to be converted to the specified colorspace, should have shape=(image_x,image_y,3)
                        the input colorspace should be RGB mapped between [0 and 1], (it will return the same image if colorspace is set to RGB)
                colorspace: the colorspace that the images will be put in;
                        'CIELab' for CIELab colorspace
                        'CIEL*a*b*' for the mapped CIELab colorspace (by function remap_CIELab in NNPreprocessor)
                        'RGB' for rgb mapped between [0 and 1]
                        'YCbCr' for YCbCr
                        'HSV' for HSV
                blur: Blur the target output of the image if True.
                        Supported colorspaces:
                            'CIELab'
                            'CIEL*a*b*'
                            'HSV'
                            'YUV'
        OUTPUT:
                The image converted to the specified colorspace of shape=(image_x,image_y,3)
        """

        # Convert to CIELab
        if ( (colorspace == 'CIELab') or (colorspace == 'CIEL*a*b*') ):
            # This converts the rgb to XYZ where:
            # X is in [0, 95.05]
            # Y is in [0, 100]
            # Z is in [0, 108.9]
            # Then from XYZ to CIELab where: (DOES DEPEND ON THE WHITEPOINT!, here for default)
            # L is in [0, 100]
            # a is in [-431.034,  431.034] --> [-500*(1-16/116), 500*(1-16/116)]
            # b is in [-172.41379, 172.41379] --> [-200*(1-16/116), 200*(1-16/116)]
            image = color.rgb2lab(image)

        return image
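remap_CIELab (referenced in the docstring) is not shown. A hedged sketch of the kind of remapping it presumably performs, scaling each channel into [0, 1] using the bounds listed in the comments above:

def remap_lab_to_unit(lab):
    out = lab.copy()
    out[..., 0] /= 100.0                                       # L in [0, 100]
    out[..., 1] = (out[..., 1] + 431.034) / (2 * 431.034)      # a bounds from the comments above
    out[..., 2] = (out[..., 2] + 172.41379) / (2 * 172.41379)  # b bounds from the comments above
    return out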
Example #16
	def run(self, im, skin_thresh=[-1,1], n_peaks=3):
		'''
		im : color image
		'''
		im_skin = rgb2lab(im.astype(np.int16))[:,:,2]
		self.im_skin = im_skin
		# im_skin = skimage.exposure.equalize_hist(im_skin)
		# im_skin = skimage.exposure.rescale_intensity(im_skin, out_range=[0,1])
		im_skin *= im_skin > skin_thresh[0]
		im_skin *= im_skin < skin_thresh[1]

		skin_match_c = nd.correlate(-im_skin, self.hand_template)
		self.skin_match = skin_match_c

		# Display Predictions - Color Based matching
		optima = peak_local_max(skin_match_c, min_distance=20, num_peaks=n_peaks, exclude_border=False)
		# Visualize
		if len(optima) > 0:
			optima_values = skin_match_c[optima[:,0], optima[:,1]]
			optima_thresh = np.max(optima_values) / 2
			optima = optima.tolist()

			for i,o in enumerate(optima):
				if optima_values[i] < optima_thresh:
					optima.pop(i)
					break
		self.markers = optima

		return self.markers
Example #17
def LAB(img, k, filename):
    # print 'lab'
    # restructure image pixel values into range from 0 to 1 - needed for library
    img = img * 1.0 / MAX_COLOR_VAL

    # convert rgb to LAB
    pixels_lab = color.rgb2lab(img)
    # remove the L channel
    L = pixels_lab[:, :, 0]

    # reshape, cluster, and retrieve quantized values
    pixels_l = np.reshape(L, (L.shape[0] * L.shape[1], 1))
    clustered = cluster_pixels(pixels_l, k, (L.shape[0], L.shape[1]))
    pixels_lab[:, :, 0] = clustered[:, :, 0]

    # convert result to 255 RGB space
    quanted_img = color.lab2rgb(pixels_lab) * MAX_COLOR_VAL
    quanted_img = quanted_img.astype('uint8')

    fig = plt.figure(1)
    plt.imshow(quanted_img)
    plt.title("LAB quantization where k is " + str(k))
    plt.savefig('Q2/' + filename + '_LAB.png')
    plt.close(fig)
    return quanted_img
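cluster_pixels is defined elsewhere in this project. A minimal stand-in built on scikit-learn's KMeans that matches how it is called above (a sketch, not the original implementation):

import numpy as np
from sklearn.cluster import KMeans

def cluster_pixels(pixels, k, out_shape):
    # pixels: (num_pixels, 1) array of L values; returns shape out_shape + (1,)
    km = KMeans(n_clusters=k, n_init=10).fit(pixels)
    quantised = km.cluster_centers_[km.labels_]  # replace each pixel by its centroid
    return quantised.reshape(out_shape + (1,))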
Example #18
    def __init__(self, svbrdf):
        super().__init__(_load_shader('default.vert.glsl'),
                         _load_shader('svbrdf_colortransfer.frag.glsl'),
                         has_texture=True)
        from skimage import color
        print('Converting diffuse map to Lab')
        diff_map_lab = np.clip(svbrdf.diffuse_map, 0, 1)
        diff_map_lab = color.rgb2lab(diff_map_lab).astype(dtype=np.float32)
        self.diff_map_mean = diff_map_lab.mean(axis=(0, 1))
        self.diff_map_std = diff_map_lab.std(axis=(0, 1))
        diff_map_lab = (diff_map_lab - self.diff_map_mean) / self.diff_map_std
        self.spec_scale = 1
        self.spec_shape_scale = 1

        self.alpha = svbrdf.alpha
        self.diff_map = Texture2D(diff_map_lab,
                                  interpolation='linear',
                                  wrapping='repeat',
                                  internalformat='rgb32f')
        self.spec_map = Texture2D(svbrdf.specular_map,
                                  interpolation='linear',
                                  wrapping='repeat',
                                  internalformat='rgb32f')
        self.spec_shape_map = Texture2D(svbrdf.spec_shape_map,
                                        wrapping='repeat',
                                        internalformat='rgb32f')
        self.normal_map = Texture2D(svbrdf.normal_map,
                                    interpolation='linear',
                                    wrapping='repeat',
                                    internalformat='rgb32f')
Example #19
def color2gray(image, itns):

  global width
  global height
  global lab

  width = image.shape[1]
  height = image.shape[0]
 
  # Convert rgb to lab color space
  lab = color.rgb2lab(image)

  g0 = lab[:, :, 0]
  g0 = g0.astype(np.uint8)
  g0 = g0.flatten()

  # Solve Least square Optimization
  res = minimize(objective, g0, method='BFGS', jac=objective_der, options={'maxiter':itns, 'disp': True})
  
  output = res.x.reshape(height, width)
  output = output.astype(np.uint8)

  output += 50

  return output
Example #20
def plot_lstar(colors, ax, xoffset=0, show=False):
    """Plot the color map in the ab plane of the L*a*b* color space

    Args:
        colors (Colormap or ndarray): colors as either a matplotlib
            Colormap or an Nx3 or Nx4 ndarray or rgb(a) data
        ax: matplotlib axes
        xoffset (int): number to add to the x axis, useful when
            plotting on an axis with other lstar data
        show (bool): call ax.figure.show() before returning
    """
    from skimage.color import rgb2lab
    cmap = to_linear_cmap('null', colors)
    rgba = to_rgba(colors)
    rgb = rgba[:, :3]
    l = rgb2lab(rgb.reshape((-1, 1, 1, 3)))[:, 0, 0, 0]
    x = np.arange(len(l))
    ax.scatter(x + xoffset, l, c=x, s=25, cmap=cmap, linewidths=0.0)
    ax.set_ylabel("L$^*$")

    # # find and plot the density of levels
    # dli = 1.0 / (l[1:] - l[:-1])
    # dli = 100.0 * (dli - np.min(dli)) / np.max(dli - np.min(dli))
    # ax.plot(x[:-1], dli, 'k-')
    if show:
        ax.figure.show()
Example #21
def plot_color_gradients(cmap_category, cmap_list):
    fig, axes = plt.subplots(nrows=nrows, ncols=2)
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.2, right=0.99, wspace=0.05)
    fig.suptitle(cmap_category + ' colormaps', fontsize=14, y=1.0, x=0.6)

    for ax, name in zip(axes, cmap_list):

        # Get rgb values for colormap
        rgb = cm.get_cmap(plt.get_cmap(name))(x)[np.newaxis,:,:3]

        # Get colormap in CIE LAB. We want the L here.
        lab = color.rgb2lab(rgb)
        L = lab[0,:,0]
        L = np.float32(np.vstack((L, L, L)))

        ax[0].imshow(gradient, aspect='auto', cmap=plt.get_cmap(name))
        ax[1].imshow(L, aspect='auto', cmap='binary_r', vmin=0., vmax=100.)
        pos = list(ax[0].get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3]/2.
        fig.text(x_text, y_text, name, va='center', ha='right', fontsize=10)

    # Turn off *all* ticks & spines, not just the ones with colormaps.
    for ax in axes:
        ax[0].set_axis_off()
        ax[1].set_axis_off()
Example #22
def add_lab_stats(pic, sp_mask, features):
    lab = color.rgb2lab(pic)
    layered = to_layered(lab)
    add_statistics(layered[0], sp_mask, features, 'lab_l')
    add_statistics(layered[1], sp_mask, features, 'lab_a')
    add_statistics(layered[2], sp_mask, features, 'lab_b')
    return
Example #23
    def test_rgb_lch_roundtrip(self):
        rgb = img_as_float(self.img_rgb)
        lab = rgb2lab(rgb)
        lch = lab2lch(lab)
        lab2 = lch2lab(lch)
        rgb2 = lab2rgb(lab2)
        assert_array_almost_equal(rgb, rgb2)
Example #24
File: FH.py Project: haohao200609/Hybrid
def FH_Seg(filename,labelMask,minSize,c):
    rgbImg = io.imread(filename)
    width=rgbImg.shape[1]
    height=rgbImg.shape[0]
    labImg = color.rgb2lab(rgbImg)
    smooth_img=filter.gaussian_filter(labImg,sigma=0.8,multichannel=True)
    labelList=unique(labelMask)
    numSeg=len(labelList)
    pixelSet=UnionSet(width*height)
    threshold=[1.0/c]*(width*height)
    for currLabel in labelList:
        edgeList=creatEdgeList(smooth_img,labelMask,currLabel)
        segment_graph(edgeList,pixelSet,threshold,c,minSize)

    """
    现在已经有了pixelSet,接下来把每个labelMask标记成root的index,之后,用uniqueList找到对应的独立
    item,然后再用item的index(0:segNum)代替label中的不规则编号
    """
    segNum=pixelSet.setNum()
    mySegNum=0
    for i in range(width*height):
        if i==pixelSet.find(i):
            pixelSet.setNodeLabel(i,mySegNum)
            mySegNum+=1

    FH_Label=numpy.zeros((height,width))
    for y in range(height):
        for x in range(width):
            index=y*width+x
            FH_Label[y,x]=pixelSet.segLabel(index)

    return FH_Label
Example #25
    def generate_features(self):
        # prepare variables
        img_lab = rgb2lab(self._img)
        segments = slic(img_lab, n_segments=500, compactness=30.0, convert2lab=False)
        max_segments = segments.max() + 1

        # create x,y feature
        shape = self._img.shape
        a = shape[0]
        b = shape[1]
        x_axis = np.linspace(0, b - 1, num=b)
        y_axis = np.linspace(0, a - 1, num=a)

        x_coordinate = np.tile(x_axis, (a, 1,))  # x-coordinate grid
        y_coordinate = np.tile(y_axis, (b, 1,))  # y-coordinate grid
        y_coordinate = np.transpose(y_coordinate)

        coordinate_segments_mean = np.zeros((max_segments, 2))

        # create lab feature
        img_l = img_lab[:, :, 0]
        img_a = img_lab[:, :, 1]
        img_b = img_lab[:, :, 2]

        img_segments_mean = np.zeros((max_segments, 3))

        for i in range(max_segments):
            segments_i = segments == i

            coordinate_segments_mean[i, 0] = x_coordinate[segments_i].mean()
            coordinate_segments_mean[i, 1] = y_coordinate[segments_i].mean()

            img_segments_mean[i, 0] = img_l[segments_i].mean()
            img_segments_mean[i, 1] = img_a[segments_i].mean()
            img_segments_mean[i, 2] = img_b[segments_i].mean()

        # element distribution
        wc_ij = np.exp(-cdist(img_segments_mean, img_segments_mean) ** 2 / (2 * self._sigma_distribution ** 2))
        wc_ij = wc_ij / wc_ij.sum(axis=1)[:, None]
        mu_i = np.dot(wc_ij, coordinate_segments_mean)
        distribution = np.dot(wc_ij, np.linalg.norm(coordinate_segments_mean - mu_i, axis=1) ** 2)
        distribution = normalize(distribution)
        distribution = np.array([distribution]).T

        # element uniqueness feature
        wp_ij = np.exp(
            -cdist(coordinate_segments_mean, coordinate_segments_mean) ** 2 / (2 * self._sigma_uniqueness ** 2))
        wp_ij = wp_ij / wp_ij.sum(axis=1)[:, None]
        uniqueness = np.sum(cdist(img_segments_mean, img_segments_mean) ** 2 * wp_ij, axis=1)
        uniqueness = normalize(uniqueness)
        uniqueness = np.array([uniqueness]).T

        # save features and variables
        self.img_lab = img_lab
        self.segments = segments
        self.img_segments_mean = img_segments_mean
        self.coordinate_segments_mean = coordinate_segments_mean
        self.uniqueness = uniqueness
        self.distribution = distribution
Example #26
File: AP.py Project: w4k2/HSSA
    def computeChannels(self):
        channelNames = []
        # First, the colors
        # red
        filterLength = len(self.epf.filter)

        rgb = np.dstack((
            np.mean(self.cube[:,:,range(
                0,
                1 * filterLength // 3)], axis = 2),
            np.mean(self.cube[:,:,range(
                1 * filterLength // 3,
                2 * filterLength // 3)], axis = 2),
            np.mean(self.cube[:,:,range(
                2 * filterLength // 3,
                3 * filterLength // 3)], axis = 2)))
        channelNames.extend(['red', 'green', 'blue'])

        # Later, CIELab conversion
        cie = color.rgb2lab(rgb)
        channelNames.extend(['CIE L*', 'CIE a*', 'CIE b*'])

        #quartiles
        q1 = np.percentile(self.cube, 25, axis = 2)
        median = np.median(self.cube, axis = 2)
        q3 = np.percentile(self.cube, 75, axis = 2)
        interquartileRange = q3 - q1
        channelNames.extend(['1st quartile', 'median', '3rd quartile', 'Interquartile range'])

        # Regular stats
        # minimum
        minimum = np.amin(self.cube, axis = 2)
        channelNames.append('minimum')
        # maximum
        maximum = np.amax(self.cube, axis = 2)
        channelNames.append('maximum')
        # mean
        mean = np.mean(self.cube, axis = 2)
        channelNames.append('mean')
        # stdDev
        std = np.std(self.cube, axis = 2)
        channelNames.append('standard deviation')
        # variance
        var = np.var(self.cube, axis = 2)
        channelNames.append('variance')

        # Collecting
        channels = np.dstack((
            rgb, cie,
            q1, median, q3,
            interquartileRange,
            minimum,
            maximum,
            mean,
            std,
            var
        ))

        return (channels, channelNames)
Example #27
def colors_peripheral_vs_central(image_roi, attrs={}, debug=False):
    image_roi, center = pad_for_rotation(image_roi)
    lesion_mask = image_roi[..., 3]

    goal = lesion_mask.sum() * 0.7
    inner = lesion_mask.copy()
    while inner.sum() > goal:
        inner = binary_erosion(inner, disk(1))
    outer = np.logical_and(lesion_mask, np.logical_not(inner))

    if debug:
        print """\
=== Colors Peripheral vs Central ===
lesion area: %d
inner goal: %d
inner area: %d
outer area: %d
""" % (lesion_mask.sum(), goal, inner.sum(), outer.sum())

    if debug:
        plt.subplot(131)
        plt.imshow(lesion_mask)
        plt.subplot(132)
        plt.imshow(inner)
        plt.subplot(133)
        plt.imshow(outer)
        plt.show()

    outer = np.nonzero(outer)
    inner = np.nonzero(inner)

    image_lab = rgb2lab(image_roi[..., :3])
    L, a, b = np.dsplit(image_lab, 3)

    delta_L = np.mean(L[outer]) - np.mean(L[inner])
    delta_a = np.mean(a[outer]) - np.mean(a[inner])
    delta_b = np.mean(b[outer]) - np.mean(b[inner])

    density_L = (
        np.histogram(L[outer], 100, (0.,100.), density=True)[0] *
        np.histogram(L[inner], 100, (0.,100.), density=True)[0]
    ).sum()
    density_a = (
        np.histogram(a[outer], 254, (-127.,127.), density=True)[0] *
        np.histogram(a[inner], 254, (-127.,127.), density=True)[0]
    ).sum()
    density_b = (
        np.histogram(b[outer], 254, (-127.,127.), density=True)[0] *
        np.histogram(b[inner], 254, (-127.,127.), density=True)[0]
    ).sum()

    attrs.update([
        ('Colors PvsC mean difference L', delta_L),
        ('Colors PvsC mean difference a', delta_a),
        ('Colors PvsC mean difference b', delta_b),
        ('Colors PvsC density baysian L', density_L),
        ('Colors PvsC density baysian a', density_a),
        ('Colors PvsC density baysian b', density_b),
    ])
Example #28
def predictIMG(filen):    
    window_size=21    
    
    imgtest=plt.imread(folder_path+'images/'+filen)
    

    img_lab=color.rgb2lab(imgtest)
    ## CIELAB ranges: L in [0, 100], A in [-86.185, 98.254], B in [-107.863, 94.482]
    ## normalise each channel to the range [0, 1]
    img_lab[:,:,0]=newRange(img_lab[:,:,0],0.,1.,0.,100.)
    img_lab[:,:,1]=newRange(img_lab[:,:,1],0.,1.,-86.185,98.254)
    img_lab[:,:,2]=newRange(img_lab[:,:,2],0.,1.,-107.863,94.482)

    
    
    height,width,channel=numpy.shape(img_lab)

    mat=sio.loadmat(folder_path+'features/'+filen[:-4]+'.mat')
    feat_vect=mat['cnn']  

    predict=Categoriser.predict_proba(feat_vect)
    ipl_list=list()
    for a in predict:
        ipl_list.append(a[0,1])
        
    ILP=numpy.array(ipl_list)
    
    ##soften the prior by alpha [1...5]
    alpha=1
    ILP=ILP**alpha
    
    predicted_imgILP=numpy.zeros(shape=numpy.shape(imgtest))  
    
    ##pad image
    img_lab=padded_image(img_lab)
 
    for c in range(0,width-(window_size)+20):
        for r in range(0,height-(window_size)+20):

            
            img_c=c
            img_r=r
 
            predict=numpy.zeros(shape=(len(tree),num_class))
            for t in range(len(tree)):
                val=predictSTF_tree.predict(tree[t],img_lab[r:r+window_size,c:c+window_size,:])

                predict[t,:]=val/numpy.sum(val,axis=1)
            
            #print predict            
            STF_ILP=numpy.mean(predict,axis=0)*ILP            
            predict_classILP=STF_ILP.argmax()
                           
            predicted_imgILP[img_r,img_c,0]=class_list[predict_classILP][0]/255.
            predicted_imgILP[img_r,img_c,1]=class_list[predict_classILP][1]/255.
            predicted_imgILP[img_r,img_c,2]=class_list[predict_classILP][2]/255.
            

    plt.imsave(folder_path+'results/{0}.png'.format(filen[:-4]),predicted_imgILP)
Example #29
    def read_image(self, image_file):
        # self.result = None
        self.image_loaded = True
        self.image_file = image_file
        print(image_file)
        im_bgr = cv2.imread(image_file)
        self.im_full = im_bgr.copy()
        # get image for display
        h, w, c = self.im_full.shape
        max_width = max(h, w)
        r = self.win_size / float(max_width)
        self.scale = float(self.win_size) / self.load_size
        print('scale = %f' % self.scale)
        rw = int(round(r * w / 4.0) * 4)
        rh = int(round(r * h / 4.0) * 4)

        self.im_win = cv2.resize(self.im_full, (rw, rh), interpolation=cv2.INTER_CUBIC)

        self.dw = int((self.win_size - rw) // 2)
        self.dh = int((self.win_size - rh) // 2)
        self.win_w = rw
        self.win_h = rh
        self.uiControl.setImageSize((rw, rh))
        im_gray = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2GRAY)
        self.im_gray3 = cv2.cvtColor(im_gray, cv2.COLOR_GRAY2BGR)

        self.gray_win = cv2.resize(self.im_gray3, (rw, rh), interpolation=cv2.INTER_CUBIC)
        im_bgr = cv2.resize(im_bgr, (self.load_size, self.load_size), interpolation=cv2.INTER_CUBIC)
        self.im_rgb = cv2.cvtColor(im_bgr, cv2.COLOR_BGR2RGB)
        lab_win = color.rgb2lab(self.im_win[:, :, ::-1])

        self.im_lab = color.rgb2lab(im_bgr[:, :, ::-1])
        self.im_l = self.im_lab[:, :, 0]
        self.l_win = lab_win[:, :, 0]
        self.im_ab = self.im_lab[:, :, 1:]
        self.im_size = self.im_rgb.shape[0:2]

        self.im_ab0 = np.zeros((2, self.load_size, self.load_size))
        self.im_mask0 = np.zeros((1, self.load_size, self.load_size))
        self.brushWidth = 2 * self.scale

        self.model.load_image(image_file)

        if (self.dist_model is not None):
            self.dist_model.set_image(self.im_rgb)
            self.predict_color()
Example #30
File: slic.py Project: hagisgit/SLIC
    def sync_buffers_from_rgb(self):
        rgb = self.rgb.value
        lab = color.rgb2lab(rgb[:,:,:3])
        lab_ = np.zeros((lab.shape[0], lab.shape[1], 4), dtype=np.float64)
        lab_[:,:,:3] = lab[:,:,:3]
        if self.label_map_np is not None:
            lab_[:,:,3] = self.label_map_np[:,:]
        self.lab.value = lab_
Example #31
def run():
    while (cap.isOpened()):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if ret:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            faces = faceCascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags = cv2.CASCADE_SCALE_IMAGE
            )

            #print("Found {0} faces!".format(len(faces)))


            #forehead = None
            L = 0
            A = 0
            B = 0

            for (x, y, w, h) in faces:
                rows = 0
                cols = 0

                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
                # gets forehead from image and turns into lab

                # MIDDLE FACE REGION
                forehead = frame[y:y+150, x:x+w]
                rgb_forehead = cv2.cvtColor(forehead, cv2.COLOR_BGR2RGB)
                lab_forehead = color.rgb2lab(rgb_forehead)
                #
                rows = len(lab_forehead)
                cols = len(lab_forehead[0])
                for i in range(rows):
                    for j in range(cols):
                        #for w in range(len(lab_forehead[0][0])):
                        #    print(lab_forehead[i][j][w])
                        L += lab_forehead[i][j][0]
                        A += lab_forehead[i][j][1]
                        B += lab_forehead[i][j][2]
                numpix = rows*cols
                L = L/(numpix)
                A = A/(numpix)
                B = B/(numpix)

                # print("Lightness: ", L)
                # print("green-red: ", A)
                # print("blue-yellow: ", B)
            print(L)
            print(A)
            print(B)
            print("\n")

                # do we want L ??
                # # gets left cheek from image and turns into lab
                # left_cheek = image[y+150:y+h, x:x+100]
                # rgb_left_cheek = cv2.cvtColor(left_cheek, cv2.COLOR_BGR2RGB)
                # lab_left_cheek = color.rgb2lab(rgb_left_cheek)
                # # gets right cheek from image and turns into lab
                # right_cheek = image[y+150:y+h, x+180:x+w]
                # rgb_right_cheek = cv2.cvtColor(right_cheek, cv2.COLOR_BGR2RGB)
                # lab_right_cheek = color.rgb2lab(rgb_right_cheek)

            cv2.imshow('Video', frame)

            #print(forehead)



            # Press Q on keyboard to exit
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        # Break the loop
        else:
            break
Example #32
File: script.py Project: Devotron/TPT-ML
def image_a_b_gen(batch_size):
    for batch in datagen.flow(Xtrain, batch_size=batch_size):
        lab_batch = rgb2lab(batch)
        X_batch = lab_batch[:, :, :, 0]
        Y_batch = lab_batch[:, :, :, 1:] / 128
        yield (X_batch.reshape(X_batch.shape + (1, )), Y_batch)
Example #33
net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)

(H_in,W_in) = net.blobs['data_l'].data.shape[2:] # get input shape
(H_out,W_out) = net.blobs['class8_ab'].data.shape[2:] # get output shape
net.blobs['Trecip'].data[...] = 6/np.log(10) # 1/T, set annealing temperature


# Loading the image

start = '00000001'
finish = '00004522'
for current_frame in range(int(start),int(finish)):
    print(current_frame)
    img_rgb = caffe.io.load_image('./png_frames/'+str(current_frame).zfill(8)+'.png')
    # img_rgb = caffe.io.load_image('./imgs/ILSVRC2012_val_00041580.JPEG')
    img_lab = color.rgb2lab(img_rgb) # convert image to lab color space
    img_l = img_lab[:,:,0] # pull out L channel
    (H_orig,W_orig) = img_rgb.shape[:2] # original image size


    # resize image to network input size
    img_rs = caffe.io.resize_image(img_rgb,(H_in,W_in)) # resize image to network input size
    img_lab_rs = color.rgb2lab(img_rs)
    img_l_rs = img_lab_rs[:,:,0]

    net.blobs['data_l'].data[0,0,:,:] = img_l_rs-50 # subtract 50 for mean-centering
    net.forward() # run network

    ab_dec = net.blobs['class8_ab'].data[0,:,:,:].transpose((1,2,0)) # this is our result
    ab_dec_us = sni.zoom(ab_dec,(1.*H_orig/H_out,1.*W_orig/W_out,1)) # upsample ab to the original image size
    img_lab_out = np.concatenate((img_l[:,:,np.newaxis],ab_dec_us),axis=2) # concatenate with the original L channel
Example #34
def to_color_space(image):
    return color.rgb2lab(image)
Example #35
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing import image
from skimage.color import rgb2lab

img_path = 'bnw.jpg'

reconstructed_model = tf.keras.models.load_model(
    "ImageColorization/trained_models_v1/Autoencoder-epoch-95-loss-0.003109.hdf5"
)

img = image.img_to_array(image.load_img(img_path))
h, w = img.shape[0], img.shape[1]
print(h, w)
plt.imshow(rgb2lab(img / 255.0)[:, :, 0])
plt.show()

img_color = []
img_resize = image.img_to_array(
    image.load_img(img_path, target_size=(256, 256, 3)))
img_color.append(img_resize)
img_color = np.array(img_color, dtype=float)
# print(img_color.shape)
img_color = rgb2lab(img_color / 255.0)[:, :, :, 0]
# print(img_color.shape)
img_color = img_color.reshape(img_color.shape + (1, ))
# print(img_color.shape)

output = reconstructed_model.predict(img_color)
# print(output.shape)
Example #36
def split_img_to_l_ab(img):
    lab = color.rgb2lab(img)
    img_l = lab[:, :, 0] / L_RANGE
    img_ab = lab[:, :, 1:] / AB_RANGE

    return img_l, img_ab
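The module-level constants L_RANGE and AB_RANGE are not part of this snippet; values consistent with the CIELAB ranges used throughout this page would be (an assumption):

L_RANGE = 100.0   # L* spans [0, 100]
AB_RANGE = 128.0  # a* and b* span roughly [-128, 128]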
Example #37
def rgb_to_lab(rgb):
    lab = color.rgb2lab(rgb)
    return lab  # L: 0~100
Example #38
def reshape_and_convert(X):
    X = X.reshape(1, -1, 3)
    X = rgb2lab(X)
    return X.reshape(-1, 3)
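The (1, -1, 3) reshape exists to satisfy skimage versions that insist on image-shaped input; the result is then flattened back to one Lab row per colour. A quick usage sketch (palette values are arbitrary):

import numpy as np

palette = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])  # red, green in [0, 1]
print(reshape_and_convert(palette))  # one (L, a, b) row per input colour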
Example #39
print('first train')
model.compile(optimizer='rmsprop', loss='mse')
model.fit_generator(image_a_b_gen(batch_size), epochs=EPOCHS, steps_per_epoch=1)
# model.fit_generator(generate_arrays_from_path(batch_size),epochs=EPOCHS,steps_per_epoch=1)
model.save(weight_file)

model = load_model(weight_file)
color_me = []
temp = []
for filename in os.listdir('./Test/'):
    temp = img_to_array(load_img('./Test/' + filename))
    temp = np.resize(temp, (512, 512, 3))
    color_me.append(temp)

color_me = np.array(color_me, dtype=float)
gray_me = gray2rgb(rgb2gray(1.0 / 255 * color_me))
color_me_embed = create_inception_embedding(gray_me)
color_me = rgb2lab(1.0 / 255 * color_me)[:, :, :, 0]
color_me = color_me.reshape(color_me.shape + (1,))

# Test model
output = model.predict([color_me, color_me_embed])
output = output * 128

# Output colorizations
for i in range(len(output)):
    cur = np.zeros((512, 512, 3))
    cur[:, :, 0] = color_me[i][:, :, 0]
    cur[:, :, 1:] = output[i]
    imsave("result/img_" + str(i) + ".png", lab2rgb(cur))
Example #40
def rgb2lab(in_img, mean_cent=False):
    from skimage import color
    img_lab = color.rgb2lab(in_img)
    if (mean_cent):
        img_lab[:, :, 0] = img_lab[:, :, 0] - 50
    return img_lab
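With mean_cent=True the L channel is shifted from [0, 100] to [-50, 50], matching the `img_l_rs - 50` mean-centering in Examples #1 and #33. A quick check (a sketch):

import numpy as np

lab = rgb2lab(np.random.rand(8, 8, 3), mean_cent=True)
assert lab[:, :, 0].min() >= -50 and lab[:, :, 0].max() <= 50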
Example #41
def rgb2lab(input):
    from skimage import color
    return color.rgb2lab(input / 255.)
Example #42
    def _get_lab0(self):
        rgb = img_as_float(self.img_rgb[:1, :1, :])
        return rgb2lab(rgb)[0, 0, :]
Example #43
    def test_lab_lch_roundtrip(self):
        rgb = img_as_float(self.img_rgb)
        lab = rgb2lab(rgb)
        lab2 = lch2lab(lab2lch(lab))
        assert_array_almost_equal(lab2, lab)
Example #44
def get_index(in_data):
    """
    Returns a quantised image with the ab color closest in gamut
    """
    expand_in_data = np.expand_dims(in_data, axis=1)
    distance = np.sum(np.square(expand_in_data - points), axis=2)
    return np.argmin(distance, axis=1)


for num, img_f in enumerate(filename_lists):
    img = imread(img_f)
    img = resize(img, (256, 256), preserve_range=True)

    # Make sure the image is rgb format
    if len(img.shape) != 3 or img.shape[2] != 3:
        continue
    img_lab = color.rgb2lab(img)  #[H, W, 3]
    img_lab = img_lab.reshape((-1, 3))  #[H*W, 3]

    img_ab = img_lab[:, 1:].astype(np.float64)  #[H*W, 2]

    nd_index = get_index(img_ab)
    for i in nd_index:
        i = int(i)
        probs[i] += 1
    print(num)

# Normalise the probability
probs = probs / np.sum(probs)  #[313,]
filename = 'prior_probs.npy'
np.save(filename, probs)
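Here `points` and `probs` are module-level variables not shown in the snippet. A plausible setup consistent with the 313-bin comment above (the pts_in_hull.npy file name follows Zhang et al.'s colorization repository and is an assumption):

import numpy as np

points = np.load('pts_in_hull.npy')  # (313, 2) quantised ab bin centres (assumed file)
probs = np.zeros(points.shape[0])    # accumulator, one count per bin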
Example #45
File: script.py Project: Devotron/TPT-ML
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()

# In[ ]:

# Save model
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
model.save_weights("model.h5")

# In[ ]:

# Test images
Xtest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 0]
Xtest = Xtest.reshape(Xtest.shape + (1, ))
Ytest = rgb2lab(1.0 / 255 * X[split:])[:, :, :, 1:]
Ytest = Ytest / 128
print(model.evaluate(Xtest, Ytest, batch_size=batch_size))

# In[ ]:

color_me = []
for filename in os.listdir(test_dataset_path):
    color_me.append(img_to_array(load_img(test_dataset_path + filename)))
color_me = np.array(color_me, dtype=float)
color_me = rgb2lab(1.0 / 255 * color_me)[:, :, :, 0]
color_me = color_me.reshape(color_me.shape + (1, ))

# Test model
Example #46
    def test_lab_rgb_roundtrip(self):
        img_rgb = img_as_float(self.img_rgb)
        assert_array_almost_equal(lab2rgb(rgb2lab(img_rgb)), img_rgb)
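The same round trip can be checked outside the test class; a self-contained sketch using only skimage and NumPy:

import numpy as np
from numpy.testing import assert_array_almost_equal
from skimage.color import rgb2lab, lab2rgb

rgb = np.random.rand(16, 16, 3)  # float RGB in [0, 1]
assert_array_almost_equal(lab2rgb(rgb2lab(rgb)), rgb)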
Example #47
def process_imgs(x, n):
    aux = color.rgb2lab(x[:n])
    res_x = aux[:n, :32, :32, 0].reshape(n, 32, 32, 1)
    res_y = aux[:n, :32, :32, 1:]
    return res_x, res_y
Example #48
def calc_RMSE(real_img, fake_img):
    # convert to LAB color space
    real_lab = rgb2lab(real_img)
    fake_lab = rgb2lab(fake_img)
    return real_lab - fake_lab
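Despite its name, calc_RMSE returns the per-pixel Lab difference; the caller presumably reduces it to a scalar. A sketch of that reduction (our helper, not the project's):

import numpy as np
from skimage.color import rgb2lab

def lab_rmse(real_img, fake_img):
    diff = rgb2lab(real_img) - rgb2lab(fake_img)
    return np.sqrt(np.mean(diff ** 2))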
Example #49
File: Lab.py Project: SamiaKabir/Pytorch

### Main training loop
for epoch in range(5):
    
    for i, data in enumerate(trainloader, 0):
        
        ## Getting the input and the target from the training set
        image, dummy = data


        for j in range(len(image)):
            rgb=image[j]
            rgb=np.array(rgb.data)
            rgb=np.transpose(rgb)
            lab= color.rgb2lab(rgb)
            lab=np.transpose(lab)
            lab=torch.from_numpy(lab)
            lab[:,:,0:1]=lab[:,:,0:1]/100
            # lab=lab.transpose(0,2)
            image[j].data=lab[:,:,:]

        # print(lab)
        # print(image.data)
        #set ground truth before downsampling
        target = image[:,0:1,:,:]    

        # preparing the data by downsampling and then upsampling
        image1=torch.nn.functional.interpolate(image,size=48,mode='bilinear')
        image2=torch.nn.functional.interpolate(image1,size=96,mode='bilinear')
    
Example #50
def performExperiment(eng, strExperimentName, dcCorpus):
    """
        Run a experiment
        
        Parameters
        ----
        eng: matlab engine instance.
        strExperimentName: experiment name.
        strRefImgPath: file path of reference image.
        dcCorpus: a dictionary of experiment data.
        
        
    """
       
    dcResult = {} # result of all sets 
    for strSetName, dcSetData in dcCorpus.items():
        print("Comparing img set: %s..." % strSetName)
        
        strRefImgPath = dcSetData[REF]
        lsCompImg = dcSetData[CMP]
        
        # load ref image
        arrRefImg = io.imread(strRefImgPath)
        arrRefImg_lab = color.rgb2lab(arrRefImg)
        
        dcSetResult = {} # set result
        for i, strCompImgPath in enumerate(lsCompImg):
            try:
                print("-->img: %s." % strCompImgPath)
                arrCompImg = io.imread(strCompImgPath)
                arrCompImg_lab = color.rgb2lab(arrCompImg)
                
                # PSNR
                print("computing PSNR...")
                dPSNR = measure.compare_psnr(arrRefImg, arrCompImg)
                
                # qssim
                print("computing QSSIM...")
                dQSSIM = eng.qssim(strRefImgPath, strCompImgPath)
                
                # Color difference
                print("computing CD...")
                dCD = computeColorDiff(arrRefImg_lab, arrCompImg_lab)
        
                
                # FISMc
                print("computing FISMc...")
                dFISM, dFISMc = eng.FeatureSIM(strRefImgPath, strCompImgPath,\
                                               nargout=2)
            
                # add to result
                dcCompResult = {PSNR: dPSNR,
                                QSSIM: dQSSIM,
                                COLOR_DIFF: dCD,
                                FISMC: dFISMc}
                dcSetResult[strCompImgPath] = dcCompResult
                print(dcCompResult)
            except Exception as e:
                print(e.strerror)
        # statistics of set result
        dfSetResult = pd.DataFrame.from_dict(dcSetResult, orient='index')
        dfSetResult.to_csv('../../data/evaluation/temp_result/%s_%s.csv' % \
                           (strExperimentName, strSetName) )
        
        srSetMean = dfSetResult.mean()
        srSetStd = dfSetResult.std()
        srSetStd.rename(lambda x: x+"_"+STD, inplace=True)
        dcResult[strSetName] = srSetMean.append(srSetStd)
        
    
    dfResult = pd.DataFrame.from_dict(dcResult, orient='index')
    print("Experiment is finished.")
    return dfResult
Example #51
cedgedir = '../data/example/coloredge'
cannydir = '../data/example/cannyedge'
unsharpdir = '../data/example/unsharp'
bilateraldir = '../data/example/bilateral'

make_dir(edgedir)
make_dir(cedgedir)
make_dir(cannydir)
make_dir(unsharpdir)
make_dir(bilateraldir)

for n, f in enumerate(dirlist):
    if not f.endswith('.jpg'):
        continue
    rgbim = image_load(f, indir, flatten=False)
    labim = rgb2lab(rgbim)
    greyim = rgb2grey(rgbim)
    edgeim = sobel(greyim)
    cedgeim = numpy.zeros(greyim.shape)
    cedgeim = cedgeim + sobel(labim[:, :, 0])
    cedgeim = cedgeim + sobel(labim[:, :, 1])
    cedgeim = cedgeim + sobel(labim[:, :, 2])
    cannyim = canny(greyim)
    unsharpim = unsharp(rgbim)
    bilateralim = denoise_bilateral(rgbim, sigma_range=0.5, win_size=10)
    image_save(edgeim, f, edgedir)
    image_save(cedgeim, f, cedgedir)
    image_save(cannyim, f, cannydir)
    image_save(unsharpim, f, unsharpdir)
    image_save(bilateralim, f, bilateraldir)
Example #52
def export(data_loader, model, mean, std, args):
    all_des = []
    all_ssim = []
    all_psnr = []
    with torch.no_grad():
        for i, (img_readies, img_target, img_paths) in enumerate(data_loader):
            img_readies = img_readies.cuda()
            out_rgb = model(img_readies)
            out_rgb = out_rgb[0]
            out_rgb = out_rgb[args.out_colour_space].detach().cpu()
            img_readies = img_readies.detach().cpu()

            for img_ind in range(out_rgb.shape[0]):
                img_path = img_paths[img_ind]
                print(img_path)
                img_ready = img_readies[img_ind].unsqueeze(0)

                org_img_tmp = inv_normalise_tensor(img_ready, mean, std)
                org_img_tmp = org_img_tmp.numpy().squeeze().transpose(1, 2, 0)
                # org_img.append(org_img_tmp)

                if args.in_colour_space == 'lab':
                    org_img_tmp = np.uint8(org_img_tmp * 255)
                    org_img_tmp = cv2.cvtColor(org_img_tmp, cv2.COLOR_LAB2RGB)
                elif args.in_colour_space == 'hsv':
                    org_img_tmp = colour_spaces.hsv012rgb(org_img_tmp)
                elif args.in_colour_space == 'lms':
                    org_img_tmp = colour_spaces.lms012rgb(org_img_tmp)
                elif args.in_colour_space == 'yog':
                    org_img_tmp = colour_spaces.yog012rgb(org_img_tmp)
                elif args.in_colour_space == 'dkl':
                    org_img_tmp = colour_spaces.dkl012rgb(org_img_tmp)
                else:
                    org_img_tmp = normalisations.uint8im(org_img_tmp)

                # if os.path.exists(img_path.replace(cat_in_dir, rgb_dir)):
                #     rec_rgb_tmp = cv2.imread(
                #         img_path.replace(cat_in_dir, rgb_dir))
                #     rec_rgb_tmp = cv2.cvtColor(rec_rgb_tmp, cv2.COLOR_BGR2RGB)
                # else:
                rec_img_tmp = inv_normalise_tensor(
                    out_rgb[img_ind].unsqueeze(0), mean, std)
                rec_img_tmp = rec_img_tmp.numpy().squeeze().transpose(1, 2, 0)
                rec_img_tmp = cv2.resize(
                    rec_img_tmp, (org_img_tmp.shape[1], org_img_tmp.shape[0]))
                if args.out_colour_space == 'lab':
                    rec_img_tmp = np.uint8(rec_img_tmp * 255)
                    rec_img_tmp = cv2.cvtColor(rec_img_tmp, cv2.COLOR_LAB2RGB)
                elif args.out_colour_space == 'hsv':
                    rec_img_tmp = colour_spaces.hsv012rgb(rec_img_tmp)
                elif args.out_colour_space == 'lms':
                    rec_img_tmp = colour_spaces.lms012rgb(rec_img_tmp)
                elif args.out_colour_space == 'yog':
                    rec_img_tmp = colour_spaces.yog012rgb(rec_img_tmp)
                elif args.out_colour_space == 'dkl':
                    rec_img_tmp = colour_spaces.dkl012rgb(rec_img_tmp)
                else:
                    rec_img_tmp = normalisations.uint8im(rec_img_tmp)

                ssim = metrics.structural_similarity(org_img_tmp,
                                                     rec_img_tmp,
                                                     multichannel=True)
                all_ssim.append(ssim)
                psnr = metrics.peak_signal_noise_ratio(org_img_tmp,
                                                       rec_img_tmp)
                all_psnr.append(psnr)

                if args.de:
                    img_org = color.rgb2lab(org_img_tmp)
                    img_res = color.rgb2lab(rec_img_tmp)
                    de = color.deltaE_ciede2000(img_org, img_res)
                    all_des.append([np.mean(de), np.median(de), np.max(de)])

            np.savetxt(args.out_dir + '/ssim_' + args.colour_space + '.txt',
                       np.array(all_ssim))
            np.savetxt(args.out_dir + '/psnr_' + args.colour_space + '.txt',
                       np.array(all_psnr))
            if args.de:
                np.savetxt(args.out_dir + '/de_' + args.colour_space + '.txt',
                           np.array(all_des))
Example #53
    def do_Poisson_reconstruct(self):
        if not self.do_poisson_reconstruct:
            print('skip and return')
            return

        print('do_Poisson_reconstruct')
        cur_dir = os.getcwd()

        if not os.path.exists(self.poisson_reconstruct_data_dir):
            os.mkdir(self.poisson_reconstruct_data_dir)


#         if self.use_local_context_ftr:
        local_context_paras = self.tr_batches_meta['local_context_paras']

        num_in_imgs = len(self.test_imgs)
        gradmag_transform_dim = 2

        gradmag_L2_dist = np.zeros((num_in_imgs))

        new_img_save_dir = \
        os.path.join(self.poisson_reconstruct_data_dir, '%f_%f' % (self.ratio_on_edge, self.ratio_off_edge))
        if not os.path.exists(new_img_save_dir):
            os.mkdir(new_img_save_dir)

        for i in range(num_in_imgs):
            print('processing %d th out of %d images: %s' % (i, num_in_imgs,
                                                             self.test_imgs[i]))
            out_img_data_dir = os.path.join\
            (self.poisson_reconstruct_data_dir, self.test_imgs[i][:-4])
            if not os.path.exists(out_img_data_dir):
                os.mkdir(out_img_data_dir)

            os.chdir(out_img_data_dir)

            if self.fredo_image_processing == 1:
                in_img = read_tiff_16bit_img_into_LAB\
                (os.path.join(self.in_img_dir, self.test_imgs[i]), 1.5, False)
                gt_enh_img = read_tiff_16bit_img_into_LAB\
                (os.path.join(self.enh_img_dir, self.test_imgs[i]), 0, False)
            else:
                in_img = read_tiff_16bit_img_into_LAB\
                (os.path.join(self.in_img_dir, self.test_imgs[i]))
                gt_enh_img = read_tiff_16bit_img_into_LAB\
                (os.path.join(self.enh_img_dir, self.test_imgs[i]))
            assert in_img.shape == gt_enh_img.shape
            h, w, ch = in_img.shape[0], in_img.shape[1], in_img.shape[2]

            in_img_L = in_img[:, :, 0]
            in_img_grady_L, in_img_gradx_L = np.gradient(in_img_L)
            in_img_gradmag_L = np.sqrt(in_img_grady_L**2 + in_img_gradx_L**2)
            gt_enh_img_L = gt_enh_img[:, :, 0]
            gt_enh_img_grady_L, gt_enh_img_gradx_L = np.gradient(gt_enh_img_L)
            gt_enh_img_gradmag_L = np.sqrt(gt_enh_img_grady_L**2 +
                                           gt_enh_img_gradx_L**2)

            pred_enh_img_path = os.path.join(self.out_img_dir,
                                             self.test_imgs[i][:-4] + '.png')
            pred_enh_im_sRGB = scipy.misc.imread(pred_enh_img_path)
            pred_enh_im_Lab = color.rgb2lab(pred_enh_im_sRGB)

            # normalize L to [0,1],normalize a,b channels to [-1,1]
            normalizer = np.array([1.0 / 100, 1.0 / 128.0, 1.0 / 128.0])
            in_img_normed = in_img * normalizer[np.newaxis, np.newaxis, :]
            pred_enh_im_Lab_normed = pred_enh_im_Lab * normalizer[
                np.newaxis, np.newaxis, :]

            img_edge_mat_file_path = \
            os.path.join(self.ori_img_edge_folder, self.test_imgs[i][:-4] + '_edge.mat')
            edge_pix = scipy.io.loadmat(img_edge_mat_file_path)
            edge_pix_y, edge_pix_x = np.int32(edge_pix['edge_pix_y'] - 1), \
            np.int32(edge_pix['edge_pix_x'] - 1)  # note, matlab uses 1-based index
            edge_pix_y = edge_pix_y.reshape((edge_pix_y.shape[0]))
            edge_pix_x = edge_pix_x.reshape((edge_pix_x.shape[0]))
            assert edge_pix_y.shape == edge_pix_x.shape
            edge_pix_x, edge_pix_y = \
            get_extended_edge_pixel(edge_pix_x, edge_pix_y, h, w, extend_width=1)
            gt_enh_edge_pix_gradmag_L = gt_enh_img_gradmag_L[edge_pix_y,
                                                             edge_pix_x]

            # prepare data for neural network
            batch_size = edge_pix_x.shape[0]
            print('batch_size', batch_size)
            edge_pix_L = in_img_L[edge_pix_y, edge_pix_x]
            edge_pix_gradmag_L = in_img_gradmag_L[edge_pix_y, edge_pix_x]
            if self.in_img_precomputed_context_ftr_dir:
                precomputed_context_ftr_path = os.path.join\
                (self.in_img_precomputed_context_ftr_dir, self.test_imgs[i][:-4] + '_context_ftr.mat')
                precomputed_context_ftr = scipy.io.loadmat(
                    precomputed_context_ftr_path)
                precomputed_context_ftr = precomputed_context_ftr[
                    'context_ftr']
                edge_pix_context_ftr = precomputed_context_ftr[edge_pix_y,
                                                               edge_pix_x, :]
            else:
                '''compute context ftr on the fly'''
                context_map = \
                scipy.io.loadmat(os.path.join(self.sem_integral_map_dir, self.semIntegralMapFiles[i]))
                context_map = context_map['maps'].reshape(
                    (local_context_paras['label_num']))

                edge_pix_context_ftr = np.zeros\
                ((edge_pix_x.shape[0], local_context_paras['ftr_dim']), dtype=np.single)
                getPixLocContextV2\
                ([edge_pix_x + LOCAL_CONTEXT_MAP_APPEND_WIDTH,
                  edge_pix_y + LOCAL_CONTEXT_MAP_APPEND_WIDTH, \
                  context_map, local_context_paras, self.pool, edge_pix_context_ftr, self.num_proc])

            data = self.train_data_provider.prepare_batch_data\
            (edge_pix_L.reshape((1, batch_size)), \
             [[self.in_img_id[i]] * batch_size, edge_pix_gradmag_L.reshape((1, batch_size)), edge_pix_context_ftr.transpose()])
            gradmag_transform = np.zeros((batch_size, gradmag_transform_dim),
                                         dtype=np.single)
            print('start feature writer')
            st_time = time.time()
            self.libmodel.startFeatureWriter(data + [gradmag_transform],
                                             self.label_layer_gradmag_idx)
            self.finish_batch()
            elapsed = time.time() - st_time
            print('startFeatureWriter elapsed time:%f' % elapsed)

            pred_edge_pix_gradmag_L = np.exp(gradmag_transform[:, 0] * np.log(edge_pix_L) + gradmag_transform[:, 1])\
            *edge_pix_gradmag_L
            np.max(pred_edge_pix_gradmag_L), np.mean(pred_edge_pix_gradmag_L)
            gradmag_L2_dist[i] = np.mean(
                np.abs(pred_edge_pix_gradmag_L - gt_enh_edge_pix_gradmag_L))
            print('prediction mean L2 distance between predicted and groundtruth grad-mag on edge pixels',
                  gradmag_L2_dist[i])
            # load predicted enhanced color image. write it to txt
            self.writeEnhancedLabimgToTxt(pred_enh_im_Lab_normed,
                                          out_img_data_dir)
            # write edge pixel mask to txt
            self.writeImgBinaryEdgeMaskToTxt(edge_pix_x, edge_pix_y, h, w,
                                             out_img_data_dir)

            gaus_smooth_sigma = 1.5
            grad_xy = get_color_gradient(in_img_normed, central_diff=True)
            grady_L, gradx_L = grad_xy[:, :, 3], grad_xy[:, :, 0]
            grady_a, gradx_a = grad_xy[:, :, 4], grad_xy[:, :, 1]
            grady_b, gradx_b = grad_xy[:, :, 5], grad_xy[:, :, 2]

            grad_mag_L, grad_angle_L = compute_grad_mag_angle(gradx_L, grady_L)
            grad_mag_a, grad_angle_a = compute_grad_mag_angle(gradx_a, grady_a)
            grad_mag_b, grad_angle_b = compute_grad_mag_angle(gradx_b, grady_b)

            pred_edge_pix_gradmag_L_normed = pred_edge_pix_gradmag_L / 100.0
            pred_enh_im_edge_pix_gradmag_L = grad_mag_L[edge_pix_y, edge_pix_x]
            diff = np.mean(
                np.abs(pred_edge_pix_gradmag_L_normed -
                       pred_enh_im_edge_pix_gradmag_L))
            print('mean normalized L2 distance between predicted and already enhanced grad-mag', diff)
            ''' compute divergence and write it to txt file '''
            grad_mag_target = np.zeros((h, w, ch), dtype=np.single)
            grad_angle_target = np.zeros((h, w, ch), dtype=np.single)

            grad_mag_target[:, :, 0] = grad_mag_L[:, :]
            grad_angle_target[:, :, 0] = grad_angle_L
            grad_mag_target[:, :, 1] = grad_mag_a[:, :]
            grad_angle_target[:, :, 1] = grad_angle_a[:, :]
            grad_mag_target[:, :, 2] = grad_mag_b[:, :]
            grad_angle_target[:, :, 2] = grad_angle_b[:, :]

            #             grad_mag_target[edge_pix_y, edge_pix_x, 0] = 0.9
            grad_mag_target[edge_pix_y, edge_pix_x,
                            0] = pred_edge_pix_gradmag_L_normed
            self.write_divergence_to_txt(grad_mag_target, grad_angle_target,
                                         out_img_data_dir)
            ''' copy 'PoissonRestruction.exe' into current folder '''
            command = \
            ['copy', os.path.join(self.checkpoints_dir, 'PoissonRestruction.exe'), "\"" + out_img_data_dir + "\""]
            command = " ".join(command)
            print('command', command)
            call(command, shell=True)
            print('copying is completed')
            new_enhanced_img_txt_nm = 'newLab.txt'
            command = \
            ['PoissonRestruction.exe', new_enhanced_img_txt_nm, '%f' % self.ratio_on_edge, '%f' % self.ratio_off_edge]
            command = " ".join(command)
            print('command: %s' % command)
            call(command, shell=True)
            command = ['del', 'PoissonRestruction.exe']
            command = " ".join(command)
            call(command, shell=True)

            new_img_Lab = read_Lab_txt_into_Lab_img(h, w,
                                                    new_enhanced_img_txt_nm)
            new_img_sRGB = color.lab2rgb(new_img_Lab)
            new_img_sRGB = clamp_sRGB_img(new_img_sRGB)
            # append outmost 1-pixel width region
            new_img_sRGB[0, 1:-1, :] = new_img_sRGB[1, 1:-1, :]
            new_img_sRGB[h - 1, 1:-1, :] = new_img_sRGB[h - 2, 1:-1, :]
            new_img_sRGB[1:-1, 0, :] = new_img_sRGB[1:-1, 1, :]
            new_img_sRGB[1:-1, w - 1, :] = new_img_sRGB[1:-1, w - 2, :]

            new_img_sRGB[0, 0, :] = new_img_sRGB[0, 1, :]
            new_img_sRGB[0, w - 1, :] = new_img_sRGB[0, w - 2, :]
            new_img_sRGB[h - 1, 0, :] = new_img_sRGB[h - 1, 1, :]
            new_img_sRGB[h - 1, w - 1, :] = new_img_sRGB[h - 1, w - 2, :]

            scipy.misc.imsave\
            (os.path.join(new_img_save_dir, self.test_imgs[i][:-4] + '.png'), new_img_sRGB)
            print('--------------------------------------------------------')

        summary_file_path = os.path.join(new_img_save_dir, 'summary')
        summ = {}
        summ['gradmag_L2_dist'] = gradmag_L2_dist
        summ['ratio_on_edge'] = self.ratio_on_edge
        summ['ratio_off_edge'] = self.ratio_off_edge
        pickle(summary_file_path, summ)

        os.chdir(cur_dir)
Example #54
def get_index(in_data):

    expand_in_data = np.expand_dims(in_data, axis=1)
    distance = np.sum(np.square(expand_in_data - points), axis=2)

    return np.argmin(distance, axis=1)


for num, img_f in enumerate(filename_lists):
    img = imread(img_f)
    img = resize(img, (256, 256), preserve_range=True)

    # Make sure the image is rgb format
    if len(img.shape) != 3 or img.shape[2] != 3:
        continue
    img_lab = color.rgb2lab(img)
    img_lab = img_lab.reshape((-1, 3))

    # img_ab (256^2, 2)
    img_ab = img_lab[:, 1:].astype(np.float64)

    nd_index = get_index(img_ab)
    for i in nd_index:
        i = int(i)
        probs[i] += 1
    print(num)

# Calculate probability of each bin
probs = probs / np.sum(probs)
#print(probs)
# Save the result
Example #55
    def forward(self, bottom, top):
        top[0].data[...] = color.rgb2lab(
            bottom[0].data[:, ::-1, :, :].astype('uint8').transpose(
                (2, 3, 0, 1))).transpose((2, 3, 0, 1))
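The transposes here shuffle a Caffe NCHW BGR blob into a channel-last RGB array for skimage and back. A minimal NumPy restatement of the same layout round trip (shapes are illustrative):

import numpy as np
from skimage import color

blob_bgr = np.random.randint(0, 256, (2, 3, 8, 8)).astype('uint8')  # (N, C, H, W), BGR
rgb_hwnc = blob_bgr[:, ::-1, :, :].transpose((2, 3, 0, 1))          # (H, W, N, C), RGB
lab_blob = color.rgb2lab(rgb_hwnc).transpose((2, 3, 0, 1))          # back to (N, C, H, W)
assert lab_blob.shape == blob_bgr.shape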
Example #56
from mcpi.minecraft import Minecraft
from skimage import io, color
import cmap

### load picture and map
selfie_rgb = io.imread("capture.jpg")
map_rgb = io.imread("cmap.png")

### Convert to Lab
selfie_lab = color.rgb2lab(selfie_rgb)
map_lab = color.rgb2lab(map_rgb)

### Talk to Minecraft
mc = Minecraft.create()

### Draw picture
cmap.draw(mc, selfie_lab, map_lab)
Example #57
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 26 21:26:45 2018

@author: Xiaopeng
"""

import matplotlib.pyplot as plt
from skimage import io, data, color

img = data.chelsea()
# Convert the image to the CIELab color space
lab = color.rgb2lab(img)
fig, axes = plt.subplots(2, 2, figsize=(7, 6))
ax0, ax1, ax2, ax3 = axes.ravel()

ax0.imshow(img)
ax0.set_title('Original image')

ax1.imshow(lab[:, :, 0], cmap=plt.cm.gray)
ax1.set_title('L')

ax2.imshow(lab[:, :, 1], cmap=plt.cm.gray)
ax2.set_title('A')

ax3.imshow(lab[:, :, 2], cmap=plt.cm.gray)
ax3.set_title('B')
for ax in axes.ravel():
    ax.axis('off')
fig.tight_layout()
Example #58
    def __call__(self, img):
        img = np.asarray(img, np.uint8)
        img = color.rgb2lab(img)
        return img
Example #59
    def test_rgb2lab_dtype(self):
        img = self.colbars_array.astype('float64')
        img32 = img.astype('float32')

        assert rgb2lab(img).dtype == img.dtype
        assert rgb2lab(img32).dtype == img32.dtype
Example #60
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, color
from sklearn import cluster


url = 'http://blogs.mathworks.com/images/steve/2010/mms.jpg'

import os
if not os.path.exists('mm.jpg'):
    print("Downloading M&M's...")
    from urllib.request import urlretrieve
    urlretrieve(url, 'mm.jpg')


print("Image I/O...")
mm = io.imread('mm.jpg')
mm_lab = color.rgb2lab(mm)
ab = mm_lab[..., 1:]

print("Mini-batch K-means...")
X = ab.reshape(-1, 2)
kmeans = cluster.MiniBatchKMeans(n_clusters=6)
y = kmeans.fit(X).labels_

labels = y.reshape(mm.shape[:2])
N = labels.max()


def no_ticks(ax):
    ax.set_xticks([])
    ax.set_yticks([])