Example #1
def getFeatures(fn):
    if isinstance(fn,str):
        cropped_pillow_im = crop_border(fn)
        # converting between PIL images and skimage ubyte is easy!
        pic = img_as_ubyte(cropped_pillow_im) # uint8
    else:
        pic = fn
    if pic.shape[1] < 50:
        return None, None # too narrow
    hsv = rgb2hsv(pic)
    hues = hsv[:,:,0]
    brightness = hsv[:,:,2] # Value channel in HSV
    # std, mean, median
    std = np.std(hues)
    diff_avg = abs(np.mean(hues)-np.median(hues))
    hue_vals = get_popular_values(hues, num=2)
    brightness_vals = get_popular_values(brightness, num=3)
    pixel_count = sum(np.histogram(hues)[0])
    first_two_colors_close = (abs(hue_vals[0].idx - hue_vals[1].idx) == 1) or (hue_vals[0].idx==0 and\
        hue_vals[1].idx==hue_vals[1].maxidx ) or (hue_vals[0].idx==hue_vals[0].maxidx and hue_vals[1].idx==0)\
        or (hue_vals[1].cnt <(0.1 * pixel_count))
    good_contrast = True
    if std == 0.0: # no variance in hue means grayscale image
        good_contrast = abs(brightness_vals[0].idx - brightness_vals[1].idx) < 3 
    res = Feature(std, diff_avg,  first_two_colors_close, good_contrast)

    if not isGood(res):
        return None, None

    newy = int(len(pic[0]) / (len(pic) / 250.0))
    pic = resize(pic, (250, newy), mode="nearest")
    return res, pic
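A minimal usage sketch (the file name is hypothetical; crop_border, get_popular_values, Feature and isGood are assumed to be defined elsewhere in the same module):

res, small = getFeatures("cover.jpg")   # str input goes through crop_border + img_as_ubyte
if res is None:
    print("image rejected (too narrow or failed isGood)")
else:
    print(res)           # Feature(std, diff_avg, first_two_colors_close, good_contrast)
    print(small.shape)   # image resized to 250 rows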
Example #2
def main():
    # read the images
    image_from = io.imread(name_from) / 256
    image_to = io.imread(name_to) / 256

    # change to hsv domain (if requested)
    if args.use_hsv:
        image_from[:] = rgb2hsv(image_from)
        image_to[:] = rgb2hsv(image_to)

    # get shapes
    shape_from = image_from.shape
    shape_to = image_to.shape

    # flatten
    X_from = im2mat(image_from)
    X_to = im2mat(image_to)

    # number of pixels
    n_pixels_from = X_from.shape[0]
    n_pixels_to = X_to.shape[0]

    # subsample
    X_from_ss = X_from[np.random.randint(0, n_pixels_from-1, n_pixels),:]
    X_to_ss = X_to[np.random.randint(0, n_pixels_to-1, n_pixels),:]

    if save_col_distribution:
        import matplotlib.pyplot as plt
        import seaborn as sns
        sns.set_style('white')

        fig, axes = plt.subplots(nrows=2, figsize=(5, 10))
        for ax, X in zip(axes, [X_from_ss, X_to_ss]):
            ax.scatter(X[:,0], X[:,1], color=X)
            if args.use_hsv:
                ax.set_xlabel('hue')
                ax.set_ylabel('value')
            else:
                ax.set_xlabel('red')
                ax.set_ylabel('green')
        axes[0].set_title('distr. from')
        axes[1].set_title('distr. to')
        fig.tight_layout()
        fig.savefig('color_distributions.png')

    # optimal transportation
    ot_color = OptimalTransport(X_to_ss, X_from_ss, lam=lam,
                                    distance_metric=distance_metric)

    # model transfer
    transfer_model = KNeighborsRegressor(n_neighbors=n_neighbors)
    transfer_model.fit(X_to_ss, n_pixels * ot_color.P @ X_from_ss)
    X_transfered = transfer_model.predict(X_to)

    image_transferd = minmax(mat2im(X_transfered, shape_to))
    if args.use_hsv:
        image_transferd[:] = hsv2rgb(image_transferd)
    io.imsave(name_out, image_transferd)
Example #3
def convertHSV(img):
    "Convert an RGB or RGBA image into HSV color space"
    if img.shape[2] == 4:
        return rgb2hsv(img[:, :, 0:3])  # drop the alpha channel before converting
    elif img.shape[2] == 3:
        return rgb2hsv(img)
    else:
        print("Image format not supported")
Example #4
def test_hsv_value_with_non_float_output():
    # Since `rgb2hsv` returns a float image and the filtered result is
    # inserted back into the HSV image, we want to make sure there isn't
    # a dtype mismatch.
    filtered = edges_hsv_uint(COLOR_IMAGE)
    filtered_value = color.rgb2hsv(filtered)[:, :, 2]
    value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
    # Reduce tolerance because dtype conversion.
    assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
Example #5
def composite(overlay_rgb, background_rgb):
    """
    Alpha composite RGB overlay on RGB background
    - derive alpha from HSV value of overlay

    Parameters
    ----------
    overlay_rgb: ndarray
        RGB overlay image, float values in [0, 1]
    background_rgb: ndarray
        RGB background image, float values in [0, 1]

    Returns
    -------
    composite_rgb: ndarray
        Alpha-composited RGB image, clipped to [0, 1]
    """

    overlay_hsv = color.rgb2hsv(overlay_rgb)
    value = overlay_hsv[:,:,2]
    alpha_rgb = np.dstack((value, value, value))

    composite_rgb = overlay_rgb * alpha_rgb + background_rgb * (1.0 - alpha_rgb)

    # Clamp values to [0, 1]
    composite_rgb = composite_rgb.clip(0.0, 1.0)

    return composite_rgb
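A minimal usage sketch with two same-sized float RGB images in [0, 1] (the overlay square here is illustrative); composite() relies on the color and np imports shown elsewhere in the snippet's module:

import numpy as np
from skimage import data, color

background = data.astronaut() / 255.0          # float RGB in [0, 1]
overlay = np.zeros_like(background)
overlay[100:200, 100:200] = (1.0, 0.9, 0.2)    # a bright patch to blend in
result = composite(overlay, background)
print(result.shape, result.min(), result.max())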
Example #6
def compute_color_feature_matrix(file_list, N, ch, cs, cv): 
    m = []
    for f in file_list:
        img = io.imread(f, as_grey=False)
        m.append(hsv_to_feature(color.rgb2hsv(img), N, ch, cs, cv))
        
    return m
def legendCurveDictionary(legends, curveLocs):
    # print curveLocs
    curves = [(x.split("Curve-")[1].split(".png")[0], rgb2hsv(imread(x)) * 360) for x in curveLocs]
    lcd = {}
    for i, l in enumerate(legends):
        lcd[i] = [x for x in [curveScore(l, curve) for curve in curves] if x[1] is not None]
    return lcd
Example #8
def ton_and_color_corrections():
    # tone and color correction
    image=data.astronaut()
    h1=color.rgb2hsv(image)
    h2=h1.copy()
    h1[:,:,1]=h1[:,:,1]*0.5
    image1=color.hsv2rgb(h1)
    h2[:,:,1]=h2[:,:,1]*0.5+0.5
    image2=color.hsv2rgb(h2)
    io.imshow(image)
    io.imsave('astronaut.png',image)
    io.imshow(image1)
    io.imsave('astronautlight.png',image1)
    io.imshow(image2)
    io.imsave('astronautdark.png',image2)
    
    imagered=image.copy()
    imagered[:,:,0]=image[:,:,0]*127.0/255+128
    io.imsave('astronautred.png',imagered)
    imageblue=image.copy()
    imageblue[:,:,2]=image[:,:,2]*127.0/255+128
    io.imsave('astronautblue.png',imageblue)
    imageyellow=image.copy()
    imageyellow[:,:,0]=image[:,:,0]*127.0/255+128
    imageyellow[:,:,1]=image[:,:,1]*127.0/255+128
    io.imsave('astronautyellow.png',imageyellow)
    io.imshow(imageyellow)
Example #9
def stretchImageHue(imrgb):
	# Image must be stored as 0-1 bound float. If it's 0-255 int, convert
	if( imrgb.max() > 1 ):
		imrgb = imrgb*1./255

	# Transform to HSV
	imhsv = rgb2hsv(imrgb)

	# Find 2-98 percentiles of H histogram (except de-saturated pixels)
	plt.figure()
	plt.hist(imhsv[imhsv[:,:,1]>0.1,0].flatten(), bins=360)
	p2, p98 = np.percentile(imhsv[imhsv[:,:,1]>0.1,0], (2, 98))
	print(p2, p98)

	imhsv[:,:,0] = doStretch(imhsv[:,:,0], p2, p98, 0.6, 0.99)
	plt.figure()
	plt.hist(imhsv[imhsv[:,:,1]>0.1,0].flatten(), bins=360)	

	imrgb_stretched = hsv2rgb(imhsv)
	plt.figure()
	plt.imshow(imrgb)
	plt.figure()
	plt.imshow(imrgb_stretched)

	plt.show()
Example #10
 def testPipo(self):
     hsv = rgb2hsv(self.sample / 256.0).reshape((4, 3))
     print(hsv)
     polar = colors.convert(hsv, 0.0, 1.0)
     print(polar)
     xy = colors.unconvert(polar)
     print(xy)
Example #11
	def get_disease2(self):
		r,g,b = my_image.image_split(self.i_source)
		hsv = rgb2hsv(self.i_source)
		dise= np.copy(self.i_source[:,:,1])
		dise[dise>1]=0
		dise[(hsv[:,:,0]>0) & (hsv[:,:,0]<float(45.0/255))&(r<100)&(g<100)&(b<50)]=1
		label_im, nb_labels = ndimage.label(dise)
		sizes = ndimage.sum(dise, label_im, range(nb_labels + 1))
		mean_vals = ndimage.sum(dise, label_im, range(1, nb_labels + 1))
		mask_size = sizes < np.max(sizes)
		remove_pixel = mask_size[label_im]
		label_im[remove_pixel] = 0
		label_im[label_im>0]=1
		
#		pdb.set_trace()
		###
		# remove artifacts connected to image border
		self.s_disease = 0
		# label image regions
		region = regionprops(label(label_im))
		for r1 in region:
#			pdb.set_trace()
			if r1.area > 10:
				self.s_disease += r1.area
			else:
				minr, minc, maxr, maxc = r1.bbox
				label_im[minr:maxr, minc:maxc] = 0  # bbox coords are ints; float slice indices would raise
							
		self.i_desease = label_im 
		self.i_leaf = self.i_source#blade
		self.i_back = self.i_source#background
		self.s_leaf = 1#s_b+s_d
Example #12
def color_augment_image(data):
    image = data.transpose(1, 2, 0)
    hsv = color.rgb2hsv(image)

    # Contrast 2
    s_factor1 = numpy.random.uniform(0.25, 4)
    s_factor2 = numpy.random.uniform(0.7, 1.4)
    s_factor3 = numpy.random.uniform(-0.1, 0.1)

    hsv[:, :, 1] = (hsv[:, :, 1] ** s_factor1) * s_factor2 + s_factor3

    v_factor1 = numpy.random.uniform(0.25, 4)
    v_factor2 = numpy.random.uniform(0.7, 1.4)
    v_factor3 = numpy.random.uniform(-0.1, 0.1)

    hsv[:, :, 2] = (hsv[:, :, 2] ** v_factor1) * v_factor2 + v_factor3

    # Color
    h_factor = numpy.random.uniform(-0.1, 0.1)
    hsv[:, :, 0] = hsv[:, :, 0] + h_factor

    hsv[hsv < 0] = 0.0
    hsv[hsv > 1] = 1.0

    rgb = color.hsv2rgb(hsv)

    data_out = rgb.transpose(2, 0, 1)
    return data_out
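A minimal usage sketch; the function expects a channels-first (3, H, W) image and returns the augmented image in the same layout. The numpy and color imports match the names used inside the snippet:

import numpy
from skimage import data, color

img = data.astronaut()                 # (H, W, 3) uint8
chw = img.transpose(2, 0, 1)           # to (3, H, W), as the function expects
augmented = color_augment_image(chw)   # random saturation/value/hue jitter
print(augmented.shape)                 # (3, H, W), float values in [0, 1]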
Example #13
def slic_data():
   for i in range(uu_num_train+uu_num_test):

      print "data %d" %(i+1)
      img_name = ''
      if i < 10:
         img_name = '0' + str(i)
      else:
         img_name = str(i)

      #Read first 70 images as floats
      img = img_as_float(io.imread(r'..\data\training\image_2\uu_0000' + img_name + '.png'))
      img_hsv = color.rgb2hsv(img)
      gt_img = img_as_float(io.imread(r'..\data\training\gt_image_2\uu_road_0000' + img_name + '.png'))

      #Create superpixels for training images
      image_segment = slic(img, n_segments = numSegments, sigma = 5)
      t, train_indices = np.unique(image_segment, return_index=True)
      images_train_indices.append(train_indices)
      image = np.reshape(img,(1,(img.shape[0]*img.shape[1]),3))
      image_hsv = np.reshape(img_hsv,(1,(img_hsv.shape[0]*img_hsv.shape[1]),3))
      #images_train.append([image[0][i] for i in train_indices])
      images_train_hsv.append([image_hsv[0][i] for i in train_indices])

      #Create gt training image values index at train_indices and converted to 1 or 0
      gt_image = np.reshape(gt_img, (1,(gt_img.shape[0]*gt_img.shape[1]),3))
      gt_image = [1 if gt_image[0][i][2] > 0 else 0 for i in train_indices]
      gt_images_train.append(gt_image)
Example #14
def extract_descriptors_he(_img, w_size, _ncpus=None):
    """
    EXTRACT_LOCAL_DESCRIPTORS_HE: extracts a set of local descriptors of the image:
        - histogram of Hue values
        - histogram of haematoxylin and eosin planes
        - Gabor descriptors in haematoxylin and eosin spaces, respectively
        - local binary patterns in haematoxylin and eosin spaces, respectively

    :param _img: numpy.ndarray
        RGB image (H x W x 3)

    :param w_size: int
        side length of the square, non-overlapping window

    :return: list
    """
    assert (_img.ndim == 3)

    img_iterator = sliding_window(_img.shape[:-1], (w_size, w_size), step=(w_size, w_size))  # non-overlapping windows
    gabor = GaborDescriptor()
    lbp   = LBPDescriptor()

    hsv = rgb2hsv(_img)
    h, e, _ = rgb2he2(_img)

    res = []
    with ProcessPoolExecutor(max_workers=_ncpus) as executor:
        for w_coords in img_iterator:
            res.append(executor.submit(_worker2, hsv[:,:,0], h, e, gabor, lbp, w_coords))

    desc = []
    for f in as_completed(res):
        desc.append(f.result())

    return desc
def extr_beauty_ftrs(imgFlNm):
	img = os.path.basename(imgFlNm)
	
	print("Extracting beauty features for %s" %imgFlNm)

	try:
		rgbImg = resize_img(io.imread(imgFlNm))
	except Exception as e:
		print("Invalid image")
		return e
		
	if len(rgbImg.shape) != 3 or rgbImg.shape[2] !=3:
		print("Invalid image.. Continuing..")
		final_ftr_obj_global[img] = None
		return None

	hsvImg = color.rgb2hsv(rgbImg)
	grayImg = color.rgb2gray(rgbImg)

	red, green, blue = get_arr(rgbImg)
	hue, saturation, value = get_arr(hsvImg)

	contrast = calc_contrast(red, green, blue)
	ftrs = calc_color_ftrs(hue, saturation, value)
	ftrs['contrast'] = contrast
	ftrs['entropy'] = entropy(grayImg) # added to include entropy of the given image: more details: http://stackoverflow.com/a/42059758/5759063
	ftrs.update(get_spat_arrng_ftrs(grayImg))
	
	final_ftr_obj_global[img] = ftrs
	
	return final_ftr_obj_global
Example #16
def RgbToPlateBackgroundScore(im):
	#This function compares images to find number plate background colour
	#since UK plates come in two different colours, they are merged into
	#a single score here.

	hsvImg = color.rgb2hsv(im)

	#Target colours
	#Yellow HSV 47/360, 81/100, 100/100 or 32/255, 217/255, > 248/255
	#White HSV None, < 8/100, > 82/100 or ?/255, < 21/255, > 255/255

	#Compare to white
	whiteScore = (255.-hsvImg[:,:,1]) * (hsvImg[:,:,2]) / pow(255., 2.)
	
	#Compare to yellow
	#Hue is a repeating value similar to angle, compute dot product with yellow
	hueAng = hsvImg[:,:,0] * (2. * math.pi / 255.)
	hueSin = np.sin(hueAng)
	hueCos = np.cos(hueAng)
	targetSin = math.sin(math.radians(47.)) #Hue of yellow
	targetCos = math.cos(math.radians(47.))
	dotProd = hueSin * targetSin + hueCos * targetCos
	yellowHueScore = (dotProd + 1.) / 2. #Scale from 0 to 1	
	yellowSatScore = np.abs(hsvImg[:,:,1] - 217.) / 217.
	yellowValScore = hsvImg[:,:,2] / 255.
	yellowScore = yellowHueScore * yellowSatScore * yellowValScore

	scoreMax = np.maximum(whiteScore, yellowScore)
	return scoreMax
Example #17
    def loadData(self):
        self.rawData = io.imread(self.fileName, plugin='tifffile')
        self.rawData = cv2.merge((self.rawData[:, :, 0].T,
                                  self.rawData[:, :, 1].T,
                                  self.rawData[:, :, 2].T))
        self.cData = self.rawData.copy()
        self.grayData = self.rawData.copy()
        self.grayData = color.rgb2gray(self.rawData)
        self.hsvData = color.rgb2hsv(self.rawData)
        # self.grayData = self.grayData.convert('LA')
        # self.grayData = self.grayData.transpose(method=PIL.Image.TRANSPOSE)
        self.grayData = transform.rotate(self.grayData, angle=0)
        self.cData = transform.rotate(self.cData, angle=0)
        self.hsvData = transform.rotate(self.hsvData, angle=0)
        self.b = self.cData[:, :, 0]
        self.g = self.cData[:, :, 1]
        self.r = self.cData[:, :, 2]
        self.h = self.hsvData[:, :, 0]  # rgb2hsv returns channels in H, S, V order
        self.s = self.hsvData[:, :, 1]
        self.v = self.hsvData[:, :, 2]

        self.colorDict = {'RGB': self.cData,
                          'GRAY': self.grayData,
                          'B': self.b,
                          'G': self.g,
                          'R': self.r,
                          'HSV': self.hsvData,
                          'H': self.h,
                          'S': self.s,
                          'V': self.v}
Example #18
def run(args):
    logging.basicConfig(level=logging.INFO)

    slide = openslide.OpenSlide(args.wsi_path)

    # note the shape of img_RGB is the transpose of slide.level_dimensions
    img_RGB = np.transpose(np.array(slide.read_region((0, 0),
                           args.level,
                           slide.level_dimensions[args.level]).convert('RGB')),
                           axes=[1, 0, 2])

    img_HSV = rgb2hsv(img_RGB)

    background_R = img_RGB[:, :, 0] > threshold_otsu(img_RGB[:, :, 0])
    background_G = img_RGB[:, :, 1] > threshold_otsu(img_RGB[:, :, 1])
    background_B = img_RGB[:, :, 2] > threshold_otsu(img_RGB[:, :, 2])
    tissue_RGB = np.logical_not(background_R & background_G & background_B)
    tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1])
    min_R = img_RGB[:, :, 0] > args.RGB_min
    min_G = img_RGB[:, :, 1] > args.RGB_min
    min_B = img_RGB[:, :, 2] > args.RGB_min

    tissue_mask = tissue_S & tissue_RGB & min_R & min_G & min_B

    np.save(args.npy_path, tissue_mask)
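The same masking logic can be sketched on an in-memory RGB array without openslide; the RGB_min value of 50 used here is illustrative:

import numpy as np
from skimage import data
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu

img_RGB = data.astronaut()
img_HSV = rgb2hsv(img_RGB)

background = np.logical_and.reduce(
    [img_RGB[:, :, c] > threshold_otsu(img_RGB[:, :, c]) for c in range(3)])
tissue_S = img_HSV[:, :, 1] > threshold_otsu(img_HSV[:, :, 1])
min_RGB = np.logical_and.reduce([img_RGB[:, :, c] > 50 for c in range(3)])

tissue_mask = ~background & tissue_S & min_RGB
print(tissue_mask.shape, tissue_mask.mean())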
Example #19
    def get_res(self, img):
        # # img.show()
        img = np.array(img)
        from skimage import color
        img_hsv = color.rgb2hsv(img)
        h = img_hsv[:, :, 0]
        s = img_hsv[:, :, 1]
        v = img_hsv[:, :, 2]
        img = 255 - h / 100 - s / 1 + v / 8 - 255
        img *= 255

        img = np.array(img)

        origin = img
        from skimage.filters import threshold_otsu
        # print img
        for i in range(img.shape[0]):
            for j in range(img.shape[1]):
                if img[i][j] > -100:
                    img[i][j] = 255
                else:
                    # print img[i][j]
                    img[i][j] = 0


        new_img = img[:, 28:108]

        res = []
        for i in range(0, 5):
            crop_img = new_img[6:, i * 13: i * 13 + 24]
            res.append(crop_img)
        return res
Example #20
def hsi_equalize_hist():
    image=data.astronaut()
    h=color.rgb2hsv(image)
    h[:,:,2]=exposure.equalize_hist(h[:,:,2])
    image_equal=color.hsv2rgb(h)
    io.imshow(image_equal)
    io.imsave('astronautequal.png',image_equal)
Example #21
def transform_to_hsv_space(im):

    fig = plt.figure(figsize=(12, 6))

    ax = fig.add_subplot(121)
    ax.imshow(im)
    ax.set_title('Original Image')

    ax = fig.add_subplot(122, projection='3d')

    #im = rgb2hsv(im)
    index = 0

    # loop through pixels array and add each pixel as item in scatter plot
    for row in im:
        for pixel in row:
            index = index + 1
            if index % 80 > 0:
                continue
            [[(H, S, V)]] =  rgb2hsv([[pixel]])
            x = cos(H*2*pi) * S
            y = sin(-H*2*pi) * S
            z = V
            color = (pixel[0]/255.,pixel[1]/255.,pixel[2]/255.)
            marker = ','

            ax.scatter(x,y,z,c=color, s=10, lw = 0, alpha=0.08)
    ax.set_zlabel('V')
    ax.view_init(elev=17., azim=30)
    ax.set_title('Image in HSV Space')
    plt.show()
Example #22
 def _process(self, img):
     hsv = rgb2hsv(img)
     h = hsv[:, :, 0] + self._adjust
     h[h > 1] -= 1
     hsv[:, :, 0] = h
     img[:, :, :] = hsv2rgb(hsv)
     return img
Example #23
def RDMcolormap(nCols=256):

    # blue-cyan-gray-red-yellow with increasing V (BCGRYincV)
    anchorCols = np.array([
        [0, 0, 1],
        [0, 1, 1],
        [.5, .5, .5],
        [1, 0, 0],
        [1, 1, 0],
    ])

    # skimage rgb2hsv is intended for 3d images (RGB)
    # here we add a new axis to our 2d anchorCols to satisfy skimage, and then squeeze
    anchorCols_hsv = rgb2hsv(anchorCols[np.newaxis, :]).squeeze()

    incVweight = 1
    anchorCols_hsv[:, 2] = (1-incVweight)*anchorCols_hsv[:, 2] + \
        incVweight*np.linspace(0.5, 1, anchorCols.shape[0]).T

    # anchorCols = brightness(anchorCols)
    anchorCols = hsv2rgb(anchorCols_hsv[np.newaxis, :]).squeeze()

    cols = colorScale(nCols, anchorCols)

    return ListedColormap(cols)
Example #24
def watershed(image):
    hsv_image = color.rgb2hsv(image)

    low_res_image = rescale(hsv_image[:, :, 0], SCALE)
    local_mean = mean(low_res_image, disk(50))
    local_minimum_flat = np.argmin(local_mean)
    local_minimum = np.multiply(np.unravel_index(local_minimum_flat, low_res_image.shape), round(1 / SCALE))

    certain_bone_pixels = np.full_like(hsv_image[:, :, 0], False, bool)
    certain_bone_pixels[
        local_minimum[0] - INITIAL_WINDOW_SIZE // 2:local_minimum[0] + INITIAL_WINDOW_SIZE // 2,
        local_minimum[1] - INITIAL_WINDOW_SIZE // 2:local_minimum[1] + INITIAL_WINDOW_SIZE // 2
    ] = True  # integer division keeps the slice indices as ints

    certain_non_bone_pixels = np.full_like(hsv_image[:, :, 0], False, bool)
    certain_non_bone_pixels[0:BORDER_SIZE, :] = True
    certain_non_bone_pixels[-BORDER_SIZE:-1, :] = True
    certain_non_bone_pixels[:, 0:BORDER_SIZE] = True
    certain_non_bone_pixels[:, -BORDER_SIZE:-1] = True

    smoothed_hsv = median(hsv_image[:, :, 0], disk(50))
    threshold = MU * np.median(smoothed_hsv[certain_bone_pixels])

    possible_bones = np.zeros_like(hsv_image[:, :, 0])
    possible_bones[smoothed_hsv < threshold] = 1

    markers = np.zeros_like(possible_bones)
    markers[certain_bone_pixels] = 1
    markers[certain_non_bone_pixels] = 2

    labels = morphology.watershed(-possible_bones, markers)

    return labels
Example #25
def saturate(im, amount=1.1):
  hsvim = skcolor.rgb2hsv(im)
  hue = np.take(hsvim, 0, axis=2)
  sat = np.take(hsvim, 1, axis=2)
  val = np.take(hsvim, 2, axis=2)
  sat = np.clip(sat * amount, 0, 1)  # apply the saturation boost, kept in the valid range
  newhsv = np.dstack((hue, sat, val))
  return skcolor.hsv2rgb(newhsv)
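A minimal usage sketch with a float RGB image in [0, 1]; the np and skcolor names match the imports the snippet relies on:

import numpy as np
from skimage import color as skcolor, data

more_colorful = saturate(data.astronaut() / 255.0, amount=1.3)
print(more_colorful.min(), more_colorful.max())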
Example #26
 def extract(self, img):
     if len(img.shape) == 2:
         return self.rscodec.decode(self._extract(img))
     elif len(img.shape) == 3:
         hsv = rgb2hsv(img)
         return self.rscodec.decode(self._extract(hsv[:,:,0]))
     else:
         raise TypeError("img must be a 2d or 3d array")
Example #27
def color_feature(blur, hbins=15, sbins=15):
    hsv = color.rgb2hsv(blur)

    # cal hist
    h_hist = exposure.histogram(hsv[:, :, 0], nbins=hbins)
    s_hist = exposure.histogram(hsv[:, :, 1], nbins=sbins)

    return np.append(normalize(h_hist[0]), normalize(s_hist[0]))
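A minimal usage sketch; `normalize` is not shown in the snippet, so a simple sum-to-one normalization is assumed here:

import numpy as np
from skimage import data, color, exposure

def normalize(hist):
    # assumed behavior: scale histogram counts so they sum to 1
    hist = np.asarray(hist, dtype=float)
    return hist / hist.sum()

blur = data.astronaut()                            # any RGB image; blurring is optional for the sketch
features = color_feature(blur, hbins=15, sbins=15)
print(features.shape)                              # (30,) = hbins + sbins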
Example #28
 def test_rgb2hsv_conversion(self):
     rgb = img_as_float(self.img_rgb)[::16, ::16]
     hsv = rgb2hsv(rgb).reshape(-1, 3)
     # ground truth from colorsys
     gt = np.array([colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
                    for pt in rgb.reshape(-1, 3)]
                   )
     assert_almost_equal(hsv, gt)
Example #29
 def _rotate_Scale_fired(self):
     """" _rotate_Scale_fired(self): rotates scale and re-displays image when button is pressed """
     max = 255. # this will only work with certain image types...
     hsvimage = rgb2hsv([x/max for x in self.image])
     hsvimage[:,:,1] = [np.mod(x+0.5,1) for x in hsvimage[:,:,1]]
     hsvimage = [np.uint8(x*max) for x in hsv2rgb(hsvimage)]
     self.image = hsvimage
     self.ax.imshow(hsvimage)
     self.figure.canvas.draw()
Example #30
 def _rotate_Hue_fired(self):
     """" _rotate_Hue_fired(self): rotates hue and re-displays image when button is pressed """
     max = 255. 
     hsvimage = rgb2hsv([x/max for x in self.image])
     hsvimage[:,:,0] = [np.mod(x+0.5,1) for x in hsvimage[:,:,0]]
     hsvimage = [np.uint8(x*max) for x in hsv2rgb(hsvimage)]
     self.image = hsvimage
     self.ax.imshow(hsvimage)
     self.figure.canvas.draw()
Example #31
image = data.horse()
plt.subplot(2, 1, 1)
plt.imshow(image)
rescaled_image = rescale(image, 0.5)
plt.subplot(2, 1, 2)
plt.imshow(rescaled_image)
plt.show()

### change colour

import matplotlib.pyplot as plt
import numpy as np
from skimage.color import rgb2hsv

red_pixel_rgb = np.array([[[255, 0, 0]]], dtype=np.uint8)
rgb2hsv(red_pixel_rgb)
print(rgb2hsv(red_pixel_rgb))
plt.subplot(2, 1, 1)
plt.imshow(red_pixel_rgb)
plt.subplot(2, 1, 2)
plt.imshow(rgb2hsv(red_pixel_rgb))
plt.show()

### change colour to grey scale

import matplotlib.pyplot as plt
from skimage.color import rgb2grey
from skimage import data

my_image = data.astronaut()
plt.subplot(2, 1, 1)
Example #32
    def __getitem__(self, idx):
        impath = self.images[idx]

        # read image
        im = skio.imread(impath).astype(np.float32) / 255.0

        # if self.augment:
        #   # Jitter the quantized values
        #   im += np.random.normal(0, 0.005, size=im.shape)
        #   im = np.clip(im, 0, 1)

        if self.augment:
            if np.random.uniform() < 0.5:
                im = np.fliplr(im)
            if np.random.uniform() < 0.5:
                im = np.flipud(im)

            im = np.rot90(im, k=np.random.randint(0, 4))

            # Pixel shift
            if np.random.uniform() < 0.5:
                shift_y = np.random.randint(0,
                                            6)  # cover both xtrans and bayer
                im = np.roll(im, shift_y, 0)
            if np.random.uniform() < 0.5:
                shift_x = np.random.randint(0, 6)
                im = np.roll(im, shift_x, 1)

            # Random Hue/Sat
            if np.random.uniform() < 0.5:
                shift = np.random.uniform(-0.1, 0.1)
                sat = np.random.uniform(0.8, 1.2)
                im = rgb2hsv(im)
                im[:, :, 0] = np.mod(im[:, :, 0] + shift, 1)
                im[:, :, 1] *= sat
                im = hsv2rgb(im)

            im = np.clip(im, 0, 1)

        if self.linearizer is not None:
            im = self.linearizer(im)

        # Randomize exposure
        if self.augment:
            if np.random.uniform() < 0.5:
                im *= np.random.uniform(0.5, 1.2)

            im = np.clip(im, 0, 1)

        im = np.ascontiguousarray(im).astype(np.float32)

        im = np.transpose(im, [2, 1, 0])

        # crop boundaries to ignore shift
        c = 8
        im = im[:, c:-c, c:-c]

        # Sample a random patch from the image, images can be repeated in the dataset file
        ps = 128
        if im.shape[1] > ps:
            px = np.random.randint(0, im.shape[1] - ps)
        else:
            px = 0

        if im.shape[2] > ps:
            py = np.random.randint(0, im.shape[2] - ps)
        else:
            py = 0

        im = im[:, px:(px + ps), py:(py + ps)]

        # apply mosaic
        mosaic, mask = self.make_mosaic(im)

        # TODO: separate GT/noisy
        # # add noise
        # std = 0
        # if self.add_noise:
        #   std = np.random.uniform(0, self.max_noise)
        #   im += np.random.normal(0, std, size=im.shape)
        #   im = np.clip(im, 0, 1)

        sample = {
            "mosaic": mosaic,
            "mask": mask,
            # "noise_variance": np.array([std]),
            "target": im,
        }

        # Augment
        if self.transform is not None:
            sample = self.transform(sample)

        return sample
Example #33
    center_texture[:, :, 3] = bra_center[:, :, 0] / 255.0

    # base color painting and shading seamless design
    front = pantie[20:100, 30:80, :]
    front_shade = pantie[100:150, 0:40, :]
    base = np.mean(np.mean(front, axis=0), axis=0) / 255.0
    if np.mean(
            base[:3]
    ) < 0.4:  # median estimation provides better estimation for dark panties
        base = np.median(np.median(front, axis=0), axis=0) / 255.0
    base = base[:3]
    base_shade = (np.median(np.median(front, axis=0), axis=0) / 255.0)[:3]
    base_texture = np.copy(bra_mask).astype(np.float32) / 255.0
    base_texture[:, :, :3] = (base_texture[:, :, :3] * base)
    if args.disable_texture is False:
        shade = rgb2hsv(
            np.tile((design_seamless)[:, :, None], [1, 1, 3]) * base_shade)
        shade[:, :, 0] -= 0
        shade[:, :, 1] *= -4 + (1 - np.mean(base)) * 6
        shade[:, :, 2] /= 6 + 3 * np.mean(base)
        shade = hsv2rgb(shade)
        base_texture[:, :, :3] -= shade[:r, :c, ]

    # combine center and base textures and shading
    center_mask = (bra_center[:, :, 0][:, :, None] / 255.0).astype(np.float32)
    convined_texture = base_texture * (
        1 - center_mask) + center_texture * center_mask
    convined_texture = np.concatenate(
        [convined_texture[:, ::-1, :], convined_texture], axis=1)
    if args.disable_shading is False:
        shade = rgb2hsv(
            np.tile(
Example #34
    groups = []

    while len(colors) > 0:
        group = []
        group += [colors.pop()]
        for c in colors[:]:
            if abs(c - group[0]) < limit:
                group.append(c)
                colors.remove(c)
        groups.append(group)

    return {i: len(group) for i, group in enumerate(groups)}


image = plt.imread("balls_and_rects.png")
image = rgb2hsv(image)[:, :, 0]
b = image.copy()
b[b > 0] = 1
labeled = label(b)

shapes = {}
for region in regionprops(labeled):
    key = define_shape(region.image)
    if key in shapes.keys():
        shapes[key] += [region]
    else:
        shapes[key] = [region]

print(f"Суммарное количество фигур: {np.max(labeled)}")
for key, value in shapes.items():
    print(f'Количество фигур "{key}": {len(value)}')
Example #35
    def gen_bra(self, image):
        # image = Image.open('./dream/0101.png')
        pantie = np.array(image)
        if self.use_ribbon_mesh:
            pantie = ribbon_inpaint(pantie)
        else:
            ribbon = pantie.copy()
            ribbon[:, :, 3] = self.ribbon_mask[:, :, 1]
            ribbon = ribbon[19:58, 8:30] / 255.0

        front = pantie[20:100, 30:80, :3] / 255
        front_shade = pantie[100:150, 0:40, :3] / 255
        center = pantie[20:170, -200:-15, :3] / 255
        base_color = np.mean(np.mean(center, axis=0), axis=0)
        front_color = np.mean(np.mean(front, axis=0), axis=0)
        shade_color = np.mean(np.mean(front_shade, axis=0), axis=0)

        # make seamless design
        design = rgb2gray(center[:, :, :3])[::-1, ::-1]
        design = (design - np.min(design)) / (np.max(design) - np.min(design))
        edge = 3
        design_seamless = gaussian(design, sigma=3)
        design_seamless[edge:-edge, edge:-edge] = design[edge:-edge,
                                                         edge:-edge]
        [hr, hc, hd] = center.shape
        y = np.arange(-hr / 2, hr / 2, dtype=np.int16)
        x = np.arange(-hc / 2, hc / 2, dtype=np.int16)
        design_seamless = (design_seamless[y, :])[:, x]  # rearrange pixels
        design_seamless = resize(design_seamless, [1.65, 1.8])
        design_seamless = np.tile(design_seamless, (3, 4))
        posy = int((self.bra_center.shape[0] - design_seamless.shape[0]) / 2)
        posx = int((self.bra_center.shape[1] - design_seamless.shape[1]) / 2)
        sx = 0
        sy = 0
        design_seamless = (np.pad(design_seamless, [(posy + sy + 1, posy - sy),
                                                    (posx + sx, posx - sx)],
                                  mode='constant'))

        # Base shading
        bra_base = self.bra_base[:, :, :3] * front_color
        bra_base = bra_base - design_seamless[:, :, None] / 10

        shade = rgb2hsv(
            np.tile((self.bra_shade)[:, :, None], [1, 1, 3]) * base_color)
        shade[:, :, 0] -= 1
        shade[:, :, 1] *= 0.5 + np.mean(base_color) / 3
        shade[:, :, 2] /= 1 + 1 * np.mean(base_color)
        bra_shade = hsv2rgb(shade)

        # bra_shade = bra_shade[:, :, None] * shade_color

        # Center painting
        sx = -270
        sy = -50
        center = resize(center, [4, 4])
        posy = int((self.bra_center.shape[0] - center.shape[0]) / 2)
        posx = int((self.bra_center.shape[1] - center.shape[1]) / 2)
        center = (np.pad(center, [(posy + sy, posy - sy),
                                  (posx + sx, posx - sx), (0, 0)],
                         mode='constant'))
        center = center * self.bra_center[:, :, None]

        # Decoration painting
        deco_shade = np.median(pantie[5, :, :3], axis=0) / 255
        frill = np.dstack((self.frill[:, :, :3] * deco_shade, self.frill[:, :,
                                                                         3]))
        lace = np.dstack((self.lace[:, :, :3] * shade_color, self.lace[:, :,
                                                                       3]))

        # Finalize
        textured = bra_base * (1 - self.bra_center[:, :, None]
                               ) + center * self.bra_center[:, :, None]
        textured = textured - bra_shade
        textured = textured * (
            1 - lace[:, :, 3]
        )[:, :, None] + lace[:, :, :3] * lace[:, :, 3][:, :, None]
        textured = textured * (
            1 - frill[:, :, 3]
        )[:, :, None] + frill[:, :, :3] * frill[:, :, 3][:, :, None]
        textured = np.dstack((textured, self.bra_mask))
        if self.use_ribbon_mesh is False:
            ribbon = skt.rotate(ribbon, 8, resize=True)
            ribbon = resize(ribbon, [1.5, 1.5])
            [r, c, d] = ribbon.shape
            textured[460:460 + r,
                     35:35 + c] = textured[460:460 + r, 35:35 + c] * (
                         1 - ribbon[:, :, 3][:, :, None]
                     ) + ribbon * ribbon[:, :, 3][:, :, None]
        return Image.fromarray(np.uint8(np.clip(textured, 0, 1) * 255))
Example #36
        self.opcode = 12

    def augment_images(self, src_img):
        img_hsv = color.rgb2hsv(src_img)

        h = img_hsv[:, :, 0]
        rnd = 2 * random.random() - 1
        h_new = h + rnd
        h_new[h_new > 1] = h_new[h_new > 1] - 1
        h_new[h_new < 0] = h_new[h_new < 0] + 1
        img_hsv[:,:,0] = h_new
        rgb = color.hsv2rgb(img_hsv)

        return rgb

if __name__ == '__main__':
    from skimage import data,io

    img = data.astronaut()
    img_hsv = color.rgb2hsv(img)
    h = img_hsv[:, :, 0]
    rnd = 2 * random.random() - 1
    print(rnd)
    h_new = h + rnd
    h_new[h_new > 1] = h_new[h_new > 1] - 1
    h_new[h_new < 0] = h_new[h_new < 0] + 1
    img_hsv[:,:,0] = h_new
    rgb = color.hsv2rgb(img_hsv)
    io.imshow(rgb)
    io.show()
def shading_attenuation_method(image, extract, margin):
    """
    Apply the shading attenuation method to an image

    Parameters
    ----------
    image: 3D array
        The image source
    extract: scalar
        Number of pixels to extract per sample (extract x extract)
    margin: scalar
        Margin from the borders
    """

    hsv = rgb2hsv(image)
    V = np.copy(hsv[:, :, 2])

    shape = image.shape[0:2]

    """
    Sampling pixels
    ---------------
    """
    Yc, Xc = shatt.sampling_from_corners(margin=margin, extract=extract, shape=shape)
    Yf, Xf = shatt.sampling_from_frames(margin=margin, extract=extract, shape=shape)

    Zc = np.zeros((Xc.shape))
    Zf = np.zeros((Xf.shape))

    for j in range(0, Zc.shape[0]):
        Zc[j] = np.copy(V[Yc[j], Xc[j]])

    for j in range(0, Zf.shape[0]):
        Zf[j] = np.copy(V[Yf[j], Xf[j]])

    """
    Quadratic and cubic polynomial coefficients
    -------------------------------------------
    """
    Ac2 = shatt.quadratic_polynomial_function(Yc, Xc)
    Af2 = shatt.quadratic_polynomial_function(Yf, Xf)

    Ac3 = shatt.cubic_polynomial_function(Yc, Xc)
    Af3 = shatt.cubic_polynomial_function(Yf, Xf)

    """
    Fitting polynomial
    ------------------
    """
    coeffc2 = np.linalg.lstsq(Ac2, Zc)[0]
    coefff2 = np.linalg.lstsq(Af2, Zf)[0]

    coeffc3 = np.linalg.lstsq(Ac3, Zc)[0]
    coefff3 = np.linalg.lstsq(Af3, Zf)[0]

    """
    Processed
    ---------
    """
    Vprocc2 = shatt.apply_quadratic_function(V, coeffc2)
    Vprocf2 = shatt.apply_quadratic_function(V, coefff2)
    Vprocc3 = shatt.apply_cubic_function(V, coeffc3)
    Vprocf3 = shatt.apply_cubic_function(V, coefff3)

    # Convert Value into the range 0-1
    Vprocc2 = shatt.in_range(Vprocc2)
    Vprocf2 = shatt.in_range(Vprocf2)
    Vprocc3 = shatt.in_range(Vprocc3)
    Vprocf3 = shatt.in_range(Vprocf3)

    # Retrieve true color to skin
    muorig = V.mean()
    Vnewc2 = shatt.retrieve_color(Vprocc2, muorig)
    Vnewf2 = shatt.retrieve_color(Vprocf2, muorig)
    Vnewc3 = shatt.retrieve_color(Vprocc3, muorig)
    Vnewf3 = shatt.retrieve_color(Vprocf3, muorig)

    # Convert Value into the range 0-1
    Vnewc2 = shatt.in_range(Vnewc2)
    Vnewf2 = shatt.in_range(Vnewf2)
    Vnewc3 = shatt.in_range(Vnewc3)
    Vnewf3 = shatt.in_range(Vnewf3)

    # Select the image which have least entropy
    Vlist = [V, Vnewc2, Vnewf2, Vnewc3, Vnewf3]
    values = [img_as_ubyte(v) for v in Vlist]

    entropy_vals = [entropy(v) for v in values]
    print('\tentropy: '+str(entropy_vals))
    index = entropy_vals.index(min(entropy_vals))

    hsv[:, :, 2] = np.copy(Vlist[index])
    attenuated = hsv2rgb(hsv)

    return attenuated
Example #38
def flower_field():
    fig = plt.figure(constrained_layout=False, frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_facecolor((0.1, 0.2, 0.15))

    n_flowers = 200
    n_points = 5000

    flower_size_min = 0.075
    flower_size_max = 0.55

    max_exponent = 2.2
    min_exponent = 1.5

    rand_color = randomcolor.RandomColor()
    axes = []

    secret_hue = np.random.choice(['blue', 'yellow', 'purple', 'red'])

    for flower_index in range(n_flowers):
        size = np.random.uniform(flower_size_min, flower_size_max)
        axes.append(fig.add_axes((np.random.uniform(-0.5, 1.0),
                                  np.random.uniform(-0.5, 1.0),
                                  size,
                                  size),
                                 projection='polar'))

        flower_background_color = np.array(
            rand_color.generate(hue="yellow", luminosity="dark", count=1, format_='Array_rgb')) / 256.
        flower_background_color = flower_background_color[0]

        hue = 'green'
        luminosity = 'dark'
        # if np.random.uniform(0, 1) < 0.01:
        #     hue = secret_hue
        #     luminosity = 'bright'

        all_colors_rgb = np.array(rand_color.generate(hue=hue,
                                                      luminosity=luminosity,
                                                      count=n_points,
                                                      format_='Array_rgb')) / 256.
        all_colors_hsv = np.array([color.rgb2hsv(tmp_color) for tmp_color in all_colors_rgb])
        all_colors_hsv[:, 1] *= np.random.uniform(0.15, 0.3, n_points)
        # all_colors_hsv[:, 2] *= np.random.uniform(0.15, 0.4, n_points)
        # all_colors_hsv[:, 2] *= np.random.uniform(0.5, 0.95, n_points)
        all_colors_rgb = color.hsv2rgb(all_colors_hsv)

        r = 1.5 * np.arange(n_points)
        radial_freq = np.random.randint(3, 6)
        theta = np.random.uniform(0, 360) + np.arange(n_points) * radial_freq
        max_exponent *= 0.9
        max_exponent = np.max([min_exponent, max_exponent])


        exponent = np.random.uniform(min_exponent, max_exponent)
        area = 0.008 * r ** exponent * (size / flower_size_max)

        axes[-1].set_facecolor(flower_background_color)
        axes[-1].set_thetagrids([])
        axes[-1].set_rgrids([])
        axes[-1].set_frame_on(False)
        fillstyle = np.random.choice(['full', 'left', 'right', 'bottom', 'top', 'none'])
        marker = MarkerStyle(marker='o',
                             fillstyle=fillstyle
                             )
        axes[-1].scatter(theta, r, c=all_colors_rgb, s=area, cmap='hsv', alpha=0.95, marker=marker)

    flower_size_min = 0.075
    flower_size_max = 1.5
    n_flowers = 2

    hue = np.random.choice(['blue', 'yellow', 'purple', 'red'])

    for n in range(n_flowers):
        size = 2.5
        axes.append(fig.add_axes(((1 - size) / 2,
                                  (1 - size) / 2,
                                  size,
                                  size),
                                 projection='polar'))

        flower_background_color = np.array(
            rand_color.generate(hue="yellow", luminosity="dark", count=1, format_='Array_rgb')) / 256.
        flower_background_color = flower_background_color[0]

        all_colors_rgb = np.array(rand_color.generate(hue=hue,
                                                      luminosity='bright',
                                                      count=n_points,
                                                      format_='Array_rgb')) / 256.
        all_colors_hsv = np.array([color.rgb2hsv(tmp_color) for tmp_color in all_colors_rgb])
        all_colors_hsv[:, 1] *= np.linspace(1.0, 0.3, n_points)
        all_colors_rgb = color.hsv2rgb(all_colors_hsv)

        r = 1.5 * np.arange(n_points)
        radial_freq = np.random.randint(3, 6)
        theta = np.random.uniform(0, 360) + np.arange(n_points) * radial_freq
        max_exponent *= 0.9
        max_exponent = np.max([min_exponent, max_exponent])


        exponent = np.random.uniform(min_exponent, max_exponent)
        area = 0.008 * r ** exponent * (size / flower_size_max)

        axes[-1].set_facecolor(flower_background_color)
        axes[-1].set_thetagrids([])
        axes[-1].set_rgrids([])
        axes[-1].set_frame_on(False)
        fillstyle = np.random.choice(['left', 'right', 'bottom', 'top'])
        marker = MarkerStyle(marker='o',
                             fillstyle=fillstyle
                             )
        alpha = np.random.uniform(0.7, 0.95)
        axes[-1].scatter(theta, r, c=all_colors_rgb, s=area, cmap='hsv', alpha=alpha, marker=marker)
    plt.tight_layout()
    plt.show()
Example #39
 def testAndMap(self,
                imagename,
                imagepath,
                basetilepath,
                maskpath,
                smooth=False,
                savemask=False,
                savemap=True,
                mapname=None,
                labeled=True,
                st_el_size=100):
     model = self.model
     test_datagen = ImageDataGenerator(rescale=1. / 255)
     test_generator = test_datagen.flow_from_directory(
         directory=basetilepath + "test" + str(self.img_size) + "/",
         target_size=(self.img_size, self.img_size),
         color_mode="rgb",
         batch_size=1,
         class_mode='categorical',
         shuffle=False,
     )
     #test_generator = test_datagen.flow(imlist, [], [], batch_size=1)
     print("Testing " + self.name + " with " + imagename + " tiles")
     filenames = test_generator.filenames
     nb_samples = len(filenames)
     test_generator.reset()
     preds = model.predict_generator(test_generator, nb_samples)
     predicted_class_indices = np.argmax(preds, axis=1)
     labels = (test_generator.class_indices)
     labels = dict((v, k) for k, v in labels.items())
     predictions = [labels[k] for k in predicted_class_indices]
     image_predictions_list = []
     for i in range(len(predictions)):
         if imagename in filenames[i]:
             image_predictions_list.append(predictions[i])
     print("Found " + str(len(image_predictions_list)) + " " + imagename +
           " tiles")
     type1_tot = sum([1 for p in image_predictions_list if p == self.type1])
     type2_tot = sum([1 for p in image_predictions_list if p == self.type2])
     dom = self.type1 if type1_tot > type2_tot else self.type2
     print("Dominant type for " + imagename + " is " + dom + " based on " +
           str(type1_tot) + " type 1 (" + self.type1 +
           ") predictions and " + str(type2_tot) + " type 2 (" +
           self.type2 + ") predictions")
     correct = None
     if labeled:
         if dom in imagename:
             print(imagename + " classified correctly")
             correct = True
         else:
             print("Incorrect classification for " + imagename)
             correct = False
     tilelist = [
         name for name in filenames
         if (predictions[filenames.index(name)] == dom and imagename in name
             )
     ]
     print("Mapping with " + str(len(tilelist)) + " tiles")
     if not mapname:
         mapname = self.name + " size " + str(
             self.img_size
         ) + " tiles of " + imagename + " with prediction " + dom
     rgb_img = Image.open(imagepath + imagename + '.jpg')
     grayscale = rgb_img.convert('L')
     grayarray = np.array(grayscale)
     rows, cols = grayarray.shape
     color_mask = Image.fromarray(np.zeros((rows, cols, 3), dtype=np.uint8))
     for t in tilelist:
         name = os.path.basename(t)
         coords = [
             int(x) for x in re.findall('\\((.*?)\\)', name)[0].split(',')
         ]  #get coordinate tuple
         draw = ImageDraw.Draw(color_mask)
         draw.rectangle(coords, fill="red")
         del draw
     if smooth:
         cv2mask = np.array(color_mask)
         cv2colormask = cv2.cvtColor(cv2mask,
                                     cv2.COLOR_RGB2BGR)  #convert to cv2
         kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                            (st_el_size, st_el_size))
         cv2_color_mask = cv2.morphologyEx(cv2colormask,
                                           cv2.MORPH_OPEN,
                                           kernel,
                                           iterations=3)
         color_mask = Image.fromarray(cv2_color_mask)
         if savemask:
             cv2bwmask = cv2.cvtColor(cv2mask,
                                      cv2.COLOR_RGB2GRAY)  #convert to cv2
             cv2_bw_mask = cv2.morphologyEx(cv2bwmask,
                                            cv2.MORPH_OPEN,
                                            kernel,
                                            iterations=3)
             pil_bw_mask = Image.fromarray(cv2_bw_mask)
             pil_bw_mask = pil_bw_mask.point(lambda x: 0
                                             if x < 1 else 255, '1')
             pil_bw_mask.save(maskpath + self.name + '_' + imagename +
                              '.jpg')
             savemask = False
     if savemask:
         cv2mask = np.array(color_mask)
         cv2bwmask = cv2.cvtColor(cv2mask, cv2.COLOR_RGB2GRAY)
         bw_mask = Image.fromarray(cv2bwmask)
         bw_mask = bw_mask.point(lambda x: 0 if x < 1 else 255, '1')
         bw_mask.save(maskpath + self.name + '_' + imagename + '_mask.jpg')
     alpha = 0.75
     img_color = np.dstack((grayscale, grayscale, grayscale))
     # Convert the input image and color mask to Hue Saturation Value (HSV)
     # colorspace
     img_hsv = color.rgb2hsv(img_color)
     color_mask_hsv = color.rgb2hsv(color_mask)
     # Replace the hue and saturation of the original image
     # with that of the color mask
     img_hsv[..., 0] = color_mask_hsv[..., 0]
     img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha
     img_masked = color.hsv2rgb(img_hsv)
     # Use keras to convert, and save
     img_masked = image.array_to_img(img_masked)
     if savemap:
         img_masked.save(self.codepath + mapname + '.jpg')
     return dom, correct
Example #40
from skimage import io, color
import numpy as np
from time import time
from kmeans.kmeans_utils import cluster_img
from multiprocessing import Pool

if __name__ == '__main__':
    n_colors = 16

    img_rgb = io.imread("../images/contacts/3contacts.jpg")
    img_lab = color.rgb2lab(img_rgb)  # [:, :, 0:3]
    img_hsv = color.rgb2hsv(img_rgb)  # [:, :, 0:3]

    pool = Pool()

    t0 = time()
    clustered_img_rgb, clustered_img_lab, clustered_img_hsv = \
        pool.starmap(cluster_img, [(img_rgb, n_colors), (img_lab, n_colors), (img_hsv, n_colors)])
    print(f'Done. {time()-t0} sec')

    t0 = time()
    clustered_img_rgb, clustered_img_lab, clustered_img_hsv = \
        pool.starmap(cluster_img, [(img_rgb, n_colors), (img_lab, n_colors), (img_hsv, n_colors)])
    print(f'Done. {time()-t0} sec')

    img_to_show = np.vstack(
        (img_rgb, clustered_img_rgb, clustered_img_lab, clustered_img_hsv))
    io.imshow(img_to_show)
    io.show()
Example #41
for slide_ind in range(3, 55):

    print('slide', slide_ind)

    # slide_ind = 23
    imgcolor = Image.open(
        "/home/yuncong/DavidData2015tifFlat/x0.3125_slide/CC35_%02d_x0.3125_z0.tif"
        % slide_ind)
    imgcolor = imgcolor.convert('RGB')
    imgcolor = np.array(imgcolor)
    img_gray = rgb2gray(imgcolor)
    slide_height, slide_width = img_gray.shape[:2]

    if use_hsv:
        imghsv = rgb2hsv(imgcolor)
        img = imghsv[..., 1]
    else:
        img = img_gray

    # plt.imshow(imghsv[...,0], cmap=plt.cm.gray)
    # plt.show()
    # plt.imshow(imghsv[...,1], cmap=plt.cm.gray)
    # plt.show()
    # plt.imshow(imghsv[...,2], cmap=plt.cm.gray)
    # plt.show()

    a = img_gray < 0.98
    a = median(a.astype(np.float), disk(3))
    a = remove_small_objects(a.astype(np.bool),
                             min_size=50,
Example #42
def normalize_illumination(img):
    img = rgb2hsv(img)
    value = img[:, :, 2]
    value = equalize_hist(value)
    img[:, :, 2] = value
    return hsv2rgb(img)
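A minimal usage sketch; the rgb2hsv, hsv2rgb and equalize_hist imports are the ones the snippet already depends on:

from skimage import data
from skimage.color import rgb2hsv, hsv2rgb
from skimage.exposure import equalize_hist

balanced = normalize_illumination(data.astronaut() / 255.0)   # float RGB in [0, 1]
print(balanced.shape, balanced.min(), balanced.max())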
Example #43
if sample_image.shape[2] == 1:
    sample_image = np.dstack([sample_image, sample_image, sample_image])
sample_image = cv2.cvtColor(sample_image, cv2.COLOR_BGR2RGB)
sample_image = sample_image.astype(np.float32) / 255.
sample_label = 1

sample_image_processed = np.expand_dims(sample_image, axis=0)
print(sample_image_processed.shape)

heatmap = cv2.resize(heatmap, (sample_image.shape[1], sample_image.shape[0]))  # cv2.resize takes (width, height)
heatmap = heatmap * 255
heatmap = np.clip(heatmap, 0, 255).astype(np.uint8)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)

from skimage import data, color, io, img_as_float
sample_image_hsv = color.rgb2hsv(sample_image)
heatmap = color.rgb2hsv(heatmap)

alpha = 0.7
sample_image_hsv[..., 0] = heatmap[..., 0]
sample_image_hsv[..., 1] = heatmap[..., 1] * alpha

img_masked = color.hsv2rgb(sample_image_hsv)

f, ax = plt.subplots(1, 2, figsize=(16, 6))
ax[0].imshow(sample_image)
ax[0].set_title(f"Image - Consolidation")
ax[0].axis('off')

ax[1].imshow(img_masked)
ax[1].set_title(
Example #44
def segmentByClustering( rgbImage, colorSpace, clusteringMethod, numberOfClusters):
    # Parameters
    # colorSpace : 'rgb', 'lab', 'hsv', 'rgb+xy', 'lab+xy' or 'hsv+xy'
    # clusteringMethod = 'kmeans', 'gmm', 'hierarchical' or 'watershed'.
    # numberOfClusters positive integer (larger than 2)
    
    
    # Normalizes the image
    def debugImg(rawData):
        import cv2
        import numpy as np
        #import matplotlib.pyplot as plt
        toShow = np.zeros((rawData.shape), dtype=np.uint8)
        cv2.normalize(rawData, toShow, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
        #plt.imshow(toShow)
        #plt.show()
        return(toShow)
    
    
    # Color Space Configuration
    # Import modules
    from skimage import io, color
    import numpy as np
    
    # Read the image
    img = rgbImage
    
    # Create the normalized x, y channels
    h = np.indices((img.shape[0],img.shape[1]))
    y = np.uint8((h[0,:,:]/(img.shape[0]-1))*255)
    x = np.uint8((h[1,:,:]/(img.shape[1]-1))*255)
            
    # Change the image into the specified colorSpace
    if colorSpace == 'rgb+xy':
        img = np.dstack((img,x,y))
    
    elif colorSpace == 'lab' or colorSpace=='lab+xy':
        img = color.rgb2lab(img)
        img = debugImg(img)
        if colorSpace=='lab+xy':
            img = np.dstack((img,x,y))
                
    elif colorSpace == 'hsv' or colorSpace=='hsv+xy':
        img = color.rgb2hsv(img)
        img = debugImg(img)
        if colorSpace == 'hsv+xy':
            img = np.dstack((img,x,y))


    #Clustering methods
    y,x,chan = img.shape
    vect = img.reshape(x*y,chan)
    
    import numpy as np
    if clusteringMethod=='hierarchical':
        import cv2
        img = cv2.resize(img, (281,121))
        yh,xh,chanh = img.shape
        vecth = img.reshape(xh*yh,chanh)
    
    if clusteringMethod=='kmeans':
        from sklearn.cluster import KMeans
        kmeans = KMeans(n_clusters=numberOfClusters).fit_predict(vect)
        segmentation = np.reshape(kmeans,(y,x))
    
    elif clusteringMethod == 'gmm':
        from sklearn import mixture
        gmm = mixture.GaussianMixture(n_components=numberOfClusters).fit_predict(vect)
        segmentation = np.reshape(gmm,(y,x))

    elif clusteringMethod == 'hierarchical':
        import scipy
        from sklearn.cluster import AgglomerativeClustering
        clustering = AgglomerativeClustering(n_clusters=numberOfClusters).fit_predict(vecth)
        segmentation = np.reshape(clustering,(yh,xh))
        segmentation = np.uint8(segmentation)
        segmentation = scipy.misc.imresize(segmentation,(y,x),interp='nearest', mode=None)
       
        
        # The idea to implement the watershed algorithm segmentation was taken
        # from [1]
    elif clusteringMethod == 'watershed':
        import numpy as np
        from scipy import ndimage as ndi
        from skimage.morphology import watershed
        from skimage.feature import peak_local_max
        img=np.mean(img,axis=2)
        img1=img*-1 #local MINS
        local_max = peak_local_max(img1, indices=False,num_peaks=numberOfClusters,num_peaks_per_label=1)
        markers=ndi.label(local_max)[0]
        segmentation=watershed(img,markers)
        
    return segmentation


# References 
# [1] http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html
    vrep.simxStartSimulation(clientID,vrep.simx_opmode_oneshot)

    error, motorLeft = vrep.simxGetObjectHandle(clientID, 'nakedCar_motorLeft', vrep.simx_opmode_oneshot_wait)
    error, motorRight = vrep.simxGetObjectHandle(clientID, 'nakedCar_motorRight', vrep.simx_opmode_oneshot_wait)

    error, camera = vrep.simxGetObjectHandle(clientID, 'cam', vrep.simx_opmode_oneshot_wait)
    error, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0,vrep.simx_opmode_streaming)
    time.sleep(0.1)

    error, info = vrep.simxGetInMessageInfo(clientID, vrep.simx_headeroffset_server_state)
    while (info != 0):
        error, resolution, image = vrep.simxGetVisionSensorImage(clientID, camera, 0,vrep.simx_opmode_buffer)
        if error == vrep.simx_return_ok:
            img = np.array(image, dtype=np.uint8)
            img.resize([resolution[1],resolution[0],3])
            imgH = color.rgb2hsv(img)[...,0]
            imgS = color.rgb2hsv(img)[...,1]
            red = (imgH<0.15) & (imgS>0.5)
            green = (imgH>0.2) & (imgH<0.4) & (imgS>0.5)

            redSignal = measure.label(red)
            redSignal = measure.regionprops(redSignal)
            if len(redSignal)!=0:
                if redSignal[0].area/red.size>0.05:
                    error=vrep.simxSetJointTargetVelocity(clientID, motorLeft, 0, vrep.simx_opmode_oneshot)
                    error=vrep.simxSetJointTargetVelocity(clientID, motorRight, 0, vrep.simx_opmode_oneshot)

            greenSignal = measure.label(green)
            greenSignal = measure.regionprops(greenSignal)
            if len(greenSignal)!=0:
                if greenSignal[0].area/green.size>0.05:
def load_one_image(args_color, flake_path, fname, data_path, size_thre):
    tmp_flake = pickle.load(open(os.path.join(flake_path, fname), 'rb'))
    image_labelmap = tmp_flake['image_labelmap']
    tmp_flake = tmp_flake['flakes']

    flakes = []
    feats = []
    if len(tmp_flake) > 0:
        image = Image.open(os.path.join(data_path, fname[:-2] + 'tiff'))
        im_rgb = np.array(image).astype('float')
        im_hsv = color.rgb2hsv(im_rgb)
        im_hsv[:, :, 2] = im_hsv[:, :, 2] / 255.0
        im_gray = color.rgb2gray(im_rgb)
        imH, imW, _ = im_rgb.shape

        # background fitting
        if args_color == 'contrast' or args_color == 'both' or 'bg' in args_color:
            bg_rgb = []
            [C, R] = np.meshgrid(np.arange(0, imW), np.arange(0, imH))
            Y = np.reshape(R, [-1]) / (imH - 1) - 0.5
            X = np.reshape(C, [-1]) / (imW - 1) - 0.5
            A = np.stack([
                np.ones([imH * imW]),
                X * X,
                Y * Y,
                X * Y,
                X,
                Y,
            ],
                         axis=1)
            for c in range(3):
                brob_rlm_model = robustfit.RLM()
                brob_rlm_model.fit(A, np.reshape(im_rgb[:, :, c], [-1]))
                pred_map = np.reshape(brob_rlm_model.predict(A), [imH, imW])
                bg_rgb.append(pred_map)
            bg_rgb = np.stack(bg_rgb, axis=2).astype('float')
            bg_hsv = color.rgb2hsv(bg_rgb)
            bg_hsv[:, :, 2] = bg_hsv[:, :, 2] / 255.0
            bg_gray = color.rgb2gray(bg_rgb)

        for i in range(len(tmp_flake)):
            if tmp_flake[i]['flake_size'] > size_thre:
                # names.append(fname+'-'+str(tmp_flake[i]['flake_id']))
                f_mask_r_min, f_mask_r_max, f_mask_c_min, f_mask_c_max = tmp_flake[
                    i]['flake_exact_bbox']
                f_mask_height = f_mask_r_max - f_mask_r_min
                f_mask_width = f_mask_c_max - f_mask_c_min
                flake_large_bbox = [
                    max(0, f_mask_r_min - int(0.5 * f_mask_height)),
                    min(imH, f_mask_r_max + int(0.5 * f_mask_height)),
                    max(0, f_mask_c_min - int(0.5 * f_mask_width)),
                    min(imW, f_mask_c_max + int(0.5 * f_mask_width))
                ]
                tmp_flake[i]['flake_large_bbox'] = flake_large_bbox
                tmp_flake[i]['flake_img'] = im_rgb[
                    flake_large_bbox[0]:flake_large_bbox[1],
                    flake_large_bbox[2]:flake_large_bbox[3], :].astype(
                        np.uint8)

                flakes.append(tmp_flake[i])

                tmp_fea_ori = tmp_flake[i]['flake_color_fea']
                bwmap = (image_labelmap == i + 1).astype(np.uint8)

                if 'ori' in args_color:
                    tmp_color_fea = list(tmp_fea_ori)
                elif 'contrast' in args_color or 'both' in args_color:
                    # color fea
                    assert bwmap.sum() == tmp_flake[i]['flake_size']
                    contrast_gray = im_gray - bg_gray
                    contrast_hsv = im_hsv - bg_hsv
                    contrast_rgb = im_rgb - bg_rgb
                    flake_color_entropy = cv2.calcHist(
                        [contrast_gray[bwmap > 0].astype('uint8')], [0], None,
                        [256], [0, 256])
                    flake_color_entropy = entropy(flake_color_entropy, base=2)
                    # gray, h, gray std, hsv mean, hsv std, rgb mean, rgb std, gray entropy
                    tmp_fea_contrast = [contrast_gray[bwmap>0].mean(),
                                     contrast_hsv[bwmap>0, 2].mean()] + \
                                     [contrast_gray[bwmap>0].std()] + \
                                     list(contrast_hsv[bwmap>0].mean(0)) + list(contrast_hsv[bwmap>0].std(0)) + \
                                     list(contrast_rgb[bwmap>0].mean(0)) + list(contrast_rgb[bwmap>0].std(0)) + [flake_color_entropy]

                    if 'contrast' in args_color:
                        tmp_color_fea = list(tmp_fea_contrast)
                    elif 'both' in args_color:
                        tmp_color_fea = list(tmp_fea_ori) + list(
                            tmp_fea_contrast)
                else:
                    raise NotImplementedError

                if 'bg' in args_color:
                    tmp_bg_fea = [bg_gray[bwmap>0].mean()] + \
                         [bg_gray[bwmap>0].std()] + \
                         list(bg_hsv[bwmap>0].mean(0)) + list(bg_hsv[bwmap>0].std(0)) + \
                         list(bg_rgb[bwmap>0].mean(0)) + list(bg_rgb[bwmap>0].std(0))

                    tmp_color_fea = list(tmp_color_fea) + list(tmp_bg_fea)

                if 'shape' in args_color:
                    tmp_shape_fea = tmp_flake[i]['flake_shape_fea']
                    len_area_ratio = tmp_shape_fea[0]
                    fracdim = tmp_shape_fea[-1]
                    tmp_color_fea = list(tmp_color_fea) + [
                        len_area_ratio, fracdim
                    ]

                feats.append(tmp_color_fea)

    return flakes, feats
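A minimal sketch of how load_one_image might be looped over a directory of pickled flake files; the directory names, the 'contrast' option and the size threshold below are illustrative placeholders, not values taken from the original script.

if __name__ == '__main__':
    import os
    flake_dir = './flake_pickles'   # hypothetical directory of *.p files
    image_dir = './raw_images'      # hypothetical directory of the matching *.tiff files
    all_flakes, all_feats = [], []
    for fname in sorted(os.listdir(flake_dir)):
        if fname.endswith('.p'):
            flakes, feats = load_one_image('contrast', flake_dir, fname, image_dir, size_thre=100)
            all_flakes.extend(flakes)
            all_feats.extend(feats)
    print('%d flakes kept' % len(all_flakes))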
Ejemplo n.º 47
0
import numpy as np

from skimage import io
from skimage import color
from skimage.data import astronaut
from skimage.color import rgb2gray
from skimage.filters import sobel
from skimage.segmentation import felzenszwalb, slic, quickshift, watershed
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float

im = io.imread('test1-0034.jpg')
img = img_as_float(im[::2, ::2])
#im = 'test1-0034.jpg'

img = color.rgb2hsv(img)
img = img[::3]

segments_fz = felzenszwalb(img, scale=100, sigma=0.5, min_size=50)
segments_slic = slic(img, n_segments=250, compactness=10, sigma=1)
segments_quick = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
gradient = sobel(rgb2gray(img))
segments_watershed = watershed(gradient, markers=250, compactness=0.001)

#io.imshow(segments_fz)
io.imshow(segments_watershed)

print("Felzenszwalb number of segments: {}".format(len(
    np.unique(segments_fz))))
print('SLIC number of segments: {}'.format(len(np.unique(segments_slic))))
print('Quickshift number of segments: {}'.format(len(np.unique(segments_quick))))
Ejemplo n.º 48
0
 def __call__(self, img):
     img = np.asarray(img, np.uint8)
     img = color.rgb2hsv(img)
     return img
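This __call__ presumably belongs to a small image-transform class used in a data-loading pipeline; a self-contained sketch of such a wrapper is shown below (the class name ToHSV and the sample file name are assumptions made for illustration).

import numpy as np
from PIL import Image
from skimage import color

class ToHSV:
    def __call__(self, img):
        img = np.asarray(img, np.uint8)
        return color.rgb2hsv(img)   # float HSV, channels in [0, 1]

# hsv = ToHSV()(Image.open('sample.png').convert('RGB'))   # hypothetical file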
Ejemplo n.º 49
0
def rgb2hsv(RgbPic):
    HsvPic = color.rgb2hsv(RgbPic)
    return (HsvPic)
Ejemplo n.º 50
0
def segmentByClustering( rgbImage, colorSpace, clusteringMethod, numberOfClusters):
   #module importation
     import pandas as pd
     import numpy as np
     from sklearn.cluster import KMeans
     import matplotlib.pyplot as plt   
     from skimage import io, color
     import cv2
     import ipdb
     from sklearn.cluster import AgglomerativeClustering
     xyimg=[]
     # normalizing function
     def debugImg(rawData):
       toShow = np.zeros((rawData.shape), dtype=np.uint8)
       cv2.normalize(rawData, toShow, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
#       cv2.imwrite('img.jpg', toShow)
     print (type(rgbImage))
     def xy(img):
       height = np.size(img, 0)
       width = np.size(img, 1)
       mat=np.zeros((height,width,2))
       mat[::,::,1]=(mat[::,::,1]+np.arange(width)[np.newaxis,:])/width       
       mat[::,::,0]=(mat[::,::,0]+np.arange(height)[:,np.newaxis])/height
       return mat
   
    
     def merge(img,xy):
         im=np.sum(img,axis=-1)
         xysum=np.sum(xy,axis=-1)
         fin=np.add(im,xysum)/5
         return fin
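     # merge() above collapses the color channels and the normalized (row, col)
     # grid into a single 2-D map by summing them and dividing by 5
     # (3 color channels + 2 coordinates)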
     #resize if it is hierarchical
     if clusteringMethod=='hierarchical':       
#       rgbImage = cv2.resize(rgbImage, (0,0), fx=0.5, fy=0.5) 
       height = np.size(rgbImage, 0)
       width = np.size(rgbImage, 1)
     else:
       height = np.size(rgbImage, 0)
       width = np.size(rgbImage, 1)
     img=rgbImage
     #change to the specified color space
     if colorSpace == "lab":
      img_lab = color.rgb2lab(rgbImage)    
      debugImg(img_lab)
#      l = img_lab[:,:,0]
#      a = img_lab[:,:,1]
#      b = img_lab[:,:,2]
#      sum = l+a+b
#      sum = sum/3
      img =img_lab
     elif colorSpace == "hsv":
      img_hsv = color.rgb2hsv(rgbImage)    
      debugImg(img_hsv)
#      h = img_hsv[:,:,0]
#      s = img_hsv[:,:,1]
#      v = img_hsv[:,:,2]
#      sum = h+s+v
#      sum = sum/3
#      img = sum
      img =img_hsv
     elif colorSpace == "rgb+xy":
      r = rgbImage[:,:,0]
      g = rgbImage[:,:,1]
      b = rgbImage[:,:,2]
#      img_xyz = color.rgb2xyz(rgbImage)
#      x = img_xyz[:,:,0]
#      y = img_xyz[:,:,1]
      xyimg=xy(rgbImage)
      #img = np.concatenate((r,g,b, x, y), axis=0)
#      sum = r+g+b+x+y
#      sum = sum/5
#      img = sum
      
     elif colorSpace == "lab+xy":
      img_lab = color.rgb2lab(rgbImage)    
      debugImg(img_lab)
#      l = img_lab[:,:,0]
#      a = img_lab[:,:,1]
#      b = img_lab[:,:,2]
#      img_xyz = color.lab2xyz(img_lab)
#      x = img_xyz[:,:,0]
#      y = img_xyz[:,:,1]
#      sum = l+a+b+x+y
#      sum = sum/5
#      #img = np.concatenate((l,a,b, x, y), axis=0)
      img = img_lab
      xyimg=xy(img_lab)
     elif colorSpace == "hsv+xy":
      img_hsv = color.rgb2hsv(rgbImage)    
      debugImg(img_hsv)
#      h = img_hsv[:,:,0]
#      s = img_hsv[:,:,1]
#      v = img_hsv[:,:,2]
#      img_xyz = color.hsv2xyz(img_hsv)
#      x = img_xyz[:,:,0]
#      y = img_xyz[:,:,1]
#      #img = np.concatenate((h,s,v, x, y), axis=0)
#      sum = h+s+v+x+y
#      sum = sum/5
#      img = sum
      img=img_hsv
      xyimg=xy(img)
     else:
       img = rgbImage
#       img = color.rgb2gray(img)
     # preparation to classifiers
     
     
     
     
     #proceed to the specified clustering method
     f=img
     img=merge(f,xyimg)
     print(img.shape)
     print(img)
     debugImg(img)
     plt.imshow(img)
     plt.show()
     if clusteringMethod == "kmeans":
       feat = img.reshape(height*width,1)
       kmeans = KMeans(n_clusters=numberOfClusters).fit_predict(feat)
       segmentation = np.reshape(kmeans,(height,width))

     elif clusteringMethod == "gmm":
       from sklearn import mixture
       feat = img.reshape(height*width,1)
       gmm = mixture.GaussianMixture(n_components=numberOfClusters).fit_predict(feat)
       segmentation = np.reshape(gmm,(height,width))

     elif clusteringMethod == "hierarchical":
       feat = img.reshape(height*width,1)
       clustering = AgglomerativeClustering(n_clusters=numberOfClusters).fit_predict(feat)
       segmentation = (np.reshape(clustering,(height,width)))
       print(clustering)
       print(type(clustering[0][0]))
#       segmentation=cv2.resize(segmentation, None, fx = 2, fy = 2, interpolation = cv2.INTER_CUBIC)  
     else:
        from skimage import morphology
        from skimage import feature
        import skimage
#        img = color.rgb2gray(img)
        sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
        # Compute gradient magnitude
        grad_magn = np.sqrt(sobelx**2 + sobely**2)
        debugImg(grad_magn)

        
        import matplotlib.pyplot as plt



        imagenW=grad_magn

        found=1000000
        minimum=found
        while(found>numberOfClusters): 
            imagenW=morphology.h_minima(grad_magn,found)
            _, labeled_fg = cv2.connectedComponents(imagenW.astype(np.uint8))
            labels = morphology.watershed(grad_magn, labeled_fg)
            found=len(np.unique(labels))
            if found==minimum:
              found=numberOfClusters
            if minimum>found:
              minimum=found

            

        plt.figure()
        plt.imshow(labeled_fg)
        print(labeled_fg)



#        superimposed = img.copy()
#        watershed_boundaries = skimage.segmentation.find_boundaries(labels)
#        superimposed[watershed_boundaries] = 255
#        superimposed[foreground_1] = 255
        segmentation = labels
        
 
     return segmentation
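An illustrative call to segmentByClustering; the sample image and the parameter values are arbitrary choices, not taken from the original script.

# from skimage.data import astronaut
# seg = segmentByClustering(astronaut(), 'hsv+xy', 'kmeans', numberOfClusters=5)
# seg is a (height, width) array of cluster labels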
Ejemplo n.º 51
0
def colorize(image, hue, s=1, v=1):
    hsv = color.rgb2hsv(image)
    hsv[:, :, 0] = hue
    hsv[:, :, 1] = s
    hsv[:, :, 2] *= v
    return color.hsv2rgb(hsv)
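A quick illustrative use of colorize, with the bundled skimage astronaut image standing in for any RGB input.

from skimage.data import astronaut
tinted = colorize(astronaut(), hue=0.6, s=0.6, v=1.0)   # uniform blue-ish tint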
Ejemplo n.º 52
0
    val_med = []
    hue_mode = []
    sat_mode = []
    val_mode = []
    hsd = []
    ssd = []
    vsd = []

    counter = -1

    for root, dirs, files in os.walk(input_path):
        for file in files:
            counter += 1

            try:
                img = color.rgb2hsv(imread(root + "/" + file))

                i_hue = np.mean(img[:, :, 0])
                hue.append(i_hue)

                i_sat = np.mean(img[:, :, 1])
                sat.append(i_sat)

                i_val = np.mean(img[:, :, 2])
                val.append(i_val)

                i_hue_med = np.median(img[:, :, 0])
                hue_med.append(i_hue_med)

                i_sat_med = np.median(img[:, :, 1])
                sat_med.append(i_sat_med)
Ejemplo n.º 53
0
def process_one_image(img_name, save_name, fig_save_name, im_i):
    # try:
    #     pickle.load(open(save_name+'.p', 'rb'))
    #     return
    # except:
    #     print('process')
    if os.path.exists(save_name + '.p') and os.path.exists(fig_save_name +
                                                           '.png'):
        return
    print('process %s' % (img_name))
    # bk_image = Image.open(bk_name)
    # bk_rgb = np.array(bk_image).astype('float')
    # bk_gray = np.array(bk_image.convert('L', (0.2989, 0.5870, 0.1140, 0))).astype('float')
    # # bk_hsv = np.array(bk_image.convert('HSV')).astype('float')
    # # to have same result as matlab
    # bk_hsv = color.rgb2hsv(bk_rgb)
    # bk_hsv[:,:,2] = bk_hsv[:,:,2]/255.0

    image = Image.open(img_name)
    im_rgb = np.array(image).astype('float')
    im_gray = np.array(image.convert(
        'L', (0.2989, 0.5870, 0.1140, 0))).astype('float')
    imH, imW = im_gray.shape
    # im_hsv = np.array(image.convert('HSV')).astype('float')
    # to have same result as matlab
    im_hsv = color.rgb2hsv(im_rgb)
    im_hsv[:, :, 2] = im_hsv[:, :, 2] / 255.0

    # res_map, image_labelmap, flake_centroids, flake_sizes, num_flakes = utils.perform_robustfit_multichannel(im_hsv, im_gray, hyperparams['im_thre'], hyperparams['size_thre'])
    res_map, image_labelmap, flake_centroids, flake_sizes, num_flakes = utils.perform_robustfit_multichannel_v3(
        im_hsv, im_gray, hyperparams['im_thre_high'],
        hyperparams['im_thre_low'], hyperparams['size_thre'])

    if num_flakes != len(flake_sizes):
        print(len(flake_sizes), num_flakes, img_name)
        return

    # get bg
    bg_rgb = []
    [C, R] = np.meshgrid(np.arange(0, imW), np.arange(0, imH))
    Y = np.reshape(R, [-1]) / (imH - 1) - 0.5
    X = np.reshape(C, [-1]) / (imW - 1) - 0.5
    A = np.stack([
        np.ones([imH * imW]),
        X * X,
        Y * Y,
        X * Y,
        X,
        Y,
    ], axis=1)
    for c in range(3):
        brob_rlm_model = robustfit.RLM()
        brob_rlm_model.fit(A, np.reshape(im_rgb[:, :, c], [-1]))
        pred_map = np.reshape(brob_rlm_model.predict(A), [imH, imW])
        bg_rgb.append(pred_map)
    bg_rgb = np.stack(bg_rgb, axis=2).astype('float')
    bg_hsv = color.rgb2hsv(bg_rgb)
    bg_hsv[:, :, 2] = bg_hsv[:, :, 2] / 255.0
    bg_gray = color.rgb2gray(bg_rgb)
    # get contrast color features
    contrast_gray = im_gray - bg_gray
    contrast_hsv = im_hsv - bg_hsv
    contrast_rgb = im_rgb - bg_rgb

    flakes = []

    kernel = np.ones((5, 5), np.uint8)

    im_tosave = im_rgb.astype(np.uint8)

    # get features for each flake
    for i in range(num_flakes):
        f_mask_r, f_mask_c = np.nonzero(image_labelmap == i + 1)
        f_mask_r_min = min(f_mask_r)
        f_mask_r_max = max(f_mask_r)
        f_mask_height = f_mask_r_max - f_mask_r_min
        f_mask_c_min = min(f_mask_c)
        f_mask_c_max = max(f_mask_c)
        f_mask_width = f_mask_c_max - f_mask_c_min
        flake_exact_bbox = [
            f_mask_r_min, f_mask_r_max + 1, f_mask_c_min, f_mask_c_max + 1
        ]
        flake_large_bbox = [
            max(0, f_mask_r_min - int(0.1 * f_mask_height)),
            min(imH, f_mask_r_max + int(0.1 * f_mask_height)),
            max(0, f_mask_c_min - int(0.1 * f_mask_width)),
            min(imW, f_mask_c_max + int(0.1 * f_mask_width))
        ]

        bwmap = (image_labelmap == i + 1).astype(np.uint8)
        _, flake_contours, _ = cv2.findContours(bwmap, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_NONE)
        # print(im_tosave)
        # print(flake_contours[0])
        # print(flake_contours[0].shape)
        im_tosave = cv2.drawContours(im_tosave, flake_contours[0], -1,
                                     (255, 0, 0), 2)
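        # NOTE: everything from here down to the closing triple-quote is wrapped
        # in a string literal and therefore never executes; only the contour
        # drawing above takes effect and `flakes` remains empty.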
        '''
        flake_contours = np.squeeze(flake_contours[0], 1)
        # row, column
        flake_contours = np.flip(flake_contours, 1)

        
        # compute convex hull of the contours
        flake_convexhull = cv2.convexHull(flake_contours)

        # shape fea
        flake_shape_len_area_ratio = flake_contours.shape[0] / (bwmap.sum() + 0.0)
        contours_center_dis = cdist(np.expand_dims(flake_centroids[i],0), flake_contours)
        flake_shape_contour_hist = np.histogram(contours_center_dis, bins=15)[0]
        flake_shape_contour_hist = flake_shape_contour_hist / flake_shape_contour_hist.sum()
        flake_shape_fracdim = utils.fractal_dimension(bwmap)
        
        inner_bwmap = cv2.erode(bwmap, kernel, iterations=1)

        # color fea
        flake_color_fea = [im_gray[bwmap>0].mean(), 
                         im_hsv[bwmap>0, 2].mean()] + \
                         [im_gray[bwmap>0].mean(), im_gray[bwmap>0].std()] + \
                         list(im_hsv[bwmap>0].mean(0)) + list(im_hsv[bwmap>0].std(0)) + \
                         list(im_rgb[bwmap>0].mean(0)) + list(im_rgb[bwmap>0].std(0))
        # flake_color_entropy = entropy(im_gray[bwmap>0].astype('uint8'), disk(5))
        flake_color_entropy = cv2.calcHist([im_gray[bwmap>0].astype('uint8')],[0],None,[256],[0,256])
        flake_color_entropy = entropy(flake_color_entropy, base=2)
        flake_inner_color_fea = [0] * 16
        flake_inner_color_entropy = 0
        flake_inner_contrast_color_fea = [0] * 16
        if inner_bwmap.sum() > 0:
            flake_inner_color_fea = [im_gray[inner_bwmap>0].mean(), 
                         im_hsv[inner_bwmap>0, 2].mean()] + \
                         [im_gray[inner_bwmap>0].mean(), im_gray[inner_bwmap>0].std()] + \
                         list(im_hsv[inner_bwmap>0].mean(0)) + list(im_hsv[inner_bwmap>0].std(0)) + \
                         list(im_rgb[inner_bwmap>0].mean(0)) + list(im_rgb[inner_bwmap>0].std(0))

            # flake_inner_color_entropy = entropy(im_gray[inner_bwmap>0].astype('uint8'), disk(5))
            flake_inner_color_entropy = cv2.calcHist([im_gray[inner_bwmap>0].astype('uint8')],[0],None,[256],[0,256])
            flake_inner_color_entropy = entropy(flake_inner_color_entropy, base=2)

            flake_inner_contrast_color_entropy = cv2.calcHist([contrast_gray[inner_bwmap>0].astype('uint8')],[0],None,[256],[0,256])
            flake_inner_contrast_color_entropy = entropy(flake_inner_contrast_color_entropy, base=2)
            flake_inner_contrast_color_fea = [contrast_gray[inner_bwmap>0].mean(), 
                         contrast_hsv[inner_bwmap>0, 2].mean()] + \
                         [contrast_gray[inner_bwmap>0].std()] + \
                         list(contrast_hsv[inner_bwmap>0].mean(0)) + list(contrast_hsv[inner_bwmap>0].std(0)) + \
                         list(contrast_rgb[inner_bwmap>0].mean(0)) + list(contrast_rgb[inner_bwmap>0].std(0)) + list(flake_inner_contrast_color_entropy)
        # get contrast color features
        flake_contrast_color_entropy = cv2.calcHist([contrast_gray[bwmap>0].astype('uint8')],[0],None,[256],[0,256])
        flake_contrast_color_entropy = entropy(flake_contrast_color_entropy, base=2)
        # gray, h, gray std, hsv mean, hsv std, rgb mean, rgb std, gray entropy
        flake_contrast_color_fea = [contrast_gray[bwmap>0].mean(), 
                         contrast_hsv[bwmap>0, 2].mean()] + \
                         [contrast_gray[bwmap>0].std()] + \
                         list(contrast_hsv[bwmap>0].mean(0)) + list(contrast_hsv[bwmap>0].std(0)) + \
                         list(contrast_rgb[bwmap>0].mean(0)) + list(contrast_rgb[bwmap>0].std(0)) + [flake_contrast_color_entropy]

        flake_bg_color_fea = [bg_gray[bwmap>0].mean()] + \
                         [bg_gray[bwmap>0].std()] + \
                         list(bg_hsv[bwmap>0].mean(0)) + list(bg_hsv[bwmap>0].std(0)) + \
                         list(bg_rgb[bwmap>0].mean(0)) + list(bg_rgb[bwmap>0].std(0))

        flake_i = dict()
        flake_i['img_name'] = img_name
        flake_i['flake_id'] = i+1
        flake_i['flake_size'] = flake_sizes[i]
        flake_i['flake_exact_bbox'] = flake_exact_bbox
        flake_i['flake_large_bbox'] = flake_large_bbox
        flake_i['flake_contour_loc'] = flake_contours.astype('int16')
        flake_i['flake_convexcontour_loc'] = flake_convexhull.astype('int16')
        flake_i['flake_center'] = flake_centroids[i]
        # flake_i['flake_img'] = im_rgb[flake_large_bbox[0]: flake_large_bbox[1], flake_large_bbox[2]:flake_large_bbox[3], :].astype(np.uint8)
        flake_i['flake_shape_fea'] = np.array([flake_shape_len_area_ratio] + list(flake_shape_contour_hist) + [flake_shape_fracdim])
        flake_i['flake_color_fea'] = np.array(flake_color_fea + [flake_color_entropy] + flake_inner_color_fea + [flake_inner_color_entropy])
        flake_i['flake_contrast_color_fea'] = np.array(flake_contrast_color_fea)
        flake_i['flake_innercontrast_color_fea'] = np.array(flake_inner_contrast_color_fea)
        flake_i['flake_bg_color_fea'] = np.array(flake_bg_color_fea)

        # subsegment the flake
        if flake_i['flake_size'] > 100:
            n_clusters = hyperparams['n_clusters']
            flake_rgb = im_rgb[bwmap>0]
            flake_gray = im_gray[bwmap>0]
            flake_hsv = im_hsv[bwmap>0]

            flake_contrast_rgb = im_rgb[bwmap>0] - bg_rgb[bwmap>0]
            flake_contrast_gray = im_gray[bwmap>0] - bg_gray[bwmap>0]
            flake_contrast_hsv = im_hsv[bwmap>0] - bg_hsv[bwmap>0]
            
            pixel_features = np.concatenate([flake_rgb, flake_hsv, np.expand_dims(flake_gray, 1)], 1)
            pixel_features = StandardScaler().fit_transform(pixel_features)
            n_pixel = pixel_features.shape[0]
            cluster_rslt = KMeans(n_clusters=n_clusters, random_state=0, n_jobs=-1).fit(pixel_features)
            assignment = cluster_rslt.labels_
            # # get the overlayed image
            # overlay = np.zeros([n_pixel, 3], dtype=np.uint8)
            # overlay[assignment==0] = (255,0,0)
            # overlay[assignment==1] = (0,255,0)
            # overlay[assignment==2] = (0,0,255)
            # ori_bgr = im_bgr_tosave[bwmap>0]
            # overlay_bgr = cv2.addWeighted(np.expand_dims(ori_bgr,0), 0.75, np.expand_dims(overlay,0), 0.25, 0)
            # im_bgr_tosave[bwmap>0] = overlay_bgr[0,:,:]
            all_subsegment_features = []
            all_subsegment_keys = []
            for ci in range(n_clusters):
                subseg_gray = flake_gray[assignment==ci]
                subseg_contrast_gray = flake_contrast_gray[assignment==ci]
                # print(len(subseg_gray))
                subseg_hsv = flake_hsv[assignment==ci]
                subseg_rgb = flake_rgb[assignment==ci]
                subseg_contrast_hsv = flake_contrast_hsv[assignment==ci]
                subseg_contrast_rgb = flake_contrast_rgb[assignment==ci]

                sub_flake_color_entropy = cv2.calcHist([subseg_gray.astype('uint8')],[0],None,[256],[0,256])
                sub_flake_color_entropy = entropy(sub_flake_color_entropy, base=2)[0]
                sub_flake_contrast_color_entropy = cv2.calcHist([subseg_contrast_gray.astype('uint8')],[0],None,[256],[0,256])
                sub_flake_contrast_color_entropy = entropy(sub_flake_contrast_color_entropy, base=2)[0]

                sub_flake_color_fea = [subseg_gray.mean(), 
                         subseg_hsv[:, 2].mean()] + \
                         [subseg_hsv.std()] + \
                         list(subseg_hsv.mean(0)) + list(subseg_hsv.std(0)) + \
                         list(subseg_rgb.mean(0)) + list(subseg_rgb.std(0)) + [sub_flake_color_entropy] + \
                         [subseg_contrast_gray.mean(), 
                         subseg_contrast_hsv[:, 2].mean()] + \
                         [subseg_contrast_gray.std()] + \
                         list(subseg_contrast_hsv.mean(0)) + list(subseg_contrast_hsv.std(0)) + \
                         list(subseg_contrast_rgb.mean(0)) + list(subseg_contrast_rgb.std(0)) + [sub_flake_contrast_color_entropy]
                # all_subsegment_features[sub_flake_color_fea[0]] = sub_flake_color_fea
                all_subsegment_features.append(sub_flake_color_fea)
                all_subsegment_keys.append(sub_flake_color_fea[0])
            # sort based on gray values
            subsegment_features = []
            key_ids = np.argsort(all_subsegment_keys)
            for key_id in key_ids:
                subsegment_features.extend(all_subsegment_features[key_id])

            subsegment_features = np.array(subsegment_features)
            if subsegment_features.shape[0] != 32 * n_clusters:
                print('wrong', save_name, i, subsegment_features.shape)
            assert subsegment_features.shape[0] == 32 * n_clusters
            flake_i['subsegment_features_%d'%(n_clusters)] = subsegment_features
            flake_i['subsegment_assignment_%d'%(n_clusters)] = assignment
        flakes.append(flake_i)
    '''

    # save mat and images
    to_save = dict()
    to_save['bg_rgb'] = bg_rgb
    to_save['res_map'] = res_map
    to_save['image_labelmap'] = image_labelmap
    to_save['flakes'] = flakes

    pickle.dump(to_save, open(save_name + '.p', 'wb'))
    cv2.imwrite(fig_save_name + '.png', np.flip(im_tosave, 2))
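A sketch of how process_one_image is presumably driven; the hyperparameter values and paths are placeholders, and utils/robustfit are the project-local modules the function already relies on.

# hyperparams = {'im_thre_high': 20, 'im_thre_low': 10, 'size_thre': 100}   # placeholder values
# process_one_image('data/scan_0001.tiff',   # input image (placeholder path)
#                   'results/scan_0001',     # prefix for the pickle output
#                   'figures/scan_0001',     # prefix for the annotated image
#                   0)                       # image index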
Ejemplo n.º 54
0
def rubber_band_binary_img(img):
    hsv_img = color.rgb2hsv(img)
    hue_img = hsv_img[:, :, 0]
    sat_img = hsv_img[:, :, 1]
    val_img = hsv_img[:, :, 2]
    return (hue_img > 0.45) & (hue_img < 0.70) & (sat_img > 0.2) & (val_img > 0.5)
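For illustration, the boolean mask returned above can be used directly to count or extract the detected rubber-band pixels; the file name is a placeholder.

# from skimage import io
# img = io.imread('photo_with_band.png')[:, :, :3]   # placeholder file, keep RGB only
# band_mask = rubber_band_binary_img(img)
# print('band pixels:', band_mask.sum())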
Ejemplo n.º 55
0
def diseaseFeatureExtraction(filename):
    selem = disk(8)

    #THRESHOLDING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFF
    image = data.imread(filename)
    image = checkPythonImage(image)

    hsv2 = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    grayimage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    grayimage = checkPythonGrayImage(grayimage)

    thresh = threshold_otsu(grayimage)

    elevation_map = sobel(grayimage)

    markers = np.zeros_like(grayimage)

    if ((grayimage < thresh).sum() > (grayimage > thresh).sum()):
        markers[grayimage < thresh] = 1
        markers[grayimage > thresh] = 2
    else:
        markers[grayimage < thresh] = 2
        markers[grayimage > thresh] = 1

    segmentation = morphology.watershed(elevation_map, markers)

    segmentation = dilation(segmentation - 1, selem)
    segmentation = ndimage.binary_fill_holes(segmentation)

    segmentation = np.logical_not(segmentation)
    grayimage[segmentation] = 0

    watershed_mask = np.empty_like(grayimage, np.uint8)
    width = 0
    height = 0
    while width < len(watershed_mask):

        while height < len(watershed_mask[width]):

            if grayimage[width][height] == 0:
                watershed_mask[width][height] = 0
            else:
                watershed_mask[width][height] = 1

            height += 1
            pass

        width += 1
        height = 0
        pass

    #SPLITTING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
    image = cv2.bitwise_and(image, image, mask=watershed_mask)
    hsv = ''
    if image.shape[2] == 3:
        hsv = color.rgb2hsv(image)
    elif image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
        hsv = color.rgb2hsv(image)
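    # note: hsv above is the skimage HSV image (channels in [0, 1]), while the
    # masking below splits hsv2 from cv2.cvtColor, whose hue channel runs 0-179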
    h, s, v = cv2.split(hsv2)

    #MASKING STUFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
    mask = cv2.inRange(h, 40, 80)
    cv2.bitwise_not(mask, mask)

    res = cv2.bitwise_and(image, image, mask=mask)
    res_gray = cv2.bitwise_and(grayimage, grayimage, mask=mask)

    harfeatures = haralick(res.astype(int),
                           ignore_zeros=True,
                           return_mean=True)

    #glcm = greycomatrix(res_gray, [5], [0], 256)
    #contrast = greycoprops(glcm, 'contrast')[0, 0]
    #ASM = greycoprops(glcm, 'ASM')[0, 0]
    #dissimilarity = greycoprops(glcm, 'dissimilarity')[0, 0]
    #homogeneity = greycoprops(glcm, 'homogeneity')[0, 0]
    #energy = greycoprops(glcm, 'energy')[0, 0]

    features = []

    #features.append(contrast)
    #features.append(ASM)
    #features.append(dissimilarity)
    #features.append(homogeneity)
    #features.append(energy)

    hist = cv2.calcHist([res], [0], None, [256], [0, 256])
    w, h, c = res.shape
    numPixel = w * h

    num = 0
    for index in hist:

        if num != 0 and num < 255:
            features.append(index[0] / (numPixel - hist[0][0]))

        num = num + 1

        pass

    for harfeature in harfeatures:
        features.append(harfeature)
        pass

    output = np.empty((1, len(features)), 'float')

    a = np.array(list(features))
    output[0, :] = a[:]
    return output
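The function returns a single 1 x N feature row, so one possible way to build a feature matrix over a folder of leaf images could look like this (the folder path is a placeholder):

# import glob
# rows = [diseaseFeatureExtraction(f) for f in glob.glob('leaves/*.jpg')]
# X = np.vstack(rows) if rows else np.empty((0, 0))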
Ejemplo n.º 56
0
def find_circles(img):
    circle = []
    mean_v = np.average(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)[:, :, 2])
    gam = 1
    if (mean_v < 100):  #dark images
        gam = 15
        rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        rgb = filters.gaussian(rgb, sigma=1.8, multichannel=True)
        pMin = np.percentile(rgb, 25, interpolation='midpoint')
        pMax = np.percentile(rgb, 100 - 5, interpolation='midpoint')
        rgb = exposure.rescale_intensity(rgb, in_range=(pMin, pMax))
        rgb = exposure.adjust_gamma(rgb, gamma=gam)
        rgb = color.rgb2hsv(rgb)
        blackWhite = 1 - rgb[:, :, 2]
        blackWhite[blackWhite[:, :] != 0] = 1

        # make edge by dilatation and morphology
        dil = binary_dilation(blackWhite, square(3))
        dil = 1 - dil
        dil = morphology.remove_small_holes(dil, 12000)
        dil = morphology.remove_small_objects(dil, 3000)
        dil = binary_dilation(dil, disk(7))

        # make edge by canny and morphology
        edges = canny(blackWhite, sigma=1.2)
        edges = binary_dilation(edges, disk(3))
        edges = morphology.remove_small_objects(edges, 1000)
        edges = binary_opening(edges, disk(3))
        edges = binary_dilation(edges, disk(5))
        edges = binary_fill_holes(edges)
        edges = morphology.remove_small_objects(edges, 8000)

        cv_image = img_as_ubyte(edges & dil)
    else:  #bright images
        gam = 0.7
        # make edge by adaptiveThreshold and morphology
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(gray, 255,
                                       cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY_INV, 33, 3)
        kernel = np.ones((9, 9), np.uint8)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
        kernel = np.ones((19, 19), np.uint8)
        thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
        th_1 = cv2.normalize(thresh, None, 0, 1, cv2.NORM_MINMAX)
        th_1 = binary_fill_holes(th_1)
        th_1 = morphology.remove_small_objects(th_1, 8000)
        th_1 = morphology.binary_dilation(th_1, disk(7))

        cv_image = img_as_ubyte(th_1)

    # find contours in the binary mask (built in either branch above) and keep
    # the ones that are roughly circular
    _, contours, _ = cv2.findContours(cv_image, cv2.RETR_EXTERNAL,
                                      cv2.CHAIN_APPROX_SIMPLE)

    if contours is not None:
        for cnt in contours:
            approx = cv2.approxPolyDP(cnt, .03 * cv2.arcLength(cnt, True),
                                      True)
            vert = len(approx)
            if 3 < vert:
                area = cv2.contourArea(cnt)
                if 8000 < area < 200000:
                    (cx, cy), radius = cv2.minEnclosingCircle(cnt)
                    radius = radius * 0.95
                    circleArea = radius * radius * np.pi
                    ratio = area / circleArea
                    if ratio > 0.65:
                        circle.append((int(cx), int(cy), int(radius)))
    return circle
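A small sketch of how find_circles could be used to draw the detected circles back onto the input; the file names are placeholders.

# img = cv2.imread('plate.jpg')   # placeholder file
# for cx, cy, r in find_circles(img):
#     cv2.circle(img, (cx, cy), r, (0, 255, 0), 2)
# cv2.imwrite('plate_circles.jpg', img)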
Ejemplo n.º 57
0
   Make sure that the mask entries there have the opposite effect: values
   that carry a 1 in the mask are ignored.

2. Now determine the hue (via HSI or HSV) for every pixel and, for each
   image, compute the mean hue over the region marked with ones.
"""
for imageID, classLabel in zip(['02881', '02890', '04650', '04666'],
                               ['Malve', 'Malve', 'Hahnenfuß', 'Hahnenfuß']):
    # iterate over two lists at the same time
    print(imageID, classLabel)
    img = imread('./blumen/image_' + imageID + '.jpg')
    mask = imread('./blumen/image_' + imageID + '_maske.png')
    # the mask can be read just like a normal image
    grey = np.mean(img, axis=2)
    # gray value as the mean of the three color channels -> average over axis 2
    hue = rgb2hsv(img)[:, :, 0]
    # conversion to the HSV color space and extraction of the hue channel
    plt.figure(plt.gcf().number + 1)
    plt.imshow(img)
    plt.figure(plt.gcf().number + 1)
    plt.imshow(mask * 255, cmap='gray')

    maskedGrey = np.ma.array(grey, mask=1 - mask)
    # the given mask has to be flipped, since here a 1 means background
    maskedHue = np.ma.array(hue, mask=1 - mask)
    greyMean = np.ma.mean(maskedGrey)
    # compute the mean over the foreground via np.ma.mean
    hueMean = np.ma.mean(maskedHue)

    #Alternative:
    #    hueMean = np.mean(hue[mask==1])
Ejemplo n.º 58
0
# Yellow represents 1 in the images, or a selected pixel.
# =============================================================================

fileIn = 'C:/school/Ag-AI/images - Copy/blighted/DSC00200.JPG'

#Conservative values
lowRed = 165
highRed = 240
lowGreen = 160
highGreen = 200
lowBlue = 135
highBlue = 240

rgb_img = plt.imread(fileIn)
red = rgb_img[:, :, 0]
hsv_img = rgb2hsv(rgb_img)
hue_img = hsv_img[:, :, 0]
sat_img = hsv_img[:, :, 1]
value_img = hsv_img[:, :, 2]

#saturation mask to isolate foreground
satMask = (sat_img > .11) | (value_img > .3)
#hue and value mask to remove additional brown from background
mask = (hue_img > .14) | (value_img > .48)
#healthy corn mask to remove healthy corn, leaving only blighted pixels
nonBlightMask = hue_img < .14

#get foreground
rawForeground = np.zeros_like(rgb_img)
rawForeground[mask] = rgb_img[mask]
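# Note: only `mask` is applied above; `satMask` and `nonBlightMask` are defined
# but not yet used at this point in the fragment.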
Ejemplo n.º 59
0
    y = mag_i - mag_z
    angle = np.rad2deg(np.arctan2(x, y)) / 180
    ones = np.ones_like(angle)
    hsv = np.reshape((angle, ones, ones), (1, -1, 3))
    return color.hsv2rgb(hsv).reshape((-1, 3))
colors2 = auto_color(maga, magi, magz)
ax[0].clear()
ax[0].scatter(maga, magi - magz, c=colors2, s=1)
colors2.shape
colors.shape
colors2.min(axis=0)
colors2.max(axis=0)
colors3 = (colors2 - colors2.min(axis=0)) / (colors2.max(axis=0) - colors2.min(axis=0))
ax[0].scatter(maga, magi - magz, c=colors2, s=1)
ax[0].scatter(maga, magi - magz, c=colors3, s=1)
color.rgb2hsv([[[0, 1, 1]]])
color.rgb2hsv(np.array([[[0, 1, 1]]], dtype=float))
color.rgb2hsv([[[0, 1, 1.]]])
color.rgb2hsv([[[1., 1, 1.]]])
color.rgb2hsv([[[0., 0, 1.]]])
color.rgb2hsv([[[1., 0, 1.]]])
h = np.linspace(0, 1, 10000)
s = np.ones_like(h)
v = np.ones_like(h)
h = np.linspace(0, 1, 10000).reshape((100, 100))
s = np.ones_like(h)
v = np.ones_like(h)
rgb = color.hsv2rgb(np.dstack((h, s, v)))
fig2, ax2 = plt.subplots()
ax2.imshow(rgb)
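# the figure shows a 100x100 sweep of hue from 0 to 1 at full saturation and
# value, i.e. the whole hue circle rendered through hsv2rgb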
def auto_color(mag_a, mag_i, mag_z):
Ejemplo n.º 60
0
input_path = sys.argv[1]
descriptor = sys.argv[2]

filename = []
hue = []
sat = []
val = []

counter = -1

for file in glob.glob(os.path.join(input_path, '*.png')):
    counter += 1

    try:
        img = color.rgb2hsv(imread(file))

        m_hue = float(mode(img[:, :, 0], axis=None)[0])
        hue.append(m_hue)

        i_sat = np.mean(img[:, :, 1])
        sat.append(i_sat)

        i_val = np.mean(img[:, :, 2])
        val.append(i_val)

        filename.append(file)

        print(counter, file)

    except: