def mean_shift(image_rgb, radius=20, bandwidth=4, eps=1, proc_count=4):
    start = time.time()
    print("Mean Shift: Radius =", radius, ", Bandwidth =", bandwidth, ", EPS =", eps)
    print("Initializing ...")
    image = color.rgb2luv(image_rgb)
    coords = np.rollaxis(np.indices(image.shape[:2]), 0, 3).reshape(-1, 2)
    print("Iterating Points ...")
    partial_iterate = partial(_do_mean_shift, image=image, radius=radius,
                              bandwidth=bandwidth, eps=eps)
    pool = Pool(proc_count)
    segmentation = np.array(pool.map(partial_iterate, coords))
    pool.close()
    pool.join()
    print("Post Processing ...")
    sys.setrecursionlimit(image.shape[0] * image.shape[1] + 100)
    segmentation = segmentation.reshape(image.shape)
    _floodfill_compression(segmentation, bandwidth)
    print("Ending ...")
    segmentation = color.luv2rgb(segmentation)
    end = time.time()
    print("Time elapsed:", end - start, "s")
    return segmentation
def read_image(filename):
    """Read an image from the disk and output data arrays."""
    image_array_rgb = misc.imread(filename, mode='RGB')
    # image_array_grey = misc.imread(filename, flatten=True, mode='F')
    image_array_grey = color.rgb2grey(image_array_rgb) * 255
    image_array_luv = color.rgb2luv(image_array_rgb)
    return image_array_rgb, image_array_grey, image_array_luv
def test_rgb2luv_brucelindbloom(self):
    """
    Test the RGB->Luv conversion by comparing to the calculator on the
    authoritative Bruce Lindbloom
    [website](http://brucelindbloom.com/index.html?ColorCalculator.html).
    """
    # Obtained with D65 white point, sRGB model and gamma
    gt_for_colbars = np.array([
        [100, 0, 0],
        [97.1393, 7.7056, 106.7866],
        [91.1132, -70.4773, -15.2042],
        [87.7347, -83.0776, 107.3985],
        [60.3242, 84.0714, -108.6834],
        [53.2408, 175.0151, 37.7564],
        [32.2970, -9.4054, -130.3423],
        [0, 0, 0]]).T
    gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
    assert_array_almost_equal(rgb2luv(self.colbars_array), gt_array, decimal=2)
def salvarcombinacoes(img):
    img_rgb = color.convert_colorspace(img, 'RGB', 'RGB')
    img_hsv = color.convert_colorspace(img_rgb, 'RGB', 'HSV')
    img_lab = color.rgb2lab(img_rgb)
    img_hed = color.rgb2hed(img_rgb)
    img_luv = color.rgb2luv(img_rgb)
    img_rgb_cie = color.convert_colorspace(img_rgb, 'RGB', 'RGB CIE')
    img_xyz = color.rgb2xyz(img_rgb)
    img_cmy = rgb2cmy(img_rgb)
    lista = [img_rgb, img_hsv, img_lab, img_hed, img_luv, img_rgb_cie, img_xyz, img_cmy]
    lista2 = ["rgb", "hsv", "lab", "hed", "luv", "rgb_cie", "xyz", "cmy"]
    for i in range(len(lista)):
        for j in range(len(lista)):
            for k in range(3):
                for l in range(3):
                    nome = lista2[i] + str(k) + lista2[j] + str(l) + ".jpg"
                    io.imsave(nome, juntarcanais(lista[i][:, :, k], lista[j][:, :, l]))
    return
def calc_temperature_distance(image, percentiles, max_distance):
    """
    Calculates the temperature of the center 25% of an image based on
    percentiles, and returns the mean distance of these percentiles from the
    'Planckian Locus'. This distance is useful for flagging images that were
    improperly saved or processed by the camera. Temperature and distance are
    based on the method of Robertson (1968).

    Parameters
    ----------
    image : ndarray
        RGB image.
    percentiles : list of float
        Percentiles to use to calculate temperature and distance.
    max_distance : int or None
        Maximum distance allowed from the Planckian locus. If None, the
        distance itself is returned; otherwise a boolean of
        (distance < max_distance) is returned.

    Returns
    -------
    float or bool

    See also
    --------
    `colour.uv_to_CCT_Robertson1968`, `skimage.color.rgb2luv`
    """
    luv = color.rgb2luv(_center_image(image))
    u = [np.percentile(luv[:, :, 1], i) for i in percentiles]
    v = [np.percentile(luv[:, :, 2], i) for i in percentiles]
    dist = [colour.uv_to_CCT_Robertson1968((u[i], v[i]))[1]
            for i in range(len(percentiles))]
    dist = np.mean(dist)
    if max_distance is None:
        out = dist
    else:
        out = dist < max_distance
    return out
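# A minimal usage sketch for calc_temperature_distance above. It assumes the
# surrounding module (with _center_image and the colour-science package) is
# already importable; the filename, percentile list, and the 40-unit threshold
# are illustrative choices, not values prescribed by the original code.
from skimage import io

photo = io.imread('white_wall.jpg')  # hypothetical flat-field RGB test shot

# mean distance of the chosen percentile (u, v) points from the Planckian locus
distance = calc_temperature_distance(photo, [25, 50, 75], max_distance=None)

# or a pass/fail flag against an illustrative threshold
looks_ok = calc_temperature_distance(photo, [25, 50, 75], max_distance=40)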
print('The type of the image representation is {}'.format(im.dtype))
print('The max/min pixel values are ({}, {})'.format(im.max(), im.min()))

# Here rgb2grey calculates the luma of the original image
grey_im = color.rgb2grey(im)
print('The dimension of the grey image is {}'.format(grey_im.shape))
print('The type of the image representation is {}'.format(grey_im.dtype))
print('The max/min pixel values are ({}, {})'.format(grey_im.max(), grey_im.min()))
plt.axis('off')
io.imshow(grey_im)
plt.show()

# Here we show the gray-scale channel in other color spaces
hsv_im = color.rgb2hsv(im)
luv_im = color.rgb2luv(im)
yuv_im = color.rgb2yuv(im)
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.title('grey')
plt.axis('off')
plt.imshow(grey_im, cmap='gray')
plt.subplot(222)
plt.title('hsv')
plt.axis('off')
plt.imshow(hsv_im[:, :, 2], cmap='gray')  # v channel
plt.subplot(223)
def computeFeatureVectorBatch(P, img_list, img_idx, rot, img_min=[], img_max=[]):
    imgcol = io.imread_collection([img_list[k] for k in img_idx])
    if P.use_hog == 0:
        hogsz = 0
    else:
        hogsz = (P.hognumorient * np.prod((P.imgresize / P.hogcellsize) / P.hogcellblock))
    if P.use_lum == 0:
        lumsz = 0
    else:
        lumsz = (6 * P.m_block * P.n_block)
    featvecsize = lumsz + hogsz
    datastor = DataVec([], [], [], [], rot)
    datastor.feat = np.zeros((featvecsize, len(img_idx)))
    datastor.labels = (datastor.rot / 90) * np.ones(len(img_idx))
    for i in range(0, len(img_idx)):
        img = exposure.equalize_adapthist(imgcol[i])
        img = resize(img, P.imgresize)
        # rotate as needed
        if datastor.rot != 0:
            img = ndimage.rotate(img, datastor.rot)
        if P.use_hog == 1:
            # extract hog features
            img1 = color.rgb2gray(img)
            img1 = gaussian(img1, sigma=2)
            hogarray = hog(img1, orientations=P.hognumorient,
                           pixels_per_cell=P.hogcellsize,
                           cells_per_block=P.hogcellblock)
        if P.use_lum == 1:
            # extract LUV moment features
            img2 = color.rgb2luv(img)
            patches = image.extract_patches_2d(img2, (P.m_block, P.n_block))
            # compute mean and variance for each channel
            pmean = np.mean(patches, 0)
            pvar = np.var(patches, 0)
            # concatenate into feature vector
            feat = np.concatenate((pmean, pvar), 2)
        if P.use_hog == 1 and P.use_lum == 1:
            datastor.feat[:, i] = np.concatenate((feat.flatten(), hogarray), 0)
        elif P.use_hog == 1 and P.use_lum == 0:
            datastor.feat[:, i] = hogarray
        elif P.use_hog == 0 and P.use_lum == 1:
            datastor.feat[:, i] = feat.flatten()
    # normalize
    if P.normalize_lum == 1:
        if len(img_min) == 0:
            if P.use_lum == 0:
                img_min = np.zeros(datastor.shape[0])
            else:
                img_min = np.nanmin(datastor.feat, 1)
                if P.use_hog == 1:
                    img_min[lumsz:lumsz + hogsz] = 0
        datastor.img_min = img_min
        if len(img_max) == 0:
            if P.use_lum == 0:
                img_max = np.zeros(datastor.shape[0])
            else:
                img_max = np.nanmax(datastor.feat, 1)
                if P.use_hog == 1:
                    img_max[lumsz:lumsz + hogsz] = 0
        datastor.img_max = img_max
        tmp1 = (img_max - img_min)
        if P.use_hog == 1:
            tmp1[lumsz:lumsz + hogsz] = 1
        tmp = np.tile(tmp1, (len(img_idx), 1)).T
        datastor.feat = (datastor.feat - np.tile(img_min, (len(img_idx), 1)).T) / tmp
    return datastor
category_sum = 0
category_count = 0
x_values = []
for file_name in glob.glob("../movie_categories/" + c + "/*.txt"):
    f = open(file_name)
    prev = [0., 0., 0.]
    count = 0
    temp_sum = 0
    array = f.read().split('\n')[:-1]
    for line in array:
        count += 1
        r, g, b = line.split()
        r, g, b = float(r), float(g), float(b)
        rgb_vec = [[[r, g, b]], [[r, g, b]]]
        luv_vec = rgb2luv(rgb_vec)
        luv_vec = luv_vec[0][0]
        diff = LA.norm(luv_vec - prev)
        temp_sum += diff
        prev = luv_vec
    avg_change = temp_sum / count
    x_values.append(avg_change)
    category_count += 1
    category_sum += avg_change
    # writing output to file here
    avg_change = str(avg_change)
    concat_name = file_name.rsplit('/', 1)[-1]
    output = open('../movie_dynamics/' + c + "/" + concat_name, 'w')
    output.write(avg_change)
def test_rgb2luv_dtype(self):
    img = self.colbars_array.astype('float64')
    img32 = img.astype('float32')

    assert rgb2luv(img).dtype == img.dtype
    assert rgb2luv(img32).dtype == img32.dtype
def createDataSet(rawDataDir, patch_size, som):
    # Create the training set from a directory with color images
    si, sj = patch_size[0], patch_size[1]
    cantFeatures = si * sj + 2
    cantData = len(os.listdir(rawDataDir)) * (256 - si) * (256 - sj)
    X = np.zeros((cantData, cantFeatures))  # data
    y = np.zeros((cantData, 1), dtype=np.int)  # labels
    dataRow = 0  # count data rows
    for filename in os.listdir(rawDataDir):
        if filename.endswith(".jpg"):
            img = misc.imread(rawDataDir + '/' + filename)  # load image from file
            print(' Processing image: ' + filename + ' ' + str(img.shape))
            imgLuv = color.rgb2luv(img)  # transform the image to CIE LUV
            codebook = som._normalizer.denormalize_by(som.data_raw, som.codebook.matrix)
            # obtain the "patches" from each figure
            for i in range(0, img.shape[0] - si):
                for j in range(0, img.shape[1] - sj):
                    subImg = imgLuv[i:i + si, j:j + sj, :]
                    # misc.imsave('/tmp/parche'+str(x)+'_'+str(y)+'.png', color.luv2rgb(subImg))
                    # obtain the center pixel, only the U and V components
                    pixelUV = subImg[si // 2, sj // 2, 1:]
                    # get the group of the pixel (the Best Matching Unit of the SOM), used as label y
                    pixelGroup = mySom.getBMU(som, pixelUV.reshape(1, -1), codebook)
                    pixelPos = np.array([[i, j]])
                    # get the L components of the patch, used as features X
                    patchL = subImg[:, :, 0]
                    patchLpos = np.concatenate((patchL.reshape(1, si * sj), pixelPos), 1)
                    X[dataRow] = patchLpos
                    y[dataRow] = pixelGroup.reshape(1, -1)
                    dataRow = dataRow + 1
    return X, y
img = Image.open('images/beach-sunset-mountain.jpg')
plt.imshow(img)

# ### Converting from RGB colorspace to CIE LUV colorspace
#
# Images can live in different colorspaces, and every colorspace has its
# advantages depending on the task. RGB may be good for viewing, but a
# colorspace combined with spatial information is better for distinguishing
# sceneries. For this task, we use images in the colorspace called
# **CIE 1976 (L*, u*, v*)**, or **LUV** for short. Converting images from RGB
# to LUV colorspace ([Boutell et al. 2004](https://www.rose-hulman.edu/~boutell/publications/boutell04PRmultilabel.pdf)) will
# 1. remove the nonlinear dependencies among RGB values,
# 2. give better uniformity among LUV values, and
# 3. reduce the complexity of the mapping.

# In[ ]:

# import skimage package
from skimage.color import rgb2luv

img_luv = rgb2luv(img)
plt.imshow(img_luv)
img_luv.shape

# ### Dividing images into 7x7 blocks and calculating first and second order moments for each block

# In[ ]:

(h, w, c) = img_luv.shape

# note that the image size might not be divisible by 7,
# so let us remove the extra pixels in the last row/column
num_blocks_w = 7  # number of blocks in width
num_blocks_h = 7  # number of blocks in height
block_w = w / num_blocks_w
block_h = h / num_blocks_h
img_blocks = [[[] for _ in xrange(num_blocks_w)] for _ in xrange(num_blocks_h)]
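# A minimal sketch of the block-moment step described above: it crops the LUV
# image to a size divisible by 7 and computes the per-channel mean (first
# moment) and variance (second moment) of each of the 7x7 blocks. The variable
# names follow the snippet above, but the reshape-based layout is an
# assumption, not the notebook's original implementation.
import numpy as np

def block_moments(img_luv, num_blocks_h=7, num_blocks_w=7):
    h, w, c = img_luv.shape
    block_h, block_w = h // num_blocks_h, w // num_blocks_w
    # drop the extra pixels so the image tiles evenly into blocks
    cropped = img_luv[:block_h * num_blocks_h, :block_w * num_blocks_w]
    blocks = cropped.reshape(num_blocks_h, block_h, num_blocks_w, block_w, c)
    means = blocks.mean(axis=(1, 3))      # (7, 7, 3) first-order moments
    variances = blocks.var(axis=(1, 3))   # (7, 7, 3) second-order moments
    return np.concatenate([means.ravel(), variances.ravel()])  # 294-dim feature vector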
pipeline = pickle.load(
    open('pkls/grid_patch' + str(patch_size[0]) + 'x' + str(patch_size[1]) +
         '_som' + str(som_size[0]) + 'x' + str(som_size[1]) + '.pkl', 'rb'))
som = pickle.load(
    open('pkls/trainedSOM' + str(som_size[0]) + 'x' + str(som_size[1]) + '.pkl', 'rb'))
print(pipeline.grid_scores_)

for filename in os.listdir(image_dir):
    if filename.endswith(".jpg"):
        img = misc.imread(image_dir + '/' + filename)  # load image from file
        imgLuv = color.rgb2luv(img)
        imgGrey = imgLuv[:, :, 0]
        originalUV = imgLuv[:, :, 1:]
        img_predict_size = [img.shape[0] - patch_size[0] + 1,
                            img.shape[1] - patch_size[1] + 1]
        print('Predicting for ', filename, img.shape, img_predict_size)
        X_predict = np.zeros((img_predict_size[0] * img_predict_size[1],
                              patch_size[0] * patch_size[1]))
        pos = 0
        for j in range(0, 256 - patch_size[0] + 1, 1):  # with a 5x5 patch: range(0, 252, 1)
            for i in range(0, 256 - patch_size[1] + 1, 1):
def rgb2luv(self, imageArray):
    return color.rgb2luv(imageArray)
img_blue = cv2.imread("wavelengthPairs/f6b.jpg")
img_red = cv2.imread("wavelengthPairs/f6r.jpg")
img_green = cv2.imread("wavelengthPairs/f6g.jpg")

img_height, img_width = 480, 640
n_channels = 4
transparent_img = np.zeros((img_height, img_width, n_channels), dtype=np.uint8)

# Save the image for visualization
cv2.imwrite("./transparent_img.png", transparent_img)

img = cv2.addWeighted(img_blue, 0.5, img_red, 0.5, 0)
final_img = cv2.addWeighted(img, 0.5, img_green, 0.5, 0)
img_xyz = rgb2xyz(final_img)
img_luv = rgb2luv(final_img)
# cv2.rectangle(img_xyz, (250, 170), (300, 220), (255, 0, 0), 2)
# final_img = cv2.resize(final_img, (360, 360))  # resize of image
cv2.imshow("result", final_img)
# img_xyz.convertTo(img_result, CV_8UC3, 255.0)
cv2.imwrite("mergedImg.jpg", final_img)
img_read = cv2.imread("mergedImg.jpg", 0)
img_segmented = imgSegmentation(img_read)
cv2.imshow("Segmented image", img_segmented)

image = img_as_float(img_segmented)
# pixel intensity arithmetic mean
print(np.mean(image))
# pixel intensity standard deviation
print(np.std(image))
print(np.average(image))
    y0 = center[1]
    return numpy.exp(-4 * numpy.log(2) * ((x - x0)**2 + (y - y0)**2) / fwhm**2)


# preloading
print("loading data...")
size = 50
images, labels, classes = loader.loadTrainingImagesPoleNumbersAndClasses()
amount = len(images)

print("resizing...")
resized = [image_operations.cropAndResize(img, 0.1, size) for img in images]
print("luv...")
luv = [color.rgb2luv(img) for img in resized]
print("hed...")
hed = [color.rgb2hed(img) for img in resized]
print("grayscaling...")
grayscaled = [color.rgb2gray(img) for img in resized]
# print("edges...")

print("brightness features")
brightness = util.loading_map(extraction.calculateDarktoBrightRatio, resized)
print("luv features")
luv_features = util.loading_map(
    lambda x: extraction.split_image_features(
        lambda y: extraction.color_features(y, mean=True, std=True), 7, x),
    luv)
print('\a')
LUV = []
subset_indices = np.load('subset40p_indices.npy')
for count in subset_indices:
    print(count)
    current_image = path + str(count)
    img = Image.open(current_image)
    # img = misc.imread(current_image)
    img = img.resize((128, 128), Image.ANTIALIAS)
    # img.show()
    # print(img.shape)
    img = np.array(img)
    # if img.shape[2] != 3:
    #     bad_indices.append(count)
    #     good_indices.remove(count)
    #     continue
    arr = color.rgb2luv(img)
    LUV.append(arr)
    # IV_Current = arr[:, :, 2]
    # IV.append(IV_Current)
    # IU_Current = arr[:, :, 1]
    # IU.append(IU_Current)
    # IL_Current = arr[:, :, 0]
    # IL.append(IL_Current)

# np.save('good_indices.npy', good_indices)
# np.save('bad_indices.npy', bad_indices)
# np.save('LUV_V_40p.npy', IV)
# np.save('LUV_U_40p.npy', IU)
# np.save('LUV_L_40p.npy', IL)
np.save('LUV_40p.npy', LUV)
# normalize image if not already in correct format
if img.dtype != 'float32':
    img = cv2.normalize(img.astype('float32'), None, 0.0, 1.0, cv2.NORM_MINMAX)

# get dimensions of input image
x, y, z = img.shape

# store a copy of original image for comparison
img1 = np.zeros((x, y, z))
img1 = img.copy()

alpha = None

# Convert to colorspaces CIELAB, CIELUV, and CIELCH
lab = color.rgb2lab(img)    # RGB to LAB
luv = color.rgb2luv(img)    # RGB to LUV
LCH = color.lab2lch(lab)    # LAB to LCH

# Compute G(x, y) for the image
Gx = np.zeros((x, y))  # Create empty array for Gx component
Gy = np.zeros((x, y))  # Create empty array for Gy component
for i in range(1, x - 1):
    for j in range(1, y - 1):
        # Calculate Gx and Gy per pixel using the color difference from Eq. 3 (p. 2 of the paper)
        Gx[i, j] = color_difference(lab[i + 1, j, :], lab[i - 1, j, :],
                                    luv[i + 1, j, :], luv[i - 1, j, :], alpha)
        Gy[i, j] = color_difference(lab[i, j + 1, :], lab[i, j - 1, :],
                                    luv[i, j + 1, :], luv[i, j - 1, :], alpha)

# Assign lightness, chroma, and hue arrays from LCH to their own respective arrays
L = LCH[:, :, 0]
C = LCH[:, :, 1]
H = LCH[:, :, 2]
def test_luv_rgb_roundtrip(self):
    img_rgb = img_as_float(self.img_rgb)
    assert_array_almost_equal(luv2rgb(rgb2luv(img_rgb)), img_rgb)
def colors(path):
    """Yield x, y values from a resized image."""
    img = gaussian_filter(rgb2luv(resize(imread(path), (256, 256))),
                          sigma=0.4, multichannel=True)
    return img.reshape((256 * 256, 3))
def __call__(self, img):
    img = np.asarray(img, np.uint8)
    img = color.rgb2luv(img)
    return img
def to_luv(self):
    return Image(rgb2luv(self.to_rgb_from_gray()[:, :, :3])).convert_type(self.dtype)
def trans(self, img):
    rst = color.rgb2luv(img) + 128
    # print('============', rst.min(), rst.max())
    return rst.astype(np.uint8)
import som as mySom
import sys
import pickle
import numpy as np
from scipy import misc
from skimage import color

if len(sys.argv) < 2:
    print('A filename is needed')
    sys.exit(1)

filename = sys.argv[1]
img = misc.imread(filename)
imgLUV = color.rgb2luv(img)
imgL = imgLUV[:, :, 0]
imgUV = imgLUV[:, :, 1:]

for mapsize in [[2, 2], [3, 3], [5, 5], [10, 10]]:
    somFile = open('pkls/trainedSOM' + str(mapsize[0]) + 'x' + str(mapsize[1]) + '.pkl', 'rb')
    som = pickle.load(somFile)
    somFile.close()
    imgCode = mySom.getCodeword(som, imgUV.reshape(256 * 256, 2))
    refImg = np.concatenate((imgL.reshape(256 * 256, 1), imgCode), 1)
    refImg = refImg.reshape(256, 256, 3)
    misc.imsave(
        filename.replace('.jpg', '') + '_reference_' + str(mapsize[0]) + 'x' +
        str(mapsize[1]) + '.png',
        color.luv2rgb(refImg))
ax2.axis("off") return plt #Input's Block #Single Reader img = data.imread('img/nor.jpg', False,) #Set Reader #Convert Block img_rgb = color.convert_colorspace(img, 'RGB', 'RGB') #No need img_hsv = color.convert_colorspace(img_rgb, 'RGB', 'HSV') img_lab = color.rgb2lab(img_rgb) img_hed = color.rgb2hed(img_rgb) img_luv = color.rgb2luv(img_rgb) img_rgb_cie = color.convert_colorspace(img_rgb, 'RGB', 'RGB CIE') img_xyz = color.rgb2xyz(img_rgb) #Save Test Block """io.imsave("image_hsv.jpg", img_hsv, ) io.imsave("image_lab.jpg", img_lab, ) io.imsave("image_hed.jpg", img_hed, ) io.imsave("image_luv.jpg", img_luv, ) io.imsave("image_rgb_cie.jpg", img_rgb_cie, ) io.imsave("image_xyz.jpg", img_xyz, ) """ #Layers Block """ canalExtration(img_rgb, "RGB").show() canalExtration(img_hsv, "HSV").show()
def luv_features(block):
    blk = rgb2luv(block)
    return [*mean(blk), *std_dev(blk), *skew_(blk), *variance(blk), *entropy_(blk)]
# construct the model and define loss & optimizer
pred = cnn(x, weights, biases)

# define loss and optimizer
cost = tf.reduce_mean(tf.nn.l2_loss(y - pred))
opt = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# load the images
print('Loading training images...')
tr_path = 'cse190-data/train/*.png'
tr_imgs = []
for fn in glob.glob(tr_path):
    tr_imgs.append(imread(fn, mode='RGB'))

# convert training images to LUV, then split into L (input) and UV (target) channels
tr_imgs = np.array(tr_imgs)
tr_imgs = color.rgb2luv(tr_imgs)
# tr_gray = color.rgb2gray(tr_imgs)
tr_n = len(tr_imgs)
tr_x = tr_imgs[:, :, :, 0].reshape((tr_n, 64, 64, 1))
tr_y = tr_imgs[:, :, :, 1:].reshape((tr_n, 64, 64, 2))
print('%d training images loaded!' % tr_n)

print('Loading test images...')
tst_path = 'cse190-data/test/*.png'
tst_imgs = []
for fn in glob.glob(tst_path):
    tst_imgs.append(imread(fn, mode='RGB'))

# convert test images to LUV
tst_imgs = np.array(tst_imgs)
tst_imgs = color.rgb2luv(tst_imgs)
tst_n = len(tst_imgs)