def compute(self, image):
    """
    Compute the GLCM features.
    """
    assert (image.ndim == 2)

    # shape is (rows, cols), i.e. (height, width)
    h, w = image.shape
    nw = int(w / self.wsize_)
    nh = int(h / self.wsize_)
    nf = len(self.which_feats_)

    ft = np.zeros((nf, nw * nh))  # features will be on rows

    k = 0
    for x in np.arange(0, nw):
        for y in np.arange(0, nh):
            x0, y0 = x * self.wsize_, y * self.wsize_
            x1, y1 = x0 + self.wsize_, y0 + self.wsize_
            glcm = greycomatrix(image[y0:y1, x0:x1], self.dist_, self.theta_,
                                self.levels_, self.symmetric_, self.normed_)
            ft[:, k] = np.array(
                [greycoprops(glcm, f)[0, 0] for f in self.which_feats_])
            k += 1

    res = {}
    k = 0
    for f in self.which_feats_:
        res[f] = ft[k, :]
        k += 1

    return res
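# Hedged usage sketch for compute() above: the owning class is not shown in
# this snippet, so the constructor name `GLCMDescriptor` and its keyword
# names are assumptions inferred from the attributes the method reads
# (wsize_, dist_, theta_, levels_, symmetric_, normed_, which_feats_):
#
#   desc = GLCMDescriptor(wsize=32, dist=[1], theta=[0], levels=256,
#                         symmetric=True, normed=True,
#                         which_feats=('contrast', 'energy'))
#   res = desc.compute(grey_image)  # dict: feature name -> per-window values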
def generate_glcm(array, distance, angle_in_deg):
    """Compute a grey-level co-occurrence matrix from a quantized map for a
    given distance and angle.

    Arguments:
        array {np.array} -- an int array (quantized map)
        distance {int} -- the distance offset for computation of co-occurrence
        angle_in_deg {int} -- integer angle in degrees between pairs to be
            considered for co-occurrence

    Returns:
        glcm {np.array} -- a uint32 array in which the value glcm[i, j] is
            the number of times that grey-level j occurs at a distance `d`
            and at an angle `theta` from grey-level i.
    """
    params = load_params()
    glcm = greycomatrix(array,
                        distances=[distance],
                        angles=[angle_in_deg * (np.pi / 180)],
                        levels=params["quantization"]["n_levels"],
                        symmetric=False,
                        normed=False)
    # remove the two singleton (distance, angle) dimensions from `glcm`
    return np.squeeze(glcm)
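# A small self-contained check of the co-occurrence convention used above
# (values taken from the skimage greycomatrix documentation example):
import numpy as np
from skimage.feature import greycomatrix

demo = np.array([[0, 0, 1, 1],
                 [0, 0, 1, 1],
                 [0, 2, 2, 2],
                 [2, 2, 3, 3]], dtype=np.uint8)
glcm = np.squeeze(greycomatrix(demo, distances=[1], angles=[0], levels=4))
assert glcm[0, 0] == 2  # two horizontal (0 -> 0) pairs in `demo`
assert glcm[2, 2] == 3  # three horizontal (2 -> 2) pairs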
def get_haralick_mt_value(img_array, center_x, center_y, window_size,
                          greylevels, haralick_feature, symmetric, mean):
    # extract subpart of image (todo: pass in result from view_as_windows)
    min_x = int(max(0, center_x - window_size / 2 - 1))
    min_y = int(max(0, center_y - window_size / 2 - 1))
    max_x = int(min(img_array.shape[1] - 1, center_x + window_size / 2 + 1))
    max_y = int(min(img_array.shape[0] - 1, center_y + window_size / 2 + 1))
    cropped_img_array = img_array[min_y:max_y, min_x:max_x]

    # co-occurrence matrix over all 8 directions, summed over angles
    cmat = greycomatrix(cropped_img_array, [1],
                        [0, 1 * np.pi / 4, 2 * np.pi / 4, 3 * np.pi / 4,
                         4 * np.pi / 4, 5 * np.pi / 4, 6 * np.pi / 4,
                         7 * np.pi / 4],
                        levels=greylevels)
    cmat = np.sum(cmat, axis=3)
    cmat = cmat[:, :, 0]

    # extract haralick features using the mahotas library
    har_feature = mt.features.texture.haralick_features([cmat],
                                                        return_mean=mean)

    # output: only one (summed) direction was passed in, so index direction 0
    if mean:
        return har_feature[haralick_feature]
    return har_feature[0, haralick_feature]
def _glcm_measures(X):
    measures = []
    glcm = greycomatrix(X, [1], [0], levels=8, normed=True)
    for p in ('contrast', 'dissimilarity', 'homogeneity', 'energy',
              'correlation', 'ASM'):
        res = greycoprops(glcm, p)
        measures.extend(list(res.reshape(res.size)))
    return measures
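# Hedged usage sketch for _glcm_measures: levels=8 requires input values in
# 0..7, so an 8-bit image is quantized by integer division first.
import numpy as np

rng = np.random.default_rng(0)
img8 = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
vec = _glcm_measures((img8 // 32).astype(np.uint8))  # 256 levels -> 8
assert len(vec) == 6  # one scalar per property (1 distance x 1 angle)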
def getGLCM(img, config):
    # scale image to 8 bit or less
    img_scale = np.clip(img, config['min'], config['max'])
    img_scale = (img_scale - config['min']) / (config['max'] - config['min'])
    img8 = (img_scale * (config['levels'] - 1)).astype(np.uint8)
    # return glcm
    return greycomatrix(img8, config['distance'], config['angle'],
                        levels=config['levels'])
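# Hedged example of a `config` for getGLCM: the keys are exactly those the
# function reads; the values are illustrative assumptions. `distance` and
# `angle` must be list-like, as greycomatrix expects.
import numpy as np

glcm_config = {
    'min': 0.0,               # intensities below this are clipped
    'max': 1.0,               # intensities above this are clipped
    'levels': 64,             # grey levels after rescaling (<= 256)
    'distance': [1],          # pixel-pair offsets
    'angle': [0, np.pi / 2],  # pair directions in radians
}
# glcm = getGLCM(float_image, glcm_config)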
def get_haralick_mt_value(self, img_array, center_x, center_y, window_size,
                          greylevels, haralick_feature, symmetric, mean):
    """Gets the haralick texture value at the center of an x, y coordinate.

    Parameters
    ----------
    img_array : numpy.array
        image to calculate texture on
    center_x : int
        x center of coordinate
    center_y : int
        y center of coordinate
    window_size : int
        size of window to pull for calculation
    greylevels : int
        number of bins
    haralick_feature : HaralickFeature
        desired haralick feature
    symmetric : bool
        whether or not we should use the symmetrical co-occurrence matrix
    mean : bool
        whether we return the mean of the feature or not

    Returns
    -------
    float
        value of the haralick texture at the coordinate
    """
    # extract subpart of image (todo: pass in result from view_as_windows)
    min_x = int(max(0, center_x - window_size / 2 - 1))
    min_y = int(max(0, center_y - window_size / 2 - 1))
    max_x = int(min(img_array.shape[1] - 1, center_x + window_size / 2 + 1))
    max_y = int(min(img_array.shape[0] - 1, center_y + window_size / 2 + 1))
    cropped_img_array = img_array[min_y:max_y, min_x:max_x]

    # co-occurrence matrix over all 8 directions, summed over angles
    cooccurence_matrix = greycomatrix(cropped_img_array, [1],
                                      self.cooccurence_angles,
                                      levels=greylevels)
    cooccurence_matrix = np.sum(cooccurence_matrix, axis=3)
    cooccurence_matrix = cooccurence_matrix[:, :, 0]

    # extract haralick features using the mahotas library
    har_feature = mt.features.texture.haralick_features([cooccurence_matrix],
                                                        return_mean=mean)

    # output
    if mean:
        return har_feature[haralick_feature]
    return har_feature[0, haralick_feature]
def get_features(image):
    glcm = greycomatrix(image,
                        distances=[5],
                        angles=[0],
                        levels=256,
                        symmetric=True,
                        normed=True)
    contrast = np.array(greycoprops(glcm, 'contrast'))
    dissimilarity = np.array(greycoprops(glcm, 'dissimilarity'))
    homogeneity = np.array(greycoprops(glcm, 'homogeneity'))
    energy = np.array(greycoprops(glcm, 'energy'))
    correlation = np.array(greycoprops(glcm, 'correlation'))
    ASM = np.array(greycoprops(glcm, 'ASM'))
    listFeatures = [
        contrast, dissimilarity, homogeneity, energy, correlation, ASM
    ]
    return listFeatures
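# Hedged usage sketch for get_features: levels=256 assumes an 8-bit image.
import numpy as np

rng = np.random.default_rng(1)
grey = rng.integers(0, 256, size=(128, 128), dtype=np.uint8)
feats = get_features(grey)
assert len(feats) == 6 and feats[0].shape == (1, 1)  # 1 distance x 1 angle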
def feature_build(img):
    from skimage.feature.texture import greycoprops, greycomatrix, local_binary_pattern
    from skimage.color import rgb2gray

    # rgb2gray returns floats in [0, 1]; scale to the 4 grey levels used
    # below before casting (a plain uint8 cast would collapse almost every
    # pixel to 0)
    img = np.asarray(rgb2gray(img.numpy()) * 3, dtype=np.uint8)
    mat = greycomatrix(img, [1, 2], [0, np.pi / 2],
                       levels=4, normed=True, symmetric=True)

    features = []
    use_glcm = True  # set False to use LBP features instead
    if use_glcm:
        features.append(greycoprops(mat, 'contrast'))
        features.append(greycoprops(mat, 'dissimilarity'))
        features.append(greycoprops(mat, 'homogeneity'))
        # features.append(greycoprops(mat, 'energy'))
        # features.append(greycoprops(mat, 'correlation'))
        features = np.concatenate(features)
    else:
        radius = 2
        # other methods: 'ror', 'uniform', 'var'
        features = local_binary_pattern(img, 8 * radius, radius,
                                        method='default')

    feature = features.flatten()
    return torch.tensor(feature).float()
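# Hedged usage sketch for feature_build: it expects a torch tensor holding an
# RGB image (H, W, 3); with the GLCM branch this yields
# 3 properties x (2 distances x 2 angles) = 12 values.
import torch

rgb = torch.randint(0, 256, (32, 32, 3), dtype=torch.uint8)
vec = feature_build(rgb)
assert vec.shape == (12,)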
def _calculate_haralick_feature_values(self, img_array, center_x, center_y):
    """Gets the haralick texture feature values at the x, y coordinate.

    :param img_array: image to calculate texture on
    :type img_array: numpy.ndarray
    :param center_x: x center of coordinate
    :type center_x: int
    :param center_y: y center of coordinate
    :type center_y: int
    :returns: A 13x1 vector of haralick texture at the coordinate.
    :rtype: numpy.ndarray
    """
    # extract subpart of image (todo: pass in result from view_as_windows)
    window_size = self.haralick_window_size
    min_x = int(max(0, center_x - window_size / 2 - 1))
    min_y = int(max(0, center_y - window_size / 2 - 1))
    max_x = int(min(img_array.shape[1] - 1, center_x + window_size / 2 + 1))
    max_y = int(min(img_array.shape[0] - 1, center_y + window_size / 2 + 1))
    cropped_img_array = img_array[min_y:max_y, min_x:max_x]

    # co-occurrence matrix over all 8 directions, summed over angles
    cooccurence_matrix = greycomatrix(cropped_img_array, [1],
                                      self.cooccurence_angles,
                                      levels=self.num_unique_angles)
    cooccurence_matrix = np.sum(cooccurence_matrix, axis=3)
    cooccurence_matrix = cooccurence_matrix[:, :, 0]

    # extract haralick features (mean over directions) using mahotas
    return mt.features.texture.haralick_features([cooccurence_matrix],
                                                 return_mean=True)
def getFeatureVector(img):
    GLCM_Input = []  # to store the CSLBP values
    startTime = time.time()  # start timer

    # get image shape
    imgRows = img.shape[0]
    imgCols = img.shape[1]

    # loop over all pixels from the second row/column to the row/column
    # before the last, so every pixel has a full 8-neighbourhood
    for i in range(1, imgRows - 1):
        GLCM_Row = []
        for j in range(1, imgCols - 1):
            neighbours = []
            # get the pixel's neighbour coordinates
            neighboursCoord = getNeighbours([i, j])
            for p in neighboursCoord:
                x = p[0]
                y = p[1]
                # fetch the neighbours from their coordinates
                neighbours.append(img[x][y])
            # get the CSLBP value from those neighbours
            CSP_LP_SUM = CSP_LP(neighbours, 8, 0.01)
            # append the CSP_LP_SUM to the current row of the GLCM input
            GLCM_Row.append(CSP_LP_SUM)
        # append the row to GLCM_Input
        GLCM_Input.append(GLCM_Row)

    # build the GLCM for four directions (0, 45, 90, 135 degrees) with
    # distance = 1 and levels = 16; greycomatrix needs an integer ndarray
    GLCM_Matrix = ski.greycomatrix(
        np.array(GLCM_Input, dtype=np.uint8),
        distances=[1],
        angles=[0, math.pi / 4, math.pi / 2, 3 * math.pi / 4],
        levels=16)

    # build the feature vector by appending the 4 matrices one after another
    featureVector = []
    for k in range(GLCM_Matrix.shape[3]):
        for i in range(GLCM_Matrix.shape[0]):
            for j in range(GLCM_Matrix.shape[1]):
                featureVector.append(GLCM_Matrix[i][j][0][k])
    return featureVector
def compute_feature_vector(self, patch):
    # GLCMmat = self.GLCM(patch, self.angle, self.distance, sym=True, norm=True)
    # patch = patch.astype(np.uint8)
    range_patch = patch.max() - patch.min()
    shape = patch.shape
    if range_patch == 0:
        range_patch = range_patch + 1
    patch_scaled = sklearn.preprocessing.minmax_scale(
        patch.ravel(), feature_range=(0, range_patch)).reshape(shape)
    patch_scaled = patch_scaled.astype('uint8')

    M = greycomatrix(image=patch_scaled,
                     distances=[self.distance],
                     angles=[self.angle],
                     levels=range_patch + 1,
                     symmetric=True,
                     normed=True)
    # drop the singleton distance and angle dimensions
    GLCM = np.squeeze(M, axis=2)
    GLCM = np.squeeze(GLCM, axis=2)

    self.feature_vector[0] = self.ASM(GLCM)
    self.feature_vector[1] = self.contrast(GLCM)
    self.feature_vector[2] = self.dissimilarity(GLCM)
    self.feature_vector[3] = self.homogeneity(GLCM)
    self.feature_vector[4] = self.energy(GLCM)
    self.feature_vector[5] = self.entropy(GLCM)
    self.feature_vector[6] = self.svar(GLCM)
    self.feature_vector[7] = greycoprops(M, prop='correlation')
    self.feature_vector[8] = self.sum_avg(GLCM)
    self.feature_vector[9] = self.sum_entropy(GLCM)
    self.feature_vector[10] = self.dif_entropy(GLCM)
    self.feature_vector[11] = self.clustershade(GLCM)
    self.feature_vector[12] = self.clusterprom(GLCM)

    print(self.i)
    self.i = self.i + 1
    return self.feature_vector
def Maskgenerator(generatorfile_image, generatorfile_GT, Imgoverlay=True,
                  GToverlay=False):
    real_image_stack = generatorfile_image[0]
    real_GT_stack = generatorfile_GT[0]

    # per-image statistics, filled in by the loop below
    mean = np.zeros((file_amount), dtype=np.float32)
    std = np.zeros((file_amount), dtype=np.float32)
    var = np.zeros((file_amount), dtype=np.float32)

    for i in range(0, file_amount):
        real_image = real_image_stack[i, :, :, :]
        image = real_image[:, :, 0]
        normal_image = real_image[:, :, 0]
        # plt.imshow(image, interpolation='none', cmap='gray')

        fig = plt.figure(figsize=(10, 8), dpi=300)
        ax1 = fig.add_subplot(1, 2, 1)
        ax1.set_xlim([0, 512])
        ax1.set_ylim([512, 0])
        ax2 = fig.add_subplot(1, 2, 2)

        if Imgoverlay == True:
            ax1.imshow(image, interpolation='none', cmap='gray')

        predictions = model.predict(real_image_stack)
        two = predictions[i, :, :, 0]
        two = np.where(two > 0.4, 1, 0)
        two = two.astype(np.uint8)
        # plt.imshow(two)
        # plt.savefig("Predict_Only" + str(i) + ".png")

        GapImage = GapFill(two, i)
        RemovedImage = RemoveSmall(GapImage, i)

        # labeled_mask = measure.label(RemovedImage, connectivity=2)
        # ilm_mask = (labeled_mask == 1)
        # Pre_seperated_mask = (labeled_mask == 2).astype(np.uint8)
        # Pre_seperated_mask = Pre_seperated_mask.astype(np.float32)

        empty_array, path_top, path_bottom_dist = Costfunction(
            image=RemovedImage)
        (drusen_array, gradient_drusen, avarage_array, image_drusen,
         histogram_plot) = HistoDrusen(top_oned_array=path_top,
                                       bottom_oned_array=path_bottom_dist,
                                       imagearray=normal_image, i=i)

        ax2.set_xlim([1, 256])
        ax2.set_ylim([0, 500])
        ax2.plot(histogram_plot)

        x = histogram_plot[:, 0]
        print(x)
        peaks, properties = find_peaks(x, prominence=1, width=3)
        ax2.plot(peaks, x[peaks], "x")
        ax2.vlines(x=peaks, ymin=x[peaks] - properties["prominences"],
                   ymax=x[peaks], color="C1")
        ax2.hlines(y=properties["width_heights"], xmin=properties["left_ips"],
                   xmax=properties["right_ips"], color="C1")

        # drusenfinder = DrusenFinder(top_oned_array=top_oned_array,
        #                             bottom_oned_array=bottom_oned_array,
        #                             drusenarray=)
        # drusenfinder = np.ma.masked_where(drusenfinder == 0, drusenfinder)
        # drusenarray, gradient_drusen, avarage_array = HistoDrusen(
        #     top_oned_array=top_oned_array,
        #     bottom_oned_array=bottom_oned_array, imagearray=image, i=i)
        # ILM_mask = (labeled_mask == 1).astype(np.uint8)
        # ILM_mask = np.ma.masked_where(ILM_mask == 0, ILM_mask)
        # OBM_mask = OBM_RPE_seperated_mask[:, :, 0]
        # OBM_mask = np.ma.masked_where(OBM_mask == 0, OBM_mask)
        # RPE_mask = OBM_RPE_seperated_mask[:, :, 1]
        # RPE_mask = np.ma.masked_where(RPE_mask == 0, RPE_mask)

        if GToverlay == True:
            GT = real_GT_stack[i, :, :, 0]
            GT = GT.astype(np.uint8)
            data_mask = np.ma.masked_where(GT == 0, GT)
            plt.imshow(data_mask, interpolation='none', cmap='brg',
                       alpha=0.5, vmin=0)

        total_drusen = 0
        for m in range(0, len(avarage_array)):
            col_avarage = avarage_array[m]
            if col_avarage >= 0:
                total_drusen = total_drusen + col_avarage

        mean[i] = np.mean(histogram_plot, dtype=np.float32)
        std[i] = np.std(histogram_plot, dtype=np.float32)
        var[i] = np.var(histogram_plot, dtype=np.float32)

        drusen_array = np.ma.masked_where(drusen_array == 0, drusen_array)

        # GLCM texture measures over the drusen region
        distances = [1, 5, 10, 15, 20]
        angles = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]
        glcm = greycomatrix(drusen_array, distances=distances, angles=angles,
                            levels=256, symmetric=True, normed=True)
        contrast = greycoprops(glcm, prop="contrast")
        homogeneity = greycoprops(glcm, prop="homogeneity")
        energy = greycoprops(glcm, prop="energy")
        correlation = greycoprops(glcm, prop="correlation")
        dissimilarity = greycoprops(glcm, prop="dissimilarity")

        # Average Drusen Height Calc
        avarage_rpeheight = np.average(avarage_array)
        # print(texture)
        # print(histogram_plot)
        # topvalue = np.amax(avarage_array)

        # empty_array = np.ma.masked_where(empty_array == 0, empty_array)
        ax1.imshow(empty_array, interpolation='none', alpha=0.8, cmap="brg")
        # ilm_mask = np.ma.masked_where(ilm_mask == 0, ilm_mask)
        # plt.imshow(ilm_mask, interpolation='none', alpha=0.8, cmap="brg")
        ax1.imshow(gradient_drusen, interpolation='none', alpha=0.5, vmin=8,
                   vmax=28, cmap="RdYlGn_r")
        # plt.imshow(OBM_mask, interpolation='none', alpha=0.8,
        #            cmap='gist_rainbow', vmax=1)
        # plt.imshow(RPE_mask, interpolation='none', alpha=0.8,
        #            cmap="rainbow", vmax=1)

        ax1.text(10.0, 600.0, "DrusenPixs: " + str(total_drusen),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(10.0, 630.0, "mean: " + str(mean[i]),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(10.0, 660.0, "STD: " + str(std[i]),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(10.0, 690.0, "Var: " + str(var[i]),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(10.0, 720.0, "RPE-OBM: " + str(avarage_rpeheight),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(180.0, 600.0, "homogeneity: " + str(np.average(homogeneity)),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(180.0, 630.0, "energy: " + str(np.average(energy)),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(180.0, 660.0, "contrast: " + str(np.average(contrast)),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(180.0, 690.0, "correlation: " + str(np.average(correlation)),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)
        ax1.text(180.0, 720.0,
                 "dissimilarity: " + str(np.average(dissimilarity)),
                 verticalalignment='bottom', horizontalalignment='left',
                 color='white', fontsize=8)

        plt.savefig("GDL_CHECKKING_" + str(i) + "_Final-image.png",
                    bbox_inches='tight', pad_inches=0)
        plt.close()
for ii, i_row in enumerate(np.arange(0, rows, stride_y[rr])):
    if i_row % 50 == 0:
        print("%.1f percent" % (float(i_row) / float(rows) * 100), end='\r')
    for jj, i_col in enumerate(np.arange(0, cols, stride_x[rr])):
        # clip the raster subset for further calculations
        subset = layer.values[i_row:i_row + stride_y[rr],
                              i_col:i_col + stride_x[rr]]
        if np.isnan(subset).sum() == 0:
            mean.values[ii, jj] = np.mean(subset)
            variance.values[ii, jj] = np.var(subset)
            subset_scaled = 255 * (subset - layer_min) / (layer_max - layer_min)
            glcm = tex.greycomatrix(subset_scaled.astype('int'), [1],
                                    [0, pi / 4, pi / 2, pi * 3 / 4],
                                    levels=256)
            contrast.values[ii, jj] = tex.greycoprops(
                glcm, 'contrast')[0].mean()
            dissimilarity.values[ii, jj] = tex.greycoprops(
                glcm, 'dissimilarity')[0].mean()
            homogeneity.values[ii, jj] = tex.greycoprops(
                glcm, 'homogeneity')[0].mean()
            correlation.values[ii, jj] = tex.greycoprops(
                glcm, 'correlation')[0].mean()
            asm.values[ii, jj] = tex.greycoprops(glcm, 'ASM')[0].mean()

# write array to new geotiff
prefix = sfile.split('/')[-1][:-8]
res_str = str(resolution).zfill(3)
io.write_xarray_to_GeoTiff(
def GetMatrix(self, distances=[5], angles=[0, pi / 4, pi / 2, 3 * pi / 4]):
    return ft.greycomatrix(self.image_array, distances, angles)
def calc_coomatrix(in_img):
    return greycomatrix(image=in_img,
                        distances=dist_list,
                        angles=angle_list,
                        levels=4)
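# Hedged usage sketch for calc_coomatrix: dist_list and angle_list are
# module-level globals not shown in this snippet, so the values below are
# assumptions. levels=4 means in_img must hold values in 0..3.
import numpy as np

dist_list = [1, 2]
angle_list = [0, np.pi / 2]
quantized = np.random.default_rng(2).integers(0, 4, (16, 16), dtype=np.uint8)
# output shape: (levels, levels, n_distances, n_angles)
assert calc_coomatrix(quantized).shape == (4, 4, 2, 2)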
def GetMatrixPatch(self, patch, distances, angles):
    return ft.greycomatrix(patch, distances, angles)
def get_GLCM_features(image,
                      distances=(1,),
                      angles=None,
                      levels=256,
                      symmetric=True,
                      normed=True,
                      features=None):
    """
    Return features extracted from the gray level co-occurrence matrix of an
    image.

    :param image: OpenCV numpy array_like of uint8
    :param distances: array_like
        List of pixel pair distance offsets
    :param angles: array_like
        List of pixel pair angles in radians.
    :param levels: int, optional
        The input image should contain integers in [0, levels-1], where
        levels indicate the number of grey-levels counted (typically 256 for
        an 8-bit image). Default=256.
    :param symmetric: bool, optional
        If True, the output matrix P[:, :, d, theta] is symmetric. This is
        accomplished by ignoring the order of value pairs, so both (i, j)
        and (j, i) are accumulated when (i, j) is encountered for a given
        offset. Default=True.
    :param normed: bool, optional
        If True, normalize each matrix P[:, :, d, theta] by dividing by the
        total number of accumulated co-occurrences for the given offset. The
        elements of the resulting matrix sum to 1. Default=True.
    :param features: array_like
        The list of desired features to extract from the GLCM. Accepted
        values for array elements: "energy", "contrast", "homogeneity",
        "ASM", "dissimilarity", "correlation", "entropy"
    :return: GLCM feature dictionary containing feature names as keys and
        the corresponding feature values

    Features included - Energy, Contrast, Homogeneity, Entropy, ASM,
    Dissimilarity, Correlation
    """
    if angles is None:
        angles = [0, np.pi / 4, 2 * np.pi / 4, 3 * np.pi / 4]
    if features is None:
        features = [
            "energy", "contrast", "homogeneity", "ASM", "dissimilarity",
            "correlation", "entropy"
        ]
    else:
        accepted_features = [
            "energy", "contrast", "homogeneity", "ASM", "dissimilarity",
            "correlation", "entropy"
        ]
        for f in features:
            if f not in accepted_features:
                raise Exception("Feature " + f +
                                " is not accepted in the set of features")

    image_glcm = sktex.greycomatrix(image,
                                    distances,
                                    angles,
                                    levels=levels,
                                    symmetric=symmetric,
                                    normed=normed)
    output_features = dict()
    for feature in features:
        if feature == "entropy":
            # entropy is not provided by greycoprops, so compute it
            # directly; one value per (distance, angle) pair
            entropy = np.zeros((len(distances), len(angles)))
            for i in range(image_glcm.shape[0]):
                for j in range(image_glcm.shape[1]):
                    entropy -= image_glcm[i, j] * np.ma.log(image_glcm[i, j])
            output_features[feature] = entropy
        else:
            output_features[feature] = sktex.greycoprops(image_glcm, feature)
    return output_features
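# Hedged usage sketch for get_GLCM_features with its default settings:
import numpy as np

rng = np.random.default_rng(3)
img = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
out = get_GLCM_features(img)
assert out["contrast"].shape == (1, 4)  # 1 distance x 4 angles
assert out["entropy"].shape == (1, 4)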
from PIL import Image

# raw strings keep the Windows-style backslashes from being read as escapes
img = io.imread(r'..\contoh_fullsize.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# cv2.imshow(r'..\gray', gray)
# cv2.imshow("h", gray)
# cv2.waitKey()
cv2.imwrite(r'..\gray.png', gray)
gray = np.array(gray, dtype=np.uint8)
# print(np.shape(gray))
# print(type(gray))

# glcm = greycomatrix(gray, [1], [0, np.pi/4, np.pi/2, 3*np.pi/4], levels=256)
# glcm = greycomatrix(gray, [1], [0], levels=256, normed=True, symmetric=True)
# glcm = greycomatrix(gray, [1], [0, 3*np.pi/4, np.pi/2, np.pi/4], levels=256)
d = 1
glcm = greycomatrix(gray, [d], [0, 3 * np.pi / 4, np.pi / 2, np.pi / 4],
                    levels=256)
print(np.shape(glcm))
print(glcm[0:7, 0:7, 0, 0])

diss = greycoprops(glcm, 'dissimilarity')
contrast = greycoprops(glcm, 'contrast')
correlation = greycoprops(glcm, 'correlation')
homogeneity = greycoprops(glcm, 'homogeneity')
ASM = greycoprops(glcm, 'ASM')
print(homogeneity[0, 0], ' ', homogeneity[0, 1], ' ', homogeneity[0, 2], ' ',
      homogeneity[0, 3])


def calc_glcm(gray, index_baris, index_kolom, x_offset, y_offset):
    x, y = gray.shape
"ASM", ] prop_imgs = {} for c_prop in grayco_prop_list: prop_imgs[c_prop] = np.zeros_like(cortex_img, dtype=np.float32) score_img = np.zeros_like(cortex_img, dtype=np.float32) out_df_list = [] for patch_idx in tqdm(np.unique(region_labels)): xx_box, yy_box = np.where(region_labels == patch_idx) glcm = greycomatrix( cortex_img[xx_box.min():xx_box.max(), yy_box.min():yy_box.max()], [5], [0], 256, symmetric=True, normed=True, ) mean_score = np.mean(cortex_mask[region_labels == patch_idx]) score_img[region_labels == patch_idx] = mean_score out_row = dict( intensity_mean=np.mean(cortex_img[region_labels == patch_idx]), intensity_std=np.std(cortex_img[region_labels == patch_idx]), score=mean_score, ) for c_prop in grayco_prop_list:
def main():
    parser = argparse.ArgumentParser(description="""
        Construct a traditional machine learning data matrix by extracting
        features from the objects in images.
        """)
    parser.add_argument('--plate', '-p',
                        help="The plate the input image is associated with",
                        required=True)
    parser.add_argument('--well', '-w',
                        help="The well the input image is associated with",
                        required=True)
    parser.add_argument(
        "--input-image", "-i",
        help="Input image for which cell_clustering.py was run on to "
        "produce --label-image.",
        required=True)
    parser.add_argument(
        "--label-image", "-l",
        help="Output of cell_clustering.py or other method for segmenting "
        "images. Must be a CSV of an array of the same shape as the input "
        "image and has an integer in each cell assigning a pixel to an "
        "object. -1 is used for background pixels. Objects start counting "
        "at 0.",
        required=True)
    parser.add_argument(
        "--treatments", "-t",
        help="Treatment metadata file output from parse_treatment.py")
    parser.add_argument(
        "--outfile", "-o",
        help="Output CSV which is a traditional ML data matrix with shape "
        "n_objects x n_features. The objects correspond to the objects in "
        "the --infile. The features include region properties, texture "
        "properties, and which chemical treatment was used.")
    args = parser.parse_args()

    ofh = open(args.outfile, 'w')
    intensity_image = imread(args.input_image)
    label_image = np.genfromtxt(args.label_image,
                                delimiter=",").astype('int') + 1
    ofh.write('{}\n'.format(",".join(HEADER)))

    for i in range(np.max(label_image)):
        intensity_image_slice = np.copy(intensity_image)
        intensity_image_slice[label_image != i + 1] = 0
        label_image_bool = np.copy(label_image)
        label_image_bool[label_image != i + 1] = 0
        label_image_bool[label_image == i + 1] = 1
        prop_list = regionprops(label_image_bool, intensity_image_slice)
        prop = prop_list[0]

        # region properties - {{
        # extract simple scalar region properties
        scalar_region_props = list(
            map(lambda x: prop[x], SCALAR_REGION_PROPERTIES))
        # extract region properties that require some processing
        local_centroid_row = prop['local_centroid'][0]
        local_centroid_col = prop['local_centroid'][1]
        weighted_centroid_row = prop['weighted_centroid'][0]
        weighted_centroid_col = prop['weighted_centroid'][1]
        region_props = scalar_region_props + [
            local_centroid_row, local_centroid_col, weighted_centroid_row,
            weighted_centroid_col
        ]
        # }} - region properties

        # extract texture properties for the object i - {{
        # 2nd and 3rd parameters encode a 1-pixel offset to the right, up,
        # left, and down; n_dists = 1, n_angles = 4
        levels = np.max(intensity_image_slice) + 1
        grey_rv = greycomatrix(intensity_image_slice, [1],
                               [0, np.pi / 2, np.pi, 3 * np.pi / 2],
                               levels=levels)
        # greycoprops is (n_dist x n_angle); average each texture property
        avg_texture_props = []
        for texture_prop in TEXTURE_PROPS:
            props_rv = greycoprops(grey_rv, texture_prop)
            avg_texture_props.append(np.mean(props_rv))
        # }} - texture properties

        # combine all properties
        all_props = [args.plate, args.well] + region_props + avg_texture_props
        ofh.write(','.join(map(str, all_props)) + '\n')

    ofh.close()
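# Hedged CLI sketch for main() above; the script filename is an assumption,
# the flags come from the argparse definitions:
#
#   python extract_features.py -p plate1 -w A01 -i input.tif \
#       -l labels.csv -t treatments.csv -o features.csv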