Example #1
def entropy(img, dim):
    # Entropy of every slice along axis `dim`; keep the slices whose
    # entropy exceeds the 33rd-highest value (i.e. the top 32 slices).
    all_entropy = np.zeros(img.shape[dim])

    if dim == 0:
        for i in range(img.shape[dim]):
            all_entropy[i] = shannon_entropy(img[i, :, :])

        threshold = np.sort(all_entropy)[-33]
        reduced_img = img[all_entropy > threshold, :, :]

    elif dim == 1:
        for i in range(img.shape[dim]):
            all_entropy[i] = shannon_entropy(img[:, i, :])

        threshold = np.sort(all_entropy)[-33]
        reduced_img = img[:, all_entropy > threshold, :]

    elif dim == 2:
        for i in range(img.shape[dim]):
            all_entropy[i] = shannon_entropy(img[:, :, i])

        threshold = np.sort(all_entropy)[-33]
        reduced_img = img[:, :, all_entropy > threshold]

    return reduced_img
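The three branches differ only in the axis being sliced. A more compact equivalent, as a sketch assuming the same keep-top-32 rule and a 3D input:

import numpy as np
from skimage.measure import shannon_entropy

def entropy_reduced(img, dim, keep=32):
    # Entropy of each slice along `dim`; keep the `keep` highest-entropy
    # slices (the hard-coded [-33] above keeps 32).
    ent = np.array([shannon_entropy(s) for s in np.moveaxis(img, dim, 0)])
    threshold = np.sort(ent)[-(keep + 1)]
    return np.compress(ent > threshold, img, axis=dim)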
Example #2
def evaluate_entropy_instant(reference_frame, rendition_frame):
    # Function that computes the difference in Shannon entropy between
    # two images
    entropy_difference = shannon_entropy(
        reference_frame) - shannon_entropy(rendition_frame)

    return entropy_difference
Example #3
def entropy(image):
    if image.ndim == 2:
        return shannon_entropy(image)
    if image.ndim == 3:
        entropies = []
        for i in range(image.shape[0]):
            entropies.append(shannon_entropy(image[i]))
        return entropies
Example #4
def calculate_MI(img, img1, img2):
    # Compute the MI of (A, C) and (B, C) separately; in image fusion the
    # MI value is the sum of the two, here the average is taken
    img = np.array(img)
    img1 = np.array(img1)
    img2 = np.array(img2)
    mi1 = shannon_entropy(img1) + shannon_entropy(img) - ComEntropy(img1, img)
    mi2 = shannon_entropy(img2) + shannon_entropy(img) - ComEntropy(img2, img)
    mi = (mi1 + mi2) / 2
    return round(mi, 3)
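ComEntropy is not defined in this snippet; a minimal sketch of a joint-entropy helper consistent with the MI formula above (the 256-bin histogram and base-2 log are assumptions):

import numpy as np

def ComEntropy(img1, img2, bins=256):
    # Joint Shannon entropy H(A, B), estimated from a 2D histogram.
    hist, _, _ = np.histogram2d(np.ravel(img1), np.ravel(img2), bins=bins)
    p = hist / hist.sum()
    p = p[p > 0]
    return -np.sum(p * np.log2(p))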
Example #5
    def entropy(reference_frame, rendition_frame):
        """
        Function that computes the difference in Shannon entropy between
        two images
        """

        entropy_difference = shannon_entropy(reference_frame) - shannon_entropy(rendition_frame)

        return entropy_difference
Example #6
def glcm_infmescor_1(matrix: np.ndarray, eps: float = 10e-3) -> np.ndarray:
    """
    Information Measure of Correlation 1
    """
    p_col = np.sum(matrix, axis=0)
    p_row = np.sum(matrix, axis=1)
    hxy = -np.sum(matrix * np.log(eps + matrix))
    # HXY1 = -sum_ij p(i, j) * log(p_row(i) * p_col(j)): the marginals must
    # be combined as an outer product, not multiplied elementwise
    hxy_1 = -np.sum(matrix * np.log(eps + np.outer(p_row, p_col)))
    hx = shannon_entropy(p_col)
    hy = shannon_entropy(p_row)
    measure_1 = (hxy - hxy_1) / max(hx, hy)
    return measure_1
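A usage sketch (the random test image and bin count are made up): the function expects a single normalized 2D co-occurrence matrix, such as one (distance, angle) slice of the 4D array returned by skimage's greycomatrix:

import numpy as np
from skimage.feature import greycomatrix

image = np.random.randint(0, 8, (64, 64), dtype=np.uint8)
glcm = greycomatrix(image, distances=[1], angles=[0], levels=8, normed=True)
print(glcm_infmescor_1(glcm[:, :, 0, 0]))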
Example #7
def compute_glcm_features(retinal_image):
    #Compute Entropy
    glcm_image_entropy = shannon_entropy(retinal_image)

    #Compute GLCM (Gray Level Co-occurrence Matrix); the original call had
    #no arguments, so a distance of [1] and angle of [0] are assumed here
    glcm_image = greycomatrix(retinal_image, [1], [0])

    #Compute GLCM Features
    #Contrast
    glcm_image_contrast = greycoprops(glcm_image, 'contrast')

    #Dissimilarity
    glcm_image_dissimilarity = greycoprops(glcm_image, 'dissimilarity')

    #Homogeneity
    glcm_image_homogeneity = greycoprops(glcm_image, 'homogeneity')

    #Energy
    glcm_image_energy = greycoprops(glcm_image, 'energy')

    #Correlation
    glcm_image_correlation = greycoprops(glcm_image, 'correlation')

    #ASM
    glcm_image_ASM = greycoprops(glcm_image, 'ASM')
Example #8
def open_and_get_metrics(img_path):
    """Open the image from the static folder and retrieve its metrics for classification

    Args:
        img_path: The path of the image within the static folder, or uploads folder.

    Returns:
        image_data: A dictionary of all the collected image metrics
    """
    img = io.imread(img_path)
    flat_img = flatten(img)

    entropy = shannon_entropy(img, base=2)
    mean_colors = get_mean_colors(flat_img)
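    # Rec. 709 luma coefficients (assumes mean_colors is ordered R, G, B)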
    luminance = (0.2126 * mean_colors[0]) + (0.7152 * mean_colors[1]) + (
        0.0722 * mean_colors[2])
    contrast = get_contrast(flat_img)
    contour = get_contour(img)

    image_data = {
        'file_name': img_path,
        'year': img_path.split('/')[-1].split('.')[0],
        'shannon_entropy': entropy,
        'mean_color_r': mean_colors[0],
        'luminance': luminance,
        'contrast': contrast,
        'contour': contour
    }

    print(image_data)

    return image_data
Example #9
def get_hand_crafted(one_image):
    """ Extracts various features out of the given image
    :param array one_image: the image from which features are to be extracted
    :return: the features associated with this image
    :rtype: Numpy array of size (38, 1)
    """
    #Select wavelet decomposition level so as to have the
    #same number of approximation coefficients
    if (one_image.shape[0] == 1000):
        wavedec_level = 9
    elif (one_image.shape[0] == 64):
        wavedec_level = 5
    else:
        raise ValueError("expected a 1000x1000 or 64x64 image")

    hist = histogram(one_image, nbins=20, normalize=True)
    features = hist[0]
    blob_lo = blob_log(one_image,
                       max_sigma=2.5,
                       min_sigma=1.5,
                       num_sigma=5,
                       threshold=0.05)
    shape_ind = shape_index(one_image)
    shape_hist = np.histogram(shape_ind, range=(-1, 1), bins=9)
    shan_ent = shannon_entropy(one_image)
    max_val = one_image.max()
    min_val = one_image.min()
    variance_val = np.var(one_image)
    wavelet_approx = pywt.wavedec2(one_image, 'haar',
                                   level=wavedec_level)[0].flatten()
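    # With Haar, both supported input sizes leave a 2x2 approximation band
    # (4 coefficients): 20 hist + 1 blob count + 9 shape-index bins
    # + 1 entropy + 1 max + 1 min + 1 variance + 4 = 38 features.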
    features = np.concatenate([
        features, [blob_lo.shape[0]], shape_hist[0], [shan_ent], [max_val],
        [min_val], [variance_val], wavelet_approx
    ])
    return features
Example #10
def feature_extraction(path):
    image_rgb = skimage.io.imread(path)

    image_gray = color.rgb2gray(image_rgb)
    
    eight_bit = np.uint8(image_gray)

    cm = feature.greycomatrix(eight_bit, [1], [0])
    
    
    contrast = feature.greycoprops(cm, 'contrast')
    dissimilarity = feature.greycoprops(cm, 'dissimilarity')
    homogeneity = feature.greycoprops(cm, 'homogeneity')
    ASM = feature.greycoprops(cm, 'ASM')
    energy = feature.greycoprops(cm, 'energy')
    correlation = feature.greycoprops(cm, 'correlation')
    ent = measure.shannon_entropy(cm)
    
    thr = flt.threshold_otsu(image_gray)
    img = image_gray < thr
    filled = scipy.ndimage.binary_fill_holes(img)
    label = measure.label(filled)

    # note: this keeps the eccentricity of the last labelled region only
    for cell in measure.regionprops(label):
        eccentricity = cell.eccentricity
        
    return [contrast[0][0], dissimilarity[0][0], homogeneity[0][0], ASM[0][0], energy[0][0], correlation[0][0], ent, eccentricity]
Example #11
def compute_props(image, masks_labelled, features, img):
	properties = regionprops(masks_labelled, intensity_image = image)
	#for each nucleus, save the corresponding features
	for region in properties:
		area = float(region.area)
		bboxarea = float(region.bbox_area) 
		perimeter = region.perimeter 
		eccentricity = region.eccentricity
		total_intensity = float(np.sum(region.intensity_image))
		mean_intensity = region.mean_intensity
		solidity = region.solidity
		centroid = region.centroid       
        
		#feats = compute_feats(region.intensity_image,kernels)
		#pw = power(region.intensity_image, kernels[0])      
		lbp = local_binary_pattern(region.intensity_image, 3, 3, method = 'ror')      
		entr = compute_entropy(region.intensity_image)      
		shannon_entr = shannon_entropy(region.intensity_image)              
# =============================================================================
# 		res = {"Image": img, "Area": area, "BBox_Area": bboxarea, "Perimeter": perimeter, "Eccentricity": eccentricity, 
# 				"Total Intensity": total_intensity, "Mean Intensity": mean_intensity, "Solidity": solidity, "Gabor Mean":
#              feats[0,0], "Gabor Var": feats[0,1], "Gabor Amplitude": np.mean(pw), "Gabor Energy": np.sum(pw**2),
#              "Local Binary Pattern": compute_energy(lbp), "Entropy": compute_energy(entr)}
# =============================================================================
		res = {"Image": img, "Area": area, "BBox_Area": bboxarea, "Perimeter": perimeter, "Eccentricity": eccentricity, 
				"Total Intensity": total_intensity, "Mean Intensity": mean_intensity, "Solidity": solidity,
             "Local Binary Pattern": compute_energy(lbp), "Entropy": compute_amplitude(entr),
             "Shannon Entropy": shannon_entr, "Centroid_y": centroid[0], "Centroid_x": centroid[1]}
		row = len(features)
		features.loc[row] = res
	return features
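compute_entropy, compute_energy, and compute_amplitude are project helpers that are not shown here; plausible minimal sketches consistent with how they are used above (the bodies are assumptions):

import numpy as np
from skimage import img_as_ubyte
from skimage.filters.rank import entropy as rank_entropy
from skimage.morphology import disk

def compute_entropy(image):
    # Hypothetical: local-entropy response map of the intensity image.
    return rank_entropy(img_as_ubyte(image / image.max()), disk(3))

def compute_energy(arr):
    # Hypothetical: mean squared value ("energy") of a response map.
    return float(np.mean(np.square(arr)))

def compute_amplitude(arr):
    # Hypothetical: mean absolute value of a response map.
    return float(np.mean(np.abs(arr)))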
Example #12
def svm_clf(path):
    data_set = []
    data1 = fits.open(path)
    data = data1[0].data
    data = data[132:991, 132:991]

    # Detect the background of the image and obtain the image with the background removed
    data = data.astype(np.float64)
    bkg = sep.Background(data, mask=None, bw=64, bh=64, fw=3, fh=3)
    data_sub = data - bkg  # the background-subtracted data
    objects = sep.extract(data_sub, 2.5, err=bkg.globalrms, deblend_nthresh=1)

    # Get the number of bright stars
    number = 0
    for i in range(len(objects)):
        a = objects[i][15]
        b = objects[i][16]
        # order the axes (the original sequential max/min assignment
        # overwrote `a` before computing `b`, leaving both equal)
        a, b = max(a, b), min(a, b)
        # Control star size
        if a < 32 and b > 2.5:
            number = number + 1

    m1, s1 = np.mean(data_sub), np.std(data_sub)
    data_sub = data_sub.astype(np.uint16)

    # Obtain gray-level co-occurrence matrix parameters
    gray = color.rgb2gray(data_sub)
    image = img_as_ubyte(gray)
    bins = np.array([0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 255])  # 16 gray-level bins
    inds = np.digitize(image, bins)
    max_value = inds.max() + 1
    matrix_coocurrence = greycomatrix(inds, [1], [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4], levels=max_value,
                                      normed=False, symmetric=False)
    cons = np.sum(contrast_feature(matrix_coocurrence)) / 4
    diss = np.sum(dissimilarity_feature(matrix_coocurrence)) / 4
    homo = np.sum(homogeneity_feature(matrix_coocurrence)) / 4
    asmm = np.sum(asm_feature(matrix_coocurrence)) / 4
    ener = np.sum(energy_feature(matrix_coocurrence)) / 4
    corr = np.sum(correlation_feature(matrix_coocurrence)) / 4
    # Entropy calculation
    shan = shannon_entropy(image)
    data_set = [[m1, number, corr, s1, homo, shan, asmm, ener, cons]]

    # Load the saved model for prediction
    clf = joblib.load('./rf_clf.m')
    a = clf.predict(data_set)
    a = int(a[0])
    print(a)
    cnn_set = [path, m1, number, corr, s1, homo, shan, asmm, ener, cons, diss, a]
    return cnn_set
Example #13
def shannon_split_weak(image):
    entropies = [(shannon_entropy(image[:, i, :]), i)
                 for i in range(image.shape[1])]
    entropies = sorted(entropies, key=lambda x: x[0], reverse=True)
    return list(
        list(
            zip(*(entropies[args.threshold // 2:args.threshold] +
                  entropies[-args.threshold // 2:-args.threshold:-1])))[1])
Example #14
def entropy_image(regionmask, intensity):
    """Compute the Shannon entropy of a given image

    Args:
        regionmask: binary mask image
        intensity: intensity image
    """
    feat = Entropy_Image([shannon_entropy(intensity * regionmask)])
    return feat
Example #15
def display_metrics(Method, Prediction, aTargetImage, computingTime):

    number_of_angles = 22
    number_of_distances = 2

    prediction = Prediction
    # target = Target;
    target_image = aTargetImage
    method = Method
    prediction[0] = round(prediction[0] * prediction[1])

    pred_angles = np.zeros(number_of_angles)
    # target_angles = np.zeros(number_of_angles);

    for i in range(len(pred_angles)):
        pred_angles[i] = prediction[i + number_of_distances]
        # target_angles[i] = target[i+number_of_distances];

    setXRayParameters(prediction[0], prediction[1])
    pred_image = bone_rotation(pred_angles)

    # setXRayParameters(target[0], target[1]);
    # target_image = bone_rotation(target_angles);

    # diff = [];
    #
    # for i in range(number_of_distances):
    #     diff.append(abs(prediction[i]-target[i]));

    entropy = shannon_entropy(pred_image)
    SSIM = structural_similarity(pred_image, target_image)
    MAE = mean_absolute_error(target_image, pred_image)
    RMSE = root_mean_squared_error(target_image, pred_image)
    RE = relative_error(target_image, pred_image)
    ZNCC = zero_mean_normalised_cross_correlation(target_image, pred_image)
    computing_time = computingTime

    print('Prediction:', prediction)
    # print('Target: ', target);
    # print('SOD and SDD errors: ', diff);
    print(
        'Metrics: \n SSIM: %.8f \t MAE: %.8f \t RMSE: %.8f \t RE: %.8f \t ZNCC: %.8f \
            \t Entropy: %.8f' % (SSIM, MAE, RMSE, RE, ZNCC, entropy))

    row = [[
        method, prediction[0], prediction[1], pred_angles, entropy, SSIM, MAE,
        RMSE, RE, ZNCC, computing_time
    ]]

    df2 = pd.DataFrame(row,
                       columns=[
                           'Methods', 'SOD', 'SDD', 'Rotating Angles',
                           'Entropy', 'SSIM', 'MAE', 'RMSE', 'Relative Error',
                           'ZNCC', 'Time'
                       ])

    return pred_image, df2
Example #16
def basic_statistical_features(image):
    """calculates the set of basic statistical features 
    
    Calculates the standard statistical features per channel every 10th percentile,
    sum of the pixel values and different moments

    Parameters
    ----------
    image : 3D array, shape (M, N, C)
        The input image with multiple channels.

    Returns
    -------
    features :  dict  
        dictionary including percentiles, moments and sum per channel 

    """
    # storing the feature values
    features = dict()
    for ch in range(image.shape[2]):
        # percentiles (np.percentile expects q in [0, 100], not fractions)
        features["min_intensity_Ch" + str(ch + 1)] = image[:, :, ch].min()
        features["percentile10_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 10)
        features["percentile20_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 20)
        features["percentile30_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 30)
        features["percentile40_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 40)
        features["percentile50_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 50)
        features["percentile60_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 60)
        features["percentile70_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 70)
        features["percentile80_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 80)
        features["percentile90_intensity_Ch" + str(ch + 1)] = np.percentile(
            image[:, :, ch], 90)
        features["max_intensity_Ch" + str(ch + 1)] = image[:, :, ch].max()

        # pixel sum
        features["total_intensity_Ch" + str(ch + 1)] = image[:, :, ch].sum()

        # moments
        features["mean_intensity_Ch" + str(ch + 1)] = image[:, :, ch].mean()
        features["std_intensity_Ch" + str(ch + 1)] = image[:, :, ch].std()
        features["kurtosis_intensity_Ch" + str(ch + 1)] = kurtosis(
            image[:, :, ch].ravel())
        features["skew_intensity_Ch" + str(ch + 1)] = skew(image[:, :,
                                                                 ch].ravel())

        features["shannon_entropy_Ch" + str(ch + 1)] = shannon_entropy(
            image[:, :, ch])

    return features
Example #17
    def _calc_features(self):
        """
        calculate feature values
        :return: feature values
        """

        features = {}
        unif = []
        ent = []
        for i in np.arange(self.glcm.shape[3]):
            mat = self.glcm[:, :, 0, i]
            feature_unif = (mat**2).sum()
            unif.append(feature_unif)
            feature_ent = shannon_entropy(mat)
            ent.append(feature_ent)

        matrix = self.matrix[:, :, 0, 0]
        features['Uniformity'] = list(unif)
        features['Invariant Uniformity'] = (matrix**2).sum()

        features['GLCM Entropy'] = list(ent)
        features['GLCM Invariant Entropy'] = shannon_entropy(matrix)

        features['Correlation'] = greycoprops(self.glcm, 'correlation')[0]
        aux_corr = greycoprops(self.matrix, 'correlation')
        features['Invariant Correlation'] = float(aux_corr[0][0])

        features['Dissimilarity'] = greycoprops(self.glcm, 'dissimilarity')[0]
        aux_diss = greycoprops(self.matrix, 'dissimilarity')
        features['Invariant Dissimilarity'] = float(aux_diss[0][0])

        features['Contrast'] = greycoprops(self.glcm, 'contrast')[0]
        aux_cont = greycoprops(self.matrix, 'contrast')
        features['Invariant Contrast'] = float(aux_cont[0][0])

        features['Homogeneity'] = greycoprops(self.glcm, 'homogeneity')[0]
        aux_hom = greycoprops(self.matrix, 'homogeneity')
        features['Invariant Homogeneity'] = float(aux_hom[0][0])

        features['Energy'] = greycoprops(self.glcm, 'energy')[0]
        aux_eng = greycoprops(self.matrix, 'energy')
        features['Invariant Energy'] = float(aux_eng[0][0])

        return features
Example #18
def read_hyper_data(hs_img, directory_path, sheet_number, choose_best = False):
    print("-------Begining to read hyperspectral data----------- ")

    #if a sheet_number is provided, only read that sheet number
    if sheet_number is not None:
        start_time = time.time()
        data, sheet_cnt = sheet_to_array(hs_img, sheet_number)
        end_time = time.time()
        print("Total time taken during reading count_worksheets: " + str(end_time - start_time))
        #preprocess the hs_img using data read from sheet
        hyp_im = preprocess_hyperdata(data)

        print("________Read sheet " + str(sheet_number)+ " of hyperspectral image.__________")
        cv2.imwrite(directory_path+"/full_hyperspec_img.png", hyp_im)
        return hyp_im

    #if NO sheet_number is provided, read the entire workbook
    else:
        start_time = time.time()
        datas, sheet_cnt = sheet_to_array(hs_img, sheet_number)
        end_time = time.time()
        print("Total time taken during reading count_worksheets: " + str(end_time - start_time))
        imgs = []
        entropies = []
        #iterates through the number of channels
        for i in range(len(datas)):
        #reads data value from the sheet of hs_img
            data = datas[i]
            #preprocess the hs_img using data read from sheet
            hyp_im = preprocess_hyperdata(data)
            #output preprocessed hyp_im to path
            cv2.imwrite(directory_path+"/full_hyperspec_img_"+str(i)+".png", hyp_im)
            #the following steps are for calculating the best hyper_img
            imgs.append(hyp_im)
            #calculate the entropy of image
            entropy = measure.shannon_entropy(hyp_im)
            # print("entropy"+str(i), entropy)
            entropies.append(entropy)

        if choose_best:
            #output best preprocessed hs_img to path
            best_hyper_index = np.argmax(entropies)
            best_hyper_img = imgs[best_hyper_index]
            cv2.imwrite(directory_path+"/full_hyperspec_img.png", best_hyper_img)
            return best_hyper_img, best_hyper_img.shape

        #check that all the hyperspectral image matrices have been read
        #(the original try/except referenced an undefined `im`; stacking
        #the channels collected above is equivalent)
        hyperspectral_data = np.stack(imgs, axis=2)

        assert(sheet_cnt == hyperspectral_data.shape[2])
        print("________Read all the channels of hyperspectral image.__________")

        return imgs
Example #19
File: task5c.py Project: niklasmh/ntnu
def getShapeValue(region, mode='area'):

    # Making different orders based on the shape signatures
    order = [0, 1, 2, 3, 4]
    value = 0

    # Choose which method to measure a shape signature
    if mode == 'shannon': value = shannon_entropy(region.image)
    elif mode == 'moi': value = momentOfInertia(region)
    else: value = region.area

    return value, order
Example #20
File: task5c.py Project: amunds1/ntnu-1
def getShapeValue(region, mode='area'):

    # Making different orders based on the shape signatures
    order = [0, 1, 2, 3, 4]
    value = 0

    # Choose which method to measure a shape signature
    if mode == 'shannon': value = shannon_entropy(region.image)
    elif mode == 'moi': value = momentOfInertia(region)
    else: value = region.area

    return value, order
Example #21
def add_noise2(array, exposure=None, bit_depth=None):
    '''
    Returns the (optionally Poisson-degraded) image together with the Shannon
    entropy of the result; for exposure=None the image is returned unchanged.
    (A peak-SNR comparison for reduced-exposure images is left commented out.)
    '''
    ## Change bit_depth if necessary
    if bit_depth == 16:
        array = img_as_uint(array / np.max(array))
        clip = 2**16 - 1
    elif bit_depth == 8:
        array = img_as_ubyte(array / np.max(array))
        clip = 2**8 - 1
    else:
        clip = None
    if exposure is None:
        noise = array
        snr = shannon_entropy(noise)
    else:
        noise = np.clip(np.random.poisson(array / np.max(array) * exposure), 0,
                        clip)
        snr = shannon_entropy(
            noise)  #compare_psnr(array/np.max(array)*exposure, noise)
    return noise, snr
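A usage sketch (the synthetic frame and parameter values are made up):

import numpy as np

frame = np.random.rand(64, 64)
noisy, ent_noisy = add_noise2(frame, exposure=100, bit_depth=8)
clean, ent = add_noise2(frame)  # image returned unchanged, entropy only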
Example #22
def segment(data_path, SAVE_PATH):
    labels = os.listdir(data_path)  #the class label folders
    kernel = np.ones((3,3))
    if not os.path.exists(SAVE_PATH):
        os.mkdir(SAVE_PATH)  
    for label in labels: #iterate over the labels
        dir = os.path.join(data_path, label)
        for img_name in os.listdir(dir): #iterate over the images
            im_path = os.path.join(dir, img_name)
            img = cv2.imread(im_path, cv2.IMREAD_GRAYSCALE) #read as grayscale
            imgsave_name = '{}'.format(img_name) #name for the saved image
            save_directory = os.path.join(SAVE_PATH, label)
            save_path = os.path.join(save_directory, imgsave_name)
            
            h, w = img.shape
            diff = 10
            for i in range(w): #scan the top edge
                if img[0, i] > 220:
                    cv2.floodFill(img, None, (i, 0), 0, loDiff=diff, upDiff=diff)

            for i in range(w-1, -1, -1):  #scan the bottom edge
                if img[h - 1, i] > 220:
                    cv2.floodFill(img, None, (i, h-1), 0, loDiff=diff, upDiff=diff)
            
            img_org = copy.copy(img) #make a copy of img
            
            _, img = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU) #binarize with the Otsu threshold
            img = 255 - img #invert the image
            img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel) #morphological opening
            img = cv2.dilate(img,kernel,iterations = 7) #dilation
            
            x1, y1, x2, y2 = findROICoord(img)
            x1 = np.maximum(x1 - 60, 0) #avoid going outside the original image bounds
            y1 = np.maximum(y1 - 60, 0)
            x2 = np.minimum(x2 + 60, w - 1)
            y2 = np.minimum(y2 + 60, h - 1)
            try:
                img_org = img_org[y1:y2, x1:x2]            
                if not os.path.exists(save_directory):
                    os.mkdir(save_directory)        
                img_org = resize(img_org, (128, 128))  
                entropy = shannon_entropy(img_org)
                if entropy <= 5.5:
                    print(entropy)
                    print(save_path)
                    continue
                cv2.imwrite(save_path, img_org)
#                 clear_output()
            except Exception as e:
                print(im_path, save_path, str(e))
Example #23
def prep_data(filename_path):

    nii_output_dir = os.getenv('TMP_DIR_PATH',
                               "/Users/pmartyn/PycharmProjects/Thesis/tmp/")

    if not os.path.exists(nii_output_dir):
        os.makedirs(nii_output_dir)

    p = 0.5
    img = nib.load(filename_path)

    affine = img.affine
    img = img.get_fdata()

    prob = Extractor().run(img)
    mask = prob > p

    brain = img.copy()  # a real copy; img[:] would be a view of the volume
    brain[~mask] = 0
    image_array = nib.Nifti1Image(brain, affine).get_fdata()

    total_slices = image_array.shape[2]

    image_entropies = []

    for current_slice in range(0, total_slices):
        image_data = np.rot90(image_array[:, :, current_slice], k=3)
        gray_image_data = color.rgb2gray(image_data)

        ent = measure.shannon_entropy(gray_image_data)
        dictionary = {
            "filename": current_slice,
            "entropy-value": ent,
            "image-data": gray_image_data
        }

        image_entropies.append(dictionary)

    image_entropies.sort(key=itemgetter('entropy-value'), reverse=True)

    for dictionary in image_entropies[:6]:
        filename = dictionary["filename"]
        image_data = dictionary["image-data"]
        img_to_crop = Image.fromarray(image_data)
        # The crop rectangle, as a (left, upper, right, lower)-tuple.
        cropped = img_to_crop.crop((25, 20, 250, 275))
        cropped = np.array(cropped)
        io.imsave(nii_output_dir + str(filename) + ".jpeg", cropped)
Example #24
def main():
    os.chdir(sys.argv[1])
    data = pd.read_csv("positions.csv")
    folderpath = "icm/"
    entropy = []
    new_data = pd.DataFrame()
    new_data['filename'] = pd.unique(data['filename'])
    for imagename in pd.unique(data['filename']):
        print(os.path.abspath(imagename))
        e = shannon_entropy(imread(imagename + '.png'))
        if e < 0:
            e = 0
        entropy.append(e)
    new_data['entropy'] = entropy
    new_data.to_csv("entropy.csv", index=False)
Example #25
def feature_extraction(image):
    img = cv2.imread(image)
    gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    eight_bit = np.uint8(gray)

    cm = feature.greycomatrix(eight_bit, [1], [0])
    
    
    contrast = feature.greycoprops(cm, 'contrast')
    dissimilarity = feature.greycoprops(cm, 'dissimilarity')
    homogeneity = feature.greycoprops(cm, 'homogeneity')
    ASM = feature.greycoprops(cm, 'ASM')
    energy = feature.greycoprops(cm, 'energy')
    correlation = feature.greycoprops(cm, 'correlation')
    ent = measure.shannon_entropy(cm)

    return [contrast[0][0], dissimilarity[0][0], homogeneity[0][0], ASM[0][0], energy[0][0], correlation[0][0], ent]
Example #26
def region_texture_metrics(region, image=None, tag='', glcm=False):
    """Texture analysis for a of scikit-image region"""

    database = pd.Series(dtype=object)

    # Check to see whether intensity_image is present or image argument
    # has been supplied
    if image is not None:
        region_image = bbox_sample(region, image)
    else:
        region_image = region.intensity_image

    # Obtain indices of pixels in region mask
    indices = np.where(region.image)
    intensity_sample = region_image[indices]

    # _, _, database[f"{tag} Fourier SDI"] = (0, 0, 0)
    # fourier_transform_analysis(segment_image)

    database[f"{tag} Mean"] = np.mean(intensity_sample)
    database[f"{tag} STD"] = np.std(intensity_sample)
    database[f"{tag} Entropy"] = shannon_entropy(intensity_sample)

    if glcm:

        glcm = greycomatrix(
            (region_image * region.image * IMAGE_MAX).astype('uint8'), [1, 2],
            [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4],
            256,
            symmetric=True,
            normed=True)
        glcm[0, :, :, :] = 0
        glcm[:, 0, :, :] = 0

        greycoprops = greycoprops_edit(glcm)

        metrics = [
            "Contrast", "Homogeneity", "Energy", "Entropy", "Autocorrelation",
            "Clustering", "Mean", "Covariance", "Correlation"
        ]

        for metric in metrics:
            value = greycoprops[metric.lower()].mean()
            database[f"{tag} GLCM {metric}"] = value

    return database
Example #27
def get_image_entropies(dir_path):
    image_entropies = []
    counter = 0
    for filename in os.listdir(dir_path):
        if filename.endswith('.jpg'):
            path = os.path.join(dir_path, filename)
            rgbImg = io.imread(path)
            grayImg = img_as_ubyte(color.rgb2gray(rgbImg))
            ent = measure.shannon_entropy(grayImg)
            dictionary = {
                "filename": filename,
                "entropy-value": ent,
                "counter": counter
            }
            counter = counter + 1
            image_entropies.append(dictionary)

    image_entropies.sort(key=itemgetter('entropy-value'), reverse=True)
    print(image_entropies)
    return image_entropies
Example #28
    def compute_gabor_feats(self, image, kernels):

        features = {}
        f_var = []
        f_mean = []
        f_energy = []
        f_ent = []
        for k, kernel in enumerate(kernels):
            filtered = ndi.convolve(image, kernel, mode='wrap')
            f_mean.append(np.mean(filtered))
            f_var.append(np.var(filtered))
            f_energy.append(
                np.sum(np.power(filtered.ravel(), 2)) / len(filtered.ravel()))
            f_ent.append(shannon_entropy(filtered))

        features['Gabor Variance'] = mean(f_var)
        features['Gabor Mean'] = mean(f_mean)
        features['Gabor Energy'] = mean(f_energy)
        features['Gabor Entropy'] = mean(f_ent)

        return features
Example #29
def glcm_feature(image):
    glcm = greycomatrix(image, [2, 8, 16],
                        [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4],
                        256,
                        symmetric=True,
                        normed=True)
    #print(len(glcm))
    arr = np.empty((0, 0))
    for prop in (
            'contrast', 'dissimilarity', 'homogeneity', 'correlation', 'ASM'
    ):  #, 'energy' -- a tuple keeps the feature order deterministic, unlike a set
        temp = greycoprops(glcm, prop)
        temp = np.array(temp).reshape(-1)
        arr = np.append(arr, temp)
    entropy_tem = []
    for k in range(glcm.shape[2]):
        for j in range(glcm.shape[3]):
            entropy_tem.append(measure.shannon_entropy(glcm[:, :, k, j]))
    entropy_feat = np.array(entropy_tem)
    arr = np.append(arr, entropy_feat)
    return arr  # 5 props * 12 (distance, angle) pairs + 12 entropies = 72 values
Example #30
def extract_features(array):
    props_array = []
    angles = [0, np.pi / 4, np.pi / 2, 3 * np.pi / 4]
    for img in array:
        ubyte_img = img_as_ubyte(img)
        img_features = np.array([])
        for i in range(3):
            glcm = greycomatrix(ubyte_img[:, :, i], [1], angles)
            filt_glcm = glcm[:, :, :, :]
            contrast = greycoprops(filt_glcm, 'contrast')
            energy = greycoprops(filt_glcm, 'energy')
            homogeneity = greycoprops(filt_glcm, 'homogeneity')
            correlation = greycoprops(filt_glcm, 'correlation')
            entropy = shannon_entropy(ubyte_img[:, :, i])
            img_features = np.insert(
                img_features, 0,
                np.concatenate(
                    (contrast, energy, homogeneity, correlation)).flatten())
            img_features = np.insert(img_features, 0, entropy)
        props_array.append(img_features)

    return props_array
Example #31
    def _calc_features(self):
        """
        calculate feature values
        :return: feature values
        """

        features = {}

        props = regionprops(self.bin_img, self.img)

        features['Mean Intensity'] = np.mean(self.imgzero)
        #mean intensity of image

        features['Std'] = np.std(self.imgzero)
        #standard deviation

        features['Variance'] = np.var(self.imgzero)
        #variance

        features['Skewness'] = skew(self.imgzero)
        #skewness of distribution

        features['Kurtosis'] = kurtosis(self.imgzero)
        #kurtosis of distribution

        features['Contrast'] = np.std(self.hist)
        #contrast can be defined as std of histogram of intensity

        features['Max Intensity'] = props[0].max_intensity
        #Value with the greatest intensity in the region.

        features['Min Intensity'] = props[0].min_intensity
        #Value with the smallest intensity in the region.

        features['Entropy'] = shannon_entropy(self.img, base=2)
        #The Shannon entropy is defined as S = -sum(pk * log(pk)), where pk are frequency/probability of pixels of value k.

        return features
Example #32
def test_shannon_ones():
    img = np.ones((10, 10))
    res = shannon_entropy(img, base=np.e)
    assert_almost_equal(res, 0.0)
Example #33
def test_shannon_all_unique():
    img = np.arange(64)
    res = shannon_entropy(img, base=2)
    assert_almost_equal(res, np.log(64) / np.log(2))