Example no. 1
def test_moments_normalized():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1
    mu = moments_central(image, 14.5, 14.5)
    nu = moments_normalized(mu)
    # shift image by dx=-3, dy=-3 and scale by 0.5
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11:13, 11:13] = 1
    mu2 = moments_central(image2, 11.5, 11.5)
    nu2 = moments_normalized(mu2)
    # normalized moments must be translation and scale invariant
    assert_almost_equal(nu, nu2, decimal=1)
Example no. 2
def test_moments_normalized():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1
    mu = moments_central(image, (14.5, 14.5))
    nu = moments_normalized(mu)
    # shift image by dx=-3, dy=-3 and scale by 0.5
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11:13, 11:13] = 1
    mu2 = moments_central(image2, (11.5, 11.5))
    nu2 = moments_normalized(mu2)
    # normalized moments must be translation and scale invariant
    assert_almost_equal(nu, nu2, decimal=1)
Example no. 3
def test_moments_normalized():
    image = np.zeros((20, 20), dtype=np.float64)
    image[13:17, 13:17] = 1
    mu = moments_central(image, (14.5, 14.5))
    nu = moments_normalized(mu)
    # shift image by dx=-2, dy=-2 and scale non-zero extent by 0.5
    image2 = np.zeros((20, 20), dtype=np.float64)
    # scale amplitude by 0.7
    image2[11:13, 11:13] = 0.7
    mu2 = moments_central(image2, (11.5, 11.5))
    nu2 = moments_normalized(mu2)
    # normalized moments must be translation and scale invariant
    assert_almost_equal(nu, nu2, decimal=1)
Example no. 4
def test_moments_hu():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:15, 13:17] = 1
    mu = moments_central(image, 13.5, 14.5)
    nu = moments_normalized(mu)
    hu = moments_hu(nu)
    # shift image by dx=2, dy=3, scale by 0.5 and rotate by 90deg
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11, 11:13] = 1
    image2 = image2.T
    mu2 = moments_central(image2, 11.5, 11)
    nu2 = moments_normalized(mu2)
    hu2 = moments_hu(nu2)
    # Hu moments must be translation, scale and rotation invariant
    assert_almost_equal(hu, hu2, decimal=1)
Example no. 5
def test_moments_hu():
    image = np.zeros((20, 20), dtype=np.double)
    image[13:15, 13:17] = 1
    mu = moments_central(image, (13.5, 14.5))
    nu = moments_normalized(mu)
    hu = moments_hu(nu)
    # shift image by dx=2, dy=3, scale by 0.5 and rotate by 90deg
    image2 = np.zeros((20, 20), dtype=np.double)
    image2[11, 11:13] = 1
    image2 = image2.T
    mu2 = moments_central(image2, (11.5, 11))
    nu2 = moments_normalized(mu2)
    hu2 = moments_hu(nu2)
    # Hu moments must be translation, scale and rotation invariant
    assert_almost_equal(hu, hu2, decimal=1)
Example no. 6
def featuresExtractor_Hu(image):
    img = rgb2gray(image)
    hu = moments_central(img)
    hu = moments_normalized(hu)
    hu = moments_hu(hu)
    l = [norm(f) for f in hu]
    return l
Example no. 7
def collect(path, mean, std):
    img = io.imread('./images/' + path + '.bmp')
    hist = exposure.histogram(img)
    th = get_threshold('./images/' + path + '.bmp')
    img_binary = (img < th).astype(np.double)
    img_label = label(img_binary, background=0)
    regions = regionprops(img_label)
    boxes = []
    features = []
    for props in regions:
        box = []
        minr, minc, maxr, maxc = props.bbox
        if maxc - minc < 10 or maxr - minr < 10 or maxc - minc > 120 or maxr - minr > 120:
            continue
        box.append(minr)
        box.append(maxr)
        box.append(minc)
        box.append(maxc)
        boxes.append(box)

        roi = img_binary[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(roi, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        features.append(hu)

    feature_arr = normalize(features, mean, std)
    return (boxes, feature_arr)
Example no. 8
def extract_features(roi, props):

    features = []

    m = moments(roi)
    # print(m)

    cr = m[0, 1] / m[0, 0]
    cc = m[1, 0] / m[0, 0]

    mu = moments_central(roi, (cr, cc))
    nu = moments_normalized(mu)

    #finding Seven Features
    hu = moments_hu(nu)

    # seven features to be put into feature list
    features.extend(hu)

    # print(features)

    features.append(roi.shape[1]/roi.shape[0])
    features.append(props.eccentricity)
    features.append(props.convex_area/props.area)
    features.append(props.orientation)
    features.append(props.euler_number)
    
    return np.array([features])
Example no. 9
def get_data(x, y, img):

    # Create data and a 5x5 image slice centered on (x, y)
    data = []
    new_image = img[x - 2:x + 3, y - 2:y + 3]  # +3 so the slice is truly 5x5

    # Append location of pixel
    data.append(x)
    data.append(y)

    # Append pixel values to data
    for pixel_val in new_image.ravel():
        data.append(pixel_val)

    # Append central moments to data
    central_moments = measure.moments_central(new_image)
    for central_moment in central_moments.ravel():
        data.append(central_moment)

    # Append hu moments to data
    hu_moments = measure.moments_hu(
        measure.moments_normalized(central_moments))
    for hu_moment in hu_moments:
        data.append(hu_moment)

    # Append variance of pixel values to data
    variation = np.var(new_image)
    data.append(variation)

    return data
Example no. 10
def get_hu_moments(samples):
    print "getting hu moments..."
    features = []
    for sample in samples:
        '''
        sample = np.array(sample)
        th = 200
        img_binary = (sample < th).astype(np.double)
        img_label = label(img_binary, background=255)
        regions = regionprops(img_label)
        if regions == []:
            print "no regions"
        for props in regions:
            minr, minc, maxr, maxc = props.bbox
            roi = img_binary[minr:maxr, minc:maxc]

        '''
        sample = np.array(sample)
        sample = sample.astype(np.double)
        m = moments(sample)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(sample, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        features.append(hu)
    return features
Example no. 11
File: hu.py Project: rgaiacs/pythia
def hist(image):
    """Create histogram"""
    return moments_hu(
        moments_normalized(
            moments_central(image)
        )
    )
Example no. 12
	def __compute_moments(self, data, centroid, radius):
		""" Compute moments"""

		# - Compute central moments
		mom_c= moments_central(data, center=centroid, order=3)

		# - Compute normalized moments
		mom_norm= moments_normalized(mom_c, 3)

		# - Compute Hu moments
		mom_hu= moments_hu(mom_norm)

		# - Flatten moments
		mom_c= mom_c.flatten()

		# - Compute Zernike moments
		#   NB: mahotas takes only positive pixels and rescales the image by sum(pix) internally
		poldeg= 4
		nmom_zernike= 9
		mom_zernike= [-999]*nmom_zernike
		try:
			mom_zernike = mahotas.features.zernike_moments(data, radius, degree=poldeg, cm=centroid)
			##mom_zernike = mahotas.features.zernike_moments(mask, radius, degree=poldeg, cm=centroid)
		except Exception as e:
			logger.warn("Failed to compute Zernike moments (err=%s)!" % (str(e)))

		#print("--> mom_zernike")
		#print(mom_zernike)
		
		return (mom_c, mom_hu, mom_zernike)
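A small aside on the Zernike call above, sketched by me (assuming mahotas is installed; it is not taken from this project): with degree=4, mahotas.features.zernike_moments returns 9 magnitudes, which is presumably why nmom_zernike is set to 9 in the method.

import numpy as np
import mahotas

# toy binary blob; mahotas uses only the positive pixels within the given radius
img = np.zeros((32, 32))
img[8:24, 10:22] = 1.0

zm = mahotas.features.zernike_moments(img, 12, degree=4, cm=(15.5, 15.5))
print(len(zm))  # 9 Zernike magnitudes up to degree 4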
Example no. 13
def get_hu_moment_from_image(image):
    """
    Compute the 7 Hu moments of an image.
    This set of moments is proven to be translation, scale and rotation invariant.

    Parameters
    ----------
    image: array-like
        a 2d array of double or uint8 corresponding to an image

    Returns
    -------
    (7,) array of double
        the 7 Hu moments

    References
    ----------
    http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.moments
    """
    order = 7
    raw_moments = moments(image, order=order)
    cr = raw_moments[0, 1] / raw_moments[0, 0]
    cc = raw_moments[1, 0] / raw_moments[0, 0]
    central_moments = moments_central(image, cr, cc, order=order)
    normalized_moments = moments_normalized(central_moments, order)
    hu_moments = moments_hu(normalized_moments)
    return hu_moments
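A minimal check of the invariance claimed in the docstring, written by me against the tuple/centroid form of moments_central used in other examples on this page (recent scikit-image) rather than the positional cr, cc call in this snippet:

import numpy as np
from skimage.measure import moments_central, moments_normalized, moments_hu

image = np.zeros((20, 20), dtype=np.float64)
image[13:17, 13:17] = 1          # a 4x4 square
image2 = np.zeros((20, 20), dtype=np.float64)
image2[2:4, 2:4] = 1             # a translated, half-size square

# center defaults to the image centroid in recent scikit-image
hu = moments_hu(moments_normalized(moments_central(image)))
hu2 = moments_hu(moments_normalized(moments_central(image2)))
np.testing.assert_allclose(hu, hu2, atol=0.1)  # equal up to discretization error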
Example no. 14
def test_moments_normalized_invalid():
    with pytest.raises(TypeError):
        moments_normalized(np.zeros((3, 3, 3)))
    with pytest.raises(TypeError):
        moments_normalized(np.zeros((3,)))
    with pytest.raises(TypeError):
        moments_normalized(np.zeros((3, 3)), 3)
    with pytest.raises(TypeError):
        moments_normalized(np.zeros((3, 3)), 4)
Example no. 15
def extract_features(path, show, tag):
    img = io.imread('./images/' + path + '.bmp')
    hist = exposure.histogram(img)
    th = get_threshold('./images/' + path + '.bmp')
    img_binary = (img < th).astype(np.double)
    img_label = label(img_binary, background=0)

    # Show images
    if show == 1:
        io.imshow(img)
        plt.title('Original Image')
        io.show()

        plt.bar(hist[1], hist[0])
        plt.title('Histogram')
        plt.show()

        io.imshow(img_binary)
        plt.title('Binary Image')
        io.show()

        io.imshow(img_label)
        plt.title('Labeled Image')
        io.show()

    regions = regionprops(img_label)
    if show == 1:
        io.imshow(img_binary)
        ax = plt.gca()

    features = []

    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        if maxc - minc < 10 or maxr - minr < 10 or maxc - minc > 120 or maxr - minr > 120:
            continue
        if show == 1:
            ax.add_patch(
                Rectangle((minc, minr),
                          maxc - minc,
                          maxr - minr,
                          fill=False,
                          edgecolor='red',
                          linewidth=1))
        roi = img_binary[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(roi, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        features.append(hu)
        if (len(path) == 1):
            tag.append(ord(path))

    if show == 1:
        plt.title('Bounding Boxes')
        io.show()
    return features
Example no. 16
 def compute_hu_moments(i):
     b = cells_aligned_padded[i].astype(np.uint8)
     m = moments(b, order=1)
     hu = moments_hu(
         moments_normalized(
             moments_central(b, cc=m[0, 1] / m[0, 0],
                             cr=m[1, 0] / m[0, 0])))
     return hu
Example no. 17
def test_moments_hu_dtype(dtype):
    image = np.zeros((20, 20), dtype=np.double)
    image[13:15, 13:17] = 1
    mu = moments_central(image, (13.5, 14.5))
    nu = moments_normalized(mu)
    hu = moments_hu(nu.astype(dtype))

    assert hu.dtype == dtype
Example no. 18
    def describe(self, image):

        # compute central, normalized and Hu moments
        mc = measure.moments_central(image)
        mn = measure.moments_normalized(mc)
        mh = measure.moments_hu(mn)

        # return Hu moments
        return mh
Example no. 19
def testKNN():
    trainFeatures, trainLebels = extractFeatures()
    knn = neighbors.KNeighborsClassifier()
    knn.fit(trainFeatures, trainLebels)
    #score = knn.score(trainFeatures, trainLebels)
    testNames = ['test1', 'test2']
    #testNames = ['test2']
    testFeatures = []
    testLabels = []
    testTruth = []
    correct = 0
    #textPosition = []
    for i in range(len(testNames)):
        classes, locations = readPkl(testNames[i])
        img = io.imread(testNames[i] + '.bmp')
        #testTruth = ['a']*7+['d']*7+['m']*7+['n']*7+['o']*7+['p']*7+['q']*7+['r']*7+['u']*7+['w']*7
        ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
        #ret, binary = cv.threshold(img, 0, 255, cv.THRESH_BINARY | cv.THRESH_TRIANGLE)
        th = ret
        img_binary = (img < th).astype(np.double)
        img_dilation = morphology.binary_dilation(img_binary, selem=None)
        img_erosion = morphology.binary_erosion(img_binary, selem=None)
        img_label = label(img_binary, background=0)
        regions = regionprops(img_label)
        io.imshow(img_binary)
        ax = plt.gca()
        thresholdR = 15
        thresholdC = 15
        for props in regions:
            minr, minc, maxr, maxc = props.bbox
            # Computing Hu Moments and Removing Small Components
            if (maxr - minr) >= thresholdR and (maxc - minc) >= thresholdC:
                #textPosition.append((maxc, minr))
                roi = img_binary[minr:maxr, minc:maxc]
                m = moments(roi)
                cr = m[0, 1] / m[0, 0]
                cc = m[1, 0] / m[0, 0]
                mu = moments_central(roi, cr, cc)
                nu = moments_normalized(mu)
                hu = moments_hu(nu)
                testFeatures.append(hu)
                
                testLabels.append(knn.predict([testFeatures[-1]]))
                
                indexFix = locationFix(locations, minr, minc, maxr, maxc)
                if indexFix is not None:
                    if testLabels[-1] == classes[indexFix]:
                        correct += 1
                
                plt.text(maxc, minr, testLabels[-1][0], bbox=dict(facecolor='white', alpha=0.5))
                ax.add_patch(Rectangle((minc, minr), maxc - minc, maxr - minr, fill=False, edgecolor='red', linewidth=1))
        plt.title('Bounding Boxes')
        io.show()
    print correct, len(testLabels)
    correctRate = float(correct) / len(testLabels)  # avoid Python 2 integer division
    print correctRate
Example no. 20
def extractFeature(name, showall, showbb, flag):

    (img, regions, ax, rthre, cthre) = extractImage(name, showall, showbb,
                                                    flag)

    Features = []
    boxes = []

    for props in regions:
        tmp = []
        minr, minc, maxr, maxc = props.bbox
        if maxc - minc < cthre or maxr - minr < rthre or maxc - minc > cthre * 9 or maxr - minr > rthre * 9:
            continue
        tmp.append(minr)
        tmp.append(minc)
        tmp.append(maxr)
        tmp.append(maxc)
        boxes.append(tmp)
        if showbb == 1:
            ax.add_patch(
                Rectangle((minc, minr),
                          maxc - minc,
                          maxr - minr,
                          fill=False,
                          edgecolor='red',
                          linewidth=1))
        # computing hu moments and removing small components
        roi = img[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(roi, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)

        area = (maxr - minr) * (maxc - minc)
        # add circularity: 4*pi*area / perimeter^2
        p = perimeter(img[minr:maxr, minc:maxc])
        con = (area / (p * p)) * 4 * math.pi
        convex = np.array([con])
        hu = np.concatenate((hu, convex))

        # add density
        den = area / float(props.convex_area)
        dense = np.array([den])
        hu = np.concatenate((hu, dense))

        Features.append(hu)

    # print boxes

    plt.title('Bounding Boxes')
    if showbb == 1:
        io.show()

    return Features, boxes,
Example no. 21
def test_moments_normalized_spacing(anisotropic):
    image = np.zeros((20, 20), dtype=np.double)
    image[13:17, 13:17] = 1

    if not anisotropic:
        spacing1 = (1, 1)
        spacing2 = (3, 3)
    else:
        spacing1 = (1, 2)
        spacing2 = (2, 4)

    mu = moments_central(image, spacing=spacing1)
    nu = moments_normalized(mu, spacing=spacing1)

    mu2 = moments_central(image, spacing=spacing2)
    nu2 = moments_normalized(mu2, spacing=spacing2)

    # result should be invariant to absolute scale of spacing
    assert_almost_equal(nu, nu2)
Example no. 22
def test_moments_normalized_3d():
    image = draw.ellipsoid(1, 1, 10)
    mu_image = moments_central(image)
    nu = moments_normalized(mu_image)
    assert nu[0, 0, 2] > nu[0, 2, 0]
    assert_almost_equal(nu[0, 2, 0], nu[2, 0, 0])

    coords = np.where(image)
    mu_coords = moments_coords_central(coords)
    assert_almost_equal(mu_coords, mu_image)
Example no. 24
 def _describe(self, binary, steps=None):
     clipped = binary.clip(max=1)
     m = measure.moments(clipped)
     cr = m[0, 1] / m[0, 0]
     cc = m[1, 0] / m[0, 0]
     central = measure.moments_central(clipped, cr, cc)
     normalized = measure.moments_normalized(central)
     moments = measure.moments_hu(normalized)
     # NaN indicates a moment that could not be computed, but NaNs are
     # hard to handle in prediction, so set them to zero instead
     moments[np.isnan(moments)] = 0
     return moments
Example no. 25
def match_shapes(img_a: ndarray, img_b: ndarray):
    '''
        This function takes two images as input and returns
        the distances between them, computed from their Hu moments.
    '''

    # calculating the hu moments
    hu_a = moments_hu(moments_normalized(moments_central(img_a)))
    hu_b = moments_hu(moments_normalized(moments_central(img_b)))

    # changing to log scale
    hu_a = -1 * sign(hu_a) * log10(abs(hu_a))
    hu_b = -1 * sign(hu_b) * log10(abs(hu_b))

    # calculating 3 distances
    d1 = sum(abs((1 / hu_b) - (1 / hu_a)))
    d2 = sum(abs(hu_b - hu_a))
    d3 = sum(divide(abs(hu_a - hu_b), abs(hu_a)))

    # returning the distances
    return d1, d2, d3
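A short usage sketch of my own (assuming match_shapes and its star-imported numpy helpers sign, log10, divide and sum are in scope as above): comparing a shape with a rotated copy of itself gives distances close to zero, since Hu moments are rotation invariant.

import numpy as np

a = np.zeros((60, 60))
a[10:40, 10:20] = 1.0      # vertical limb of an asymmetric L-shape
a[30:40, 10:45] = 1.0      # horizontal limb

b = np.rot90(a)            # the same shape rotated by 90 degrees

d1, d2, d3 = match_shapes(a, b)
print(d1, d2, d3)          # all three distances are ~0 for matching shapes

The asymmetric L-shape avoids inputs whose higher-order Hu moments are exactly zero, for which the log10 inside match_shapes would produce NaNs.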
Example no. 26
def test_moments_dtype(dtype):
    image = np.zeros((20, 20), dtype=dtype)
    image[13:15, 13:17] = 1

    expected_dtype = _supported_float_type(image)
    mu = moments_central(image, (13.5, 14.5))
    assert mu.dtype == expected_dtype

    nu = moments_normalized(mu)
    assert nu.dtype == expected_dtype

    hu = moments_hu(nu)
    assert hu.dtype == expected_dtype
Example no. 27
def extract_features(img):
    # This function extracts our features from an image. It computes
    # 8 (not just the usual 7) Hu geometric moments: we first compute the raw
    # moments, then the central and normalized moments, and finally the Hu
    # moments plus one extra invariant.
    m = moments(img)
    cr = m[0,1] / m[0,0]
    cc = m[1,0] / m[0,0]
    mc = moments_central(img, cr, cc)
    mn = moments_normalized(mc)
    hu = moments_hu(mn)
    i8 = mn[1, 1] * ( (mn[3, 0] + mn[1, 2])**2 - (mn[0,3]+mn[2,1])**2 ) - (mn[2,0] - mn[0,2]) * (mn[3,0] + mn[1,2]) * (mn[0,3] + mn[2,1])
    return append(hu, [i8])
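Written out in terms of the normalized central moments eta_pq, the extra invariant computed in the last line above is

I_8 = \eta_{11}\left[(\eta_{30} + \eta_{12})^2 - (\eta_{03} + \eta_{21})^2\right] - (\eta_{20} - \eta_{02})(\eta_{30} + \eta_{12})(\eta_{03} + \eta_{21})

an additional moment invariant often quoted alongside Hu's original seven.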
Example no. 28
def extract_features(roi, props):
    features = []
    m = moments(roi)
    cr = m[0, 1] / m[0, 0]
    cc = m[1, 0] / m[0, 0]
    mu = moments_central(roi, cr, cc)
    nu = moments_normalized(mu)
    hu = moments_hu(nu)
    features.extend(hu)
    features.append(roi.shape[1] / roi.shape[0])
    features.append(props.eccentricity)
    features.append(props.convex_area / props.area)
    features.append(props.orientation)
    features.append(props.euler_number)

    return np.array([features])
Example no. 29
def apply_threshold(img, fname, classes, locations, Features):
    th = 230
    img_binary = (img < th).astype(np.double)
    io.imshow(img_binary)
    plt.title('Binary Image')
    io.show()

    img_label = label(img_binary, background=0)
    io.imshow(img_label)
    plt.title('Labeled Image')
    io.show()
    print np.amax(img_label)

    regions = regionprops(img_label)
    io.imshow(img_binary)
    ax = plt.gca()

    ypred = []

    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        ax.add_patch(
            Rectangle((minc, minr),
                      maxc - minc,
                      maxr - minr,
                      fill=False,
                      edgecolor='red',
                      linewidth=1))
        roi = img_binary[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(roi, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        pred_coord = np.array(list(props.centroid))
        ypred.append(np.flipud(pred_coord))

    ax.set_title('Bounding Boxes')
    plt.savefig('bounding_boxes_image_' + fname + '.png')
    io.show()

    t = np.array(ypred)
    x = t.astype(int)
    D = cdist(x, locations)
    print_matrix(D)
Example no. 30
    def _hu_moments(roi: coo_matrix) -> np.ndarray:
        """Returns the 7 Hu moments for an ROI image. See 
        https://scikit-image.org/docs/0.17.x/api/skimage.measure.html#moments-hu        # noqa
        for more information.

        Returns
        -------
        7-element, 1d np.array of Hu's image moments

        References
        ----------
        M. K. Hu, “Visual Pattern Recognition by Moment Invariants”, 
        IRE Trans. Info. Theory, vol. IT-8, pp. 179-187, 1962
        """
        roi_image = roi.toarray()
        mu = moments_central(roi_image)
        nu = moments_normalized(mu)
        return moments_hu(nu)
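A minimal usage sketch of my own for the pipeline above, assuming a recent scikit-image where moments_central defaults to the image centroid:

import numpy as np
from scipy.sparse import coo_matrix
from skimage.measure import moments_central, moments_normalized, moments_hu

dense = np.zeros((16, 16))
dense[4:10, 3:13] = 1.0        # toy ROI mask
roi = coo_matrix(dense)        # the sparse form the method expects

mu = moments_central(roi.toarray())   # centroid is computed internally
nu = moments_normalized(mu)
hu = moments_hu(nu)
print(hu.shape)                # (7,) -- the seven Hu invariants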
Example no. 31
def apply_threshold(img, fname, show, th_value):
    global Features, label_img
    th = th_value
    img_binary = (img < th).astype(np.double)
    if show[2]:
        io.imshow(img_binary)
        plt.title('Binary Image')
        io.show()
    img_label = label(img_binary, background=0)
    if show[3]:
        io.imshow(img_label)
        plt.title('Labeled Image')
        io.show()
    print fname + str(th)
    print np.amax(img_label)

    regions = regionprops(img_label)
    io.imshow(img_binary)
    ax = plt.gca()
    for props in regions:
        minr, minc, maxr, maxc = props.bbox
        ax.add_patch(
            Rectangle((minc, minr),
                      maxc - minc,
                      maxr - minr,
                      fill=False,
                      edgecolor='red',
                      linewidth=1))
        roi = img_binary[minr:maxr, minc:maxc]
        m = moments(roi)
        cr = m[0, 1] / m[0, 0]
        cc = m[1, 0] / m[0, 0]
        mu = moments_central(roi, cr, cc)
        nu = moments_normalized(mu)
        hu = moments_hu(nu)
        Features.append(hu)
        label_img.append(fname)

    if show[4]:
        ax.set_title('Bounding Boxes')
        plt.savefig('bounding_boxes_image_' + fname + '.png')
        io.show()
    else:
        ax.cla()
Example no. 32
    def calculate_features(self, feature_mode, debug=False):

        fruit_image = io.imread(self.path, as_gray=True)

        sigma = 0.005 * fruit_image.shape[0]
        filtered_fruit = filters.gaussian(fruit_image, sigma=sigma)

        # Apply triangle threshold to gaussian filtered image
        self.threshold = filters.threshold_triangle(filtered_fruit)
        thresholded_fruit = filtered_fruit < self.threshold

        fruit_central_moments = measure.moments_central(thresholded_fruit)
        hu_moments = measure.moments_hu(
            measure.moments_normalized(fruit_central_moments))
        # We only keep relevant hu moments, that is, components 1 and 3
        self.hu_moments = hu_moments[[1, 3]]

        # And we apply a log transform to them
        self.hu_moments = np.array([
            -1 * np.sign(j) * np.log10(np.abs(j)) for j in self.hu_moments[:]
        ])

        fruit_eigvalues = measure.inertia_tensor_eigvals(
            thresholded_fruit, mu=fruit_central_moments)
        self.moment_ratio = max(fruit_eigvalues) / min(fruit_eigvalues)

        if feature_mode == 'hu_plus_ratio':
            self.features = np.append(self.hu_moments, self.moment_ratio)
            self.feature_size = 3
        elif feature_mode == 'hu_only':
            self.features = np.array(self.hu_moments)
            self.feature_size = 2

        if debug:
            print("threshold: \n", self.threshold)
            print("Central moments: \n", fruit_central_moments)
            print("Hu moments:\n", hu_moments)
            print(
                "Normalized Hu moments:\n",
                [-1 * np.sign(j) * np.log10(np.abs(j)) for j in hu_moments[:]])
            print("Inertia tensor eigenvalues:\n", fruit_eigvalues)
            print("Moment ratio:\n", self.moment_ratio)
            print("Features: \n", self.features)
Example no. 33
    def momentos(self):
        """
            Compute the 7 Hu moments and the raw and central moments of order 1 and 2

        """

        m = measure.moments(self.imagemTonsDeCinza)

        valores_m = [m[p, q] for (p, q) in momentsOrder]
        nomes_m = [
            M + str(p) + str(q)
            for M, (p, q) in zip(['M_'] * len(momentsOrder), momentsOrder)
        ]

        row = m[0, 1] / m[0, 0]
        col = m[1, 0] / m[0, 0]

        mu = measure.moments_central(self.imagemTonsDeCinza, row, col)

        valores_mu = [mu[p, q] for (p, q) in momentsOrder]
        nomes_mu = [
            M + str(p) + str(q)
            for M, (p, q) in zip(['Mu_'] * len(momentsOrder), momentsOrder)
        ]

        nu = measure.moments_normalized(mu)
        hu = measure.moments_hu(nu)

        valores_hu = list(hu)
        nomes_hu = [
            m + n for m, n in zip(['Hu_'] * len(valores_hu),
                                  map(str, range(0, len(valores_hu))))
        ]

        valores = valores_m + valores_mu + valores_hu
        nomes = nomes_m + nomes_mu + nomes_hu

        tipos = [numerico] * len(nomes)

        return nomes, tipos, valores
Example no. 34
    def momentos_hu(self):
        """
            Compute the 7 Hu moments

        """

        m = measure.moments(self.imagemTonsDeCinza)

        row = m[0, 1] / m[0, 0]
        col = m[1, 0] / m[0, 0]

        mu = measure.moments_central(self.imagemTonsDeCinza,row,col)
        nu = measure.moments_normalized(mu)
        hu = measure.moments_hu(nu)

        valores = list(hu)

        nomes = [m+n for m,n in zip(['hu_'] * len(valores),map(str,range(0,len(valores))))]

        tipos = [numerico] * len(nomes)

        return nomes, tipos, valores
Example no. 36
def test_moments_normalized_invalid():
    with testing.raises(ValueError):
        moments_normalized(np.zeros((3, 3)), 3)
    with testing.raises(ValueError):
        moments_normalized(np.zeros((3, 3)), 4)