Example #1
    def iter_points(self, y_offset, x_offset):
        '''Iterates over point representations of segmented objects.
        The coordinates of the centroid points are relative to the global map,
        i.e. an offset is added to the image site specific coordinates.

        Parameters
        ----------
        y_offset: int
            global vertical offset that needs to be subtracted from
            *y*-coordinates (*y*-axis is inverted)
        x_offset: int
            global horizontal offset that needs to be added to x-coordinates

        Returns
        -------
        Generator[Tuple[Union[int, shapely.geometry.point.Point]]]
            time point, z-plane, label and point geometry
        '''
        logger.debug('calculate centroids for objects of type "%s"', self.key)
        points = dict()
        for (t, z), plane in self.iter_planes():
            centroids = mh.center_of_mass(plane, labels=plane)
            centroids[:, 1] += x_offset
            centroids[:, 0] += y_offset
            centroids[:, 0] *= -1
            for label in self.labels:
                y = int(centroids[label, 0])
                x = int(centroids[label, 1])
                point = shapely.geometry.Point(x, y)
                yield (t, z, label, point)
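A minimal usage sketch (not part of the original class; `objects` and the zero offsets are assumptions for illustration): collect the yielded tuples into a dictionary keyed by time point, z-plane and label.

# Hedged usage sketch; `objects` is assumed to be an instance providing iter_points().
points = {
    (t, z, label): point
    for t, z, label, point in objects.iter_points(y_offset=0, x_offset=0)
}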
Example #2
def center_extent(image, size):
    (eW, eH) = size

    if image.shape[1] > image.shape[0]:
        ratio = eW / image.shape[1]
        image = cv2.resize(image, (eW, int(ratio * image.shape[0])))
    else:
        ratio = eH / image.shape[0]
        #debug
        #print "ratio is",ratio
        image = cv2.resize(image, (int(ratio * image.shape[1]), eH))

    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2

    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image

    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype("int32")
    (dx, dy) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dx], [0, 1, dy]])
    extent = cv2.warpAffine(extent, M, size)

    return extent
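A minimal usage sketch (the input file name is hypothetical; cv2, numpy and mahotas are assumed to be importable as in the function above): threshold a grayscale digit and center it on a 20 x 20 canvas.

import cv2

digit = cv2.imread("digit.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input image
_, digit = cv2.threshold(digit, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
centered = center_extent(digit, (20, 20))
cv2.imwrite("digit_centered.png", centered)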
Example #3
    def extract(self):
        '''Extracts point pattern features.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in
            :attr:`label_image <jtlib.features.PointPattern.label_image>`
        '''

        logger.info('extract point pattern features')
        features = dict()
        for obj in self.parent_object_ids:
            parent_obj_img = self.get_parent_object_mask_image(obj)
            points_img = self.get_points_object_label_image(obj)
            point_ids = np.unique(points_img)[1:]
            mh.labeled.relabel(points_img, inplace=True)

            size = np.sum(parent_obj_img)
            abs_border_dist_img = mh.distance(parent_obj_img).astype(float)
            rel_border_dist_img = abs_border_dist_img / size
            centroids = mh.center_of_mass(points_img, labels=points_img)
            centroids = centroids[1:, :].astype(int)
            abs_distance_matrix = squareform(pdist(centroids))
            rel_distance_matrix = abs_distance_matrix / size

            indexer = np.arange(centroids.shape[0])
            if len(indexer) == 0:
                continue
            if len(indexer) == 1:
                y, x = centroids[0, :]
                values = [
                    abs_border_dist_img[y, x], rel_border_dist_img[y, x],
                    np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
                ]
                features[point_ids[0]] = values
                continue
            for i, (y, x) in enumerate(centroids):
                idx = indexer != i
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nanmin(abs_distance_matrix[i, idx]),
                    np.nanmin(rel_distance_matrix[i, idx]),
                    np.nanmean(abs_distance_matrix[i, idx]),
                    np.nanstd(abs_distance_matrix[i, idx]),
                    np.nanmean(rel_distance_matrix[i, idx]),
                    np.nanstd(rel_distance_matrix[i, idx]),
                ]
                features[point_ids[i]] = values

        ids = features.keys()
        values = list()
        nans = [np.nan for _ in range(len(self.names))]
        for i in self.object_ids:
            if i not in ids:
                logger.warning('values missing for object #%d', i)
                features[i] = nans
            values.append(features[i])
        return pd.DataFrame(values, columns=self.names, index=self.object_ids)
Example #4
def center_extent(image, size):
    (eW, eH) = size

    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)

    extent = np.zeros((eH, eW), dtype='uint8')
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0], offsetX:offsetX+image.shape[1]] = image

    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype("int32")

    # ret, thresh = cv2.threshold(image, 127, 255, 0)
    # contours, hierarchy = cv2.findContours(thresh, 1, 2)

    # cnt = contours[0]
    # M = cv2.moments(cnt)
    # cX = int(M['m10'] / M['m00'])
    # cY = int(M['m01'] / M['m00'])


    (dX, dY) = ((size[0]//2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)

    return extent
Example #5
    def iter_points(self, y_offset, x_offset):
        '''Iterates over point representations of segmented objects.
        The coordinates of the centroid points are relative to the global map,
        i.e. an offset is added to the image site specific coordinates.

        Parameters
        ----------
        y_offset: int
            global vertical offset that needs to be subtracted from
            *y*-coordinates (*y*-axis is inverted)
        x_offset: int
            global horizontal offset that needs to be added to x-coordinates

        Returns
        -------
        Generator[Tuple[Union[int, shapely.geometry.point.Point]]]
            time point, z-plane, label and point geometry
        '''
        logger.debug('calculate centroids for objects of type "%s"', self.key)
        points = dict()
        for (t, z), plane in self.iter_planes():
            centroids = mh.center_of_mass(plane, labels=plane)
            centroids[:, 1] += x_offset
            centroids[:, 0] += y_offset
            centroids[:, 0] *= -1
            for label in self.labels:
                y = int(centroids[label, 0])
                x = int(centroids[label, 1])
                point = shapely.geometry.Point(x, y)
                yield (t, z, label, point)
Example #6
def center_extent(image, size):
	# Grab the extent width and height
	(w, h) = size

	# When the width is greater than the height
	if image.shape[1] > image.shape[0]:
		image = imutils.resize(image, width=w)
	# When the height is greater than the width
	else:
		image = imutils.resize(image, height=h)

	# Save memory for the extent of the image and grab it
	extent = np.zeros((h, w), dtype="uint8")
	offset_x = (w - image.shape[1]) // 2
	offset_y = (h - image.shape[0]) // 2
	extent[offset_y:offset_y + image.shape[0], offset_x:offset_x + image.shape[1]] = image

	# Compute the center of mass of the image and then move the center of mass to the center of the image
	(c_y, c_x) = np.round(mahotas.center_of_mass(extent)).astype("int32")
	(d_x, d_y) = ((size[0] // 2) - c_x, (size[1] // 2) - c_y)
	matrix = np.float32([[1, 0, d_x], [0, 1, d_y]])
	extent = cv2.warpAffine(extent, matrix, size)

	# Return the extent of the image
	return extent
Example #7
    def _MNIST_preprocess(self, img):
        """
        Applies image processing that mirrors the preprocessing of the
        MNIST dataset.
        Assumes image has been cropped to bounding box.

        Scales image down to 20 x 20 then centered in a 28 x 28 grid
        based on the centre of mass of input image
        """

        # get ratio to scale image down to 20x20
        ratio = min(20./img.shape[0], 20./img.shape[1])
        scaleshape = (int(round(img.shape[0] * ratio)), int(round(img.shape[1] * ratio)))
        norm = mh.resize.resize_to(img, scaleshape)
    

        # position center of mass of image in a 28x28 field
        dest = np.zeros((28, 28))
        COM = mh.center_of_mass(norm)
        print(COM)
        (x, y) = (13.5, 13.5) - COM
        (x, y) = (int(round(x)), int(round(y)))
        try:
            dest[x:x + norm.shape[0], y:y + norm.shape[1]] = norm
        except ValueError:
            dest = None
        return dest
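A hedged usage sketch (the file name and the `recognizer` instance are assumptions): crop a glyph to its bounding box with mahotas.bbox before handing it to _MNIST_preprocess, as the docstring requires.

import mahotas as mh

img = mh.imread("glyph.png", as_grey=True)       # hypothetical input image
min0, max0, min1, max1 = mh.bbox(img > 0)
cropped = img[min0:max0, min1:max1]
digit28 = recognizer._MNIST_preprocess(cropped)  # `recognizer`: an assumed instance of the class above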
Example #8
    def preprocess(self, image):
        # grab the extent width and height
        (eW, eH) = self.size

        # handle the case when the width is greater than the height
        if image.shape[1] > image.shape[0]:
            image = imutils.resize(image, width = eW)

        # otherwise, the height is greater than the width
        else:
            image = imutils.resize(image, height = eH)


        extent = np.zeros((eH, eW), dtype = "uint8")
        offsetX = (eW - image.shape[1]) // 2
        offsetY = (eH - image.shape[0]) // 2
        extent[offsetY:offsetY + image.shape[0], offsetX:offsetX + image.shape[1]] = image

        # compute the center of mass of the image and then
        # move the center of mass to the center of the image
        (cY, cX) = np.round(mahotas.center_of_mass(extent)).astype("int32")
        (dX, dY) = ((self.size[0] // 2) - cX, (self.size[1] // 2) - cY)
        M = np.float32([[1, 0, dX], [0, 1, dY]])
        extent = cv2.warpAffine(extent, M, self.size)

        # return the extent of the image
        return extent
Example #9
def center_extent(image, size):
    (eW, eH) = size
    print("eW, eH = %s" % str(size))
    print("image.shape=%s" % str(image.shape))

    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)

    print("resize image.shape=%s" % str(image.shape))
    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image
    print("offsetX, offsetY = (%s, %s)" % (offsetX, offsetY))
    cv2.imshow("extent", extent)

    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)
    print("CM=%s" % CM)
    print("(cY, cX) = (%s, %s)" % (cY, cX))
    print("(dX, dY) = (%s, %s)" % (dX, dY))
    print("M=%s" % M)
    cv2.imshow("extent1", extent)
    return extent
Example #10
def center_extent(image, size):
    # grab the extent width and height
    (eW, eH) = size

    # handle when the width is greater than the height
    if image.shape[1] > image.shape[0]:
        image = resize(image, width=eW)

    # otherwise, the height is greater than the width
    else:
        image = resize(image, height=eH)

    # allocate memory for the extent of the image and
    # grab it
    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image

    # compute the center of mass of the image and then
    # move the center of mass to the center of the image
    (cY, cX) = np.round(mahotas.center_of_mass(extent)).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)

    # return the extent of the image
    return extent
Example #11
def center_extent(image, size):
    (eW, eH) = size

    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)

    else:
        image = imutils.resize(image, height=eH)

    # allocate memory for extent of the image and grab it
    extent = np.zeros((eH, eW), dtype='uint8')
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image

    # compute the center of mass of the image and then
    # move the center of mass to the center of the image
    (cY, cX) = np.round(mahotas.center_of_mass(extent)).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)

    # return the extent of the image
    return extent
Example #12
    def analyze_edu_hist_eps(self, file, dapi_coords, checked):
        """
        Calculates the number of counted cells and their coordinates with histogram
        equalization and Gaussian filter preprocessing and epsilon quality control.

        Parameters
        ----------
        file : str
            The path to the image.
        dapi_coords : list
            Coordinates of all the cell "centers" in the DAPI channel. Used as a reference.
        checked : list
            Keeps track of which cells have already been counted in other channels.

        Returns
        -------
        list
            Coordinates of all the cell "centers" in the EdU channel.
        int
            The number of cells counted in the image.
        list
            Keeps track of which cells have already been counted in other channels.
        """

        img = mh.imread(file)
        imgg = mh.colors.rgb2gray(img)
        imgg = eq.hist_eq(imgg)
        imggf = mh.gaussian_filter(imgg,15.3).astype(np.uint8)
        rmax = mh.regmax(imggf)
        edu_seeds, edu_nuclei = mh.label(rmax)
        edu_coords = mh.center_of_mass(imgg,labels=edu_seeds)
        count, checked = self.epsilon(edu_coords,dapi_coords,checked)
        return edu_coords, count, checked
Example #13
def center_extent(image, size):
    (eW, eH) = size

    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)

    extent = np.zeros((eH, eW), dtype="uint8")

    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image
    # try to visualize what's happening; it's simple

    CM = mahotas.center_of_mass(extent)  # weighted mean of white pixels in the image
    (cY, cX) = np.round(CM).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    #M = np.float32([[1, 0, dX], [0, 1, dY]])
    #extent = cv2.warpAffine(extent, M, size)
    extent = imutils.translate(extent, dX, dY)
    #the above two lines are nothing but translation
    return extent
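A small standalone illustration of the closing comment (assuming OpenCV, numpy and imutils are installed): imutils.translate is a thin convenience wrapper that builds the same 2x3 matrix and calls cv2.warpAffine, so both paths should produce identical output.

import cv2
import imutils
import numpy as np

img = np.zeros((100, 100), dtype="uint8")
img[10:30, 10:30] = 255
M = np.float32([[1, 0, 25], [0, 1, 40]])
shifted_cv = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
shifted_imutils = imutils.translate(img, 25, 40)
print(np.array_equal(shifted_cv, shifted_imutils))   # expected: True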
Example #14
    def analyze_edu(self, file):
        """
        Calculates the number of counted cells and their coordinates with Gaussian
        filter preprocessing.

        Parameters
        ----------
        file : str
            The path to the image.

        Returns
        -------
        int
            The number of cells counted in the image.
        list
            Coordinates of all the cell "centers" in the EdU channel.
        """

        img = mh.imread(file)
        imgg = mh.colors.rgb2gray(img)
        imggf = mh.gaussian_filter(imgg,11).astype(np.uint8)
        rmax = mh.regmax(imggf)
        edu_seeds, edu_nuclei = mh.label(rmax)
        edu_coords = mh.center_of_mass(imgg,labels=edu_seeds)
        return edu_nuclei,edu_coords
Example #15
def center_extent(image, size):
    # grab the extent width and height
    (eW, eH) = size

    # handle when the width is greater than the height
    if image.shape[1] > image.shape[0]:
        image = resize(image, width=eW)

    # otherwise, the height is greater than the width
    else:
        image = resize(image, height=eH)

    # allocate memory for the extent of the image and
    # grab it
    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image

    # compute the center of mass of the image and then
    # move the center of mass to the center of the image
    (cY, cX) = np.round(mahotas.center_of_mass(extent)).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)

    # return the extent of the image
    return extent
Example #16
    def analyze_dapi_hist(self, file):
        """
        Calculates the number of counted cells and their coordinates with histogram
        equalization and Gaussian filter preprocessing.

        Parameters
        ----------
        file : str
            The path to the image.

        Returns
        -------
        list
            The coordinates of all the cell "centers."
        int
            The number of cells counted in the image.
        """

        img = mh.imread(file)
        imgg = mh.colors.rgb2gray(img)
        imgg = eq.hist_eq(imgg)
        imggf = mh.gaussian_filter(imgg, 7.5).astype(np.uint8)
        rmax = mh.regmax(imggf)
        dapi_seeds, dapi_nuclei = mh.label(rmax)
        dapi_coords = mh.center_of_mass(imgg, labels=dapi_seeds)
        return dapi_coords, dapi_nuclei
Example #17
def get_border_center(border, border_yx):
    node = mh.center_of_mass(border)
    nodes = border_yx
    nodes = np.asarray(nodes)
    deltas = nodes - node
    dist_2 = np.einsum('ij,ij->i', deltas, deltas)
    border_center = border_yx[np.argmin(dist_2)]

    return border_center
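A small standalone sketch of the idea above (the toy ring mask is an assumption): given a binary border, pick the border pixel closest to the border's center of mass.

import mahotas as mh
import numpy as np

border = np.zeros((9, 9), dtype=np.uint8)
border[2, 2:7] = border[6, 2:7] = 1
border[2:7, 2] = border[2:7, 6] = 1
border_yx = list(zip(*np.nonzero(border)))

print(get_border_center(border, border_yx))   # a (y, x) pixel on the ring nearest to its centroid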
Example #18
def get_tree_center(seg, labels, mask=npy.empty([1])):
    if npy.size(mask) == 1:
        mask = npy.ones_like(labels)
    center = mh.center_of_mass(mask * seg, labels=mask * labels)
    center = center[~npy.isnan(center).any(axis=1)]  # drop rows containing NaN values

    center = center[center[:, 1].argsort()]

    kd = ssp.KDTree(center)
    return center, kd
Example #19
    def centerOfMass(self):
        # Returns the center of mass of img.
        # http://mahotas.readthedocs.io/en/latest/api.html#mahotas.center_of_mass
        # mahotas.center_of_mass(img, labels=None)
        # If labels is given, then it returns L centers of mass,
        # one for each region identified by labels (including region 0).
        # Return: The exact shape of the output
        # depends on whether the labels argument was used.
        # If labels is None, then the return value
        # is a 1-ndarray of coordinates (size = len(img.shape));
        # otherwise, the return value is a 2-ndarray of coordinates
        # (shape = (labels.max()+1, len(img.shape))).
        return mh.center_of_mass(self.imgArray)
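A quick standalone demonstration of the return shapes described in the comments above (mahotas and numpy are assumed to be installed):

import mahotas as mh
import numpy as np

img = np.zeros((8, 8), dtype=np.uint8)
img[1:3, 1:3] = 1    # object 1
img[5:8, 5:8] = 1    # object 2
labels, n = mh.label(img)

print(mh.center_of_mass(img))                  # 1-ndarray: one (y, x) pair for the whole image
print(mh.center_of_mass(img, labels=labels))   # 2-ndarray: one (y, x) row per label, including label 0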
Example #20
def peppers():
    # This last image is the peppers.png file
    my_image = mh.imread(filename3)
    T = mh.otsu(my_image)
    b_image = (my_image > T)
    g_image = mh.gaussian_filter(b_image, 15)
    rmax = mh.regmax(g_image)
    labeled, nr_objects = mh.label(rmax)
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The peppers.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]"               %(o, round(center[1], 0), round(center[0], 0))
        o = o + 1
Example #21
def peppers():
    # This last image is the peppers.png file
    my_image = mh.imread(filename3)
    T = mh.otsu(my_image)
    b_image = (my_image > T)
    g_image = mh.gaussian_filter(b_image, 15)
    rmax = mh.regmax(g_image)
    labeled, nr_objects = mh.label(rmax)
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The peppers.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]" % (o, round(
            center[1], 0), round(center[0], 0))
        o = o + 1
Example #22
    def get_com(self, scan_filter=(10, 10)):
        """
        Calculates center of mass of particle using regional maxima calculated over the entire matrix

        Parameters
        -----------
        scan_filter : tuple of int
            size of a weighted square region for regional maxima identification
        """
        self.maxes = mh.regmax(self.image, Bc=np.ones(scan_filter)).astype(int)
        self.spots, n_spots = mh.label(self.maxes, Bc=np.ones(scan_filter))
        com = mh.center_of_mass(self.image, self.spots)
        plt.imshow(self.spots)
        self.com = com
        return
Example #23
File: imcore.py Project: thanasi/imaging
def eccentricity(im):
    ''' im should be a binary image with a single region '''
    
    c = mahotas.center_of_mass(im)
    
    m11 = mahotas.moments(im, 1, 1, c)
    m02 = mahotas.moments(im, 2, 0, c)
    m20 = mahotas.moments(im, 0, 2, c)
    
    l1 = (m20 + m02) / 2 + np.sqrt(4*m11**2 + (m20-m02)**2) / 2
    
    l2 = (m20 + m02) / 2 - np.sqrt(4*m11**2 + (m20-m02)**2) / 2
    
    e = np.sqrt(1 - l2 / l1)
    
    return e
Example #24
def center_extent(image, size):
    (eW, eH) = size
    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=eW)
    else:
        image = imutils.resize(image, height=eH)
    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0],
           offsetX:offsetX + image.shape[1]] = image
    CM = mahotas.center_of_mass(extent)
    (cY, cX) = np.round(CM).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    extent = cv2.warpAffine(extent, M, size)
    return extent
Example #25
def mass_center(img, dim):
	(width, height) = dim
	if img.shape[1] > img.shape[0]:
		img = imt.resize(img, width=width)
	else:
		img = imt.resize(img, height=height)

	output = np.zeros((height, width), dtype="uint8")
	dist_x = (width - img.shape[1]) // 2
	dist_y = (height - img.shape[0]) // 2
	output[dist_y:dist_y + img.shape[0], dist_x:dist_x + img.shape[1]] = img

	(cen_y, cen_x) = np.round(ms.center_of_mass(output)).astype("int32")
	(cor_x, cor_y) = ((dim[0] // 2) - cen_x, (dim[1] // 2) - cen_y)
	tmp = np.float32([[1, 0, cor_x], [0, 1, cor_y]])
	output = cv2.warpAffine(output, tmp, dim)
	return output
Example #26
def objects():
    # The second image is the objects.png file
    #import image from file
    my_image = mh.imread(filename2)
    #use the mean to form a binary image
    b_image = (my_image > my_image.mean())
    #use gaussian filter
    g_image = mh.gaussian_filter(b_image, 1.5)
    #count the number of objects in the picture
    labeled, nr_objects = mh.label(g_image)
    #find center point for each object
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The objects.png contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]"               %(o, round(center[1], 0), round(center[0], 0))
        o = o + 1
Example #27
File: shape.py Project: ericjster/mahotas
def eccentricity(bwimage):
    """
    ecc = eccentricity(bwimage)

    Compute eccentricity

    Parameters
    ----------
    bwimage : ndarray
        Interpreted as a boolean image

    Returns
    -------
    r : float
        Eccentricity measure
    """
    from .moments import moments
    bwimage = _make_binary(bwimage)

    area = np.sum(bwimage)
    if area == 0:
        return 0.
    hull = np.sum(bwperim(bwimage))

    cof = mh.center_of_mass(bwimage)
    hull_mu00 = moments(bwimage, 0, 0, cof)
    hull_mu11 = moments(bwimage, 1, 1, cof)
    hull_mu02 = moments(bwimage, 0, 2, cof)
    hull_mu20 = moments(bwimage, 2, 0, cof)

    # Parameters of the 'image ellipse'
    #   (the constant intensity ellipse with the same mass and
    #   second order moments as the original image.)
    #   From Prokop, RJ, and Reeves, AP.  1992. CVGIP: Graphical
    #   Models and Image Processing 54(5):438-460
    semimajor = np.sqrt((2 * (hull_mu20 + hull_mu02 + \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)

    semiminor = np.sqrt((2 * (hull_mu20 + hull_mu02 - \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)

    if semimajor == 0.:
        return 0.
    return  np.sqrt(semimajor**2 - semiminor**2) / semimajor
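A quick sanity check via the packaged mahotas API (mh.features.eccentricity, as also used in Example #31 below; the synthetic shapes are assumptions): a filled circle should score near 0, an elongated bar close to 1.

import mahotas as mh
import numpy as np

yy, xx = np.mgrid[:101, :101]
circle = ((yy - 50) ** 2 + (xx - 50) ** 2) <= 40 ** 2
bar = np.zeros((101, 101), dtype=bool)
bar[45:56, 5:96] = True

print(mh.features.eccentricity(circle))   # expected to be close to 0
print(mh.features.eccentricity(bar))      # expected to be close to 1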
Example #28
def objects():
    # The second image is the objects.png file
    #import image from file
    my_image = mh.imread(filename2)
    #use the mean to form a binary image
    b_image = (my_image > my_image.mean())
    #use gaussian filter
    g_image = mh.gaussian_filter(b_image, 1.5)
    #count the number of objects in the picture
    labeled, nr_objects = mh.label(g_image)
    #find center point for each object
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The objects.png contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: [ %s, %s ]" % (o, round(
            center[1], 0), round(center[0], 0))
        o = o + 1
Example #29
def ellipse_axes(bwimage):
    ''' Parameters of the 'image ellipse'

    semimajor,semiminor = ellipse_axes(bwimage)

    Returns the parameters of the constant intensity ellipse with the same mass
    and second order moments as the original image.

    Parameters
    ----------
    bwimage : ndarray
        Interpreted as a boolean image

    Returns
    -------
    semimajor : float
    semiminor : float

    References
    ----------
    Prokop, RJ, and Reeves, AP.  1992. CVGIP: Graphical Models and Image
    Processing 54(5):438-460

    '''
    from .moments import moments
    bwimage = _make_binary(bwimage)

    if not np.any(bwimage):
        return 0., 0.

    cof = mh.center_of_mass(bwimage)
    hull_mu00 = moments(bwimage, 0, 0, cof)
    hull_mu11 = moments(bwimage, 1, 1, cof)
    hull_mu02 = moments(bwimage, 0, 2, cof)
    hull_mu20 = moments(bwimage, 2, 0, cof)

    semimajor = np.sqrt((2 * (hull_mu20 + hull_mu02 + \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)

    semiminor = np.sqrt((2 * (hull_mu20 + hull_mu02 - \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)
    return semimajor, semiminor
Example #30
def eccentricity(bwimage):
    """
    ecc = eccentricity(bwimage)

    Compute eccentricity

    Parameters
    ----------
    bwimage : ndarray
        Interpreted as a boolean image

    Returns
    -------
    r : float
        Eccentricity measure
    """
    from .moments import moments
    bwimage = _make_binary(bwimage)

    if not np.any(bwimage):
        return 0

    cof = mh.center_of_mass(bwimage)
    hull_mu00 = moments(bwimage, 0, 0, cof)
    hull_mu11 = moments(bwimage, 1, 1, cof)
    hull_mu02 = moments(bwimage, 0, 2, cof)
    hull_mu20 = moments(bwimage, 2, 0, cof)

    # Parameters of the 'image ellipse'
    #   (the constant intensity ellipse with the same mass and
    #   second order moments as the original image.)
    #   From Prokop, RJ, and Reeves, AP.  1992. CVGIP: Graphical
    #   Models and Image Processing 54(5):438-460
    semimajor = np.sqrt((2 * (hull_mu20 + hull_mu02 + \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)

    semiminor = np.sqrt((2 * (hull_mu20 + hull_mu02 - \
                    np.sqrt((hull_mu20 - hull_mu02)**2 + \
                    4 * hull_mu11**2)))/hull_mu00)

    if semimajor == 0.:
        return 0.
    return  np.sqrt(semimajor**2 - semiminor**2) / semimajor
Example #31
    def extract(self):
        '''Extracts morphology features to measure the size and shape of objects.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in `label_image`
        '''
        logger.info('extract morphology features')
        features = list()
        cm = mh.center_of_mass(img=self.label_image > 0, labels=self.label_image)
        for obj in self.object_ids:
            mask = self.get_object_mask_image(obj)
            local_centroid_x = cm[obj][1]
            local_centroid_y = cm[obj][0]
            area = np.float64(np.count_nonzero(mask))
            perimeter = mh.labeled.perimeter(mask)
            if perimeter == 0:
                circularity = np.nan
            else:
                circularity = (4.0 * np.pi * area) / (perimeter**2)
            convex_hull = mh.polygon.fill_convexhull(mask)
            area_convex_hull = np.count_nonzero(convex_hull)
            convexity = area / area_convex_hull
            eccentricity = mh.features.eccentricity(mask)
            major_axis, minor_axis = mh.features.ellipse_axes(mask)
            if major_axis == 0:
                elongation = np.nan
            else:
                elongation = (major_axis - minor_axis) / major_axis
            values = [
                local_centroid_x, local_centroid_y, area, eccentricity, convexity, circularity, perimeter,
                elongation
            ]
            if self.compute_zernike:
                logger.debug('extract Zernike moments for object #%d', obj)
                r = 100
                mask_rs = mh.imresize(mask, [r*2, r*2])
                zernike_values = mh.features.zernike_moments(
                    mask_rs, degree=self._degree, radius=r
                )
                values.extend(zernike_values)
            features.append(values)
        return pd.DataFrame(features, columns=self.names, index=self.object_ids)
Example #32
def _calc_orientation(img):
    com = mh.center_of_mass(img)

    mu00 = mh.moments(img, 0, 0, com)
    mu11 = mh.moments(img, 1, 1, com)
    mu20 = mh.moments(img, 2, 0, com)
    mu02 = mh.moments(img, 0, 2, com)

    mup_20 = mu20 / mu00
    mup_02 = mu02 / mu00
    mup_11 = mu11 / mu00

    theta_rad = 0.5 * math.atan(2 * mup_11 /
                                (mup_20 - mup_02))  # todo math -> numpy
    theta = theta_rad * (180 / math.pi)
    if (mup_20 - mup_02) > 0:
        theta += 90

    return theta
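A hedged usage sketch (the synthetic bar is an assumption; mh and math are imported as in the module above): build a bar tilted by roughly 30 degrees and print the angle reported by _calc_orientation.

import numpy as np

yy, xx = np.mgrid[:201, :201]
angle = np.deg2rad(30)
u = (xx - 100) * np.cos(angle) + (yy - 100) * np.sin(angle)   # coordinate along the bar
v = -(xx - 100) * np.sin(angle) + (yy - 100) * np.cos(angle)  # coordinate across the bar
bar = ((np.abs(u) < 80) & (np.abs(v) < 8)).astype(float)

# The exact sign/offset of the result depends on the moment convention encoded above.
print(_calc_orientation(bar))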
Example #33
def find_peaks(image,
               min_distance=1,
               threshold_abs=None,
               threshold_rel=None,
               exclude_border=True,
               num_peaks=np.inf,
               footprint=None,
               Bc='square',
               max_size=None,
               verbose=True):

    bin_im = peak_local_max(image,
                            min_distance=min_distance,
                            threshold_abs=threshold_abs,
                            threshold_rel=threshold_rel,
                            exclude_border=exclude_border,
                            num_peaks=num_peaks,
                            footprint=footprint,
                            indices=False)

    struct_elems = {
        'cross': np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]),
        'square': np.ones((3, 3))
    }
    if type(Bc) == str:
        Bc = struct_elems[Bc]
    elif type(Bc) == np.ndarray:
        pass
    else:
        raise TypeError('Invalid structuring element Bc')

    labeled, n = mh.label(bin_im, Bc=Bc)
    l, n1 = mh.labeled.filter_labeled(labeled, max_size=1)
    if verbose and n - n1 != 0:
        print('Found {} flat peaks with size > 1'.format(n - n1))

    if max_size:
        labeled, n = mh.labeled.filter_labeled(labeled,
                                               max_size=max_size,
                                               remove_bordering=exclude_border)

    return mh.center_of_mass(bin_im, labeled)[1:]
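A minimal usage sketch (the synthetic image is an assumption; it relies on skimage's peak_local_max and mahotas being importable exactly as the function above expects): detect two isolated bright pixels and print their (y, x) centres.

import numpy as np

img = np.zeros((64, 64))
img[20, 20] = 1.0
img[40, 45] = 1.0

peaks = find_peaks(img, min_distance=5, exclude_border=False, verbose=False)
print(peaks)   # approximately [[20. 20.] [40. 45.]]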
Example #34
def imgcentmoments(img,x,y,cofy=None,cofx=None):
    """
    M_xy = imgcentmoments(img,x,y, cofy=None, cofx=None)

    @param cofy and cofx are optional and computed from the image if not given
    """
    if cofy is None or cofx is None:
        print('calling center_of_mass')
        cofy, cofx = center_of_mass(img)
    if not np.issubdtype(img.dtype, np.floating):
        img = img.astype(float)
    r, c = img.shape
    p = np.arange(c, dtype=float)
    p -= cofx
    p **= x
    inter = np.dot(img, p)
    p = np.arange(r, dtype=float)
    p -= cofy
    p **= y
    return np.dot(inter, p)
Example #35
def circles():
    # Image 1 is the circles.png file
    my_image = mh.imread(filename)
    # Threshold using the Riddler-Calvard method
    # More on Riddler-Calvard: http://mahotas.readthedocs.org/en/latest/thresholding.html
    thres = mh.rc(my_image)
    #use the value to form a binary image
    b_image = (my_image > thres)
    #use gaussian filter
    g_image = mh.gaussian_filter(b_image, 33)
    #separate objects stuck together
    rmax = mh.regmax(g_image)
    #count the number of objects in the picture
    labeled, nr_objects = mh.label(rmax)
    #find center point for each object
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The circles.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: %s" % (o, center)
        o = o + 1
Example #36
def circles():
    # Image 1 is the circles.png file
    my_image = mh.imread(filename)
    # Threshold using the Riddler-Calvard method 
    # More on Riddler-Calvard: http://mahotas.readthedocs.org/en/latest/thresholding.html
    thres = mh.rc(my_image)
    #use the value to form a binary image
    b_image = (my_image > thres)
    #use gaussian filter
    g_image = mh.gaussian_filter(b_image, 33)
    #separate objects stuck together
    rmax = mh.regmax(g_image)
    #count the number of objects in the picture
    labeled, nr_objects = mh.label(rmax)
    #find center point for each object
    centers = mh.center_of_mass(my_image, labeled)[1:]
    print "The circles.png file contains ", nr_objects, " objects."
    o = 1
    for center in centers:
        print "Object %s center: %s" %(o, center)
        o = o + 1
Example #37
def center_extent(image, size):
	(eW, eH) = size

	if image.shape[1] > image.shape[0]:
		image = imutils.resize(image, width = eW)

	else:
		image = imutils.resize(image, height = eH)

	extent = np.zeros((eH, eW), dtype='uint8')
	offsetX = (eW - image.shape[1]) // 2
	offsetY = (eH - image.shape[0]) // 2
	extent[offsetY:offsetY + image.shape[0],
	offsetX:offsetX+image.shape[1]] = image

	CM = mahotas.center_of_mass(extent)
	(cY, cX) = np.round(CM).astype('int32')
	(dX, dY) = ((size[0]//2)-cX, (size[1]//2)-cY)
	M = np.float32([[1, 0, dX], [0, 1, dY]])
	extent = cv2.warpAffine(extent, M, size)

	return extent
Example #38
def center_extent(image, size):
    (eW, eH) = size

    # if the width is greater than the height
    if image.shape[1] > image.shape[0]:
        image = resize(image, width=eW)
    else:
        image = resize(image, height=eH)

    extent = np.zeros((eH, eW), dtype="uint8")
    offsetX = (eW - image.shape[1]) // 2
    offsetY = (eH - image.shape[0]) // 2
    extent[offsetY:offsetY + image.shape[0], offsetX:offsetX + image.shape[1]] = image

    # compute the center of mass of the image
    (cY, cX) = np.round(mahotas.center_of_mass(extent)).astype("int32")
    (dX, dY) = ((size[0] // 2) - cX, (size[1] // 2) - cY)
    M = np.float32([[1, 0, dX], [0, 1, dY]])
    # move the center of mass to the center of the image
    extent = cv2.warpAffine(extent, M, size)

    # return the extent of the image
    return extent
Example #39
def center_extent(image, size):
    (e_w, e_h) = size

    if image.shape[1] > image.shape[0]:
        image = imutils.resize(image, width=e_w)
    else:
        image = imutils.resize(image, height=e_h)

    extent = np.zeros((e_h, e_w), dtype='uint8')

    offset_x = (e_w - image.shape[1]) // 2
    offset_y = (e_h - image.shape[0]) // 2
    extent[offset_y:offset_y + image.shape[0],
           offset_x:offset_x + image.shape[1]] = image

    cm = mahotas.center_of_mass(extent)
    (c_y, c_x) = np.round(cm).astype('int32')
    (d_x, d_y) = ((size[0] // 2) - c_x, (size[1] // 2) - c_y)

    M = np.float32([[1, 0, d_x], [0, 1, d_y]])
    extent = cv2.warpAffine(extent, M, size)

    return extent
Example #40
    def get_position(self, n_pix=9):
        """
        Gets particle position and size from watershed analysis

        Parameters
        ----------
        n_pix : float or int
            number of pixels in square array for peak labeling
        """

        self.n_pix = n_pix
        Bc1 = np.ones((self.n_pix, self.n_pix))
        self.seeds, self.n_seeds = mh.label(self.areas, Bc=Bc1)
        self.locg = mh.center_of_mass(self.original_image, self.seeds)
        self.locg = self.locg.astype(int)
        sg = mh.labeled.labeled_size(self.seeds)
        particle_radius_mean = np.sqrt(np.mean(sg[1:]) / np.pi) *\
            self.pix_to_micron
        sg = np.sqrt(sg[1:] / np.pi) * self.pix_to_micron
        pr_med = np.median(sg)
        pr_std = np.sqrt(np.std(sg[1:]) / np.pi) * self.pix_to_micron
        self.sg = sg
        return pr_med, particle_radius_mean, pr_std, sg
Example #41
    def run_on_objects(self, object_name, workspace):
        """Run, computing the area measurements for a single map of objects"""
        objects = workspace.get_objects(object_name)

        if len(objects.shape) == 2:
            #
            # Do the ellipse-related measurements
            #
            i, j, l = objects.ijv.transpose()
            centers, eccentricity, major_axis_length, minor_axis_length, \
            theta, compactness = \
                ellipse_from_second_moments_ijv(i, j, 1, l, objects.indices, True)
            del i
            del j
            del l
            self.record_measurement(workspace, object_name,
                                    F_ECCENTRICITY, eccentricity)
            self.record_measurement(workspace, object_name,
                                    F_MAJOR_AXIS_LENGTH, major_axis_length)
            self.record_measurement(workspace, object_name,
                                    F_MINOR_AXIS_LENGTH, minor_axis_length)
            self.record_measurement(workspace, object_name, F_ORIENTATION,
                                    theta * 180 / np.pi)
            self.record_measurement(workspace, object_name, F_COMPACTNESS,
                                    compactness)
            is_first = False
            if len(objects.indices) == 0:
                nobjects = 0
            else:
                nobjects = np.max(objects.indices)
            mcenter_x = np.zeros(nobjects)
            mcenter_y = np.zeros(nobjects)
            mextent = np.zeros(nobjects)
            mperimeters = np.zeros(nobjects)
            msolidity = np.zeros(nobjects)
            euler = np.zeros(nobjects)
            max_radius = np.zeros(nobjects)
            median_radius = np.zeros(nobjects)
            mean_radius = np.zeros(nobjects)
            min_feret_diameter = np.zeros(nobjects)
            max_feret_diameter = np.zeros(nobjects)
            zernike_numbers = self.get_zernike_numbers()
            zf = {}
            for n, m in zernike_numbers:
                zf[(n, m)] = np.zeros(nobjects)
            if nobjects > 0:
                chulls, chull_counts = convex_hull_ijv(objects.ijv, objects.indices)
                for labels, indices in objects.get_labels():
                    to_indices = indices - 1
                    distances = distance_to_edge(labels)
                    mcenter_y[to_indices], mcenter_x[to_indices] = \
                        maximum_position_of_labels(distances, labels, indices)
                    max_radius[to_indices] = fix(scind.maximum(
                            distances, labels, indices))
                    mean_radius[to_indices] = fix(scind.mean(
                            distances, labels, indices))
                    median_radius[to_indices] = median_of_labels(
                            distances, labels, indices)
                    #
                    # The extent (area / bounding box area)
                    #
                    mextent[to_indices] = calculate_extents(labels, indices)
                    #
                    # The perimeter distance
                    #
                    mperimeters[to_indices] = calculate_perimeters(labels, indices)
                    #
                    # Solidity
                    #
                    msolidity[to_indices] = calculate_solidity(labels, indices)
                    #
                    # Euler number
                    #
                    euler[to_indices] = euler_number(labels, indices)
                    #
                    # Zernike features
                    #
                    zf_l = cpmz.zernike(zernike_numbers, labels, indices)
                    for (n, m), z in zip(zernike_numbers, zf_l.transpose()):
                        zf[(n, m)][to_indices] = z
                #
                # Form factor
                #
                ff = 4.0 * np.pi * objects.areas / mperimeters ** 2
                #
                # Feret diameter
                #
                min_feret_diameter, max_feret_diameter = \
                    feret_diameter(chulls, chull_counts, objects.indices)

            else:
                ff = np.zeros(0)

            for f, m in ([(F_AREA, objects.areas),
                          (F_CENTER_X, mcenter_x),
                          (F_CENTER_Y, mcenter_y),
                          (F_CENTER_Z, np.ones_like(mcenter_x)),
                          (F_EXTENT, mextent),
                          (F_PERIMETER, mperimeters),
                          (F_SOLIDITY, msolidity),
                          (F_FORM_FACTOR, ff),
                          (F_EULER_NUMBER, euler),
                          (F_MAXIMUM_RADIUS, max_radius),
                          (F_MEAN_RADIUS, mean_radius),
                          (F_MEDIAN_RADIUS, median_radius),
                          (F_MIN_FERET_DIAMETER, min_feret_diameter),
                          (F_MAX_FERET_DIAMETER, max_feret_diameter)] +
                             [(self.get_zernike_name((n, m)), zf[(n, m)])
                              for n, m in zernike_numbers]):
                self.record_measurement(workspace, object_name, f, m)
        else:
            labels = objects.segmented

            props = skimage.measure.regionprops(labels)

            # Area
            areas = [prop.area for prop in props]

            self.record_measurement(workspace, object_name, F_AREA, areas)

            # Extent
            extents = [prop.extent for prop in props]

            self.record_measurement(workspace, object_name, F_EXTENT, extents)

            # Centers of mass
            import mahotas

            if objects.has_parent_image:
                image = objects.parent_image

                data = image.pixel_data

                spacing = image.spacing
            else:
                data = np.ones_like(labels)

                spacing = (1.0, 1.0, 1.0)

            centers = mahotas.center_of_mass(data, labels=labels)

            if np.any(labels == 0):
                # Remove the 0-label center of mass
                centers = centers[1:]

            center_z, center_x, center_y = centers.transpose()

            self.record_measurement(workspace, object_name, F_CENTER_X, center_x)

            self.record_measurement(workspace, object_name, F_CENTER_Y, center_y)

            self.record_measurement(workspace, object_name, F_CENTER_Z, center_z)

            # Perimeters
            perimeters = []

            for label in np.unique(labels):
                if label == 0:
                    continue

                volume = np.zeros_like(labels, dtype='bool')

                volume[labels == label] = True

                verts, faces = skimage.measure.marching_cubes(
                    volume,
                    spacing=spacing,
                    level=0
                )

                perimeters += [skimage.measure.mesh_surface_area(verts, faces)]

            if len(perimeters) == 0:
                self.record_measurement(workspace, object_name, F_PERIMETER, [0])
            else:
                self.record_measurement(workspace, object_name, F_PERIMETER, perimeters)
Example #42
 def f(im):
     return moments(im, p0, p1, cm=mh.center_of_mass(im), normalize=1)
Example #43
    def extract(self):
        '''Extracts point pattern features.

        Returns
        -------
        pandas.DataFrame
            extracted feature values for each object in
            :attr:`label_image <jtlib.features.PointPattern.label_image>`
        '''

        logger.info('extract point pattern features')
        features = dict()
        for obj in self.parent_object_ids:
            parent_obj_img = self.get_parent_object_mask_image(obj)
            points_img = self.get_points_object_label_image(obj)
            point_ids = np.unique(points_img)[1:]
            mh.labeled.relabel(points_img, inplace=True)

            size = np.sum(parent_obj_img)
            abs_border_dist_img = mh.distance(parent_obj_img).astype(float)
            rel_border_dist_img = abs_border_dist_img / size
            centroids = mh.center_of_mass(points_img, labels=points_img)
            centroids = centroids[1:, :].astype(int)

            indexer = np.arange(centroids.shape[0])
            if len(indexer) == 0:
                continue
            if len(indexer) == 1:
                y, x = centroids[0, :]
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan,
                    np.nan
                ]
                features[point_ids[0]] = values
                continue
            for i, c in enumerate(centroids):
                abs_distances = cdist([c], centroids)[0, :]
                rel_distances = abs_distances / size
                idx = indexer != i
                y, x = c
                values = [
                    abs_border_dist_img[y, x],
                    rel_border_dist_img[y, x],
                    np.nanmin(abs_distances[idx]),
                    np.nanmin(rel_distances[idx]),
                    np.nanmean(abs_distances[idx]),
                    np.nanmean(rel_distances[idx]),
                    np.nanstd(abs_distances[idx]),
                    np.nanstd(rel_distances[idx]),
                ]
                features[point_ids[i]] = values

        ids = features.keys()
        values = list()
        nans = [np.nan for _ in range(len(self.names))]
        for i in self.object_ids:
            if i not in ids:
                logger.warning('values missing for object #%d', i)
                features[i] = nans
            values.append(features[i])
        return pd.DataFrame(values, columns=self.names, index=self.object_ids)
Example #44
File: hw8.py Project: haobruce/CUNY
import scipy.ndimage as ndimage
import mahotas as mh


# circles.png img
img = imread('C:\\Users\\bhao\\Google Drive\\CUNY\\git\\DATA602\\circles.png')
imgf = ndimage.gaussian_filter(img, 16)
thres = imgf > np.percentile(imgf, [90])  # 90th percentile needed to separate into 5 objects

# count and label objects
labeled, nr_objects = mh.label(thres)
plt.imshow(labeled)
plt.show()

# find coordinates for centers of mass
com = mh.center_of_mass(thres, labeled)
plt.plot(com[:, 1], com[:, 0], 'r.')
plt.xlim([0, img.shape[1]])
plt.ylim([img.shape[0], 0])
plt.imshow(thres)
plt.show()

print('Circles.png:')
print('Number of objects:    %d' % nr_objects)
print('Coordinates of object centers:')
print(com)


# objects.png img
img = imread('C:\\Users\\bhao\\Google Drive\\CUNY\\git\\DATA602\\objects.png')
imgf = ndimage.gaussian_filter(img, 3)
Example #45
def compute_RMS_adjacent_frames(frame_1_fname, frame_2_fname, segments_1, segments_2):

    rms_error_2D = 0

    # original frames
    frame_1 = np.asarray(frame_1_fname, dtype=float)
    frame_2 = np.asarray(frame_2_fname, dtype=float)

    # separate RGB components of the frame (t)
    arr_R_1 = frame_1[:, :, 0]
    arr_G_1 = frame_1[:, :, 1]
    arr_B_1 = frame_1[:, :, 2]

    # separate RGB components of the frame (t+1)
    arr_R_2 = frame_2[:, :, 0]
    arr_G_2 = frame_2[:, :, 1]
    arr_B_2 = frame_2[:, :, 2]

    # get frame size
    height = segments_1.shape[0]
    width = segments_1.shape[1]

    # for labels presented in segmentation results
    for lb in set(np.unique(segments_1)).difference((0,)):

        #    arr_segment_mask = (segments_1 == lb)
        #    fn = str(lb)
        #    plot_array('Segment ' + fn, arr_segment_mask, 0, 1, 'segment-' + fn)

        # compute 2D and 3D RMS errors for one chosen label
        if lb == 221:

            # print 'label = ', lb

            # get masks for segments (t) and (t+1)
            arr_segment_mask_1 = segments_1 == lb
            arr_segment_mask_2 = segments_2 == lb

            fname = str(lb)
            #      plot_array('Mask (t) ' + fname, arr_segment_mask_1, 0, 1, 'mask-t-' + fname)
            #      plot_array('Mask (t+1) ' + fname, arr_segment_mask_2, 0, 1, 'mask-t2-' + fname)

            # calculate the center of masses for segments (t) and (t+1)
            y0, x0 = mahotas.center_of_mass(arr_segment_mask_1, labels=None)
            y1, x1 = mahotas.center_of_mass(arr_segment_mask_2, labels=None)

            y0 = round(y0)
            x0 = round(x0)

            y1 = round(y1)
            x1 = round(x1)

            # print 'y0 = ', y0, ' x0 = ', x0
            # print 'y1 = ', y1, ' x1 = ', x1

            delta_x = x1 - x0
            delta_y = y1 - y0

            # print 'delta_x = ', delta_x
            # print 'delta_y = ', delta_y

            # an array for a segment (t) shifted to the center of mass of a segment (t+1)
            arr_segment_mask_1_shift = np.zeros([height, width])

            # color data of the segment (t+1)
            arr_R_seg_2 = np.zeros([height, width])
            arr_G_seg_2 = np.zeros([height, width])
            arr_B_seg_2 = np.zeros([height, width])

            # color data of the segment (t) shifted to (t+1)
            arr_R_seg_1 = np.zeros([height, width])
            arr_G_seg_1 = np.zeros([height, width])
            arr_B_seg_1 = np.zeros([height, width])

            # shift the segment (t) to the center of mass of the segment (t+1)
            for i in range(height):
                for j in range(width):

                    cur_i = i + delta_y
                    cur_j = j + delta_x

                    if cur_i >= 0 and cur_i < height and cur_j >= 0 and cur_j < width:
                        arr_segment_mask_1_shift[cur_i][cur_j] = arr_segment_mask_1[i][j]
                        arr_R_seg_1[cur_i][cur_j] = arr_R_1[i][j]
                        arr_G_seg_1[cur_i][cur_j] = arr_G_1[i][j]
                        arr_B_seg_1[cur_i][cur_j] = arr_B_1[i][j]

            # compute overlap of segments (t) and (t+1)
            segments_overlap = arr_segment_mask_1_shift * arr_segment_mask_2
            #      plot_array('Segments overlap 2D ' + fname, segments_overlap, 0, 1, 'overlap-2D-' + fname)

            segments_error = arr_segment_mask_1_shift - segments_overlap
            #      plot_array('Segments error 2D ' + fname, segments_error, 0, 1, 'error-2D-' + fname)

            # color data of the segment (t) shifted to the center of mass of the segment (t+1)
            arr_R_seg_1 = arr_R_seg_1 * segments_overlap
            arr_G_seg_1 = arr_G_seg_1 * segments_overlap
            arr_B_seg_1 = arr_B_seg_1 * segments_overlap

            # color data of the segment (t+1)
            arr_R_seg_2 = arr_R_2 * segments_overlap
            arr_G_seg_2 = arr_G_2 * segments_overlap
            arr_B_seg_2 = arr_B_2 * segments_overlap

            # store color data of the segment for frames (t) and (t+1) respectively
            #      sp.misc.imsave('./segment-1.png',[arr_R_seg_1, arr_G_seg_1, arr_B_seg_1])
            #      sp.misc.imsave('./segment-2.png',[arr_R_seg_2, arr_G_seg_2, arr_B_seg_2])

            # calculate RMS error for 2D
            cmp_image = (
                pow((arr_R_seg_2 - arr_R_seg_1), 2)
                + pow((arr_G_seg_2 - arr_G_seg_1), 2)
                + pow((arr_B_seg_2 - arr_B_seg_1), 2)
            ) * segments_overlap
            cmp_image_sum = cmp_image.sum()
            object_size = segments_overlap.sum()

            # print 'Segment size (overlap) for 2D = ', object_size

            rms_error_2D = cmp_image_sum / object_size
            rms_error_2D = math.sqrt(rms_error_2D)

    return rms_error_2D
Example #46
def compute_one_stereo_pair(
    frame_left_1_fname, frame_left_2_fname, frame_right_fname, left_segments_1, left_segments_2, right_segments_1
):

    rms_error_2D = 0
    rms_error_3D = 0

    # original frames
    frame_left_1 = np.asarray(frame_left_1_fname, dtype=float)
    frame_left_2 = np.asarray(frame_left_2_fname, dtype=float)
    frame_right = np.asarray(frame_right_fname, dtype=float)

    # separate RGB components of the left frame (t)
    arr_R_left_1 = frame_left_1[:, :, 0]
    arr_G_left_1 = frame_left_1[:, :, 1]
    arr_B_left_1 = frame_left_1[:, :, 2]

    # separate RGB components of the left frame (t+1)
    arr_R_left_2 = frame_left_2[:, :, 0]
    arr_G_left_2 = frame_left_2[:, :, 1]
    arr_B_left_2 = frame_left_2[:, :, 2]

    # separate RGB components of the right frame (t)
    arr_R_right = frame_right[:, :, 0]
    arr_G_right = frame_right[:, :, 1]
    arr_B_right = frame_right[:, :, 2]

    # get frame size
    height = left_segments_1.shape[0]
    width = right_segments_1.shape[1]

    # for labels presented in segmentation results
    for lb in set(np.unique(left_segments_1)).difference((0,)):

        #    arr_segment_mask = (left_segments_1 == lb)
        #    fn = str(lb)
        #    plot_array('Segment ' + fn, arr_segment_mask, 0, 1, 'segment-' + fn)

        # compute 2D and 3D RMS errors for one chosen label
        if lb == 332:

            # print 'label = ', lb

            # get masks for left segments (t), (t+1) and right segments (t)
            arr_segment_mask_left_1 = left_segments_1 == lb
            arr_segment_mask_left_2 = left_segments_2 == lb
            arr_segment_mask_right = right_segments_1 == lb

            fname = str(lb)
            #      plot_array('Left mask (t) ' + fname, arr_segment_mask_left_1, 0, 1, 'left-mask-t-' + fname)
            #      plot_array('Left mask (t+1) ' + fname, arr_segment_mask_left_2, 0, 1, 'left-mask-t2-' + fname)
            #      plot_array('Right mask (t) ' + fname, arr_segment_mask_right, 0, 1, 'right-mask-t-' + fname)

            # calculate the center of masses for left segments (t), (t+1) and right segments (t)
            y0, x0 = mahotas.center_of_mass(arr_segment_mask_left_1, labels=None)
            y1, x1 = mahotas.center_of_mass(arr_segment_mask_left_2, labels=None)
            y2, x2 = mahotas.center_of_mass(arr_segment_mask_right, labels=None)

            y0 = round(y0)
            x0 = round(x0)

            y1 = round(y1)
            x1 = round(x1)

            y2 = round(y2)
            x2 = round(x2)

            # print 'y0 = ', y0, ' x0 = ', x0
            # print 'y1 = ', y1, ' x1 = ', x1
            # print 'y2 = ', y2, ' x2 = ', x2

            delta_x_left = x1 - x0
            delta_y_left = y1 - y0

            delta_x_right = x2 - x0
            delta_y_right = y2 - y0

            # print 'delta_x_left = ', delta_x_left
            # print 'delta_y_left = ', delta_y_left

            # print 'delta_x_right = ', delta_x_right
            # print 'delta_y_right = ', delta_y_right

            # an array for a segment (t) shifted to the center of mass of a segment (t+1)
            arr_segment_mask_left_1_shift = np.zeros([height, width])

            # color data of the left segment (t+1)
            arr_R_left_seg_2 = np.zeros([height, width])
            arr_G_left_seg_2 = np.zeros([height, width])
            arr_B_left_seg_2 = np.zeros([height, width])

            # color data of the left segment (t) shifted to (t+1)
            arr_R_left_seg_1 = np.zeros([height, width])
            arr_G_left_seg_1 = np.zeros([height, width])
            arr_B_left_seg_1 = np.zeros([height, width])

            # shift the left segment (t) to the center of mass of the left segment (t+1)
            for i in range(height):
                for j in range(width):

                    cur_i = i + delta_y_left
                    cur_j = j + delta_x_left

                    if cur_i >= 0 and cur_i < height and cur_j >= 0 and cur_j < width:
                        arr_segment_mask_left_1_shift[cur_i][cur_j] = arr_segment_mask_left_1[i][j]
                        arr_R_left_seg_1[cur_i][cur_j] = arr_R_left_1[i][j]
                        arr_G_left_seg_1[cur_i][cur_j] = arr_G_left_1[i][j]
                        arr_B_left_seg_1[cur_i][cur_j] = arr_B_left_1[i][j]

            #      plot_array('Left mask t (shifted to (t+1)) ' + fname, arr_segment_mask_left_1_shift, 0, 1, 'left-mask-t-shifted-' + fname)

            # compute overlap of left segments (t) and (t+1)
            segments_overlap = arr_segment_mask_left_1_shift * arr_segment_mask_left_2
            #      plot_array('Segments overlap 2D ' + fname, segments_overlap, 0, 1, 'overlap-2D-' + fname)

            segments_error = arr_segment_mask_left_1_shift - segments_overlap
            #      plot_array('Segments error 2D ' + fname, segments_error, 0, 1, 'error-2D-' + fname)

            # color data of the left segment (t) shifted to the center of mass of the left segment (t+1)
            arr_R_left_seg_1 = arr_R_left_seg_1 * segments_overlap
            arr_G_left_seg_1 = arr_G_left_seg_1 * segments_overlap
            arr_B_left_seg_1 = arr_B_left_seg_1 * segments_overlap

            # color data of the left segment (t+1)
            arr_R_left_seg_2 = arr_R_left_2 * segments_overlap
            arr_G_left_seg_2 = arr_G_left_2 * segments_overlap
            arr_B_left_seg_2 = arr_B_left_2 * segments_overlap

            # store color data of the left segment for frames (t) and (t+1) respectively
            #      sp.misc.imsave('./segment-left-1.png',[arr_R_left_seg_1, arr_G_left_seg_1, arr_B_left_seg_1])
            #      sp.misc.imsave('./segment-left-2.png',[arr_R_left_seg_2, arr_G_left_seg_2, arr_B_left_seg_2])

            # calculate RMS error for 2D
            cmp_image = (
                pow((arr_R_left_seg_2 - arr_R_left_seg_1), 2)
                + pow((arr_G_left_seg_2 - arr_G_left_seg_1), 2)
                + pow((arr_B_left_seg_2 - arr_B_left_seg_1), 2)
            ) * segments_overlap
            cmp_image_sum = cmp_image.sum()
            object_size = segments_overlap.sum()

            # print 'Segment size (overlap) for 2D = ', object_size

            rms_error_2D = cmp_image_sum / object_size
            rms_error_2D = math.sqrt(rms_error_2D)

            # an array for the left segment (t) shifted to the center of mass of the right segment (t+1)
            arr_segment_mask_right_shift = np.zeros([height, width])

            # color data of the right segment
            arr_R_right_seg = np.zeros([height, width])
            arr_G_right_seg = np.zeros([height, width])
            arr_B_right_seg = np.zeros([height, width])

            # color data of the left segment shifted to the right
            arr_R_left_seg = np.zeros([height, width])
            arr_G_left_seg = np.zeros([height, width])
            arr_B_left_seg = np.zeros([height, width])

            # shift the left segment (t) to the center of mass of the right segment (t)
            for i in range(height):
                for j in range(width):

                    cur_i = i + delta_y_right
                    cur_j = j + delta_x_right

                    if cur_i >= 0 and cur_i < height and cur_j >= 0 and cur_j < width:
                        arr_segment_mask_right_shift[cur_i][cur_j] = arr_segment_mask_left_1[i][j]
                        arr_R_left_seg[cur_i][cur_j] = arr_R_left_1[i][j]
                        arr_G_left_seg[cur_i][cur_j] = arr_G_left_1[i][j]
                        arr_B_left_seg[cur_i][cur_j] = arr_B_left_1[i][j]

            # compute overlap of left segment (t) and right segment (t)
            segments_overlap_right = arr_segment_mask_right_shift * arr_segment_mask_right
            #      plot_array('Segments overlap 3D ' + fname, segments_overlap_right, 0, 1, 'overlap-3D-' + fname)
            #      plot_array('Left 1 segment (shifted) to the right ' + fname, arr_segment_mask_right_shift, 0, 1, 'left-mask-right-shifted-' + fname)

            segments_error_right = arr_segment_mask_right_shift - segments_overlap_right
            #      plot_array('Segments error 3D ' + fname, segments_error_right, 0, 1, 'error-3D-' + fname)

            # color data of the left segment (t) shifted to the center of mass of the right segment (t)
            arr_R_left_seg = arr_R_left_seg * segments_overlap_right
            arr_G_left_seg = arr_G_left_seg * segments_overlap_right
            arr_B_left_seg = arr_B_left_seg * segments_overlap_right

            # color data of the right segment (from the overlapped area)
            arr_R_right_seg = arr_R_right * segments_overlap_right
            arr_G_right_seg = arr_G_right * segments_overlap_right
            arr_B_right_seg = arr_B_right * segments_overlap_right

            # store color data of the left and right segments for frames (t) respectively
            #      sp.misc.imsave('./segment-left.png',[arr_R_left_seg, arr_G_left_seg, arr_B_left_seg])
            #      sp.misc.imsave('./segment-right.png',[arr_R_right_seg, arr_G_right_seg, arr_B_right_seg])

            # calculate RMS error for 3D
            cmp_image = (
                pow((arr_R_right_seg - arr_R_left_seg), 2)
                + pow((arr_G_right_seg - arr_G_left_seg), 2)
                + pow((arr_B_right_seg - arr_B_left_seg), 2)
            ) * segments_overlap_right
            cmp_image_sum = cmp_image.sum()
            object_size = segments_overlap_right.sum()

            # print 'Segment size (overlap) for 3D = ', object_size

            rms_error_3D = cmp_image_sum / object_size
            rms_error_3D = math.sqrt(rms_error_3D)

    return rms_error_2D, rms_error_3D
Example #47
File: nn1.py Project: bbbales2/annotator
    #hist /= hist.max()
    #hist = plt.cm.jet(hist)[:, :, :3]
    #            hist = (hist * 255).astype('uint8')

        hist = numpy.kron(hist, numpy.ones((16, 16)))

        plt.imshow(im, interpolation = 'NONE')
        plt.imshow(hist, alpha = 0.3, interpolation = 'NONE')
        plt.title(label)
        plt.show()
#%%
import mahotas
ls, c = mahotas.label(hist)

sums = mahotas.labeled_sum(hist, ls)[1:]
coords = mahotas.center_of_mass(hist, ls)[1:]
#%%
with open('classifiers', 'w') as f:
    pickle.dump(clss, f)
#%%
for label in labels:
    try:
        writer = imageio.get_writer('movie_{0}.mp4'.format(label), fps = 24.0)

        cmap = plt.cm.jet

        for f in range(len(vid)):
            im = vid[f]

            tmp = time.time()
            hist = sess.run(target, feed_dict = { tens : im.reshape(1, im.shape[0], im.shape[1], im.shape[2]) })[0]