Example #1
def test_zernike_cm():
    A = (np.arange(256) % 14).reshape((16, 16))
    cm = (8.9,12.4)
    slow = _slow_zernike(A, 8., 12, cm)
    fast = zernike_moments(A, 8., 12, cm=cm)
    delta = np.array(slow) - fast
    assert np.abs(delta).max() < 0.001
Example #3
    def _extract_features(self, X, candidate):

        row, col, radius = int(candidate[0]), int(candidate[1]), int(
            candidate[2])
        padded_radius = int(self.padding * radius)

        # compute the coordinate of the patch to select
        col_min = max(col - padded_radius, 0)
        row_min = max(row - padded_radius, 0)
        col_max = min(col + padded_radius, X.shape[0] - 1)
        row_max = min(row + padded_radius, X.shape[1] - 1)

        # extract patch
        patch = X[row_min:row_max, col_min:col_max]
        resized_patch = resize(patch, (self.resized, self.resized))

        # compute Zernike moments
        zernike = zernike_moments(patch, radius)

        # compute surf descriptors
        scale_surf = 2 * self.padding * radius / 20
        keypoint = np.array([[row, col, scale_surf, 0.1, 1]])
        surf_descriptor = surf.descriptors(X, keypoint,
                                           is_integral=False).ravel()
        if not surf_descriptor.size:
            surf_descriptor = np.zeros((70,))

        # compute haar-like features
        haar_features = extract_feature_image(resized_patch, self.feature_type,
                                              self.feature_coord)

        return np.hstack((zernike, surf_descriptor, haar_features))
Example #4
File: atividade.py  Project: gfviegas/pdi
def treatImage(frame):
    frame = cv2.resize(frame, (240, 240))

    # convert the image to grayscale and apply a Gaussian blur
    img_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    img_grey = cv2.GaussianBlur(img_grey, (5, 5), 0)

    # apply Otsu thresholding to binarize
    ret, img_grey = cv2.threshold(img_grey, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    # extract the image contours
    contours, hierarchy = cv2.findContours(img_grey, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    borderImg = np.zeros((240, 240, 3), np.uint8)
    cv2.drawContours(borderImg, contours, -1, (125, 125, 0), 1)
    borderGrey = cv2.cvtColor(borderImg, cv2.COLOR_BGR2GRAY)

    # Calculate Moments
    moment = cv2.moments(borderGrey)

    # Calculate Hu Moments
    huMoments = cv2.HuMoments(moment)

    # calculate Zernike moments
    zeMoments = zernike_moments(borderGrey, radius=20)

    # cv2.imshow("Imagem capturada", img_grey)
    # cv2.waitKey(0)

    # concatenate the Hu and Zernike moments into one feature vector
    # (element-wise addition of the two arrays would broadcast to a
    # (7, 25) array, which is not a meaningful descriptor)
    return np.concatenate((huMoments.ravel(), zeMoments))
Example #5
    def _extract_features(self, X, candidate):

        y, x, radius = int(candidate[0]), int(candidate[1]), candidate[2]
        padded_radius = int(self.padding * radius)

        # compute the coordinates of the patch to select, clamped to the image
        x_min = max(x - padded_radius, 0)
        y_min = max(y - padded_radius, 0)
        x_max = min(x + padded_radius, X.shape[0] - 1)
        y_max = min(y + padded_radius, X.shape[1] - 1)

        patch = X[y_min:y_max, x_min:x_max]

        # compute Zernike moments
        zernike = zernike_moments(patch, radius)

        # compute SURF descriptor
        keypoint = np.array([[y, x, 1, 0.1, 1]])
        surf_descriptor = surf.descriptors(patch, keypoint,
                                           is_integral=False).ravel()
        if not surf_descriptor.size:
            surf_descriptor = np.zeros((70, ))

        return np.hstack((zernike, surf_descriptor))
Example #6
    def _extract_features(self, X, candidate):

        y, x, radius = int(candidate[0]), int(candidate[1]), candidate[2]
        padded_radius = int(self.padding * radius)

        # compute the coordinate of the patch to select
        x_min = max(x - padded_radius, 0)
        y_min = max(y - padded_radius, 0)
        x_max = min(x + padded_radius, X.shape[0] - 1)
        y_max = min(y + padded_radius, X.shape[1] - 1)

        patch = X[y_min:y_max, x_min:x_max]

        # compute Zernike moments
        zernike = zernike_moments(patch, radius)

        # compute SURF descriptor
        scale_surf = radius / self.min_radius
        keypoint = np.array([[y, x, scale_surf, 0.1, 1]])
        surf_descriptor = surf.descriptors(X, keypoint,
                                           is_integral=False).ravel()
        if not surf_descriptor.size:
            surf_descriptor = np.zeros((70, ))

        return np.hstack((zernike, surf_descriptor))
Example #7
    def extract(self, image, keypoints):
        # patch size to build descriptor from
        patch_size = self.patch_size
        desc_size = self.descriptor_size
        # random = np.random.RandomState()
        # random.seed(self.sample_seed)
        ## why 8?
        # samples = np.array((patch_size / 5.0) * random.randn(desc_size * 8)).astype(np.int32)
        # hps2 = - (patch_size-2) // 2
        # samples = samples[(samples < hps) & (samples > hps2)]
        # d2 = desc_size*2
        # pos0 = samples[:d2].reshape(desc_size, 2)
        # pos1 = samples[d2:d2*2].reshape(desc_size, 2)

        # pos0 = np.ascontiguousarray(pos0)
        # pos1 = np.ascontiguousarray(pos1)
        hps = patch_size // 2
        self.mask = _mask_border_keypoints(image.shape, keypoints, hps)

        self.keypoints = np.array(keypoints[self.mask, :], dtype=np.intp, order="C", copy=False)

        self.descriptors = []
        for nn in range(self.keypoints.shape[0]):
            kx, ky = self.keypoints[nn]
            patch = image[kx - hps : kx + hps, ky - hps : ky + hps]
            self.descriptors.append(zernike_moments(patch, 8.0, 12))
        self.descriptors = np.array(self.descriptors)
Example #8
	def feature_zernike(self,mode = 'all',index=None,outimg=None):
		if mode=='all':
			output = []
			for i in range(self.n_index):
				temp_img = self.get1img(i,'gray')
				output.append( zernike_moments(temp_img,1) )

			return np.array(output)
		elif mode=='single':
			if index == None and outimg is None:
				print("please input index")
				return
			if outimg is None:
				temp_img  = self.get1img(index,'gray')
			else :
				temp_img = outimg
			return zernike_moments(temp_img,1)
Example #9
def get_zernikeMoments(img, name):
    ordnung = 8
    radius = 200
    zernike = features.zernike_moments(img, radius, ordnung)
    print()
    print("Zernike Momente von " + name +
          " der Ordnung {}, Radius in px {}: {}".format(
              ordnung, radius, zernike))

    return zernike
Example #10
    def _extract_features(self, X, candidate):
        row, col, radius = int(candidate[0]), int(candidate[1]), int(
            candidate[2])
        padded_radius = int(self.padding * radius)

        # compute the coordinate of the patch to select
        col_min = max(col - padded_radius, 0)
        row_min = max(row - padded_radius, 0)
        col_max = min(col + padded_radius, X.shape[0] - 1)
        row_max = min(row + padded_radius, X.shape[1] - 1)

        # extract patch
        patch = X[row_min:row_max, col_min:col_max]
        patch_edge = filters.sobel(patch)

        # compute Zernike moments
        zernike = zernike_moments(patch, radius)
        zernike_edge = zernike_moments(patch_edge, radius)

        return np.hstack((zernike, zernike_edge))
Example #11
def getZernikeMatchShapes(img0, img1, name0, name1, id):
    img0 = invert_image(img0)
    img1 = invert_image(img1)
    ordnung = 8
    radius = 200
    zernike1 = features.zernike_moments(img0, radius, ordnung)
    zernike2 = features.zernike_moments(img1, radius, ordnung)

    zernike_contours_match1 = [0.0] * 25
    for k in range(24):
        zernike_contours_match1[k] = abs(1 / zernike1[k] - 1 / zernike2[k])

    zernike_contours_match2 = [0.0] * 25
    for w in range(24):
        zernike_contours_match2[w] = abs(zernike1[w] - zernike2[w])

    zernike_contours_match3 = [0.0] * 25
    for f in range(24):
        zernike_contours_match3[f] = abs(zernike1[f] - zernike2[f]) / abs(zernike1[f])

    zernike_match = [0, 0, 0]
    zernike_match[0] = np.sum(zernike_contours_match1)
    zernike_match[1] = np.sum(zernike_contours_match2)
    zernike_match[2] = np.sum(zernike_contours_match3)

    print("\n" + id + ".ZernikeContoursMatch of " + name0 + " and " + name1 +
          ": {}".format(zernike_match))

    return zernike_match
Example #12
    def calculate(self, resource):
        # initializing
        except_image_only(resource)

        image_uri = resource.image
        #image_uri = BQServer().prepare_url(image_uri, remap='gray')
        im = image2numpy(image_uri, remap='gray')
        im = np.uint8(im)
        radius = 8
        degree = 8
        descriptor = zernike_moments(im, radius, degree)

        # initializing rows for the table
        return descriptor
Example #13
 def _calculateStatistics(self, img, haralick=False, zernike=False):
     result = []
     # 3-bin histogram
     result.extend(mquantiles(img))
     # First four moments
     result.extend([img.mean(), img.var(), skew(img, axis=None), kurtosis(img, axis=None)])
     # Haralick features
     if haralick:
         integerImage = dtype.img_as_ubyte(img)
         result.extend(texture.haralick(integerImage).flatten())
     # Zernike moments
     if zernike:
         result.extend(zernike_moments(img, int(self.rows) / 2 + 1))
     return result
Example #15
    def _describe(self, binary, steps=None):
        shape = list(binary.shape) + [3]
        kernel = np.ones((40, 40), np.uint8)
        closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

        im, contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        # no contours if binary is empty
        if len(contours) == 0:
            return np.zeros(self.dim)
        # compute moments around center of binary
        center, radius = cv2.minEnclosingCircle(contours[0])
        moments = zernike_moments(binary, radius, cm=center)
        if steps is not None:
            steps['closed'] = closed
            cont_img = np.zeros((binary.shape[0], binary.shape[1], 3))
            cv2.drawContours(cont_img, contours, 0, (0, 255, 0), 3)
            steps['contour'] = cont_img
        return moments
Example #16
    def _calc_features(self):
        """
        calculate feature values
        :return: feature values
        """

        features = {}

        props = regionprops(self.bin_img, self.img)

        features['Area'] = props[0].area  #Number of pixels of the region.

        features['BB Area'] = props[0].bbox_area  #Number of pixels of the bounding box.

        features['Perimeter'] = props[0].perimeter
        #Perimeter of object which approximates the contour as a line through the centers of border pixels using a 4-connectivity.

        features['Centroid'] = props[0].centroid  #Centroid coordinate tuple.

        features['Weighted Centroid'] = props[0].weighted_centroid
        #Centroid coordinate tuple (row, col) weighted with intensity image.

        features['Centroid Divergence'] = np.linalg.norm(
            np.array(props[0].centroid) - np.array(props[0].weighted_centroid))

        features['Equivalent Diameter'] = props[0].equivalent_diameter
        #The diameter of a circle with the same area as the region.

        features['Major Axis Length'] = props[0].major_axis_length
        #The length of the major axis of the ellipse that has the same normalized second central moments as the region.

        features['Minor Axis Length'] = props[0].minor_axis_length
        #The length of the minor axis of the ellipse that has the same normalized second central moments as the region.

        features['Eccentricity'] = props[0].eccentricity
        #Eccentricity of the ellipse that has the same second-moments as the region. The eccentricity is the ratio of the focal distance (distance between focal points).

        features['Circularity'] = (4 * props[0].area *
                                   math.pi) / (props[0].perimeter**2)
        #Circularity that specifies the roundness of objects.

        features['Roundness'] = (4 * props[0].area) / (
            np.pi * props[0].major_axis_length**2)
        #Like circularity, but does not depend on perimeter/roughness.

        features['Aspect Ratio'] = (
            props[0].major_axis_length) / props[0].minor_axis_length
        #Aspect ratio.

        features['Orientation'] = props[0].orientation
        #Angle between the 0th axis (rows) and the major axis of the ellipse that has the same second moments as the region, ranging from -pi/2 to pi/2 counter-clockwise.

        features['Solidity'] = props[0].solidity
        #Ratio of pixels in the region to pixels of the convex hull image.

        conv_img = props[0].convex_image  #get convex image of ROI
        conv_perimeter = regionprops(conv_img.astype(np.uint8))[0].perimeter

        features['Roughness'] = props[0].perimeter / conv_perimeter
        #Ratio of perimeter of region to perimeter of the convex hull image.

        features['Hu Moments'] = (props[0].weighted_moments_hu)
        #tuple - Hu moments (translation, scale and rotation invariant) of intensity image.

        diam = self.img[0].shape[0]

        maxradius = diam / 2
        features['Zernike Moments'] = zernike_moments(self.img, maxradius)
        #Zernike Moments of Region

        return features
Example #17
def zernike_minimum_enclosing_circle(coords, degree=9):
	image, center, diameter = minimum_enclosing_circle_shift(coords)

	return zernike_moments(image, radius=diameter/2, degree=degree, cm=center)
Example #18
def calculate_zernike_moments(im,
                              cm=None,
                              radius=0.3,
                              norder=8,
                              label=None,
                              use_log=False,
                              show_plot=False):
    """Calculate the Zernike moments of the image.

    These moments are useful to single out asymmetries in the image:
    for example, when characterizing the beam of the radio telescope using
    a map of a calibrator, it is useful to calculate these moments to
    understand if the beam is radially symmetric or has distorted side
    lobes.

    Parameters
    ----------
    im : 2-d array
        The image to be analyzed

    Other parameters
    ----------------
    cm : [int, int]
        'Center of mass' of the image
    radius : float
        The radius around the center of mass, in percentage of the image
        size (0 <= radius <= 0.5)
    norder : int
        Maximum order of the moments to calculate
    use_log: bool
        Rescale the image to a log scale before calculating the coefficients.
        The scale is the same documented in the ds9 docs, for consistency.
        After normalizing the image from 0 to 1, the log-rescaled image is
        log(ax + 1) / log a, with ``x`` the normalized image and ``a`` a
        constant fixed here at 1000
    show_plot : bool, default False
        show the plots immediately

    Returns
    -------
    moments_dict : dict
        Dictionary containing the order, the sub-index and the moment, e.g.
        {0: {0: 0.3}, 1: {1: 1e-16}, 2: {0: 0.95, 2: 6e-19}, ...}
        Moments are symmetrical, so only the unique values are reported.

    """
    if np.all(np.isnan(im)):
        return None
    im_to_analyze = im.copy()
    im_to_analyze = interpolate_invalid_points_image(im_to_analyze,
                                                     zeros_are_invalid=True)

    if cm is None or np.any(np.isnan(cm)):
        cm = get_center_of_mass(im_to_analyze, radius, approx='max')
    if (cm[0] >= im_to_analyze.shape[0]) or (cm[1] >= im_to_analyze.shape[1]) \
            or (cm[0] < 1) or (cm[1] < 1):
        cm = np.array(im_to_analyze.shape) // 2

    if use_log:
        im_to_analyze = ds9_like_log_scale(im_to_analyze, 1000)

    radius_pix = int(np.min(im.shape) * radius)
    moments = zernike_moments(im_to_analyze, radius_pix, norder, cm=cm)
    count = 0
    moments_dict = {}
    description_string = \
        'Zernike moments (cm: {}, radius: {}):\n'.format(cm, radius_pix)

    if HAS_MPL:
        fig = plt.figure('Zernike moments', figsize=(10, 10))
        x, y = int(cm[0]), int(cm[1])
        shape = im_to_analyze.shape
        vmax = np.max(im_to_analyze)

        if (x < shape[0]) & (y < shape[1]):
            vmax = im_to_analyze[x, y]

        plt.imshow(im_to_analyze,
                   vmin=0,
                   vmax=vmax,
                   origin='lower',
                   cmap='magma')
        circle = plt.Circle((y, x), radius_pix, color='r', fill=False)
        plt.gca().add_patch(circle)
        plt.colorbar()

    for i in range(norder + 1):
        description_string += str(i) + ': '
        moments_dict[i] = {}
        for j in range(i + 1):
            if (i - j) % 2 == 0:
                description_string += "{}/{} {:.1e} ".format(
                    i, j, moments[count])
                moments_dict[i][j] = moments[count]
                count += 1
        description_string += '\n'

    if HAS_MPL:
        plt.text(0.05,
                 0.95,
                 description_string,
                 horizontalalignment='left',
                 verticalalignment='top',
                 transform=plt.gca().transAxes,
                 color='white')

        if label is None:
            label = str(np.random.randint(0, 100000))
        plt.savefig('Zernike_debug_' + label + '.png')
        if show_plot:
            plt.show()
        plt.close(fig)

    log.debug(description_string)

    moments_dict['Description'] = description_string

    return moments_dict
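The docstring above describes the ``use_log`` rescaling as log(ax + 1) / log(a), with ``x`` the image normalized to [0, 1] and ``a`` fixed at 1000. The ``ds9_like_log_scale`` helper itself is not shown in this example; a minimal sketch of that formula, assuming the helper does nothing more than what the docstring states, could look like:

import numpy as np

def ds9_like_log_scale(img, a=1000):
    # Hypothetical sketch based only on the docstring formula above;
    # the project's actual ds9_like_log_scale helper may differ.
    img = np.asarray(img, dtype=float)
    lo, hi = np.nanmin(img), np.nanmax(img)
    if hi == lo:
        return np.zeros_like(img)
    # normalize to [0, 1], then apply log(a * x + 1) / log(a)
    x = (img - lo) / (hi - lo)
    return np.log(a * x + 1) / np.log(a)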
Example #19
    def momentsZernique(self):
        dis = self.Distancia_Centroide()
        raio = mean(dis)

        return fea.zernike_moments(self.imagemCinza, raio)
Example #20
    def momentsZernique_tratado(self):
        im = self.tratamento_imagem()
        raio = max(im.shape)

        return fea.zernike_moments(im, raio)
Example #21
def get_blob_element(mask, rect, skel, num_blob_pixels, color, color_variance, grey, depth, label, is_subblob = False):
    # Scale image for scale-invariant shape features
    # Make it so the longest edge is equal to SHAPE_FEATURE_SIZE
    resized_image = mahotas.imresize(mask,
                                     float(SHAPE_FEATURE_SIZE) / max(mask.shape))

    z_moments = zernike_moments(resized_image, SHAPE_FEATURE_SIZE, degree = Z_ORDER)

    blob_element = xml.Element(label)
    
    x_1 = rect[0]
    y_1 = rect[1]
    x_2 = x_1 + rect[2]
    y_2 = y_1 + rect[3]

    blob_element.attrib['x'] = str(rect[0])
    blob_element.attrib['y'] = str(rect[1])
    blob_element.attrib['width'] = str(rect[2])
    blob_element.attrib['height'] = str(rect[3])
    rect_center = (rect[0] + .5 * rect[2],
                   rect[1] + .5 * rect[3])
    if skel is not None and len(skel) > 0:
        blob_element.attrib['head_dist'] = str(distance(rect_center,
                                                skel['HEAD']['2d']))
        blob_element.attrib['right_hand_dist'] = str(distance(rect_center,
                                                skel['RIGHT_HAND']['2d']))
        blob_element.attrib['left_hand_dist'] = str(distance(rect_center,
                                                skel['LEFT_HAND']['2d']))

    features_element = xml.Element('features')
    blob_element.append(features_element)

    size_element = xml.SubElement(features_element, 'size')
    xml.SubElement(size_element, 'pixels').text = str(num_blob_pixels)

    hu_element = get_hu_moments_element(mask)

    features_element.append(hu_element)

    grey_masked_image = grey & mask

    blob_depth = depth & mask

    if is_subblob:
        blob_depth_rect = blob_depth
        blob_mask_rect = mask
    else:
        blob_depth_rect = blob_depth[y_1:y_2, x_1:x_2]
        blob_mask_rect = mask[y_1:y_2, x_1:x_2]


    normal_vector_histogram = get_normal_vector_histogram(blob_depth_rect, blob_mask_rect.astype(numpy.uint8))
    
    #print normal_vector_histogram

    if normal_vector_histogram is None:
        print 'Error calculating normal_vector_histogram'
        return None

    normal_histogram_element = xml.Element('normalhistogram')

    for i in xrange(normal_vector_histogram.size):
        xml.SubElement(normal_histogram_element, 'a_' + str(i)).text = str(normal_vector_histogram[i])

    features_element.append(normal_histogram_element)

    haralick_element = xml.Element('haralick')

    haralick_features = haralick(grey_masked_image)
    # Average the rows (the different directions of the features)
    haralick_features_averaged = numpy.mean(haralick_features,axis=0)
    #print len(haralick_features_averaged)

    for i in xrange(NUM_HARALICK_FEATURES):
        xml.SubElement(haralick_element, 'a_' + str(i)).text = str(haralick_features_averaged[i])

    features_element.append(haralick_element)

    zernike_element = xml.Element('zernike')
        
    for i in xrange(Z_FEATURES):
        xml.SubElement(zernike_element, 'a_' + str(i)).text = str(z_moments[i])

    features_element.append(zernike_element)
    
    rgb_element = xml.Element('rgb')
    xml.SubElement(rgb_element, 'r').text = str(color[0])
    xml.SubElement(rgb_element, 'g').text = str(color[1])
    xml.SubElement(rgb_element, 'b').text = str(color[2])

    features_element.append(rgb_element)
    
    rgb_variance_element = xml.Element('rgb_var')
    xml.SubElement(rgb_variance_element, 'r').text = str(color_variance[0])
    xml.SubElement(rgb_variance_element, 'g').text = str(color_variance[1])
    xml.SubElement(rgb_variance_element, 'b').text = str(color_variance[2])
    
    features_element.append(rgb_variance_element)

    return blob_element
Example #22
def step(frame, file_time, output_file = None):
    global g_adjacency, g_max, g_prev, g_colors, g_feat, g_gray, g_rgb, g_rgbfeat, g_blobidx, g_blobidxset, g_myrect, g_myhist
    global current_frame_time, xml_root
    global g_last_depth, g_last_depth_change
    #global g_frameidx
    #ni.show_frame(frame)
    num_kinects = frame['num_kinects']
    if num_kinects == 0:
        return
    depth = numpy.array(frame['depths'][0])
    
    rgb = numpy.array(frame['images'][0])
    cvmod.set_depth(depth.ravel())
    g_sa = 0.
    if g_showall:
        g_sa = 1.
    var = numpy.array([gx1, gx2, gy1, gy2, gz1, gz2, gd1, gd2, g_sa], dtype=float)
    cvmod.set_vars(var)
    if not BATCH_MODE and not BATCH_BLOB_MODE:
        cvmod.output3d()
    cvmod.process_blob()
    
    adj = numpy.array(g_adjacency, dtype=numpy.uint8) * 255
    adj = cv2.erode(adj, None)
    adj = cv2.erode(adj, None)
    adj = cv2.erode(adj, None)
    
    
    if not BATCH_MODE and not BATCH_BLOB_MODE:
##        cv2.imshow('Depth', depth_change)
##        cv2.waitKey(10)
        cv2.imshow('ADJ', adj)
        cv2.waitKey(10)
    
    max_idx, img, rects = maskblob.createblobmask(adj)

    #print 'Rects',rects
    
    if g_prev is None:
        g_max, g_prev = max_idx, img
        for i in range(max_idx):
            g_colors.append(get_rcol())
        return
##    print "print", g_max, max_idx
    new_g_colors = [None]



    if not BATCH_MODE:
        if g_feat is not None:
            nextPts, status, err = cv2.calcOpticalFlowPyrLK(g_gray, gorig, g_feat, None, winSize=(15, 15), maxLevel=5)
            for i in range(len(nextPts)):
                if status[i][0] == 1:
                    x1, y1 = g_feat[i][0]
                    #cv2.circle(cmask, (x1, y1), 4., (0, 0, 255), -1)
                    x2, y2 = nextPts[i][0]
                    dx = abs(x1-x2)
                    dy = abs(y1-y2)
                    #if dx*dx+dy*dy < 70*70:
                    #    cv2.line(cmask, (x1, y1), (x2, y2), (0, 255, 0), 3)


            ass_mat = numpy.zeros((max_idx, g_max), dtype=numpy.uint8)
            for idx1 in range(1, g_max+1):
                bmask = maskblob.getblob(g_prev, idx1)
                bmask = cv2.dilate(bmask, None)

                for i in range(len(nextPts)):
                    if status[i][0] == 1:
                        x1, y1 = g_feat[i][0]
                        if bmask[y1][x1] > 0:

                            for idx2 in range(1, max_idx+1):
                                dmask = maskblob.getblob(img, idx2)
                                dmask = cv2.dilate(dmask, None)
                                x2, y2 = nextPts[i][0]
                                dx = abs(x1-x2)
                                dy = abs(y1-y2)
                                if dx*dx+dy*dy < 70*70:
                                    if dmask[y2][x2] > 0:
                                        ass_mat[idx2-1][idx1-1] += 1
                                        cv2.line(cmask, (x1, y1), (x2, y2), (0, 255, 0), 1)
            if not BATCH_MODE and not BATCH_BLOB_MODE:
                print ass_mat

            for i in range(max_idx):
                k = ass_mat[i]
                if numpy.max(k) > 0:
                    p = numpy.argmax(k)
                    blobidxset[i] = g_blobidxset[p]

            if not BATCH_MODE and not BATCH_BLOB_MODE:
                print blobidxset


        # End Batch

    if len(rects) > 0:
        a0, b0, w0, h0 =  rects[0]
        a1 = a0+w0
        b1 = b0+h0
##        cv2.rectangle(cmask, (a0, b0), (a1, b1), (255, 255, 255), 5)
    else:
        a0 = 0
        b0 = 0
        a1 = 630
        b1 = 470
        
        
    if g_myrect is None:
        
        g_myrect = a0, b0, a1, b1
        g_myhist = get_histogram(rgb, a0, b0, a1, b1)
        #show_histogram(g_myhist)
    else:

        frame_element = xml.SubElement(xml_root,'frame')
        frame_element.attrib['timestamp'] = str(current_frame_time)
##        frame_element.attrib['confidence'] = str(numpy.std(prob_vector * hsv))

        skeleton_element = xml.SubElement(frame_element,'skeleton')
        if not (frame['skel'][0]['HEAD']['2d'][0] == 0 and
            frame['skel'][0]['HEAD']['2d'][1] == 0):
            xmlhelper.generate_skeleton_xml(frame, skeleton_element)
        
        blobs_element = xml.SubElement(frame_element,'blobs')        

        #print 'Max prob: ', numpy.max(prob)

        if output_file is not None:
            blue_channel = rgb[:,:,0]
            green_channel = rgb[:,:,1]
            red_channel = rgb[:,:,2]
            
            for blob_num in range(0, max_idx):
                
                blob_mask = maskblob.getblob(img,blob_num + 1)
                num_blob_pixels = numpy.sum(blob_mask, dtype=numpy.int32) / 255
                
                if num_blob_pixels <= 8:
                    continue

##                num_prob_pixels = numpy.sum(prob * blob_mask, dtype=numpy.int32) / 255
                
                x_1 = rects[blob_num][0]
                y_1 = rects[blob_num][1]
                x_2 = x_1 + rects[blob_num][2]
                y_2 = y_1 + rects[blob_num][3]

                
##                blob_prob = prob * (blob_mask / 255)
##                blob_attention = numpy.sum(blob_prob) / numpy.sum(prob)
                
                blob_red = red_channel & blob_mask
                red_value = numpy.sum(blob_red, dtype=numpy.int32) / num_blob_pixels
                red_variance = numpy.std(blob_red[blob_red.nonzero()])
                blob_red_rect = blob_red[x_1:x_2, y_1:y_2]

                blob_green = green_channel & blob_mask
                green_value = numpy.sum(blob_green, dtype=numpy.int32) / num_blob_pixels
                green_variance = numpy.std(blob_green[blob_green.nonzero()])
                blob_green_rect = blob_green[x_1:x_2, y_1:y_2]

                blob_blue = blue_channel & blob_mask
                blue_value = numpy.sum(blob_blue, dtype=numpy.int32) / num_blob_pixels
                blue_variance = numpy.std(blob_blue[blob_blue.nonzero()])
                blob_blue_rect = blob_blue[x_1:x_2, y_1:y_2]

                blob_depth = depth & blob_mask
                max_depth = numpy.max(blob_depth[blob_depth.nonzero()])
                min_depth = numpy.min(blob_depth[blob_depth.nonzero()])

                blob_color_rect = numpy.zeros((rects[blob_num][2], rects[blob_num][3], 3), numpy.uint8)

                moments = cv2.moments(blob_mask)
                hu_moments = cv2.HuMoments(moments)

                # Scale image for scale-invariant shape features
                resized_image = mahotas.imresize(blob_mask,
                                                 float(SHAPE_FEATURE_SIZE) / max(blob_mask.shape))

                z_moments = zernike_moments(resized_image, SHAPE_FEATURE_SIZE, degree = Z_ORDER)

                blob_element = xml.Element('blob')
                
                blobs_element.append(blob_element)

                blob_element.attrib['x'] = str(rects[blob_num][0])
                blob_element.attrib['y'] = str(rects[blob_num][1])
                blob_element.attrib['width'] = str(rects[blob_num][2])
                blob_element.attrib['height'] = str(rects[blob_num][3])
                rect_center = (rects[blob_num][0] + .5 * rects[blob_num][2],
                               rects[blob_num][1] + .5 * rects[blob_num][3])
                blob_element.attrib['head_dist'] = str(distance(rect_center,
                                                            frame['skel'][0]['HEAD']['2d']))
                blob_element.attrib['right_hand_dist'] = str(distance(rect_center,
                                                            frame['skel'][0]['RIGHT_HAND']['2d']))
                blob_element.attrib['left_hand_dist'] = str(distance(rect_center,
                                                            frame['skel'][0]['LEFT_HAND']['2d']))
                
                features_element = xml.Element('features')
                blob_element.append(features_element)

                size_element = xml.Element('size')
                xml.SubElement(size_element, 'pixels').text = str(num_blob_pixels)

                features_element.append(size_element)
                
                hu_element = xml.Element('hu')
                xml.SubElement(hu_element, 'i_1').text = str(hu_moments[0][0])
                xml.SubElement(hu_element, 'i_2').text = str(hu_moments[1][0])
                xml.SubElement(hu_element, 'i_3').text = str(hu_moments[2][0])
                xml.SubElement(hu_element, 'i_4').text = str(hu_moments[3][0])
                xml.SubElement(hu_element, 'i_5').text = str(hu_moments[4][0])
                xml.SubElement(hu_element, 'i_6').text = str(hu_moments[5][0])
                xml.SubElement(hu_element, 'i_7').text = str(hu_moments[6][0])

                features_element.append(hu_element)

                zernike_element = xml.Element('zernike')

                for i in xrange(Z_FEATURES):
                    xml.SubElement(zernike_element, 'a_' + str(i)).text = str(z_moments[i])

                features_element.append(zernike_element)
                


                rgb_element = xml.Element('rgb')
                xml.SubElement(rgb_element, 'r').text = str(red_value)
                xml.SubElement(rgb_element, 'g').text = str(green_value)
                xml.SubElement(rgb_element, 'b').text = str(blue_value)

                features_element.append(rgb_element)
                
                rgb_variance_element = xml.Element('rgb_var')
                xml.SubElement(rgb_variance_element, 'r').text = str(red_variance)
                xml.SubElement(rgb_variance_element, 'g').text = str(green_variance)
                xml.SubElement(rgb_variance_element, 'b').text = str(blue_variance)
                
                features_element.append(rgb_variance_element)
                


    if not BATCH_MODE:
        grayrgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        grayrgborig = cv2.cvtColor(rgb, cv2.COLOR_BGR2GRAY)
        grayrgbfeat = show_features(grayrgb, 500)

        
        if g_rgbfeat is not None:
            nextPts, status, err = cv2.calcOpticalFlowPyrLK(g_rgb, grayrgborig, g_rgbfeat, None, winSize=(15, 15), maxLevel=5)
            for i in range(len(nextPts)):
                if status[i][0] == 1:
                    x1, y1 = g_rgbfeat[i][0]
                    #cv2.circle(rgb, (x1, y1), 4., (0, 0, 255), -1)
                    x2, y2 = nextPts[i][0]
                    dx = abs(x1-x2)
                    dy = abs(y1-y2)
                    if dx*dx+dy*dy < 30*30:
                        cv2.line(rgb, (x1, y1), (x2, y2), (0, 255, 0), 1)
            f = cv2.calcOpticalFlowFarneback(g_rgb, grayrgborig, None, .5, 3, 15, 3, 5, 1.2, 0)
            print f.shape

        g_rgb = grayrgborig
        g_rgbfeat = grayrgbfeat

        if not BATCH_BLOB_MODE:
##            cv2.imshow('MAS', cmask)
##            cv2.waitKey(10)
##            cv2.imshow('GAS', gmask)
##            cv2.waitKey(10)
            cv2.imshow('Gray',g_gray)
            cv2.waitKey(10)


            cv2.imshow('IMG', rgb)
            cv2.waitKey(10)

    if BATCH_MODE or BATCH_BLOB_MODE:
        print numpy.max(depth)
        print cv2.imwrite(file_time + 'rgb.jpg',rgb)
        print cv2.imwrite(file_time + 'd.jpg',numpy.divide(depth,12))