Example #1
    def test_invalid(self):
        prop = bcpp.Int()

        assert not prop.is_valid(0.0)
        assert not prop.is_valid(1.0)
        assert not prop.is_valid(1.0+1.0j)
        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        assert not prop.is_valid(_TestHasProps())
        assert not prop.is_valid(_TestModel())

        assert not prop.is_valid(np.bool8(False))
        assert not prop.is_valid(np.bool8(True))
        assert not prop.is_valid(np.float16(0))
        assert not prop.is_valid(np.float16(1))
        assert not prop.is_valid(np.float32(0))
        assert not prop.is_valid(np.float32(1))
        assert not prop.is_valid(np.float64(0))
        assert not prop.is_valid(np.float64(1))
        assert not prop.is_valid(np.complex64(1.0+1.0j))
        assert not prop.is_valid(np.complex128(1.0+1.0j))
        if hasattr(np, "complex256"):
            assert not prop.is_valid(np.complex256(1.0+1.0j))
Example #2
    def test_valid(self):
        prop = bcpp.Bool()

        assert prop.is_valid(None)

        assert prop.is_valid(False)
        assert prop.is_valid(True)

        assert prop.is_valid(np.bool8(False))
        assert prop.is_valid(np.bool8(True))
Example #3
    def test_invalid(self):
        prop = bcpp.Complex()

        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        assert not prop.is_valid(_TestHasProps())
        assert not prop.is_valid(_TestModel())

        assert not prop.is_valid(np.bool8(False))
        assert not prop.is_valid(np.bool8(True))
Example #4
def main():
    cap = cv2.VideoCapture(0)
    prev_grey_frame = None
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        grey = np.uint8(np.mean(frame, axis=2))

        ch = 0xFF & cv2.waitKey(5)
        if ch == 27:
            break
        if prev_grey_frame is not None:
            flow = cv2.calcOpticalFlowFarneback(prev_grey_frame, grey,
                    pyr_scale=0.5, levels=3, winsize=15,
                    iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
            mag_flow = np.uint8(np.sum(np.abs(5 * flow), axis=2))
            mask_flow = np.uint8(255 * (mag_flow > 50))
            mask_flow = cv2.dilate(mask_flow,
                            cv2.getStructuringElement(cv2.MORPH_RECT,(15,15)))
            vis_frame = frame.copy()

            fx, fy = flow[:, :, 0], flow[:, :, 1]

            for contour in cv2.findContours(mask_flow,
                                            cv2.cv.CV_RETR_EXTERNAL,
                                            cv2.cv.CV_CHAIN_APPROX_SIMPLE)[0]:
                rect = cv2.minAreaRect(contour)
                center, size, _ = rect
                if np.min(size) < 100:
                    continue

                cur_mask = np.zeros(grey.shape)
                cv2.drawContours(cur_mask, [contour], 0, 255, -1)
                mean_fx = np.mean(fx[np.bool8(cur_mask)])
                mean_fy = np.mean(fy[np.bool8(cur_mask)])
                p2 = (int(center[0] + mean_fx * 10),
                      int(center[1] + mean_fy * 10))
                cv2.line(vis_frame, (int(center[0]), int(center[1])),
                         p2, (0, 255, 0))

                box = cv2.cv.BoxPoints(rect)
                box = np.int0(box)
                for i in xrange(len(box)):
                    cv2.line(vis_frame, tuple(box[i - 1]),
                             tuple(box[i]), (0, 0, 255), 2)



            cv2.imshow('mag_flow', vis_frame)


        prev_grey_frame = grey.copy()
Example #5
    def test_Bool(self):
        prop = Bool()

        self.assertTrue(prop.is_valid(None))
        self.assertTrue(prop.is_valid(False))
        self.assertTrue(prop.is_valid(True))
        self.assertFalse(prop.is_valid(0))
        self.assertFalse(prop.is_valid(1))
        self.assertFalse(prop.is_valid(0.0))
        self.assertFalse(prop.is_valid(1.0))
        self.assertFalse(prop.is_valid(1.0 + 1.0j))
        self.assertFalse(prop.is_valid(""))
        self.assertFalse(prop.is_valid(()))
        self.assertFalse(prop.is_valid([]))
        self.assertFalse(prop.is_valid({}))
        self.assertFalse(prop.is_valid(Foo()))

        try:
            import numpy as np

            self.assertTrue(prop.is_valid(np.bool8(False)))
            self.assertTrue(prop.is_valid(np.bool8(True)))
            self.assertFalse(prop.is_valid(np.int8(0)))
            self.assertFalse(prop.is_valid(np.int8(1)))
            self.assertFalse(prop.is_valid(np.int16(0)))
            self.assertFalse(prop.is_valid(np.int16(1)))
            self.assertFalse(prop.is_valid(np.int32(0)))
            self.assertFalse(prop.is_valid(np.int32(1)))
            self.assertFalse(prop.is_valid(np.int64(0)))
            self.assertFalse(prop.is_valid(np.int64(1)))
            self.assertFalse(prop.is_valid(np.uint8(0)))
            self.assertFalse(prop.is_valid(np.uint8(1)))
            self.assertFalse(prop.is_valid(np.uint16(0)))
            self.assertFalse(prop.is_valid(np.uint16(1)))
            self.assertFalse(prop.is_valid(np.uint32(0)))
            self.assertFalse(prop.is_valid(np.uint32(1)))
            self.assertFalse(prop.is_valid(np.uint64(0)))
            self.assertFalse(prop.is_valid(np.uint64(1)))
            self.assertFalse(prop.is_valid(np.float16(0)))
            self.assertFalse(prop.is_valid(np.float16(1)))
            self.assertFalse(prop.is_valid(np.float32(0)))
            self.assertFalse(prop.is_valid(np.float32(1)))
            self.assertFalse(prop.is_valid(np.float64(0)))
            self.assertFalse(prop.is_valid(np.float64(1)))
            self.assertFalse(prop.is_valid(np.complex64(1.0 + 1.0j)))
            self.assertFalse(prop.is_valid(np.complex128(1.0 + 1.0j)))
            self.assertFalse(prop.is_valid(np.complex256(1.0 + 1.0j)))
        except ImportError:
            pass
Example #6
 def filterPrepare(self, e, data, keys, ndata, events):
     import numpy as np
     import pyopencl as cl
     mf = cl.mem_flags
     
     ndata = data.size
     if keys.size != ndata: raise Exception()
     
     filtbytes = np.bool8(False).nbytes * ndata
     
     if not isinstance(data, cl.Buffer):
         data_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf= data)
     else:
         data_buf = data
     
     if not isinstance(keys, cl.Buffer):
         keys_buf = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf= keys)
     else:
         keys_buf = keys
     
     filt_buf = cl.Buffer(self.ctx, mf.READ_WRITE, filtbytes)
     
     kernel = self.prg.filterPrepare
     kernel.set_args(data_buf, keys_buf, np.uint64(ndata), np.uint8(33), np.uint8(66), filt_buf)
     global_dims = self.get_global(self.get_grid_dims(ndata))
     
     print "filterPrepare"
     if e is None:
         e  = [ cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims), ]
     else:
         e  = [ cl.enqueue_nd_range_kernel(self.queue, kernel, global_dims, self.localDims, wait_for=e), ]
     events += e
     
     return (e, data_buf, keys_buf, filt_buf)
Example #7
    def update(self, product):
        """ Update from product """
        # Create or reuse existing thredds file
        h5_thredds = self.get_or_create()

        # Temporarily update
        try:
            del h5_thredds['time'].attrs['unit']
        except KeyError:
            pass  # It wasn't there anyway.
        h5_thredds['time'].attrs['units'] = self.datetime.strftime(
            'seconds since %Y-%m-%d'
        )

        # Update from products if necessary
        index = self._index(product)
        available = h5_thredds['available']
        if self.flag >= available[index]:
            target = h5_thredds['precipitation']
            with product.get() as h5_product:
                source = h5_product['precipitation']
                target[..., index] = source[...]
                available[index] = self.flag

        # Roundup
        logging.info('Updated {} ({})'.format(
            os.path.basename(self.path),
            product.datetime),
        )
        logging.debug(self.path)
        logging.debug('ThreddsFile fill status: {} %'.format(
            np.bool8(available[:]).sum() / available.size))

        h5_thredds.close()
Example #8
def regions(img):
    '''
    CURRENTLY (6pm 8 Aug): 
    
    To fix: ksize (and maybe iterations) based on big image. Need to make it work for resized
            or else adaptive to img size. Maybe compare current ksize to length
            original (non-resized vals ksize1=15, iterations=30, ksize=41)
            #update: reduced values, still not adaptive
            
            #Also: thresh value in threshold also not adaptive but works for now
    
    '''
    img_copy = img[:].copy()
    #eroded = cv2.erode(img, None, iterations=10)
    gam = gamma(img, 2.2)
    blur = cv2.GaussianBlur(src=gam, dst=img_copy, ksize=(3, 3), sigmaX=0, 
                            sigmaY=0)
    eroded = cv2.dilate(blur, None, iterations=1)
    #gam = gamma(eroded, 2)
    blur2 = cv2.GaussianBlur(src=eroded, dst=img_copy, ksize=(9,9), sigmaX=0,
                             sigmaY=0)
    thresh_val = np.int(np.mean(blur2))
    ret, threshold_data = cv2.threshold(blur2, 50, 255, cv2.THRESH_BINARY)
    #threshold_data = cv2.adaptiveThreshold(blur2, 255,
                                           #cv2.ADAPTIVE_THRESH_MEAN_C,
                                           #cv2.THRESH_BINARY, 301, 2)                         
    #Create two masked images, one that masks out darker areas, one masks light
    boole = np.bool8(threshold_data)
    light_img = boole * img
    dark_img = img * np.uint8(boole == 0)

    return light_img, dark_img
Example #9
    def testDefaultFlatAndBackNonIdentical(self):
        """
        Test flattening/unflattening of objects which change type.

        No type requirements are given in these tests. In other words, we allow
        pylabrad to choose a default type for flattening.

        In this test, we do not expect A == unflatten(*flatten(A)). This is
        mostly because lists of numbers, both with and without units, should
        unflatten to ndarray or ValueArray rather than actual Python lists.
        """

        def compareValueArrays(a, b):
            """I check near equality of two ValueArrays"""
            self.assertTrue(a.allclose(b))

        tests = [
            ([1, 2, 3], np.array([1, 2, 3], dtype="int32"), np.testing.assert_array_equal),
            ([1.1, 2.2, 3.3], np.array([1.1, 2.2, 3.3], dtype="float64"), np.testing.assert_array_almost_equal),
            (np.array([3, 4], dtype="int32"), np.array([3, 4], dtype="int32"), np.testing.assert_array_equal),
            (np.array([1.2, 3.4]), np.array([1.2, 3.4]), np.testing.assert_array_almost_equal),
            ([Value(1.0, "m"), Value(3.0, "m")], ValueArray([1.0, 3.0], "m"), compareValueArrays),
            ([Value(1.0, "m"), Value(10, "cm")], ValueArray([1.0, 0.1], "m"), compareValueArrays),
            (ValueArray([1, 2], "Hz"), ValueArray([1, 2], "Hz"), compareValueArrays),
            (ValueArray([1.0, 2], ""), np.array([1.0, 2]), np.testing.assert_array_almost_equal),
            # Numpy scalar types
            (np.bool8(True), True, self.assertEqual),
        ]
        for input, expected, comparison_func in tests:
            unflat = T.unflatten(*T.flatten(input))
            if isinstance(unflat, np.ndarray):
                self.assertEqual(unflat.dtype, expected.dtype)
            comparison_func(unflat, expected)
Example #10
 def eval_query_top(self, query_idx, scores, k=(1, 5, 10, 20, 50, 100)):
     """ Evaluates top-k for a given query.
     """
     if not self.labels: raise NotImplementedError()
     q_label = self.get_query_groundtruth(query_idx, 'label')
     correct = np.bool8([l == q_label for l in self.labels])
     correct = correct[(-scores).argsort()]
     return {k_: float(correct[:k_].any()) for k_ in k if k_ < len(correct)}
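The method above reduces top-k retrieval evaluation to sorting correctness flags by descending score. A standalone sketch of that core step with made-up labels and scores (note that np.bool8 is an alias of np.bool_ that was removed in NumPy 2.0):

import numpy as np

labels = ['cat', 'dog', 'cat', 'bird']   # hypothetical gallery labels
q_label = 'cat'                          # groundtruth label for the query
scores = np.array([0.2, 0.9, 0.7, 0.1])  # similarity of each item to the query

correct = np.bool8([l == q_label for l in labels])
correct = correct[(-scores).argsort()]   # hits reordered by descending score
print({k: float(correct[:k].any()) for k in (1, 2, 3)})
# {1: 0.0, 2: 1.0, 3: 1.0} -- the best-scoring item is 'dog', a miss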
Example #11
    def test_invalid(self):
        prop = bcpp.Float()

        assert not prop.is_valid(1.0 + 1.0j)
        assert not prop.is_valid("")
        assert not prop.is_valid(())
        assert not prop.is_valid([])
        assert not prop.is_valid({})
        assert not prop.is_valid(_TestHasProps())
        assert not prop.is_valid(_TestModel())

        assert not prop.is_valid(np.bool8(False))
        assert not prop.is_valid(np.bool8(True))
        assert not prop.is_valid(np.complex64(1.0 + 1.0j))
        assert not prop.is_valid(np.complex128(1.0 + 1.0j))
        if hasattr(np, "complex256"):
            assert not prop.is_valid(np.complex256(1.0 + 1.0j))
Example #12
 def test_round_mask(self):
     """Compare output with predicted output. Use non-cube input"""
     reference = numpy.bool8([[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
                              [[0, 1, 1, 0], [0, 1, 1, 0], [0, 1, 1, 0]],
                              [[0, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0]],
                              [[0, 1, 1, 0], [0, 1, 1, 0], [0, 1, 1, 0]],
                              [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]])
     result = tools.round_mask((5, 3, 4), 1.6)
     numpy.testing.assert_equal(reference, result)
Example #13
def best_split_robust(X, y, weights, subset=None):
    """
    Finds the best split of the data subset for the weighted regression problem.

    Parameters
    ----------
    X : (n_samples,n_features) float array
        Binary features.
    y : (n_samples,) float array
        Response values.
    weights : (n_samples,) float array
        Sample weights.
    subset : integer array
        Indices. All if None.

    Returns
    -------
    out : (integer,float,integer array,float,float,integer_array,float)
        feature, prediction_on, subset_on, sum_of_squares_on, prediction_off, subset_off, sum_of_squares_off
    """

    if subset is not None:  # takes about 50% of runtime, rest is dot and sum (0-padding not an option since subset may be small)
        X = X[subset,]
        y = y[subset]
        weights = weights[subset]
    else:
        subset = np.arange(X.shape[0])
    n_features = X.shape[1]

    # compute counts
    n_features_on = weights.dot(X)
    feature_on_sum = y.dot(X)
    n_features_off = weights.sum() - n_features_on
    feature_off_sum = y.sum() - feature_on_sum

    # compute RSS, up to a global constant
    ii = np.where(n_features_on>0)[0]
    RSS_on = np.zeros(n_features)
    RSS_on[ii] = -feature_on_sum[ii]**2 / n_features_on[ii]
    ii = np.where(n_features_off>0)[0]
    RSS_off = np.zeros(n_features)
    RSS_off[ii] = -feature_off_sum[ii]**2 / n_features_off[ii]
    RSS = RSS_on + RSS_off

    # find best feature
    feature = RSS.argmin()
    if n_features_on[feature]>0:
        prediction_on = feature_on_sum[feature] / n_features_on[feature]
    else:
        prediction_on = 0
    if n_features_off[feature]>0:
        prediction_off = feature_off_sum[feature] / n_features_off[feature]
    else:
        prediction_off = 0
    subset_on, subset_off = subset_split(subset, np.bool8(X[:,feature]))

    return feature, prediction_on, subset_on, RSS_on[feature], prediction_off, subset_off, RSS_off[feature]
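A small sanity check under made-up data, assuming the module-level subset_split helper used above is available; feature 1 separates the responses perfectly, so it should be selected with on/off predictions of 5.0 and 0.0:

import numpy as np

X = np.array([[1., 0.], [1., 1.], [0., 0.], [0., 1.]])  # binary features
y = np.array([0., 5., 0., 5.])  # responses split perfectly by feature 1
w = np.ones(4)                  # uniform sample weights

feature, pred_on, _, _, pred_off, _, _ = best_split_robust(X, y, w)
print(feature, pred_on, pred_off)  # 1 5.0 0.0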
Example #14
def mark_data_missing(X, p):
    """
    X is a dataframe
    p is a vector of probabilities
    """
    assert type(X) is pd.DataFrame
    assert p.shape[0] == X.shape[1]
    mask = np.bool8(np.random.binomial(n=1, p=p, size=X.shape))
    return X.mask(mask)
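A quick usage sketch with a toy DataFrame; p holds one marking probability per column, so p = [0.0, 0.5] leaves column 'a' intact and replaces roughly half of column 'b' with NaN:

import numpy as np
import pandas as pd

np.random.seed(0)
df = pd.DataFrame({'a': range(6), 'b': range(6)})
p = np.array([0.0, 0.5])
print(mark_data_missing(df, p))  # only column 'b' contains NaN entries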
Example #15
 def test_int(self):
     self.assert_equal_with_lambda_check(_flexible_type(1), 1)
     self.assert_equal_with_lambda_check(_flexible_type(1L), 1)
     self.assert_equal_with_lambda_check(_flexible_type(True), 1)
     self.assert_equal_with_lambda_check(_flexible_type(False), 0)
     # numpy types
     self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
Example #16
def query_points_by_runids(data_mms, data_runids, grunid):
    # Get points based on grunids
    overall_mask = np.zeros(len(data_mms))

    for runid in grunid:
        mask = data_runids == runid
        overall_mask = overall_mask + np.int0(mask)

    data_mms_runid = data_mms[np.bool8(overall_mask)]
    return data_mms_runid
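The loop accumulates an integer mask and converts it to boolean at the end; the same selection can be written as one vectorized membership test. A hedged equivalent using np.isin (available since NumPy 1.13):

import numpy as np

def query_points_by_runids_isin(data_mms, data_runids, grunid):
    # same selection as the loop above, without the per-runid accumulation
    return data_mms[np.isin(data_runids, grunid)]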
Example #17
def toNumpyScalar(num, dtype=None):
  ''' convert a Python number to an equivalent Numpy scalar type '''
  if isinstance(dtype,np.dtype): 
    num = dtype.type(num)
  else:  
    # bool must be tested before int, since bool is a subclass of int
    if isinstance(num, bool): num = np.bool8(num)
    elif isinstance(num, float): num = np.float64(num)
    elif isinstance(num, int): num = np.int64(num)
    else: raise NotImplementedError(num)
  return num
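A brief usage sketch; an explicit np.dtype takes precedence, otherwise the type is inferred from the Python value (bool is checked first because bool is an int subclass):

import numpy as np

print(type(toNumpyScalar(3.5)))                      # <class 'numpy.float64'>
print(type(toNumpyScalar(7)))                        # <class 'numpy.int64'>
print(type(toNumpyScalar(True)))                     # <class 'numpy.bool_'>
print(type(toNumpyScalar(7, dtype=np.dtype('f4'))))  # <class 'numpy.float32'>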
Example #18
def coordsmasks(event, xy_points, trkkey=False):
    '''This simple script produces masks for coordinates within each contour
       associated with each event
    NOTE: This produces masks for station RAINFALL DATA
    Usage: masks = coordsmasks(event, xy_points)
          xy_points are the coordinates of the associated list of points
          xy_points[:,0] is lon, xy_points[:,1] is lat
          trkkey=False implies get the masks for all mbskeys
          trkkey='noaa-olr-0-all' implies compute the masks for only
                 that dataset

    Returns: masks (dict) mask['mbs-keys-you-specified'] = {}
    '''
    e = event
    if trkkey == False:
        mbskeys = e.trkarrs.keys()
    elif isinstance(trkkey, str):
        mbskeys = [trkkey]
    elif isinstance(trkkey, list):
        mbskeys = trkkey
    # MAIN LOOP OF METHOD
    masks = {}
    npts = xy_points.shape[0]
    for mbk in mbskeys:
        trkarr = np.int32(e.trkarrs[mbk])
        if trkarr.ndim == 2:
            ntimes = trkarr.shape[0]
            maskarr = np.bool8(np.zeros((ntimes, npts)))
            for t in xrange(ntimes):
                if trkarr[t, 1] != -1:
                    maskarr[t,:] = points_inside_poly(xy_points,\
                                               e.blobs[mbk]['ch'][trkarr[t,1]])

        elif trkarr.ndim == 3:
            ntimes, ntrks = trkarr.shape[0], trkarr.shape[2]
            maskarr = np.bool8(np.zeros((ntimes, npts, ntrks)))
            for t in xrange(ntimes):
                for n in xrange(ntrks):
                    if trkarr[t, 1, n] != -1:
                        maskarr[t,:,n] = points_inside_poly(xy_points,\
                                               e.blobs[mbk]['ch'][trkarr[t,1,n]])
        masks[mbk] = maskarr
    return masks
Example #19
def loc_rt(roi, locs):  # select dots inside the area of restriction; unit: mm
    res=np.bool8(np.ones(len(locs)))
    for i in range(-1,len(roi)-1):  # 0.00000001 was added to avoid overfitting
        s=(0-roi[i,0])/(roi[i+1,0]-roi[i,0]+0.00000001)-(0-roi[i,1])/(roi[i+1,1]-roi[i,1]+0.00000001)
        if(s>0):
            jdg=(locs[:,0]-roi[i,0])/(roi[i+1,0]-roi[i,0]+0.00000001)-(locs[:,1]-roi[i,1])/(roi[i+1,1]-roi[i,1]+0.00000001)>0
        else:
            jdg=(locs[:,0]-roi[i,0])/(roi[i+1,0]-roi[i,0]+0.00000001)-(locs[:,1]-roi[i,1])/(roi[i+1,1]-roi[i,1]+0.00000001)<0
        res=res&jdg
    return res
Example #20
def toNumpyScalar(num, dtype=None):
    ''' convert a Python number to an equivalent Numpy scalar type '''
    if isinstance(dtype, np.dtype):
        num = dtype.type(num)
    else:
        # bool must be tested before int, since bool is a subclass of int
        if isinstance(num, bool): num = np.bool8(num)
        elif isinstance(num, float): num = np.float64(num)
        elif isinstance(num, int): num = np.int64(num)
        else: raise NotImplementedError(num)
    return num
Example #21
 def test_int(self):
     self.assert_equal_with_lambda_check(_flexible_type(1), 1)
     self.assert_equal_with_lambda_check(_flexible_type(long(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(True), 1)
     self.assert_equal_with_lambda_check(_flexible_type(False), 0)
     # numpy types
     self.assert_equal_with_lambda_check(_flexible_type(np.int_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.int16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint64(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint32(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.uint16(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool_(0)), 0)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(1)), 1)
     self.assert_equal_with_lambda_check(_flexible_type(np.bool8(0)), 0)
Example #22
    def _periodicity(self):

        self._exist_peaks()

        ts = self.state['ts']
        bin_peaks = self.state['peaks']

        ts_peaks = ts[np.bool8(bin_peaks)]
        self.state['Ti'] = ts_peaks[1:] - ts_peaks[:-1]
        return self.state['Ti']
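The interval computation is equivalent to np.diff over the timestamps of the detected peaks; a standalone sketch with made-up state:

import numpy as np

ts = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5])
bin_peaks = np.array([0, 1, 0, 1, 0, 1])  # hypothetical binary peak flags

ts_peaks = ts[np.bool8(bin_peaks)]
print(ts_peaks[1:] - ts_peaks[:-1])  # [1. 1.], same as np.diff(ts_peaks)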
Example #23
class TestBool(TestCase):
    bools = [True, False, np.bool8(True), np.bool8(False)]
    not_bools = [0, 1, 10, -1, 100, 1000000, int(-1e15), int(1e15),
                 0.1, -0.1, 1.0, 3.5, -2.3e6, 5.5e15, 1.34e-10, -2.5e-5,
                 math.pi, math.e, '', None, float("nan"), float("inf"),
                 -float("inf"), '1', [], {}, [1, 2], {1: 1}, b'good',
                 AClass, AClass(), a_func]

    def test_bool(self):
        b = Bool()

        for v in self.bools:
            b.validate(v)

        for v in self.not_bools:
            with self.assertRaises(TypeError):
                b.validate(v)

        self.assertEqual(repr(b), '<Boolean>')
Example #24
 def test_int(self):
     self.assertEqual(_flexible_type(1), 1)
     self.assertEqual(_flexible_type(1L), 1)
     self.assertEqual(_flexible_type(True), 1)
     self.assertEqual(_flexible_type(False), 0)
     # numpy types
     self.assertEqual(_flexible_type(np.int_(1)), 1)
     self.assertEqual(_flexible_type(np.int64(1)), 1)
     self.assertEqual(_flexible_type(np.int32(1)), 1)
     self.assertEqual(_flexible_type(np.int16(1)), 1)
     self.assertEqual(_flexible_type(np.uint64(1)), 1)
     self.assertEqual(_flexible_type(np.uint32(1)), 1)
     self.assertEqual(_flexible_type(np.uint16(1)), 1)
     self.assertEqual(_flexible_type(np.bool(1)), 1)
     self.assertEqual(_flexible_type(np.bool(0)), 0)
     self.assertEqual(_flexible_type(np.bool_(1)), 1)
     self.assertEqual(_flexible_type(np.bool_(0)), 0)
     self.assertEqual(_flexible_type(np.bool8(1)), 1)
     self.assertEqual(_flexible_type(np.bool8(0)), 0)
Example #25
def set_explicit_dtype(x):
    """Force `x` to have a numpy type if it doesn't already have one.

    Parameters
    ----------
    x : numpy-typed object, bool, integer, float
        If not numpy-typed, type is attempted to be inferred. Currently only
        bool, int, and float are supported, where bool is converted to
        np.bool8, integer is converted to np.int64, and float is converted to
        np.float64. This ensures that full precision for all but the most
        extreme cases is maintained for inferred types.

    Returns
    -------
    x : numpy-typed object

    Raises
    ------
    TypeError
        In case the type of `x` is not already set or is not a valid inferred
        type. As type inference can yield different results for different
        inputs, rather than deal with everything, explicitly failing helps to
        avoid inferring the different instances of the same object differently
        (which will cause a failure later on when trying to concatenate the
        types in a larger array).

    """
    if hasattr(x, "dtype"):
        return x

    # "value" attribute is found in basic icecube.{dataclasses,icetray} dtypes
    # such as I3Bool, I3Double, I3Int, and I3String
    if hasattr(x, "value"):
        x = x.value

    # bools are numbers.Integral, so test for bool first
    if isinstance(x, bool):
        return np.bool8(x)

    if isinstance(x, Integral):
        x_new = np.int64(x)
        assert x_new == x
        return x_new

    if isinstance(x, Number):
        x_new = np.float64(x)
        assert x_new == x
        return x_new

    if isinstance(x, string_types):
        x_new = np.string0(x)
        assert x_new == x
        return x_new

    raise TypeError("Type of argument ({}) is invalid: {}".format(x, type(x)))
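A brief usage sketch; values that already carry a dtype pass through untouched, while plain Python scalars are promoted as the docstring describes:

import numpy as np

print(set_explicit_dtype(True))    # numpy bool scalar (np.bool8)
print(set_explicit_dtype(42))      # np.int64 scalar
print(set_explicit_dtype(3.14))    # np.float64 scalar
x = np.float32(1.0)
print(set_explicit_dtype(x) is x)  # True: already numpy-typed, returned as-is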
Example #26
def back_extract(img):

    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    trash = gray_img[:].copy()    
    eq_img = cv2.equalizeHist(src=gray_img, dst=trash)
    gammed = gamma(eq_img, gamma=15)
    blur = gammed
    cv2.GaussianBlur(src=gammed, dst=blur, ksize=(35,35), sigmaX=0, sigmaY=0 )         
    cont = cv2.findContours(blur, cv2.RETR_EXTERNAL,
           cv2.CHAIN_APPROX_SIMPLE)[-2]
    areaArray = []
    for i, c in enumerate(cont):
        area = cv2.contourArea(c)
        areaArray.append(area)
    sorteddata = sorted(zip(areaArray, cont), key = lambda x: x[0], 
                        reverse=True)
    largest1 = sorteddata[0][1]
    points1 = np.array([point[0] for point in largest1])
    points2 = [0,0]
    if len(sorteddata) > 1 : #Some images don't have 2 segments 
        largest2 = sorteddata[1][1]
        points2 = np.array([point[0] for point in largest2])
    else: largest2 = np.asarray((0,0))
    blank = np.zeros(shape = gray_img.shape)
    if len(points2) > 2 : #If there're two segments
        filled = cv2.fillPoly(blank, [points1, points2], 1)
    else:
        filled = cv2.fillPoly(blank, [points1], 1)
    boole = ~np.bool8(filled) #inverts so background is 0
    boole = np.uint8(boole)
    masked = gray_img*boole
        ######## Secondary: GrabCut

    mask = np.zeros(img.shape[:2],np.uint8)
    
    bgdModel = np.zeros((1,65),np.float64)
    fgdModel = np.zeros((1,65),np.float64)
    
    rect = (0,0,img.shape[1]-1, len(img)-1)
    
    cv2.grabCut(img,mask,rect,bgdModel,fgdModel,2,cv2.GC_INIT_WITH_RECT)
    mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
    masked2 = img*mask2[:,:,np.newaxis]
    masked2 = cv2.cvtColor(masked2, cv2.COLOR_BGR2GRAY)
    masked = masked2*boole
    
    # Find how much white there is. Integrates into inversion decision later 
    how_mask = masked.size - np.count_nonzero(masked)
    
    cv2.imshow("masked img", masked)  
    cv2.waitKey(0)
    cv2.destroyAllWindows()    
    return masked, how_mask
Example #27
    def test_parse_to_bool_convertible(self):
        try_to_convert = partial(self._try_to_convert, cv.utils.dumpBool)
        for convertible_true in (True, 1, 64, np.bool(1), np.int8(123), np.int16(11), np.int32(2),
                                 np.int64(1), np.bool_(3), np.bool8(12)):
            actual = try_to_convert(convertible_true)
            self.assertEqual('bool: true', actual,
                             msg=get_conversion_error_msg(convertible_true, 'bool: true', actual))

        for convertible_false in (False, 0, np.uint8(0), np.bool_(0), np.int_(0)):
            actual = try_to_convert(convertible_false)
            self.assertEqual('bool: false', actual,
                             msg=get_conversion_error_msg(convertible_false, 'bool: false', actual))
Example #28
 def initial(self):
     """ initial part of the transmission loss module
     """
     settings = LisSettings.instance()
     option = settings.options
     if option['TransLoss']:
         TransArea = loadmap('TransArea')
         self.var.TransSub = loadmap('TransSub')
         # downstream area taking into account for transmission loss
         self.var.UpAreaTrans = loadmap('UpAreaTrans')
         # upstream area
         self.var.UpTrans = np.where(self.var.UpAreaTrans >= TransArea,
                                     np.bool8(1), np.bool8(0))
         # Downstream taken into account for transmission loss:
         # if the upstream area (the total one) is bigger than a threshold,
         # use transmission loss
         self.var.TransPower1 = loadmap('TransPower1')
         self.var.TransPower2 = 1.0 / self.var.TransPower1
         # transmission loss function
         maskinfo = MaskInfo.instance()
         self.var.TransCum = maskinfo.in_zero()
Example #29
 def _read_image(name):
     """Read an image from a file_handle"""
     if name == "image":
         if file_handle["phased"][0]:
             image = _numpy.squeeze(file_handle['real'][...] + 1.j*file_handle['imag'][...])
         else:
             image = _numpy.real(_numpy.squeeze(file_handle['real'][...]))
     elif name == "mask":
         image = _numpy.bool8(_numpy.squeeze(file_handle["mask"][...]))
     else:
         raise ValueError("Can not load {0}.".format(name))
     return image
Example #30
    def __init__(
            self,
            Gain=0.3,
            MaxMinorIter=100,
            NCPU=6,
            CycleFactor=2.5,
            FluxThreshold=None,
            RMSFactor=3,
            PeakFactor=0,
            GD=None,
            SearchMaxAbs=1,
            CleanMaskImage=None,
            ImagePolDescriptor=["I"],
            ModelMachine=None,
            **kw  # absorb any unknown keywords arguments into this
    ):
        self.SearchMaxAbs = SearchMaxAbs
        self.ModelImage = None
        self.MaxMinorIter = MaxMinorIter
        self.NCPU = NCPU
        self.MaskArray = None
        self.GD = GD
        self.MultiFreqMode = (self.GD["Freq"]["NBand"] > 1)
        self.NFreqBand = self.GD["Freq"]["NBand"]
        self.FluxThreshold = FluxThreshold
        self.CycleFactor = CycleFactor
        self.RMSFactor = RMSFactor
        self.PeakFactor = PeakFactor
        self.GainMachine = ClassGainMachine.ClassGainMachine(GainMin=Gain)
        if ModelMachine is None:
            import ClassModelMachineHogbom as ClassModelMachine
            self.ModelMachine = ClassModelMachine.ClassModelMachine(
                self.GD, GainMachine=self.GainMachine)
        else:
            self.ModelMachine = ModelMachine
        self.GainMachine = self.ModelMachine.GainMachine
        self.GiveEdges = GiveEdges.GiveEdges
        self._niter = 0
        if CleanMaskImage is not None:
            print >> log, "Reading mask image: %s" % CleanMaskImage
            MaskArray = image(CleanMaskImage).getdata()
            nch, npol, _, _ = MaskArray.shape
            self._MaskArray = np.zeros(MaskArray.shape, np.bool8)
            for ch in range(nch):
                for pol in range(npol):
                    self._MaskArray[ch, pol, :, :] = np.bool8(
                        1 - MaskArray[ch, pol].T[::-1].copy())[:, :]
            self.MaskArray = self._MaskArray[0]
        self._peakMode = "normal"

        self.CurrentNegMask = None
        self._NoiseMap = None
        self._PNRStop = None  # in _peakMode "sigma", provides additional stopping criterion
Example #31
 def readExternalMaskFromFits(self):
     CleanMaskImage = self.GD["Mask"]["External"]
     if not CleanMaskImage: return
     print("  Reading mask image: %s" % CleanMaskImage, file=log)
     MaskImage = image(CleanMaskImage).getdata()
     nch, npol, _, _ = MaskImage.shape
     MaskArray = np.zeros(MaskImage.shape, np.bool8)
     for ch in range(nch):
         for pol in range(npol):
             MaskArray[ch, pol, :, :] = np.bool8(
                 MaskImage[ch, pol].T[::-1].copy())[:, :]
     self.ExternalMask = MaskArray
Example #32
def execute(positions, num_particles, num_frames):
    #Get host positions:
    cpuPos = numpy.array(positions, dtype=numpy.float32)
    #Allocate position space on device:
    devPos = cuda.mem_alloc(cpuPos.nbytes)
    #Copy positions:
    cuda.memcpy_htod(devPos, cpuPos)

    #Allocate device velocities:
    devVels = cuda.mem_alloc(2 * num_particles * numpy.float32().nbytes)
    cuda.memset_d32(devVels, 0, 2 * num_particles)
    # #Copy velocities:
    # cuda.memcpy_htod(devVels, cpuVels)

    #Allocate and initialize device in bounds to false:
    #inBounds = numpy.zeros(num_particles, dtype=bool)
    devInBounds = cuda.mem_alloc(num_particles * numpy.bool8().nbytes)
    cuda.memset_d8(devInBounds, True, num_particles)

    # inB = numpy.zeros(num_particles, dtype=numpy.bool)
    # cuda.memcpy_dtoh(inB, devInBounds)
    # print inB

    # cuda.memcpy_htod(devInBounds, inBounds)
    # numBlocks = 1#(num_particles // 512) + 1;
    grid_dim = ((num_particles // NUM_THREADS) + 1, 1)
    print grid_dim
    runframe = module.get_function("runframe")
    frames = [None] * num_frames
    for i in range(num_frames):
        runframe(devPos,
                 devVels,
                 devInBounds,
                 numpy.int32(num_particles),
                 grid=grid_dim,
                 block=(NUM_THREADS, 1, 1))
        #Get the positions from device:
        cuda.memcpy_dtoh(cpuPos, devPos)
        frames[i] = cpuPos.copy()
        #frames[i] = copy(cpuPos)
        #write_frame(out, cpuPos, num_particles)

    #Simulation destination file:
    # out = open(OUTPUT_FILE, 'w')
    # write_header(out, num_particles)
    # for frame in frames:
    #     write_frame(out, frame, num_particles)

    #clean up...
    #out.close()
    devPos.free()
    devVels.free()
    devInBounds.free()
Example #33
 def __init__(self, recordingPath):
     self.recordingPath = recordingPath
     
     depName = 'image_' + str(1) + '_dep.png'
     depPath = os.path.join(self.recordingPath, depName)
     dep = imread(depPath, -1)
     self.videoSize = (dep.shape[1], dep.shape[0])
     
     self.mask = ~np.bool8(imread(os.path.join(recordingPath, 'mask.png'), -1))
     self.flatTable = buildMinMap(os.path.join(recordingPath, 'table'))
     
     self.img = np.zeros((dep.shape[0], dep.shape[1],3 ), 'uint8')
     self.dep = np.zeros(dep.shape, 'uint8')
Example #34
 def _read_image(name):
     """Read an image from a file_handle"""
     if name == "image":
         if file_handle["phased"][0]:
             image = _numpy.squeeze(file_handle['real'][...] +
                                    1.j * file_handle['imag'][...])
         else:
             image = _numpy.real(_numpy.squeeze(file_handle['real'][...]))
     elif name == "mask":
         image = _numpy.bool8(_numpy.squeeze(file_handle["mask"][...]))
     else:
         raise ValueError("Can not load {0}.".format(name))
     return image
Example #35
 def setMaskMachine(self, MaskMachine):
     self.MaskMachine = MaskMachine
     if self.MaskMachine.ExternalMask is not None:
         print("Applying external mask", file=log)
         MaskArray = self.MaskMachine.ExternalMask
         nch, npol, _, _ = MaskArray.shape
         self._MaskArray = np.zeros(MaskArray.shape, np.bool8)
         for ch in range(nch):
             for pol in range(npol):
                 self._MaskArray[ch, pol, :, :] = np.bool8(
                     1 - MaskArray[ch, pol].copy())[:, :]
         self._MaskArray = np.ascontiguousarray(self._MaskArray)
         self.MaskArray = np.ascontiguousarray(self._MaskArray[0])
Example #36
def execute(positions, num_particles, num_frames):
    #Get host positions:
    cpuPos = numpy.array(positions, dtype=numpy.float32)
    #Allocate position space on device:
    devPos = cuda.mem_alloc(cpuPos.nbytes)
    #Copy positions:
    cuda.memcpy_htod(devPos, cpuPos)
    
    #Allocate device velocities:
    devVels = cuda.mem_alloc(2 * num_particles * numpy.float32().nbytes)
    cuda.memset_d32(devVels, 0, 2 * num_particles)
    # #Copy velocities:
    # cuda.memcpy_htod(devVels, cpuVels)
    
    #Allocate and initialize device in bounds to false:
    #inBounds = numpy.zeros(num_particles, dtype=bool)
    devInBounds = cuda.mem_alloc(num_particles * numpy.bool8().nbytes)
    cuda.memset_d8(devInBounds, True, num_particles)
    
    # inB = numpy.zeros(num_particles, dtype=numpy.bool)
    # cuda.memcpy_dtoh(inB, devInBounds)
    # print inB
    
    # cuda.memcpy_htod(devInBounds, inBounds)
    # numBlocks = 1#(num_particles // 512) + 1;
    grid_dim = ((num_particles // NUM_THREADS) + 1, 1)
    print grid_dim
    runframe = module.get_function("runframe")
    frames = [None] * num_frames
    for i in range(num_frames):
        runframe(devPos, devVels, devInBounds, 
                 numpy.int32(num_particles),
                 grid=grid_dim,
                 block=(NUM_THREADS, 1, 1))
        #Get the positions from device:
        cuda.memcpy_dtoh(cpuPos, devPos)
        frames[i] = cpuPos.copy()
        #frames[i] = copy(cpuPos)
        #write_frame(out, cpuPos, num_particles)
    
    #Simulation destination file:
    # out = open(OUTPUT_FILE, 'w')
    # write_header(out, num_particles)
    # for frame in frames:
    #     write_frame(out, frame, num_particles)
    
    #clean up...
    #out.close()
    devPos.free()
    devVels.free()
    devInBounds.free()
Example #37
def get_traveled(courses):
    """ Return indices when travelling along courses. """
    # turn indices into points array
    height, width = courses.shape
    indices = (np.arange(height).repeat(width),
               np.tile(np.arange(width), height))
    points = np.array(indices).transpose()

    # determine direction and apply offset
    encode = courses[indices][:, np.newaxis]     # which codes
    select = np.bool8(encode & NUMBERS)          # which courses
    target = points + OFFSETS[select.argmax(1)]  # apply offsets

    return tuple(target.transpose())             # return tuple
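The snippet depends on module globals that are not shown. A plausible reconstruction, assuming the usual D8 convention in which each of the eight neighbour directions gets one bit and OFFSETS holds the matching (row, col) steps; the exact bit order below is an assumption, not taken from the source:

import numpy as np

# Assumed D8 encoding: one bit per neighbour direction (hypothetical order).
NUMBERS = np.array([[1, 2, 4, 8, 16, 32, 64, 128]], dtype='u1')
# Assumed (row, col) offsets matching the bit order above.
OFFSETS = np.array([(-1, 1), (0, 1), (1, 1), (1, 0),
                    (1, -1), (0, -1), (-1, -1), (-1, 0)])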
Example #38
def subj_2_include(subj_main_folder, file_name):
    if len(subj_main_folder[0]) == 1:
        subj_list = glob.glob(f'{subj_main_folder}{os.sep}*{os.sep}')
    else:
        subj_list = subj_main_folder
    subj_idx = np.zeros(len(subj_list))

    for i in range(len(subj_idx)):
        if file_name + '.nii' in os.listdir(subj_list[i]):
            subj_idx[i] = True
        else:
            subj_idx[i] = False

    return np.bool8(subj_idx)
Example #39
def dft_2d_masked(y_side, x_side, mask_real, mask_fourier):
    """
    The dft matrix that is returned works on complex vectors
    and returns a complex vector. Data is stored consistent with
    numpy's flatten(). Only the cols and rows corresponding to pixels
    in the real and Fourier mask respectively are calculated.
    """
    o_1 = _numpy.exp(-2.0j * _numpy.pi / y_side)
    o_2 = _numpy.exp(-2.0j * _numpy.pi / x_side)
    i = _numpy.zeros(x_side * y_side)
    j = _numpy.zeros(x_side * y_side)
    for k in xrange(y_side):
        j[x_side * k : x_side * (k + 1)] = _numpy.arange(x_side)
    for k in xrange(x_side):
        i[k::x_side] = _numpy.arange(y_side)
    i_mask_real = i[_numpy.bool8(mask_real.flatten())]
    i_mask_fourier = i[_numpy.bool8(mask_fourier.flatten())]
    j_mask_real = j[_numpy.bool8(mask_real.flatten())]
    j_mask_fourier = j[_numpy.bool8(mask_fourier.flatten())]
    dft = o_1 ** (i_mask_real[:, _numpy.newaxis] * i_mask_fourier[_numpy.newaxis, :]) * o_2 ** (
        j_mask_real[:, _numpy.newaxis] * j_mask_fourier[_numpy.newaxis, :]
    )
    return dft
Example #40
def get_traveled(courses):
    """ Return indices when travelling along courses. """
    # turn indices into points array
    height, width = courses.shape
    indices = (np.arange(height).repeat(width),
               np.tile(np.arange(width), height))
    points = np.array(indices).transpose()

    # determine direction and apply offset
    encode = courses[indices][:, np.newaxis]  # which codes
    select = np.bool8(encode & NUMBERS)  # which courses
    target = points + OFFSETS[select.argmax(1)]  # apply offsets

    return tuple(target.transpose())  # return tuple
Example #41
 def get_res(self):
     point_cla = self.point_act > 0
     Yb = np.bool8(self.Y)
     print('Results:')
     print('-------------------------------------------------')
     print('Number of red points classified as red:  ',
           np.sum(point_cla[Yb] == Yb[Yb]))
     print('Number of red points classified as blue: ',
           np.sum(point_cla[Yb] != Yb[Yb]))
     print('Number of blue points classified as blue:',
           np.sum(point_cla[~Yb] == Yb[~Yb]))
      print('Number of blue points classified as red: ',
           np.sum(point_cla[~Yb] != Yb[~Yb]))
     print('-------------------------------------------------')
Example #42
def test_make_figure():
    curdir = os.path.dirname(__file__)
    hdulist = fits.open('%s/../../tests/data/slice.fits' % (curdir, ))
    image = hdulist[0].data
    segmap = hdulist[1].data
    mask = np.bool8(hdulist[2].data)
    gain = 1.0
    source_morphs = statmorph.source_morphology(image,
                                                segmap,
                                                mask=mask,
                                                gain=gain)
    morph = source_morphs[0]
    fig = make_figure(morph)
    assert isinstance(fig, matplotlib.figure.Figure)
Example #43
def test_joint_plot():
    walk = 'walk0'
    diff = np.random.random((101, 2))
    mov = np.bool8(np.arange(101))
    mov[np.random.randint(0, 101, 50)] = False

    joint = 'Cadera'
    X = np.linspace(-np.pi, np.pi, 101)
    angles = np.array((np.sin(X) * 10, -np.sin(X) * 10, np.tan(X) * 10))
    spt = np.random.random(6)

    # Plot the spatiotemporal table
    plotter = representation.Plotter(config)
    table = plotter.new_table_plot()
    for __ in range(10):
        table.add_cycle(range(6))
    table.build_table()
    table.save()

    # Plot the spatiotemporal table with text
    plotter = representation.Plotter(config)
    table = plotter.new_table_plot()
    for __ in range(10):
        table.add_cycle(range(6))
    table.build_table()
    table.save(withtext=True)

    # Plot the kinematics of a single joint
    plotter = representation.Plotter(config)
    ax = plotter.new_joint_plot(joint)
    ax.add_cycle(angles[0], 65)
    ax.save()

    # Add text to a plot
    plotter = representation.Plotter(config)
    ax = plotter.new_joint_plot(joint)
    ax.save(withtext=True)

    # Plot the cycler
    plotter = representation.Plotter(config)
    ax = plotter.new_cycler_plot('walk0')
    ax.plot_cycler_out(diff, mov)
    ax.save()

    # Global plot
    plotter = representation.Plotter(config).auto()
    plotter.add_cycle('idd', spt, angles, withlabels=True)
    plotter.add_cycler(walk, diff, mov)
    plotter.saveplots(withtext=True)
Example #44
def get_traveled(indices, courses, unique):
    """ Return indices when travelling along courses. """
    # turn indices into points array
    points = np.array(indices).transpose()[:, np.newaxis, :]  # make points

    # determine uphill directions and apply offsets
    encode = courses[indices][:, np.newaxis]  # which codes
    select = np.bool8(encode & NUMBERS)[..., np.newaxis]  # which courses
    target = (points + select * OFFSETS).reshape(-1, 2)  # apply offsets

    if unique:
        target = np.unique(np.ascontiguousarray(target).view(DTYPE)).view(
            target.dtype).reshape(-1, 2)

    return tuple(target.transpose())  # return tuple
Example #45
def dft_2d_masked(y_side, x_side, mask_real, mask_fourier):
    """
    The dft matrix that is returned works on complex vectors
    and returns a complex vector. Data is stored consistent with
    numpy's flatten(). Only the cols and rows corresponding to pixels
    in the real and Fourier mask respectively are calculated.
    """
    o_1 = _numpy.exp(-2.j * _numpy.pi / y_side)
    o_2 = _numpy.exp(-2.j * _numpy.pi / x_side)
    i = _numpy.zeros(x_side * y_side)
    j = _numpy.zeros(x_side * y_side)
    for k in range(y_side):
        j[x_side * k:x_side * (k + 1)] = _numpy.arange(x_side)
    for k in range(x_side):
        i[k::x_side] = _numpy.arange(y_side)
    i_mask_real = i[_numpy.bool8(mask_real.flatten())]
    i_mask_fourier = i[_numpy.bool8(mask_fourier.flatten())]
    j_mask_real = j[_numpy.bool8(mask_real.flatten())]
    j_mask_fourier = j[_numpy.bool8(mask_fourier.flatten())]
    dft = (o_1**(i_mask_real[:, _numpy.newaxis] *
                 i_mask_fourier[_numpy.newaxis, :]) *
           o_2**(j_mask_real[:, _numpy.newaxis] *
                 j_mask_fourier[_numpy.newaxis, :]))
    return dft
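With all-ones masks the returned matrix is the full (unnormalized) 2-D DFT of the flattened image, so it can be checked against numpy's FFT; a small verification sketch:

import numpy as _numpy

y_side, x_side = 4, 3
image = _numpy.random.rand(y_side, x_side)
full = _numpy.ones((y_side, x_side))

dft = dft_2d_masked(y_side, x_side, full, full)
assert _numpy.allclose(dft.dot(image.flatten()),
                       _numpy.fft.fft2(image).flatten())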
Example #46
    def segmentacion(self):
        # centroid seeds
        book = np.array([np.min(self.features, 0), np.max(self.features, 0)])

        # segment the image
        self.centroids, dist = kmeans(self.features, book)
        code, dist = vq(self.features, self.centroids)
        self.imgSeg = np.bool8(code.reshape(self.imgShape))

        # pick the class with fewer pixels as the dice class
        if np.prod(self.imgShape) < 2 * self.imgSeg.sum():
            # the background was selected as True, so invert it
            self.imgSeg = np.uint8(~ self.imgSeg)
        else:
            self.imgSeg = np.uint8(self.imgSeg)
Example #47
def get_traveled(indices, courses, unique):
    """ Return indices when travelling along courses. """
    # turn indices into points array
    points = np.array(indices).transpose()[:, np.newaxis, :]  # make points

    # determine uphill directions and apply offsets
    encode = courses[indices][:, np.newaxis]                   # which codes
    select = np.bool8(encode & NUMBERS)[..., np.newaxis]       # which courses
    target = (points + select * OFFSETS).reshape(-1, 2)        # apply offsets

    if unique:
        target = np.unique(
            np.ascontiguousarray(target).view(DTYPE)
        ).view(target.dtype).reshape(-1, 2)

    return tuple(target.transpose())                           # return tuple
Example #48
def get_look_up_table():
    """ Create and return look-up-table. """
    # resultant vectors
    encode = np.arange(256, dtype='u1')[:, np.newaxis]    # which courses
    select = np.bool8(encode & NUMBERS)[..., np.newaxis]  # which numbers
    result = (select * VECTORS).sum(1)[:, np.newaxis, :]  # what resultant

    # select courses with the highest dotproduct and
    common = (result * VECTORS).sum(2)                    # best direction
    fitted = np.where(
        common.any(1),                                    # any common?
        (common * select[..., 0]).argmax(1),              # select best
        select[..., 0].argmax(1),                         # select any
    )
    mapped = NUMBERS[0, fitted]                           # mapping
    mapped[0] = 0
    return mapped
Example #49
    def process(self, src, **kwargs):
        sw = SW('Optical Flow')
        
        frame_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
        p0 = self.p0

        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_gray, frame_gray, p0, None, 
                                               winSize  = (15,15),
                                               maxLevel = 2,
                                               criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        
        # Select good points
        good_new = p1[st==1]
        good_old = p0[st==1]
        rstmsk = np.zeros(good_new.shape[0], dtype=np.bool8)

        for i,pt in enumerate(good_new):
            rstmsk[i] = np.bool8(math.sqrt((pt[0]-self.center.x)**2+(pt[1]-self.center.y)**2)<=(self.radius+SELECTPADDING))
        
        good_new = good_new[rstmsk]
        good_old = good_old[rstmsk]
        
        if (good_new.shape[0]*2)<self.nump0:
            raise OpticalFlow.ObjectMissError
        
        tmp = np.average(good_new, axis=0)
        self.center = util.Point(int(tmp[0]),int(tmp[1]))
        
        dst = src.copy()
        
        try:
            self.oflines  # optical flow trajectory lines
        except AttributeError:
            self.oflines = np.zeros_like(src, dtype=np.uint8)
        
        for n,o in zip(good_new, good_old):
            cv2.circle(dst,tuple(n),3,OBJECT_MATCH_COLOR,-1)# filled circle
            cv2.line(self.oflines,tuple(n),tuple(o),OBJECT_MATCH_COLOR,3)# line
        self.old_gray = frame_gray
        self.p0 = good_new.reshape(-1,1,2)
        
        sw.stop()
        return dst, [self.center], cv2.add(self.oflines,src)
Example #50
def radial_average(image, mask=None):
    """Calculates the radial average of an array of any shape,
    the center is assumed to be at the physical center."""
    if mask is None:
        mask = _numpy.ones(image.shape, dtype="bool8")
    else:
        mask = _numpy.bool8(mask)
    axis_values = [_numpy.arange(l) - l / 2.0 + 0.5 for l in image.shape]
    radius = _numpy.zeros((image.shape[-1]))
    for i in range(len(image.shape)):
        radius = radius + (axis_values[-(1 + i)][(slice(0, None),) + (_numpy.newaxis,) * i]) ** 2
    radius = _numpy.int32(_numpy.sqrt(radius))
    number_of_bins = radius[mask].max() + 1
    radial_sum = _numpy.zeros(number_of_bins)
    weight = _numpy.zeros(number_of_bins)
    for value, this_radius in zip(image[mask], radius[mask]):
        radial_sum[this_radius] += value
        weight[this_radius] += 1.0
    radial_sum[weight > 0] /= weight[weight > 0]
    radial_sum[weight == 0] = _numpy.nan
    return radial_sum
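A quick usage sketch on a synthetic radially symmetric image; each output bin holds the mean intensity at that integer radius, so the profile grows by roughly one unit per bin:

import numpy as _numpy

side = 33
y, x = _numpy.indices((side, side)) - side // 2
image = _numpy.hypot(y, x)  # intensity equals distance from the center
print(radial_average(image)[:5])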
Example #51
def cluster_withsubsets(spike_table, reorder_clus=True):
    if reorder_clus:
        print "Cluster reordering not implemented!"
    ST_nc = np.bool8(spike_table.cols.channel_mask[:])
    Fet_nc3 = spike_table.cols.fet[:]

    # TODO: implement this and remove the raise exception
    raise NotImplementedError(
        "To use cluster_withsubsets you will need to implement some code to find the groups from the probe graph.")
    # m these are all 4-channel subsets to be computed (based on probe's
    # topology)
    ChSubsets = probes.SORT_GROUPS

    # m for each subset  - the consecutive numbers of spikes that are relevant
    # (?)
    SpkSubsets = spike_subsets(ST_nc, ChSubsets)
    print "%i subsets total" % len(SpkSubsets)
    # m _FPC is no. of features per channel
    n_spikes, n_ch, _FPC = Fet_nc3.shape

# for i_subset,ChHere,SpkHere in zip(it.count(), ChSubsets, SpkSubsets):   #m SpkHere - the consecutive numbers of spikes belonging to this subset
#        print("Sorting channels %s"%ChHere.__repr__())
# FetHere_nc3 = Fet_nc3[np.ix_(SpkHere, ChHere)] #m features of spikes in this subset
# m FetHere_nc3 is a 3D array of size (no. of spikes in this subset) x 4(subsets are of 4 channels) x 3 (no. of features per channel)
#        CluArr = klustakwik_cluster(FetHere_nc3, i_subset, ChHere, SpkHere)
#        print 'KlustaKwik returned', max(CluArr), 'clusters.'

    args = []
    # m SpkHere - the consecutive numbers of spikes belonging to this subset
    for i_subset, ChHere, SpkHere in zip(it.count(), ChSubsets, SpkSubsets):
        print("Sorting channels %s" % ChHere.__repr__())
        # m features of spikes in this subset
        FetHere_nc3 = Fet_nc3[np.ix_(SpkHere, ChHere)]
        # m FetHere_nc3 is a 3D array of size (no. of spikes in this subset) x
        # 4(subsets are of 4 channels) x 3 (no. of features per channel)
        args.append((FetHere_nc3, i_subset, ChHere, SpkHere))
        #CluArr = klustakwik_cluster(FetHere_nc3, i_subset, ChHere, SpkHere)
        # print 'KlustaKwik returned', max(CluArr), 'clusters.'
    pool = multiprocessing.Pool(NUMPROCESSES)
    pool.map(klustakwik_cluster_args, args)
Example #52
def back_extract(img):
    '''
    Attempts to find the background and turn it black. Equalizes the histogram,
    boosts gamma way up to 15 such that the only contour is the seal (usually),
    blurs, then finds that contour, builds a filled polygon from the points,
    and then multiplies the (inverted) boolean values by the original image 
    such that the background (black, 0) turns all corresponding background 
    pixels in the original black as well.
    
    Requires: cv2, numpy as np
    '''
    trash = img[:].copy()    
    eq_img = cv2.equalizeHist(src=img, dst=trash)
    gammed = gamma(eq_img, gamma=15)
    blur = gammed
    cv2.GaussianBlur(src=gammed, dst=blur, ksize=(35,35), sigmaX=0, sigmaY=0 )         
    cont = cv2.findContours(blur, cv2.RETR_EXTERNAL,
           cv2.CHAIN_APPROX_SIMPLE)[-2]
    areaArray = []
    for i, c in enumerate(cont):
        area = cv2.contourArea(c)
        areaArray.append(area)
    sorteddata = sorted(zip(areaArray, cont), key = lambda x: x[0], 
                        reverse=True)
    largest1 = sorteddata[0][1]
    points1 = np.array([point[0] for point in largest1])
    points2 = [0,0]
    if len(sorteddata) > 1 : #Some images don't have 2 segments 
        largest2 = sorteddata[1][1]
        points2 = np.array([point[0] for point in largest2])
    else: largest2 = np.asarray((0,0))
    blank = np.zeros(shape = img.shape)
    if len(points2) > 2 : #If there're two segments
        filled = cv2.fillPoly(blank, [points1, points2], 1)
    else:
        filled = cv2.fillPoly(blank, [points1], 1)
    boole = ~np.bool8(filled)  # inverts so background is 0
    boole = np.uint8(boole)
    masked = img*boole
    return masked    
Example #53
def svmPlotExtrRep(event=0,plot=True,suf=''):
    from Pixel import initPath
    if plot: plt.close()
    P=32;F=34
    dat=[]
    for vp in range(1,5):
        path,inpath,figpath=initPath(vp,event)
        fn= inpath+'svm%s/hc/hcWorker'%suf
        dat.append([])
        for g in range(2):
            for k in range(4):
                try:temp=np.load(fn+'%d.npy'%(k*2+g))
                except IOError:
                    print 'File missing: ',vp,event,suf
                    temp=np.zeros(P*P*F,dtype=np.bool8)
                temp=np.reshape(temp,[P,P,F])
                dat[-1].append(np.bool8(g-1**g *temp))
    lbl=[]
    for i in range(4):lbl.append([FIG[7][0]+str(i+1),20,18+i*40,FIG[7][1]])
    lbl.append([FIG[7][2],20,-10,70]);lbl.append([FIG[7][3],20,-10,245])
    if plot: plotGifGrid(dat,fn=figpath+'svm%sExtremaE%d'%(suf,event)+FMT,
                         F=34,P=32,text=lbl,bcgclr=0.5)
    return dat
Example #54
def calculate_flow_direction(values):
    """
    Single neighbour: Encode directly
    Multiple neighbours:
    - Zero drop: Resolve later, iteratively
    - Nonzero drop: Resolve immediately using look-up table
    """
    # output
    direction = np.zeros_like(values, dtype='u1')

    # calculation of drop per neighbour cell
    factor = np.zeros((3, 3))
    factor[INDICES] = WEIGHTS[0]

    best_drop = np.zeros_like(values)

    # assign directions based on zero or positive drops
    for i, j in zip(*factor.nonzero()):
        kernel = np.zeros((3, 3))
        kernel[i, j] = -factor[i, j]
        kernel[1, 1] = +factor[i, j]

        this_drop = ndimage.correlate(values, kernel)

        # same drops add to the direction
        same_drop = (this_drop == best_drop)
        direction[same_drop] += COURSES[i, j]

        # better drops replace the direction
        more_drop = this_drop > best_drop
        direction[more_drop] = COURSES[i, j]
        best_drop[more_drop] = this_drop[more_drop]

    # use look-up-table to eliminate multi-directions for positive drops:
    lut = get_look_up_table()
    some_drop = (best_drop > 0)
    direction[some_drop] = lut[direction[some_drop]]

    # assign outward to edges
    direction[0, -1] = 1
    direction[1:-1, -1] = 2
    direction[-1, -1] = 4
    direction[-1, 1:-1] = 8
    direction[-1, 0] = 16
    direction[1:-1, 0] = 32
    direction[0, 0] = 64
    direction[0, 1:-1] = 128

    # iterate to solve undefined directions where possible
    kwargs = {'structure': np.ones((3, 3))}

    while True:
        undefined = ~np.in1d(direction, NUMBERS).reshape(direction.shape)
        edges = undefined & ~ndimage.binary_erosion(undefined, **kwargs)  # boolean '-' is invalid in modern numpy

        t_index1 = edges.nonzero()
        direction1 = direction[t_index1][:, np.newaxis]

        # find neighbour values
        t_index8 = get_neighbours(t_index1)
        direction8 = direction[t_index8].reshape(-1, 8)

        # neighbour must be in encoded direction
        b_index8a = np.bool8(direction1 & NUMBERS)
        # neighbour must have a defined flow direction
        b_index8b = np.in1d(direction8, NUMBERS).reshape(b_index8a.shape)
        # that direction must not point towards the cell to be defined
        b_index8c = direction8 != INVERSE
        # combined index
        b_index8 = np.logical_and.reduce([b_index8a, b_index8b, b_index8c])

        if not b_index8.any():
            break

        argmax = np.argmax(b_index8, axis=1)
        nonzero = b_index8.any(axis=1)
        superindex = tuple([t_index1[0][nonzero], t_index1[1][nonzero]])
        direction[superindex] = NUMBERS[0, argmax[nonzero]]

    # set still undefined directions (complex depressions) to zero
    direction[~np.in1d(direction, NUMBERS).reshape(direction.shape)] = 0
    return direction
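The direction codes in the function above are powers of two, so equally good drops accumulate by addition into a multi-direction byte that the np.in1d(direction, NUMBERS) test then rejects. A small sketch with hypothetical stand-ins for the module-level constants (COURSES and NUMBERS are defined outside this excerpt):

import numpy as np

NUMBERS = (2 ** np.arange(8, dtype='u1')).reshape(1, 8)  # the eight pure directions: 1..128
COURSES = np.array([[64, 128, 1],
                    [32,   0, 2],
                    [16,   8, 4]], dtype='u1')           # hypothetical code per neighbour offset

cell = np.uint8(COURSES[0, 2] + COURSES[1, 2])  # two equally good drops: codes 1 and 2 add to 3
print(np.in1d(cell, NUMBERS))                   # [False] -> still multi-directional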
Exemple #55
0
    phase_alg = _spimage.sp_phasing_er_alloc(_spimage.SpNoConstraints)

    sup_alg = _spimage.sp_support_array_init(_spimage.sp_support_static_alloc(), 20)

    # create phaser
    phaser = _spimage.sp_phaser_alloc()
    _spimage.sp_phaser_init(phaser, phase_alg, sup_alg, _spimage.SpEngineCUDA)
    _spimage.sp_phaser_set_amplitudes(phaser, amplitudes)
    _spimage.sp_phaser_init_model(phaser, real_space, 0)
    _spimage.sp_phaser_init_support(phaser, support, 0, 0)

    #real_space_s = _spimage.sp_image_shift(real_space)
    fourier_space = _spimage.sp_image_ifftw3(real_space)

    mask = _numpy.bool8(amplitudes.mask)
    # real-space error: model power that leaks outside the support
    ereal_start = _numpy.sqrt((abs(real_space.image[~_numpy.bool8(support.image)])**2).sum()
                              / (abs(real_space.image)**2).sum())
    # Fourier-space error: amplitude mismatch inside the measured mask
    efourier_start = _numpy.sqrt(
        ((abs(fourier_space.image[mask]) - abs(amplitudes.image[mask]))**2).sum()
        / ((abs(amplitudes.image[mask])**2).sum()
           + (abs(fourier_space.image[~mask])**2).sum()))

    _spimage.sp_phaser_iterate(phaser, options.number_of_iterations)

    model_out = _spimage.sp_phaser_model(phaser)
    support_out = _spimage.sp_phaser_support(phaser)
    fmodel_out = _spimage.sp_phaser_fmodel(phaser)
    real_space_end = _spimage.sp_phaser_model_before_projection(phaser)
    fourier_space_end = _spimage.sp_phaser_fmodel(phaser)

    # same error metrics after the iterations
    ereal_end = _numpy.sqrt((abs(real_space_end.image[~_numpy.bool8(support.image)])**2).sum()
                            / (abs(real_space_end.image)**2).sum())
    efourier_end = _numpy.sqrt(
        ((abs(fourier_space_end.image[mask]) - abs(amplitudes.image[mask]))**2).sum()
        / ((abs(amplitudes.image[mask])**2).sum()
           + (abs(fourier_space_end.image[~mask])**2).sum()))

    _spimage.sp_image_write(model_out, "%s/real_space-%s.h5" % (options.output_dir, options.output_affix), 0)
    _spimage.sp_image_write(support_out, "%s/support-%s.h5" % (options.output_dir, options.output_affix), 0)
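The two error metrics above reduce to plain-numpy expressions. A standalone sketch with hypothetical arrays (a complex model, a binary support, and an all-valid amplitude mask) shows the same computation without the _spimage wrappers:

import numpy as np

model = np.random.rand(8, 8) + 1j * np.random.rand(8, 8)  # hypothetical real-space model
support = np.zeros((8, 8), dtype=np.bool8); support[2:6, 2:6] = True
fmodel = np.fft.fftn(model)
amplitudes = np.abs(fmodel) + 0.1                          # hypothetical measured amplitudes
mask = np.ones((8, 8), dtype=np.bool8)                     # every pixel counts as measured

# fraction of model power that leaks outside the support
ereal = np.sqrt((abs(model[~support])**2).sum() / (abs(model)**2).sum())
# normalised amplitude mismatch inside the measured mask
efourier = np.sqrt(((abs(fmodel[mask]) - amplitudes[mask])**2).sum()
                   / ((amplitudes[mask]**2).sum() + (abs(fmodel[~mask])**2).sum()))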
Exemple #56
0
def funarrayscalar():
    import numpy
    return numpy.complex64(2+3j), numpy.float32(1.), numpy.int8(123), numpy.bool8(True)
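A quick check of what funarrayscalar returns; np.bool8 is an alias of np.bool_, so the last value comes back as a numpy boolean scalar:

c, f, i, b = funarrayscalar()
print(type(c).__name__, type(f).__name__, type(i).__name__, type(b).__name__)
# complex64 float32 int8 bool_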
Exemple #57
0
    fnames.sort()
    fnames = [f for f in fnames if f.find('image_') >= 0]
    n = len(fnames)/2

    # Store some values in order to keep track of FPS
    if (showFPS):
        startTime = time()
        FPS = 0
        lastI = 0

    # Get our plot points ready
    timePoints = [[], []]
    plotPoints = [[[], [], []], [[], [], []]]

    # Create the mask and table model
    mask = ~np.bool8(cv2.imread(os.path.join(folder, 'mask.png'), -1))
    tablemodel = util.buildMinMap(os.path.join(folder, 'table'))

    i = 0
    waitAmount = 5
    
    handList = None
    camShifter = None
    colors = None

    # Loop until we are out of images
    while (i < n):
        
        print "Processing Frame ", i

        # Show the FPS if desired
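The mask construction earlier in this example (~np.bool8(cv2.imread(...))) packs several steps into one line; an equivalent spelled-out version (hypothetical path, assumes the file exists):

import os
import cv2
import numpy as np

raw = cv2.imread(os.path.join('data', 'mask.png'), -1)  # hypothetical path; -1 keeps the file's bit depth
nonzero = np.bool8(raw)  # any nonzero pixel becomes True
mask = ~nonzero          # invert: True exactly where the file is zero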
Exemple #58
0
    def get_dense(self):
        """
        Return the dense representation of the connection matrix.
        Don't call this function inside a loop; do the conversion to a dense matrix once, before the loop.
        """
        return np.bool8(self.__S.todense())
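A usage sketch of the advice in that docstring, with a hypothetical scipy sparse matrix standing in for self.__S:

import numpy as np
from scipy import sparse

S = sparse.random(100, 100, density=0.01, format='csr')
dense = np.bool8(S.todense())  # convert once, outside the loop
for row in dense:              # then iterate over the cheap dense rows
    row.any()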
Exemple #59
0
def analyze_tweet_emojis(tweet):
    has_emoji=False
    original_text=tweet.text
    text=emoji_split(original_text)
    emjText=np.array([(emcode, len(re.findall(emcode,text))) for emcode in emj_codes\
                      if (len(re.findall(emcode,text)) > 0)])

    if len(emjText) >0:
        print(tweet.text)
        has_emoji=True
        mostFreqWord, mostFreqWordCount = count_words(text)
        newlineCount= text.count('\n')
        #create arrays to save in SQL. Sorted by frequency
        emojiLabel=emjText[np.argsort(emjText[:, 1])[::-1]][:,0] 
        emojiCount=np.array(emjText[np.argsort(emjText[:, 1])[::-1]][:,1], dtype=int)
        emojiTypes=len(emojiCount)
        emojiCountSum=sum(emojiCount)
        surrounding_text=surroundingText(text,emojiLabel) #sorted by frequency
        prev_word=surrounding_text[:,1]
        next_word=surrounding_text[:,2]
        prev_sentence=surrounding_text[:,3]
        next_sentence=surrounding_text[:,4]

        #build array of (emoji string, length) pairs: runs of consecutive emojis, with 'T'
        #standing in for any non-emoji word (int(len/2): each emoji spans two characters here)
        emj_str = np.array([(emj_str, int(len(emj_str)/2)) for emj_str in sum([''.join([word if word in emj_codes+[' '] \
        else 'T' for word in emoji_split_line(line).split()]).rsplit('T') for line in text.split('\n')],[]) if emj_str != ''])
        
        #analyze emoji strings, cut away length 1 emojis and call new array a:
        if len(emj_str)==0:
            emojistrLabel,emojistrCount,emojistrLen,emojistrTypes,emojistr_prev_word,emojistr_next_word,\
            emojistr_prev_sentence,emojistr_next_sentence,emojiPatternLabel,emojiPatternCount,emojiPatternLen,emojiPatternTypes=\
            [],[],[],0,[],[],[],[],[],[],[],0
        else: #try to find strings, but first filter length 1 and those with length 2(with skin codes)
            d=collections.defaultdict(lambda:0)
            for key in emj_str[:,0]:
                d[key]+=1
            #list() the dict views so np.array builds a proper 3-row array on Python 3 too
            a=np.array([list(d.values()),list(d.keys()),[int(len(key)/2) for key in d.keys()]])
            #remove single emojis and double if skin code is included:
            skin_cut=~np.bool8(((np.int32(a[2,:]))==2) & (np.array([sum([len(re.findall(emcode,val)) for emcode in emj_codes_skin]) for val in a[1,:]])))
            multi_cut=(np.int32(a[2,:])>1) & skin_cut
            a=a[:,multi_cut]

            if len(a[0])==0:
                emojistrLabel,emojistrCount,emojistrLen,emojistrTypes,emojistr_prev_word,emojistr_next_word,\
                emojistr_prev_sentence,emojistr_next_sentence,emojiPatternLabel,emojiPatternCount,emojiPatternLen,\
                emojiPatternTypes=\
                [],[],[],0,[],[],[],[],[],[],[],0

            else:
                #sort by count; a stores everything as strings, so cast the counts before sorting
                sort_index=np.argsort(np.int32(a[0]))
                emojistrLabel=a[1,sort_index][::-1]
                emojistrCount=np.array(a[0,sort_index][::-1],dtype=int)
                emojistrLen=np.array(a[2,sort_index][::-1],dtype=int)
                emojistrTypes=len(emojistrCount)
                #add emjStr CountSum
                surrounding_str_text=surroundingText(text,emojistrLabel) #sorted by frequency
                emojistr_prev_word=surrounding_str_text[:,1]
                emojistr_next_word=surrounding_str_text[:,2]
                emojistr_prev_sentence=surrounding_str_text[:,3]
                emojistr_next_sentence=surrounding_str_text[:,4]
                #find emoji str patterns
                pattern=np.array([(emcode, len(re.findall(emcode,text))) for emcode in emojistrLabel])
                emojiPatternLabel=pattern[np.argsort(pattern[:, 1])[::-1]][:,0] 
                emojiPatternCount=np.array(pattern[np.argsort(pattern[:, 1])[::-1]][:,1],dtype=int)
                emojiPatternLen=np.array([np.int32(len(val)/2) for val in emojiPatternLabel],dtype=int)
                emojiPatternTypes=len(emojiPatternCount)

        #skin tone information
        emjText_skin=np.array([(emcode, len(re.findall(emcode,text))) for emcode in emj_codes_skin\
                      if (len(re.findall(emcode,text)) > 0)])
        if len(emjText_skin)==0:
            emojiSkinLabel, emojiSkinCount,emojiSkinCountSum,emojiSkinTypes= [],[],0,0
        else:
            #create arrays to save in SQL. Sorted by frequency
            emojiSkinLabel=emjText_skin[np.argsort(emjText_skin[:, 1])[::-1]][:,0] 
            emojiSkinCount=np.array(emjText_skin[np.argsort(emjText_skin[:, 1])[::-1]][:,1],dtype=int)
            emojiSkinCountSum=sum(emojiSkinCount)
            emojiSkinTypes=len(emojiSkinCount)

        #tweet data:
        date= datetime.datetime.utcnow()
        created_at = tweet.created_at
        text = tweet.text
        #keep split text?
        retweet_count = tweet.retweet_count
        favorite_count = tweet.favorite_count
        lang=checkNone(tweet.lang)
        geo = checkNoneJSON(tweet.geo)
        time_zone = checkNone(tweet.user.time_zone)
        coordinates = checkNoneJSON(tweet.coordinates)
        name = checkNone(tweet.user.name)
        user_name = checkNone(tweet.user.screen_name)
        
        insertIntoSQL(date,created_at,text,retweet_count,favorite_count,lang,geo,coordinates,time_zone,name,user_name,\
    emojiLabel,emojiCount,emojiCountSum,emojiTypes,prev_word,next_word,prev_sentence,next_sentence,mostFreqWord,\
    mostFreqWordCount,newlineCount,emojiSkinLabel,emojiSkinCount,emojiSkinCountSum,emojiSkinTypes,emojistrLabel,\
    emojistrCount,emojistrLen,emojistrTypes,emojistr_prev_word,emojistr_next_word,emojistr_prev_sentence,\
    emojistr_next_sentence,emojiPatternLabel,emojiPatternCount,emojiPatternLen,emojiPatternTypes)
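The argsort(...)[::-1] frequency-sorting idiom appears throughout the function above; here it is on toy data. A caveat worth noting: because the (label, count) pairs live in a string array, counts compare lexicographically, so the sort is only reliable while counts stay single-digit:

import numpy as np

emjText = np.array([('a', 1), ('b', 3), ('c', 2)])  # numpy stores this as a string array
order = np.argsort(emjText[:, 1])[::-1]             # '3' > '2' > '1' (string comparison)
print(emjText[order][:, 0])                         # ['b' 'c' 'a']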
Exemple #60
0
def cluster_withsubsets(spike_table,clusterdir,reorder_clus=True):
    "TODO: write docstring"
    
    if reorder_clus: print "Cluster reordering not implemented!"
    ST_nc = np.bool8(spike_table.cols.st[:])
    Fet_nc3 = spike_table.cols.fet[:]    
    
    ChSubsets = probe_stuff.SORT_GROUPS
    SpkSubsets = spike_subsets(ST_nc,ChSubsets)    
    print("%i subsets total"%len(SpkSubsets))
    n_spikes,n_ch,_FPC = Fet_nc3.shape
    
    key2subset, key2members, key2spkmean, key2mag = {},{},{},{}
    for i_subset,ChHere,SpkHere in zip(it.count(),ChSubsets,SpkSubsets):        
        print("Sorting channels %s"%ChHere.__repr__())
        FetHere_nc3 = Fet_nc3[np.ix_(SpkHere,ChHere)] # features of spikes in this subset
        CluArr = klustakwik_cluster(FetHere_nc3,'/'.join((clusterdir,"cluster_%i" % i_subset)))
        CluMembersList = [(SpkHere[inds]) for inds in subset_inds(CluArr)] #go back to original indices
        # note: cluster 0 is no longer skipped (an earlier version dropped it with a [1:] slice)
        for (i_clu,Members) in enumerate(CluMembersList):
            if len(Members) > MIN_CLU_SIZE:
                SpkMean = np.array([spike_table[member]["wave"][:,ChHere] for member in Members]).mean(axis=0)
                key = (i_subset,i_clu)
                key2subset[key]=ChHere
                key2members[key] = Members
                key2spkmean[key] = SpkMean
                key2mag[key] = SpkMean.ptp(axis=0).sum()
        
    ImprovingKeys = sorted(key2mag.keys(),key = lambda key: key2mag[key])    
    #problem: most spikes aren't members of any cluster?!
    
    key2oldcount = dict((key,len(members)) for key,members in key2members.items())
    FinalClu = np.zeros(n_spikes,dtype=np.dtype([("subset",int),("clu",int)]))

    # maybe i should have a key2int kind of function?
    fromto2stolen = collections.defaultdict(int)
    for key in ImprovingKeys:
        if DEBUG:
            for oldkey in FinalClu[key2members[key]]: fromto2stolen[tuple(oldkey),key] += 1
        FinalClu[key2members[key]] = key
    if DEBUG:
        # iterate over a copy of the keys so entries can be deleted along the way
        for fromkey,tokey in list(fromto2stolen.keys()):
            if fromkey == (0,0): del fromto2stolen[(fromkey,tokey)]
        
    key2newcount = dict((key,((FinalClu["subset"] == key[0]) & (FinalClu["clu"] == key[1])).sum()) for key in ImprovingKeys)    
    key2good = dict((key,
                     float(key2newcount[key])/key2oldcount[key] > ACCEPTABLE_FRAC and
                     key2oldcount[key] > MIN_CLU_SIZE)
                    for key in ImprovingKeys)

    good_keys = filter(lambda key: key2good[key],reversed(ImprovingKeys))
    
    #with open("counts.txt","w") as fd:
    #    for i_clu,(new,old) in enumerate(zip(NewCount,OrigCount)):
    #        fd.write("%i: %i/%i\n"%(i_clu,new,old) if new/old < .8 else "%i: %i/%i ==> %i\n"%(i_clu,new,old,RelabelArr[i_clu]))

    # problem: relabel cluster indices so they're in the right order
    
    key2rank = dict((key,rank) for (rank,key) in enumerate(reversed(ImprovingKeys)))
    key2left = dict((key,len(members)) for key,members in key2members.items())
    
    if DEBUG: 
        merge_diagnostics(n_ch,key2subset,key2rank,key2left,key2good,key2spkmean,fromto2stolen)
    key2ind = dict((key,ind) for (ind,key) in enumerate(sorted(good_keys,key=lambda key: np.mean(key2subset[key]))))
    FinalCluInd = np.array([key2ind.get(tuple(key),0) for key in FinalClu],dtype=np.int32)
    return FinalCluInd
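The FinalClu bookkeeping above relies on a structured dtype so that every spike carries a (subset, clu) key; a toy sketch of the same trick:

import numpy as np

FinalClu = np.zeros(5, dtype=np.dtype([("subset", int), ("clu", int)]))
FinalClu[[1, 3]] = (2, 7)  # tag spikes 1 and 3 with the key (subset=2, clu=7)
hits = (FinalClu["subset"] == 2) & (FinalClu["clu"] == 7)
print(hits)                # [False  True False  True False]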