Example #1
0
def _akmeans_iterate(data, centroids, datax2_clusterx_old, max_iters,
                     flann_params, ave_unchanged_thresh, ave_unchanged_iterwin):
    """ Helper function which continues the iterations of akmeans.

    Args:
        data: (num_data x dim) array of datapoints
        centroids: (num_clusters x dim) array of initial cluster centers
        datax2_clusterx_old: previous datapoint-to-cluster assignment
        max_iters: maximum number of iterations to run
        flann_params: parameters forwarded to the approximate NN search
        ave_unchanged_thresh: convergence threshold on the windowed average
            number of changed assignments
        ave_unchanged_iterwin: window size (in iterations) for that average

    Returns:
        tuple: (datax2_clusterx, centroids)
    """
    num_data = data.shape[0]
    num_clusters = centroids.shape[0]
    # Keep track of how many points have changed in each iteration
    xx2_unchanged = np.zeros(ave_unchanged_iterwin, dtype=centroids.dtype) + len(data)
    print('[akmeans] Running akmeans: data.shape=%r ; num_clusters=%r' %
          (data.shape, num_clusters))
    print('[akmeans] * max_iters = %r ' % max_iters)
    # BUGFIX: the argument tuple was previously (thresh, iterwin) — swapped
    # relative to the format string, so the log reported the wrong values.
    print('[akmeans] * ave_unchanged_iterwin=%r ; ave_unchanged_thresh=%r' %
          (ave_unchanged_iterwin, ave_unchanged_thresh))
    #print('[akmeans] Printing akmeans info in format: time (iterx, ave(#changed), #unchanged)')
    xx = 0
    # Robustness: ensure a defined assignment is returned even if max_iters == 0
    datax2_clusterx = datax2_clusterx_old
    converged = False
    for xx in range(0, max_iters):
        tt = ut.tic()
        ut.print_('...tic')
        # 1) Find each datapoints nearest cluster center
        (datax2_clusterx, _dist) = nn.ann_flann_once(centroids, data, 1, flann_params)
        ellapsed = ut.toc(tt)
        ut.print_('...toc(%.2fs)' % ellapsed)
        # 2) Compute new cluster centers
        centroids = _compute_cluster_centers(num_data, num_clusters, data, centroids, datax2_clusterx)
        # 3) Check for convergence (no change of cluster index)
        #ut.print_('+')
        num_changed = (datax2_clusterx_old != datax2_clusterx).sum()
        xx2_unchanged[xx % ave_unchanged_iterwin] = num_changed
        ave_unchanged = xx2_unchanged.mean()
        #ut.print_('  (%d, %.2f, %d)\n' % (xx, ave_unchanged, num_changed))
        if ave_unchanged < ave_unchanged_thresh:
            converged = True
            break
        else:  # Iterate
            datax2_clusterx_old = datax2_clusterx
            #if xx % 5 == 0:
            #    sys.stdout.flush()
    # BUGFIX: the old check `if xx == max_iters:` could never be true
    # (range(max_iters) stops at max_iters - 1), so the convergence and
    # max-iteration messages were effectively swapped; use an explicit flag.
    if converged:
        print('[akmeans]  * AKMEANS: converged in %d/%d iters' % (xx + 1, max_iters))
    else:
        print('[akmeans]  * AKMEANS: reached the maximum iterations after in %d/%d iters' % (xx + 1, max_iters))
    sys.stdout.flush()
    return (datax2_clusterx, centroids)
Example #2
0
def _compute_cluster_centers(num_data, num_clusters, data, centroids, datax2_clusterx):
    """ Computes the cluster centers and stores output in the outvar: centroids.
    This outvar is also returned """
    # Order datapoint indexes so that members of each cluster are contiguous
    sortx = datax2_clusterx.argsort()
    sorted_clusterx = datax2_clusterx[sortx]
    # Record the half-open [left, right) span each cluster occupies in the
    # sorted order; clusters with no members keep a None placeholder.
    clusterx2_dataLRx = ut.alloc_nones(num_clusters)
    left = 0
    for right in range(len(sortx) + 1):
        if right == num_data or sorted_clusterx[left] != sorted_clusterx[right]:
            clusterx2_dataLRx[sorted_clusterx[left]] = (left, right)
            left = right
    ut.print_('+')
    # Each center becomes the mean of the datapoints assigned to that cluster
    for clusterx, span in enumerate(clusterx2_dataLRx):
        if span is None:
            continue  # ON EMPTY CLUSTER: leave the previous centroid in place
        (left, right) = span
        centroids[clusterx] = np.mean(data[sortx[left:right]], axis=0)
        #centroids[clusterx] = np.array(np.round(centroids[clusterx]), dtype=np.uint8)
    return centroids
Example #3
0
    def measure_feat_pairs(allres, orgtype='top_true'):
        """ Measures (entropy, scale, score) statistics of matching feature
        pairs for every result of the given organized-result type.

        Args:
            allres: all-results container; `orgtype` must name one of its
                organized-result attributes
            orgtype (str): which organized results to measure (e.g. 'top_true')

        Returns:
            tuple: (entropy_pairs, scale_pairs, scores) where the pair arrays
                are (N x 2) and scores is a flat array of feature scores
        """
        print('Measure ' + orgtype + ' pairs')
        orgres = allres.__dict__[orgtype]
        entropy_list = []
        scale_list = []
        score_list = []
        lbl = 'Measuring ' + orgtype + ' pair '
        fmt_str = utool.make_progress_fmt_str(len(orgres), lbl)
        rank_skips = []
        gt_skips = []
        for ix, (qrid, rid, score, rank) in enumerate(orgres.iter()):
            utool.print_(fmt_str % (ix + 1, ))
            # Skip low ranks
            if rank > 5:
                rank_skips.append(qrid)
                continue
            other_rids = ibs.get_other_indexed_rids(qrid)
            # Skip no groundtruth
            if len(other_rids) == 0:
                gt_skips.append(qrid)
                continue
            qres = qrid2_qres[qrid]
            # Get matching feature indexes
            fm = qres.cx2_fm[rid]
            # Get their scores
            fs = qres.cx2_fs[rid]
            # Get matching descriptors
            printDBG('\nfm.shape=%r' % (fm.shape, ))
            desc1 = cx2_desc[qrid][fm[:, 0]]
            desc2 = cx2_desc[rid][fm[:, 1]]
            # Get matching keypoints
            kpts1 = cx2_kpts[qrid][fm[:, 0]]
            kpts2 = cx2_kpts[rid][fm[:, 1]]
            # Get their scale
            scale1_m = ktool.get_scales(kpts1)
            scale2_m = ktool.get_scales(kpts2)
            # Get their entropy
            entropy1 = descriptor_entropy(desc1, bw_factor=1)
            entropy2 = descriptor_entropy(desc2, bw_factor=1)
            # Append to results
            # BUGFIX: on Python 3, zip()/map() return lazy iterators, so
            # np.array(zip(...)) built a 0-d object array; materialize first.
            entropy_tup = np.array(list(zip(entropy1, entropy2)))
            scale_tup = np.array(list(zip(scale1_m, scale2_m)))
            entropy_tup = entropy_tup.reshape(len(entropy_tup), 2)
            scale_tup = scale_tup.reshape(len(scale_tup), 2)
            entropy_list.append(entropy_tup)
            scale_list.append(scale_tup)
            score_list.append(fs)
        print('Skipped %d total.' % (len(rank_skips) + len(gt_skips), ))
        print('Skipped %d for rank > 5, %d for no gt' % (
            len(rank_skips),
            len(gt_skips),
        ))
        # BUGFIX: map() must be materialized before np.unique on Python 3
        print(np.unique(list(map(len, entropy_list))))

        def evstack(tup):
            # Vertically stack pair rows; empty input yields an empty (0, 2)
            return np.vstack(tup) if len(tup) > 0 else np.empty((0, 2))

        def ehstack(tup):
            # Horizontally stack flat score arrays
            return np.hstack(tup) if len(tup) > 0 else np.empty((0, 2))

        entropy_pairs = evstack(entropy_list)
        scale_pairs = evstack(scale_list)
        scores = ehstack(score_list)
        print('\n * Measured %d pairs' % len(entropy_pairs))
        return entropy_pairs, scale_pairs, scores
Example #4
0
    def measure_feat_pairs(allres, orgtype='top_true'):
        """ Measures (entropy, scale, score) statistics of matching feature
        pairs for every result of the given organized-result type.

        Args:
            allres: all-results container; `orgtype` must name one of its
                organized-result attributes
            orgtype (str): which organized results to measure (e.g. 'top_true')

        Returns:
            tuple: (entropy_pairs, scale_pairs, scores) where the pair arrays
                are (N x 2) and scores is a flat array of feature scores
        """
        print('Measure ' + orgtype + ' pairs')
        orgres = allres.__dict__[orgtype]
        entropy_list = []
        scale_list = []
        score_list = []
        lbl = 'Measuring ' + orgtype + ' pair '
        fmt_str = utool.make_progress_fmt_str(len(orgres), lbl)
        rank_skips = []
        gt_skips = []
        for ix, (qrid, rid, score, rank) in enumerate(orgres.iter()):
            utool.print_(fmt_str % (ix + 1,))
            # Skip low ranks
            if rank > 5:
                rank_skips.append(qrid)
                continue
            other_rids = ibs.get_other_indexed_rids(qrid)
            # Skip no groundtruth
            if len(other_rids) == 0:
                gt_skips.append(qrid)
                continue
            qres = qrid2_qres[qrid]
            # Get matching feature indexes
            fm = qres.cx2_fm[rid]
            # Get their scores
            fs = qres.cx2_fs[rid]
            # Get matching descriptors
            printDBG('\nfm.shape=%r' % (fm.shape,))
            desc1 = cx2_desc[qrid][fm[:, 0]]
            desc2 = cx2_desc[rid][fm[:, 1]]
            # Get matching keypoints
            kpts1 = cx2_kpts[qrid][fm[:, 0]]
            kpts2 = cx2_kpts[rid][fm[:, 1]]
            # Get their scale
            scale1_m = ktool.get_scales(kpts1)
            scale2_m = ktool.get_scales(kpts2)
            # Get their entropy
            entropy1 = descriptor_entropy(desc1, bw_factor=1)
            entropy2 = descriptor_entropy(desc2, bw_factor=1)
            # Append to results
            # BUGFIX: on Python 3, zip()/map() return lazy iterators, so
            # np.array(zip(...)) built a 0-d object array; materialize first.
            entropy_tup = np.array(list(zip(entropy1, entropy2)))
            scale_tup   = np.array(list(zip(scale1_m, scale2_m)))
            entropy_tup = entropy_tup.reshape(len(entropy_tup), 2)
            scale_tup   = scale_tup.reshape(len(scale_tup), 2)
            entropy_list.append(entropy_tup)
            scale_list.append(scale_tup)
            score_list.append(fs)
        print('Skipped %d total.' % (len(rank_skips) + len(gt_skips),))
        print('Skipped %d for rank > 5, %d for no gt' % (len(rank_skips), len(gt_skips),))
        # BUGFIX: map() must be materialized before np.unique on Python 3
        print(np.unique(list(map(len, entropy_list))))

        def evstack(tup):
            # Vertically stack pair rows; empty input yields an empty (0, 2)
            return np.vstack(tup) if len(tup) > 0 else np.empty((0, 2))

        def ehstack(tup):
            # Horizontally stack flat score arrays
            return np.hstack(tup) if len(tup) > 0 else np.empty((0, 2))

        entropy_pairs = evstack(entropy_list)
        scale_pairs   = evstack(scale_list)
        scores        = ehstack(score_list)
        print('\n * Measured %d pairs' % len(entropy_pairs))
        return entropy_pairs, scale_pairs, scores