Example #1
    def _assign_posterior(self):
        """assign posterior to the right prior based on
           Hungarian algorithm

        Returns
        -------
        HTFA
            Returns the instance itself.
        """

        prior_centers = self.get_centers(self.global_prior_)
        posterior_centers = self.get_centers(self.global_posterior_)
        posterior_widths = self.get_widths(self.global_posterior_)
        posterior_centers_mean_cov =\
            self.get_centers_mean_cov(self.global_posterior_)
        posterior_widths_mean_var =\
            self.get_widths_mean_var(self.global_posterior_)
        # linear assignment on centers
        cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
        _, col_ind = linear_sum_assignment(cost)
        # reorder centers/widths based on cost assignment
        self.set_centers(self.global_posterior_, posterior_centers[col_ind])
        self.set_widths(self.global_posterior_, posterior_widths[col_ind])
        # reorder cov/var based on cost assignment
        self.set_centers_mean_cov(
            self.global_posterior_,
            posterior_centers_mean_cov[col_ind])
        self.set_widths_mean_var(
            self.global_posterior_,
            posterior_widths_mean_var[col_ind])
        return self
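For context, here is a minimal standalone sketch of the matching step used above (hypothetical center coordinates, assuming only NumPy and SciPy): the Hungarian assignment on the center-distance matrix yields a col_ind that reorders the posterior factors so that factor i lines up with prior factor i.

import numpy as np
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment

prior_centers = np.array([[0.0, 0.0], [5.0, 5.0], [10.0, 0.0]])
posterior_centers = np.array([[10.1, 0.2], [0.3, -0.1], [5.2, 4.8]])

cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
_, col_ind = linear_sum_assignment(cost)    # col_ind == [1, 2, 0]
reordered = posterior_centers[col_ind]      # row i now matches prior_centers[i]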
Example #2
def test_linear_sum_assignment_input_validation():
    assert_raises(ValueError, linear_sum_assignment, [1, 2, 3])

    C = [[1, 2, 3], [4, 5, 6]]
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.asarray(C)))
    assert_array_equal(linear_sum_assignment(C),
                       linear_sum_assignment(np.matrix(C)))
Example #3
def track_unavoided_crossings(overlaps: Tensor3D, nHOMO: int) -> Tuple:
    """
    Track the index of the states if there is a crossing using the
    algorithm  described at:
    J. Chem. Phys. 137, 014512 (2012); doi: 10.1063/1.4732536.
    """
    # 3D array containing the costs
    # Notice that the cost is computed on half of the overlap matrices
    # corresponding to Sji_t, the other half corresponds to Sij_t
    nOverlaps, nOrbitals, _ = overlaps.shape

    # Indexes taking into account the crossing
    # There are 2 Overlap matrices at each time t
    indexes = np.empty((nOverlaps + 1, nOrbitals), dtype=int)
    indexes[0] = np.arange(nOrbitals, dtype=int)

    # Track the crossing using the overlap matrices

    for k in range(nOverlaps):
        # Cost matrix to track the crossings
        logger.info("Tracking crossings at time: {}".format(k))
        cost_mtx_homos = np.negative(overlaps[k, :nHOMO, :nHOMO] ** 2)
        cost_mtx_lumos = np.negative(overlaps[k, nHOMO:, nHOMO:] ** 2)

        # Compute the swap at time t + dt using two sets of orbitals:
        # HOMOs and LUMOs
        swaps_homos = linear_sum_assignment(cost_mtx_homos)[1]
        swaps_lumos = linear_sum_assignment(cost_mtx_lumos)[1]
        total_swaps = np.concatenate((swaps_homos, swaps_lumos + nHOMO))
        indexes[k + 1] = total_swaps

        # update the overlaps at times > t with the previous swaps
        if k != (nOverlaps - 1):  # skip the last element
            k1 = k + 1
            # Update the matrix Sji at time t
            overlaps[k] = swap_columns(overlaps[k], total_swaps)
            # Update all the matrices Sji at time > t
            overlaps[k1:] = swap_forward(overlaps[k1:], total_swaps)
    # Accumulate the swaps
    acc = indexes[0]
    arr = np.empty(indexes.shape, dtype=int)
    arr[0] = acc

    # Fold accumulating the crossings
    for i in range(nOverlaps):
        acc = acc[indexes[i + 1]]
        arr[i + 1] = acc

    return overlaps, arr
Example #4
def skip_connections_distance(list_a, list_b):
    """The distance between the skip-connections of two neural networks."""
    distance_matrix = np.zeros((len(list_a), len(list_b)))
    for i, a in enumerate(list_a):
        for j, b in enumerate(list_b):
            distance_matrix[i][j] = skip_connection_distance(a, b)
    return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(len(list_a) - len(list_b))
Example #5
def munkres(reads: List[str], scores: np.ndarray) -> Tuple[List[str], float, int]:
    """
    applying the (negative) scores matrix to the munkres/linear_sum_assignment algorithm
    we detect the cycles and assemble them afterwards
    :param reads: only a list of the sequences! No ids!
    :param scores: actual weights have positive values (the values are gonna be inverted) and negative BIG_M_WEIGHT
    :return:
    """
    # scores[scores < MINIMAL_OVERLAP_SCORE] = 0
    # scores_copy = scores.copy()
    read_lengths = [len(x) for x in reads]
    summed_read_length = sum(read_lengths)
    manipulated_scores = manipulate_scores2ap(scores, read_lengths)
    start_time_ap = time.time()
    row_ind, col_ind = linear_sum_assignment(manipulated_scores)
    # row_ind, col_ind = linear_sum_assignment(manipulated_scores)
    ap_value = manipulated_scores[row_ind, col_ind].sum()
    end_time_ap = time.time()

    cycles = detect_cycles(col_ind)
    # ap_value += summed_read_length * (len(cycles) - 1)


    contig_list = assemble_cycles(reads, cycles, col_ind, scores)

    print(contig_list)
    print(end_time_ap - start_time_ap)
    return contig_list, end_time_ap - start_time_ap, ap_value
Example #6
def train(times, X, y, c, lea, ep1, ep2, lamda1, lamda2):
    t0 = time.time()
#     times = 1
    # for lea in [0.0001, 0.00001, 0.000001]:
#    lea = .00001
    print('learn={}, ep1={}, ep2={}, la1={}, la2={}'.format(lea, ep1, ep2, lamda1, lamda2))
    ari, ri, accu = [], [], []
    for ddd in range(times):
        y_pred_old = sof(X, y, k=len(np.unique(y)), c=1,
                         lamda1=lamda1, lamda2=lamda2, mu=2,
                         gamma=lea, ep1=ep1, ep2=ep2)
        # match predicted cluster labels to true labels via the confusion matrix
        row, col = linear_sum_assignment(-confusion_matrix(y, y_pred_old))
        y_pred = np.copy(y_pred_old)
        for i, q in enumerate(col):
            y_pred[y_pred_old == q] = i
        ari.append(adjusted_rand_score(y, y_pred))
        ri.append(rand_score(y, y_pred))
        accu.append(accuracy_score(y, y_pred))
        print('\taccu={}, RI={}'.format(accuracy_score(y, y_pred), rand_score(y, y_pred)))
    # print 'ARI: ', adjusted_rand_score(y,y_pred)
    # print 'RI: ', rand_score(y, y_pred)
    # print 'Accu: ', accuracy_score(y,y_pred)

    print(confusion_matrix(y, y_pred))
    # print y_pred
    print('time, ', time.time() - t0)
    print('title\tmax\tmean\tstd')
    print('ARI, ', np.array(ari).max(), np.array(ari).mean(), np.array(ari).std())
    print('RI, ', np.array(ri).max(), np.array(ri).mean(), np.array(ri).std())
    print('Accu, ', np.array(accu).max(), np.array(accu).mean(), np.array(accu).std())
    print('')
Example #7
def min_cost_perfect_bipartite_matching(G):
	n = len(G)
	try:
		from scipy.optimize import linear_sum_assignment
		rows, cols = linear_sum_assignment(G)
		assert (rows == list(range(n))).all()
		return list(cols), _matching_cost(G, cols)
	except ImportError:
		pass

	try:
		from munkres import Munkres
		cols = [None] * n
		for row,col in Munkres().compute(G):
			cols[row] = col
		return cols, _matching_cost(G, cols)
	except ImportError:
		pass

	if n > 6:
		raise Exception("Install Python module 'munkres' or 'scipy >= 0.17.0'")

	# Otherwise just brute-force
	permutations = itertools.permutations(range(n))
	best = list(next(permutations))
	best_cost = _matching_cost(G, best)
	for p in permutations:
		cost = _matching_cost(G, p)
		if cost < best_cost:
			best, best_cost = list(p), cost
	return best, best_cost
Example #8
 def _P_hat(self):
     self.PDW[self.PDW == 0] = self.epsilon
     row_ind, col_ind = linear_sum_assignment(1/np.abs(self.PDW))
     P = np.zeros((len(row_ind),len(col_ind)))
     for i,j in zip(row_ind,col_ind):
         P[i,j] = 1
     return P
Example #9
def compareDomStrs(cluster1,cluster2,match,mismatch,gap,scale):
    #first index each cluster
    clus1Strs = dict(enumerate(cluster1))
    clus2Strs = dict(enumerate(cluster2))

    clus1Size = len(cluster1)
    clus2Size = len(cluster2)
    scoreMatrix,word2num,num2word = aligntools.buildScoringDictScaled(chain(*(cluster1+cluster2)),match,mismatch,scale)

    alignScores = np.ndarray((clus1Size,clus2Size))

    # score each pairwise alignment of domain strings to populate an alignment scores matrix
    for i,domStr1 in enumerate(cluster1):
        for j,domStr2 in enumerate(cluster2):
            num1 = [word2num[x] for x in domStr1]
            num2 = [word2num[x] for x in domStr2]
            alignScore,a1,a2 = align.align(num1,num2,gap,gap,scoreMatrix,local=True)
            alignScores[i,j] = alignScore

    #prepare scoring matrix for hungarian algorithm: negate scores and pad matrix
    if clus1Size < clus2Size:
        costMatrix = -np.vstack((copy(alignScores),np.zeros((clus2Size-clus1Size,clus2Size))))
    elif clus2Size < clus1Size:
        costMatrix = -np.hstack((copy(alignScores),np.zeros((clus1Size,clus1Size-clus2Size))))
    else:
        costMatrix = -copy(alignScores)

    # apply hungarian algorithm for matching
    pairings = [(x,y) for x,y in zip(*linear_sum_assignment(costMatrix)) if (x<clus1Size) and (y<clus2Size)]
    clusterScore = sum(alignScores[pairing] for pairing in pairings)
    pairStrings = [(alignScores[(x,y)],clus1Strs[x],clus2Strs[y]) for x,y in pairings]
    pairStrings.sort(reverse=True)

    return clusterScore,pairStrings
Example #10
def finite_pt_dist(diagram1, diagram2, q):
	"""
	This function computes the smallest distance between the
	finite points of two persistence diagrams.  This requires
	matching each point in the first diagram to a point in the 
	second diagram or a point on the diagonal such that the distance
	between the pairs of points is minimized.

	The solution to this problem involves the Munkres (or Hungarian)
	algorithm, which has already been implemented in Python.

	This function returns the distance between the two diagrams.
	"""

	n = len(diagram1.points)
	m = len(diagram2.points)

	# if there are no points, the distance is zero
	if n + m == 0:
		return 0
	else:
		# this code can probably be optimized
		dist_mat = make_dist_mat(diagram1, diagram2, q)
		# now we can compute the total distance
		row_ind, col_ind = linear_sum_assignment(dist_mat)
		total_dist = 0
		for i in range(len(row_ind)):
			row = row_ind[i]
			col = col_ind[i]
			value = dist_mat[row][col]
			total_dist += value
		return total_dist
Example #11
def find_matches(D_s, print_assignment=False):
    # todo: document

    matches = []
    costs = []
    t_start = time.time()
    for ii, D in enumerate(D_s):
        # we make a copy not to set changes in the original
        DD = D.copy()
        if np.sum(np.where(np.isnan(DD))) > 0:
            raise Exception('Distance Matrix contains NaN, not allowed!')

        # we do the hungarian
        indexes = linear_sum_assignment(DD)
        indexes2 = [(ind1, ind2) for ind1, ind2 in zip(indexes[0], indexes[1])]
        matches.append(indexes)
        DD = D.copy()
        total = []
        # we want to extract this information from the hungarian algo
        for row, column in indexes2:
            value = DD[row, column]
            if print_assignment:
                print(('(%d, %d) -> %f' % (row, column, value)))
            total.append(value)
        print(('FOV: %d, shape: %d,%d total cost: %f' %
               (ii, DD.shape[0], DD.shape[1], np.sum(total))))
        print((time.time() - t_start))
        costs.append(total)
        # send back the results in the format we want
    return matches, costs
Example #12
def find_matches(D_s, print_assignment=False):

    matches=[]
    costs=[]
    t_start=time.time()
    for ii,D in enumerate(D_s):
        DD=D.copy()    
        if np.sum(np.where(np.isnan(DD)))>0:
            raise Exception('Distance Matrix contains NaN, not allowed!')


    #    indexes = m.compute(DD)
#        indexes = linear_assignment(DD)
        indexes = linear_sum_assignment(DD)
        indexes2=[(ind1,ind2) for ind1,ind2 in zip(indexes[0],indexes[1])]
        matches.append(indexes)
        DD=D.copy()   
        total = []
        for row, column in indexes2:
            value = DD[row,column]
            if print_assignment:
                print(('(%d, %d) -> %f' % (row, column, value)))
            total.append(value)      
        print(('FOV: %d, shape: %d,%d total cost: %f' % (ii, DD.shape[0],DD.shape[1], np.sum(total))))
        print((time.time()-t_start))
        costs.append(total)      

    return matches,costs
Example #13
 def MatchFrame(self, frame, locs):
     coordsArrayTest = GetLocsCoordsArray(locs)
     # CoordsTrue array    
     actIndices = frame._fseries._actFrameMap[frame.nr]
     coordsTrue = frame._fseries._actCoordsArray[actIndices, :]
     
     # Dist and cost matrices
     distsXY = spatial.distance.cdist(coordsArrayTest[:, :2], coordsTrue[:, :2])
     distsZ = spatial.distance.cdist(coordsArrayTest[:, 2:], coordsTrue[:, 2:])
     distsXY = np.where(distsXY > self._groundTruth.tolXY, 1.0, distsXY)
     distsZ = np.where(distsZ > self._groundTruth.tolZ, 1.0, distsZ)
     costMatrix = np.sqrt(distsXY ** 2.0 + distsZ ** 2.0)
     
     # Matching
     idsTest, _idsTrue = optimize.linear_sum_assignment(costMatrix)
     cost = costMatrix[idsTest, _idsTrue]
   
     # Filter out unsuccessful matches
     resIndices = np.argwhere(cost < 1.0).ravel()
     if len(resIndices) <= 0:
         return np.array([]), np.array([]), np.array([]), np.array([])
     else:
         resIdsTest = idsTest[resIndices] 
         resIdsTrue = actIndices[_idsTrue[resIndices]]
         resDistsXY, resDistsZ = distsXY[resIdsTest, _idsTrue[resIndices]], distsZ[resIdsTest, _idsTrue[resIndices]]
         return resIdsTest, resIdsTrue, resDistsXY, resDistsZ
Example #14
File: utils.py  Project: anapophenic/knb
def km(A, B):
    k = np.shape(A)[1]
    dist = np.zeros((k,k))
    for i in range(k):
        for j in range(k):
            dist[i,j] = sign_dist(A[:,i], B[:,j])

    row_ind, col_ind = linear_sum_assignment(dist)
    errs = dist[row_ind, col_ind].sum()

    return col_ind, errs
Example #15
def test_linear_sum_assignment():
    for cost_matrix, expected_cost in [
        # Square
        ([[400, 150, 400],
          [400, 450, 600],
          [300, 225, 300]],
         [150, 400, 300]
         ),

        # Rectangular variant
        ([[400, 150, 400, 1],
          [400, 450, 600, 2],
          [300, 225, 300, 3]],
         [150, 2, 300]),

        # Square
        ([[10, 10, 8],
          [9, 8, 1],
          [9, 7, 4]],
         [10, 1, 7]),

        # Rectangular variant
        ([[10, 10, 8, 11],
          [9, 8, 1, 1],
          [9, 7, 4, 10]],
         [10, 1, 4]),

        # n == 2, m == 0 matrix
        ([[], []],
         []),
    ]:
        cost_matrix = np.array(cost_matrix)
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(expected_cost, cost_matrix[row_ind, col_ind])

        cost_matrix = cost_matrix.T
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        assert_array_equal(row_ind, np.sort(row_ind))
        assert_array_equal(np.sort(expected_cost),
                           np.sort(cost_matrix[row_ind, col_ind]))
Example #16
def calculateClusterDist(cluster1,cluster2,hitDictID,linearDist = True):
    '''
    Given two clusters annotated with the same hitDict (typically an all-vs-all comparison), estimate the distance
    between the two clusters by calculating the normalized distance between each pair of proteins in the clusters.
    This information is used to create a maximal matching, and each matched pair's distance is then scaled by the
    fraction of the cluster that the pair covers. Returns a value between zero and one and the maximal matching
    of the clusters.
    '''

    clus1Size = len(cluster1)
    clus2Size = len(cluster2)

    scoreMatrix =  np.ndarray((clus1Size,clus2Size))

    clus1ProtSize = float(sum(protein.size() for protein in cluster1))
    clus2ProtSize = float(sum(protein.size() for protein in cluster2))

    # populate the score matrix if there are any proteins that are "close together"
    for i,proteinI in enumerate(cluster1):
        for j,proteinJ in enumerate(cluster2):
            scoreMatrix[i,j] = proteinI.calculate_distance(proteinJ,hitDictID,linearDist=linearDist)

    # get the pairings
    pairings = [(x,y) for x,y in zip(*linear_sum_assignment(scoreMatrix)) if (x<clus1Size) and (y<clus2Size)]
    pairScores = [(scoreMatrix[(x,y)],cluster1[x],cluster2[y]) for x,y in pairings]
    pairs = [(x,y,z) for x,y,z in pairScores if x < 1.]
    pairs.sort()
    # scale by the one with less coverage
    clus1cvg = sum(entry[1].size()/clus1ProtSize for entry in pairs)
    clus2cvg = sum(entry[2].size()/clus2ProtSize for entry in pairs)

    if clus1cvg < clus2cvg:
        lessCvgIdx = 1
        lessCvgSize = clus1ProtSize
    else:
        lessCvgIdx = 2
        lessCvgSize = clus2ProtSize
    # scale distances by larger cluster
    # print clus1cvg,clus2cvg
    # print lessCvgIdx,lessCvgSize
    distance = 0
    percentNonHit = 1
    for entry in pairs:
        distance += entry[0]*(entry[lessCvgIdx].size()/lessCvgSize)
        percentNonHit -= (entry[lessCvgIdx].size()/lessCvgSize)
        # print entry[1].hitName,entry[2].hitName,distance, percentNonHit
    percentNonHit = max(0,percentNonHit)
    # print percentNonHit
    distance += percentNonHit

    return distance,pairs
Example #17
def hungarian(A, B):
    """
    Hungarian reordering.
    Assume A and B are coordinates for atoms of SAME type only
    """

    # should be Kabsch here i think
    distances = cdist(A, B, 'euclidean')

    # Perform Hungarian analysis on distance matrix between atoms of 1st
    # structure and trial structure
    indices_a, indices_b = linear_sum_assignment(distances)

    return indices_b
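A brief usage sketch (hypothetical coordinates; assumes numpy, scipy.spatial.distance.cdist and linear_sum_assignment are imported as in the snippets above):

import numpy as np
A = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
B = np.array([[1.01, 0.0, 0.0], [0.02, 0.0, 0.0]])
view = hungarian(A, B)   # array([1, 0]); B[view] is B reordered to line up with A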
Example #18
def main():
    """Read input, calculate assignments."""
    # Determine source of input
    args = docopt(__doc__)
    if args['--filename'] is None:
        teams, assignments, cost = read_input_from_command_line()
    else:
        teams, assignments, cost = read_input_from_file(args['--filename'])

    # Calculate best assignment using Hungarian algorithm and output results
    row_ind, col_ind = linear_sum_assignment(cost)
    for team_no, assignment_no in enumerate(col_ind):
        print('Team {team} gets assignment {assignment}.'.format(team=teams[team_no],
            assignment=assignments[assignment_no]))
Example #19
def main():
    
    subs = [[1, 1], [2, 2], [2, 6], [4, 2], [4, 3], 
            [4, 7], [6, 1], [7, 1], [7, 4], [9, 1], 
            [9, 5], [9, 7], [10, 2], [10, 3], [10, 6]]    
    ships = [[2, 1], [3, 4], [4, 9], [6, 1], [6, 2], 
             [6, 3], [6, 5], [6, 8], [7, 5], [7, 6], [7, 8], 
             [8, 7], [10, 1], [10, 4], [10, 1]]

    distance = [get_distance(sub,ships) for sub in subs]
    cost = np.array(distance)
    row_ind, col_ind = linear_sum_assignment(cost)
    print(row_ind)
    print(col_ind)
    print(cost[row_ind, col_ind].sum())
Example #20
    def make_CostMatrix(C, m, n):
        ##assert(C.shape == (m + n, m + n))
        lsa_row_ind, lsa_col_ind = linear_sum_assignment(C)

        # Fixup dummy assignments:
        # each substitution i<->j should have corresponding dummy assignment m+j<->n+i
        # NOTE: fast reduce of Cv relies on it
        ##assert len(lsa_row_ind) == len(lsa_col_ind)
        subst_ind = list(k for k, i, j in zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
                         if i < m and j < n)
        dummy_ind = list(k for k, i, j in zip(range(len(lsa_row_ind)), lsa_row_ind, lsa_col_ind)
                         if i >= m and j >= n)
        ##assert len(subst_ind) == len(dummy_ind)
        lsa_row_ind[dummy_ind] = lsa_col_ind[subst_ind] + m
        lsa_col_ind[dummy_ind] = lsa_row_ind[subst_ind] + n

        return CostMatrix(C, lsa_row_ind, lsa_col_ind, C[lsa_row_ind, lsa_col_ind].sum())
Example #21
File: WGRAP.py  Project: akobre01/lp-ir
 def _solve_assignment_and_update(self, rows, rows_to_revs, max_val=10.0, show=False):
     """
     Implements 1 iteration of stagewise deepening (no stochastic refinement)
     """
     if show:
         print(rows)
     cost_matrix = self.munkres.make_cost_matrix(rows, lambda v: max_val - v)
     # indexes = self.munkres.compute(cost_matrix)
     row_inds, col_inds = linear_sum_assignment(np.array(cost_matrix))
     if show:
         print(cost_matrix)
     # for row, col in indexes:
     for row, col in zip(row_inds, col_inds):
         self.curr_assignment[rows_to_revs[row], col] = 1
         if show:
             value = rows[row][col]
             print('(%d, %d) -> %f' % (row, col, value))
Example #22
def allclose_perm(aarr, barr, **kwargs):
    """allclose but for any permutation of aarr and barr

    Parameters
    ----------
    aarr, barr : array_like
        Identically sized arrays of floats. This will determine if any
        permutation of their first axis will make `allclose` true.
    kwargs
        Additional arguments to pass to `isclose`.
    """
    aarr, barr = map(np.asarray, [aarr, barr])
    check(
        aarr.shape == barr.shape,
        'can only compare identically sized arrays')
    isclose = np.isclose(aarr[:, None], barr, **kwargs)
    isclose.shape = isclose.shape[:2] + (-1,)
    return isclose[optimize.linear_sum_assignment(~isclose.all(2))].all()
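A small usage sketch (made-up arrays; assumes numpy, scipy.optimize and the snippet's check helper are available): rows that are merely permuted compare as close, shifted rows do not.

import numpy as np
a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[3.0, 4.0], [1.0, 2.0]])   # same rows, different order
allclose_perm(a, b)        # True
allclose_perm(a, b + 1.0)  # False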
Example #23
def aliniere(colorare1, colorare2, k):
    cost = [[0 for j in range(k)] for i in range(k)]
    for c1 in range(k):
        for c2 in range(k):
            indici1 = set([i for i, x in enumerate(colorare1) if x == c1])
            indici2 = set([i for i, x in enumerate(colorare2) if x == c2])
            intersectie = indici1 & indici2
            cost[c1][c2] = len(intersectie)  # len(indici1) + len(indici2) - len(intersectie)  # or len(colorare1) - len ???
    (row_ind,col_ind) = linear_sum_assignment(cost)
    colorare_aliniata_2 = [-1 for i in range(len(colorare1))]
    for index in range(len(row_ind)):
        color_to_replace = col_ind[index]
        indices_to_change = [i for i, x in enumerate(colorare2) if x == color_to_replace]
        for index_tc in indices_to_change:
            colorare_aliniata_2[index_tc] = colorare1[row_ind[index]]
    if colorare_aliniata_2.count(-1)>0:
        print("ceva aiurea in aliniere")
    return (colorare1, colorare_aliniata_2)
Example #24
    def _assign_posterior(self):
        """assign posterior to prior based on Hungarian algorithm

        Returns
        -------
        TFA
            Returns the instance itself.
        """

        prior_centers = self.get_centers(self.local_prior)
        posterior_centers = self.get_centers(self.local_posterior_)
        posterior_widths = self.get_widths(self.local_posterior_)
        # linear assignment on centers
        cost = distance.cdist(prior_centers, posterior_centers, 'euclidean')
        _, col_ind = linear_sum_assignment(cost)
        # reorder centers/widths based on cost assignment
        self.set_centers(self.local_posterior_, posterior_centers[col_ind])
        self.set_widths(self.local_posterior_, posterior_widths[col_ind])
        return self
Example #25
    def _get_assignments(self, data):
        """
        Get the assignments of the 2D points to the regular grid by solving the linear assignment problem
        using the Jonker-Volgenant algorithm.

        Parameters
        ----------
        data: np.ndarray
            The normalized data.

        Returns
        -------
        int, array-like, array-like
            The cost and assignments to grid cells.

        """
        # create grid of size n with linearly spaced coordinates, reshape to list of coordinates (size_x*size_y x2)
        grid = np.dstack(np.meshgrid(np.linspace(0, 1, self.size_x, endpoint=False),
                                     np.linspace(0, 1, self.size_y, endpoint=False))).reshape(-1, 2)

        # get squared euclidean distances between all pairs of grid points and embeddings
        cost_matrix = cdist(grid, data, "sqeuclidean")

        """
        calculate the linear assignment problem - find the assignments with minimal cost
        the algorithm works if we have fewer embeddings than there are grid cells (generalized LAP)
        -1 is returned if there is no matching embedding, i.e. no samples match the grid cell
        """

        # Try to use lap if it is available - if not, use scipy's linear_sum_assignment.
        if importlib.util.find_spec("lap") is not None:
            res = lapjv(cost_matrix, extend_cost=True)
            cost, grid_indices, assignments = res[0], res[1], grid[res[2]]
        else:
            row_indices, col_indices = linear_sum_assignment(cost_matrix)
            cost = cost_matrix[row_indices, col_indices].sum()

            grid_indices = np.full((self.size_x * self.size_y), -1, dtype=np.int32)
            grid_indices[row_indices] = col_indices
            assignments = grid[row_indices]

        return cost, grid_indices, assignments
Example #26
 def _compute_agreement(self, S1, S2):
     """
     measuring the agreement between two different 
     k-way topic models, represented as two rank sets;
     the rank set is simply the top words for each topic
     """
     
     # compute the similarity matrix
     n_topic = len(S1)
     sim_mat = np.zeros(( n_topic, n_topic ))
     for row in range(n_topic):
         for col in range(n_topic):
             sim_mat[row, col] = self._compute_avg_jaccard(S1[row], S2[col])    
     
     # solve for the optimal permutation using the hungarian algorithm;
     # in the scipy implementation, each element is treated as a cost,
     # hence we use the negative of the similarity matrix as input
     row_ind, col_ind = linear_sum_assignment(-sim_mat)
     agreement = np.mean( sim_mat[row_ind, col_ind] )
     return agreement    
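A toy sketch of the negation trick (made-up similarity values): since scipy minimizes cost, negating the similarity matrix turns the problem into maximizing total similarity.

import numpy as np
from scipy.optimize import linear_sum_assignment

sim = np.array([[0.9, 0.1],
                [0.2, 0.8]])
row_ind, col_ind = linear_sum_assignment(-sim)   # picks (0, 0) and (1, 1)
agreement = sim[row_ind, col_ind].mean()         # 0.85

Recent SciPy versions also accept linear_sum_assignment(sim, maximize=True), which avoids the explicit negation.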
Example #27
def compute_assignments(locations, confidences, gt_bboxes, num_gt_bboxes, batch_size, alpha):
  """
  locations: [batch_size * num_predictions, 4]
  confidences: [batch_size * num_predictions]
  gt_bboxes: [batch_size, max num gt_bboxes, 4]
  num_gt_bboxes : [batch_size]  The number of gt bboxes in each image of the batch
  """
  
  num_predictions = locations.shape[0] // batch_size
  assignment_partitions = np.zeros(batch_size * num_predictions, dtype=np.int32)
  stacked_gt_bboxes = []
  
  log_confidences = np.log(confidences)
  v = 1. - confidences
  v[v > 1.] = 1.
  v[v <= 0] = small_epsilon
  log_one_minus_confidences = np.log(v)
  
  # Go through each image in the batch
  for b in range(batch_size):
    
    offset = b * num_predictions
    
    # we need to construct the cost matrix
    C = np.zeros((num_predictions, num_gt_bboxes[b]))
    for j in range(num_gt_bboxes[b]):
      C[:, j] = (alpha / 2.) * (np.linalg.norm(locations[offset:offset+num_predictions] - gt_bboxes[b][j], axis=1))**2 - log_confidences[offset:offset+num_predictions] + log_one_minus_confidences[offset:offset+num_predictions]
    
    #print C
    
    # Compute the assignments
    row_ind, col_ind = linear_sum_assignment(C)
    
    #print row_ind, col_ind
    
    for r, c in zip(row_ind, col_ind):
      assignment_partitions[offset + r] = 1
      stacked_gt_bboxes.append(gt_bboxes[b][c])
    
  return [assignment_partitions, np.array(stacked_gt_bboxes)]
Example #28
def align(X, Y, distance=lambda x,y: abs(x - y)):
    """
    Find a cheap alignment of X and Y.
    """
    from scipy.optimize import linear_sum_assignment
    assert len(X) == len(Y)
    n = len(X)
    c = np.zeros((n, n))
    for i, x in enumerate(X):
        for j, y in enumerate(Y):
            c[i,j] = distance(x, y)

    rs,cs = linear_sum_assignment(c)

    class result:
        cost = c[rs,cs].sum()
        x = list(X[r] for r in rs)
        y = list(Y[c] for c in cs)
        x_ind = rs
        y_ind = cs

    return result
Example #29
def consensus_score(a, b, similarity="jaccard"):
    """The similarity of two sets of biclusters.

    Similarity between individual biclusters is computed. Then the
    best matching between sets is found using the Hungarian algorithm.
    The final score is the sum of similarities divided by the size of
    the larger set.

    Read more in the :ref:`User Guide <biclustering>`.

    Parameters
    ----------
    a : (rows, columns)
        Tuple of row and column indicators for a set of biclusters.

    b : (rows, columns)
        Another set of biclusters like ``a``.

    similarity : string or function, optional, default: "jaccard"
        May be the string "jaccard" to use the Jaccard coefficient, or
        any function that takes four arguments, each of which is a 1d
        indicator vector: (a_rows, a_columns, b_rows, b_columns).

    References
    ----------

    * Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
      for bicluster acquisition
      <https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.

    """
    if similarity == "jaccard":
        similarity = _jaccard
    matrix = _pairwise_similarity(a, b, similarity)
    row_indices, col_indices = linear_sum_assignment(1. - matrix)
    n_a = len(a[0])
    n_b = len(b[0])
    return matrix[row_indices, col_indices].sum() / max(n_a, n_b)
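A minimal usage sketch (hypothetical bicluster indicators; assumes this is scikit-learn's consensus_score with its _jaccard and _pairwise_similarity helpers available): two identical sets of biclusters listed in a different order should score 1.0.

import numpy as np
rows = np.array([[True, True, False, False],
                 [False, False, True, True]])
cols = np.array([[True, False, True],
                 [False, True, False]])
a = (rows, cols)
b = (rows[::-1], cols[::-1])      # same biclusters, permuted
consensus_score(a, b)             # 1.0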
Example #30
    def estimate_earth_mover_distance(self, step, n_batch=2):
        real = list()
        fake = list()

        for i in range(n_batch):
            __real__ = next(self.dataset_generator)
            __fake__ = self.sess.run(self.generator.output_tensor)

            real.append(__real__)
            fake.append(__fake__)

        real = np.vstack(real)
        fake = np.vstack(fake)

        cost_matrix = distance.cdist(fake, real, 'euclidean')

        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        linear_sum = cost_matrix[row_ind, col_ind].sum()
        emd = linear_sum/real.shape[0]

        emd_fetch = self.sess.run(self.emd_summary, feed_dict={self.emd_placeholder: emd})
        self.summary_writer.add_summary(emd_fetch, step)
        self.summary_writer.flush()
Example #31
def skip_connections_distance(list_a, list_b):
    distance_matrix = np.zeros((len(list_a), len(list_b)))
    for i, a in enumerate(list_a):
        for j, b in enumerate(list_b):
            distance_matrix[i][j] = skip_connection_distance(a, b)
    return distance_matrix[linear_sum_assignment(distance_matrix)].sum() + abs(len(list_a) - len(list_b))
Example #32
def _quadratic_assignment_faq(
    A,
    B,
    maximize=False,
    partial_match=None,
    S=None,
    rng=None,
    P0="barycenter",
    shuffle_input=False,
    maxiter=30,
    tol=0.03,
):
    r"""
    Solve the quadratic assignment problem (approximately).
    This function solves the Quadratic Assignment Problem (QAP) and the
    Graph Matching Problem (GMP) using the Fast Approximate QAP Algorithm
    (FAQ) [1]_.
    Quadratic assignment solves problems of the following form:
    .. math::
        \min_P & \ {\ \text{trace}(A^T P B P^T)}\\
        \mbox{s.t. } & {P \in \mathcal{P}}\\
    where :math:`\mathcal{P}` is the set of all permutation matrices,
    and :math:`A` and :math:`B` are square matrices.
    Graph matching tries to *maximize* the same objective function.
    This algorithm can be thought of as finding the alignment of the
    nodes of two graphs that minimizes the number of induced edge
    disagreements, or, in the case of weighted graphs, the sum of squared
    edge weight differences.
    Note that the quadratic assignment problem is NP-hard, is not
    known to be solvable in polynomial time, and is computationally
    intractable. Therefore, the results given are approximations,
    not guaranteed to be exact solutions.
    Parameters
    ----------
    A : 2d-array, square
        The square matrix :math:`A` in the objective function above.
    B : 2d-array, square
        The square matrix :math:`B` in the objective function above.
    method :  str in {'faq', '2opt'} (default: 'faq')
        The algorithm used to solve the problem. This is the method-specific
        documentation for 'faq'.
        :ref:`'2opt' <optimize.qap-2opt>` is also available.
    Options
    -------
    maximize : bool (default = False)
        Setting `maximize` to ``True`` solves the Graph Matching Problem (GMP)
        rather than the Quadratic Assignment Problem (QAP). This is
        accomplished through trivial negation of the objective function.
    rng : {None, int, `~np.random.RandomState`, `~np.random.Generator`}
        This parameter defines the object to use for drawing random
        variates.
        If `rng` is ``None`` the `~np.random.RandomState` singleton is
        used.
        If `rng` is an int, a new ``RandomState`` instance is used,
        seeded with `rng`.
        If `rng` is already a ``RandomState`` or ``Generator``
        instance, then that object is used.
        Default is None.
    partial_match : 2d-array of integers, optional, (default = None)
        Allows the user to fix part of the matching between the two
        matrices. In the literature, a partial match is also known as a
        "seed".
        Each row of `partial_match` specifies the indices of a pair of
        corresponding nodes, that is, node ``partial_match[i, 0]`` of `A` is
        matched to node ``partial_match[i, 1]`` of `B`. Accordingly,
        ``partial_match`` is an array of size ``(m , 2)``, where ``m`` is
        not greater than the number of nodes, :math:`n`.
    S : 2d-array, square
        A similarity matrix. Should be same shape as ``A`` and ``B``.
        Note: the scale of `S` may affect the weight placed on the term
        :math:`\text{trace}(S^T P)` relative to :math:`\text{trace}(A^T PBP^T)`
        during the optimization process.
    P0 : 2d-array, "barycenter", or "randomized" (default = "barycenter")
        The initial (guess) permutation matrix or search "position"
        `P0`.
        `P0` need not be a proper permutation matrix;
        however, it must be :math:`m' \times m'`, where :math:`m' = n - m`,
        and it must be doubly stochastic: each of its rows and columns must
        sum to 1.
        If unspecified or ``"barycenter"``, the non-informative "flat
        doubly stochastic matrix" :math:`J = 1*1^T/m'`, where :math:`1` is
        a :math:`m' \times 1` array of ones, is used. This is the "barycenter"
        of the search space of doubly-stochastic matrices.
        If ``"randomized"``, the algorithm will start from the
        randomized initial search position :math:`P_0 = (J + K)/2`,
        where :math:`J` is the "barycenter" and :math:`K` is a random
        doubly stochastic matrix.
    shuffle_input : bool (default = False)
        To avoid artificially high or low matching due to inherent
        sorting of input matrices, gives users the option
        to shuffle the nodes. Results are then unshuffled so that the
        returned results correspond with the node order of inputs.
        Shuffling may cause the algorithm to be non-deterministic,
        unless a random seed is set or an `rng` option is provided.
    maxiter : int, positive (default = 30)
        Integer specifying the max number of Frank-Wolfe iterations performed.
    tol : float (default = 0.03)
        A threshold for the stopping criterion. Frank-Wolfe
        iteration terminates when the change in search position between
        iterations is sufficiently small, that is, when the relative Frobenius
        norm, :math:`\frac{||P_{i}-P_{i+1}||_F}{\sqrt{len(P_{i})}} \leq tol`,
        where :math:`i` is the iteration number.
    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` containing the following
        fields.
        col_ind : 1-D array
            An array of column indices corresponding with the best
            permutation of the nodes of `B` found.
        fun : float
            The corresponding value of the objective function.
        nit : int
            The number of Frank-Wolfe iterations performed.
    Notes
    -----
    The algorithm may be sensitive to the initial permutation matrix (or
    search "position") due to the possibility of several local minima
    within the feasible region. A barycenter initialization is more likely to
    result in a better solution than a single random initialization. However,
    ``quadratic_assignment`` calling several times with different random
    initializations may result in a better optimum at the cost of longer
    total execution time.
    Examples
    --------
    As mentioned above, a barycenter initialization often results in a better
    solution than a single random initialization.
    >>> np.random.seed(0)
    >>> n = 15
    >>> A = np.random.rand(n, n)
    >>> B = np.random.rand(n, n)
    >>> res = quadratic_assignment(A, B)  # FAQ is default method
    >>> print(res.fun)
    46.871483385480545 # may vary
    >>> options = {"P0": "randomized"}  # use randomized initialization
    >>> res = quadratic_assignment(A, B, options=options)
    >>> print(res.fun)
    47.224831071310625 # may vary
    However, consider running from several randomized initializations and
    keeping the best result.
    >>> res = min([quadratic_assignment(A, B, options=options)
    ...            for i in range(30)], key=lambda x: x.fun)
    >>> print(res.fun)
    46.671852533681516 # may vary
    The '2-opt' method can be used to further refine the results.
    >>> options = {"partial_guess": np.array([np.arange(n), res.col_ind]).T}
    >>> res = quadratic_assignment(A, B, method="2opt", options=options)
    >>> print(res.fun)
    46.47160735721583 # may vary
    References
    ----------
    .. [1] J.T. Vogelstein, J.M. Conroy, V. Lyzinski, L.J. Podrazik,
           S.G. Kratzer, E.T. Harley, D.E. Fishkind, R.J. Vogelstein, and
           C.E. Priebe, "Fast approximate quadratic programming for graph
           matching," PLOS one, vol. 10, no. 4, p. e0121002, 2015,
           :doi:`10.1371/journal.pone.0121002`
    .. [2] D. Fishkind, S. Adali, H. Patsolic, L. Meng, D. Singh, V. Lyzinski,
           C. Priebe, "Seeded graph matching", Pattern Recognit. 87 (2019):
           203-215, :doi:`10.1016/j.patcog.2018.09.014`
    """

    maxiter = operator.index(maxiter)

    # ValueError check
    A, B, partial_match = _common_input_validation(A, B, partial_match)

    msg = None
    if isinstance(P0, str) and P0 not in {"barycenter", "randomized"}:
        msg = "Invalid 'P0' parameter string"
    elif maxiter <= 0:
        msg = "'maxiter' must be a positive integer"
    elif tol <= 0:
        msg = "'tol' must be a positive float"
    elif S.shape[0] != S.shape[1]:
        msg = "`S` must be square"
    elif S.ndim != 2:
        msg = "`S` must have exactly two dimensions"
    elif S.shape != A.shape:
        msg = "`S`, `A`, and `B` matrices must be of equal size"
    if msg is not None:
        raise ValueError(msg)

    rng = check_random_state(rng)
    n = A.shape[0]  # number of vertices in graphs
    n_seeds = partial_match.shape[0]  # number of seeds
    n_unseed = n - n_seeds

    # check outlier cases
    if n == 0 or partial_match.shape[0] == n:
        score = _calc_score(A, B, S, partial_match[:, 1])
        res = {"col_ind": partial_match[:, 1], "fun": score, "nit": 0}
        return OptimizeResult(res)

    obj_func_scalar = 1
    if maximize:
        obj_func_scalar = -1

    nonseed_B = np.setdiff1d(range(n), partial_match[:, 1])
    perm_S = np.copy(nonseed_B)
    if shuffle_input:
        nonseed_B = rng.permutation(nonseed_B)
        # shuffle_input to avoid results from inputs that were already matched

    nonseed_A = np.setdiff1d(range(n), partial_match[:, 0])
    perm_A = np.concatenate([partial_match[:, 0], nonseed_A])
    perm_B = np.concatenate([partial_match[:, 1], nonseed_B])

    S = S[:, perm_B]

    # definitions according to Seeded Graph Matching [2].
    A11, A12, A21, A22 = _split_matrix(A[perm_A][:, perm_A], n_seeds)
    B11, B12, B21, B22 = _split_matrix(B[perm_B][:, perm_B], n_seeds)
    S22 = S[perm_S, n_seeds:]

    # [1] Algorithm 1 Line 1 - choose initialization
    if isinstance(P0, str):
        # initialize J, a doubly stochastic barycenter
        J = np.ones((n_unseed, n_unseed)) / n_unseed
        if P0 == "barycenter":
            P = J
        elif P0 == "randomized":
            # generate a nxn matrix where each entry is a random number [0, 1]
            # would use rand, but Generators don't have it
            # would use random, but old mtrand.RandomStates don't have it
            K = rng.uniform(size=(n_unseed, n_unseed))
            # Sinkhorn balancing
            K = _doubly_stochastic(K)
            P = J * 0.5 + K * 0.5
    elif isinstance(P0, np.ndarray):
        P0 = np.atleast_2d(P0)
        _check_init_input(P0, n_unseed)
        invert_inds = np.argsort(nonseed_B)
        perm_nonseed_B = np.argsort(invert_inds)
        P = P0[:, perm_nonseed_B]
    else:
        msg = "`init` must either be of type str or np.ndarray."
        raise TypeError(msg)

    const_sum = A21 @ B21.T + A12.T @ B12 + S22

    # [1] Algorithm 1 Line 2 - loop while stopping criteria not met
    for n_iter in range(1, maxiter + 1):
        # [1] Algorithm 1 Line 3 - compute the gradient of f(P) = -tr(APB^tP^t)
        grad_fp = const_sum + A22 @ P @ B22.T + A22.T @ P @ B22
        # [1] Algorithm 1 Line 4 - get direction Q by solving Eq. 8
        _, cols = linear_sum_assignment(grad_fp, maximize=maximize)
        Q = np.eye(n_unseed)[cols]

        # [1] Algorithm 1 Line 5 - compute the step size
        # Noting that e.g. trace(Ax) = trace(A)*x, expand and re-collect
        # terms as ax**2 + bx + c. c does not affect location of minimum
        # and can be ignored. Also, note that trace(A@B) = (A.T*B).sum();
        # apply where possible for efficiency.
        R = P - Q
        b21 = ((R.T @ A21) * B21).sum()
        b12 = ((R.T @ A12.T) * B12.T).sum()
        AR22 = A22.T @ R
        BR22 = B22 @ R.T
        b22a = (AR22 * B22.T[cols]).sum()
        b22b = (A22 * BR22[cols]).sum()
        s = (S22 * R).sum()
        a = (AR22.T * BR22).sum()
        b = b21 + b12 + b22a + b22b + s
        # critical point of ax^2 + bx + c is at x = -b/(2*a)
        # if a * obj_func_scalar > 0, it is a minimum
        # if minimum is not in [0, 1], only endpoints need to be considered
        if a * obj_func_scalar > 0 and 0 <= -b / (2 * a) <= 1:
            alpha = -b / (2 * a)
        else:
            alpha = np.argmin([0, (b + a) * obj_func_scalar])

        # [1] Algorithm 1 Line 6 - Update P
        P_i1 = alpha * P + (1 - alpha) * Q
        if np.linalg.norm(P - P_i1) / np.sqrt(n_unseed) < tol:
            P = P_i1
            break
        P = P_i1
    # [1] Algorithm 1 Line 7 - end main loop

    # [1] Algorithm 1 Line 8 - project onto the set of permutation matrices
    _, col = linear_sum_assignment(-P)
    perm = np.concatenate((np.arange(n_seeds), col + n_seeds))

    unshuffled_perm = np.zeros(n, dtype=int)
    unshuffled_perm[perm_A] = perm_B[perm]

    score = _calc_score(A, B, S, unshuffled_perm)

    res = {"col_ind": unshuffled_perm, "fun": score, "nit": n_iter}

    return OptimizeResult(res)
Example #33
    losses.append(np.mean(batch_losses))

sn.eval()
sn.noise_factor = 0.0
sn.n_samples = 1
sn.n_iters = 100

test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

prec_list = []
actions_pred = []
index_list = []
for im, seq, seq_ordered, indices in test_loader:
    P = sn.predict_P(im)

    index_list.append(indices.cpu().numpy())
    _, obj_ids = linear_sum_assignment(1 - P[0, :, :].cpu().detach().numpy())
    actions_pred.append(obj_ids)
    prec = np.sum(obj_ids == np.argmax(seq.cpu().numpy(), -1)) / 7
    prec_list.append(prec)

np.mean(prec_list)

parts = [k[0] for k in key.items()]

pred_extractions = np.array(parts)[np.array(actions_pred).astype(int)]
indices = np.array(index_list).astype(int)

np.save('pred_order_%02d.npy' % args.seed, pred_extractions)
np.save('test_indices_%02d.npy' % args.seed, indices)
Example #34
def hungarian_assignment(positions, targets):
    distances = distance.cdist(positions, targets, 'euclidean')
    row_ind, col_ind = linear_sum_assignment(distances)
    return targets[col_ind, :]
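A quick usage sketch with made-up 2-D points (assuming numpy and the scipy imports used above): the returned array is targets reordered so that row i is the target assigned to positions[i].

import numpy as np
positions = np.array([[0.0, 0.0], [5.0, 5.0]])
targets = np.array([[5.1, 4.9], [0.2, -0.1]])
hungarian_assignment(positions, targets)
# array([[ 0.2, -0.1],
#        [ 5.1,  4.9]])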
Example #35
    def forward(self, outputs, targets):
        """ Performs the matching

        Params:
            outputs: This is a dict that contains at least these entries:
                 "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
                 "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates

            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
                 "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth
                           objects in the target) containing the class labels
                 "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates

        Returns:
            A list of size batch_size, containing tuples of (index_i, index_j) where:
                - index_i is the indices of the selected predictions (in order)
                - index_j is the indices of the corresponding selected targets (in order)
            For each batch element, it holds:
                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]

        # We flatten to compute the cost matrices in a batch
        if self.use_focal:
            out_prob = outputs["pred_logits"].flatten(
                0, 1).sigmoid()  # [batch_size * num_queries, num_classes]
            out_bbox = outputs["pred_boxes"].flatten(
                0, 1)  # [batch_size * num_queries, 4]
        else:
            out_prob = outputs["pred_logits"].flatten(0, 1).softmax(
                -1)  # [batch_size * num_queries, num_classes]
            out_bbox = outputs["pred_boxes"].flatten(
                0, 1)  # [batch_size * num_queries, 4]

        # Also concat the target labels and boxes
        tgt_ids = torch.cat([v["labels"] for v in targets])
        tgt_bbox = torch.cat([v["boxes_xyxy"] for v in targets])

        # Compute the classification cost. Contrary to the loss, we don't use the NLL,
        # but approximate it in 1 - proba[target class].
        # The 1 is a constant that doesn't change the matching, it can be omitted.
        if self.use_focal:
            # Compute the classification cost.
            alpha = self.focal_loss_alpha
            gamma = self.focal_loss_gamma
            neg_cost_class = (1 - alpha) * (out_prob**gamma) * (
                -(1 - out_prob + 1e-8).log())
            pos_cost_class = alpha * (
                (1 - out_prob)**gamma) * (-(out_prob + 1e-8).log())
            cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:,
                                                                     tgt_ids]
        else:
            cost_class = -out_prob[:, tgt_ids]

        # Compute the L1 cost between boxes
        image_size_out = torch.cat(
            [v["image_size_xyxy"].unsqueeze(0) for v in targets])
        image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries,
                                                            1).flatten(0, 1)
        image_size_tgt = torch.cat([v["image_size_xyxy_tgt"] for v in targets])

        out_bbox_ = out_bbox / image_size_out
        tgt_bbox_ = tgt_bbox / image_size_tgt
        cost_bbox = torch.cdist(out_bbox_, tgt_bbox_, p=1)

        # Compute the giou cost between boxes
        # cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))
        cost_giou = -generalized_box_iou(out_bbox, tgt_bbox)

        # Final cost matrix
        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou
        C = C.view(bs, num_queries, -1).cpu()

        sizes = [len(v["boxes"]) for v in targets]
        indices = [
            linear_sum_assignment(c[i])
            for i, c in enumerate(C.split(sizes, -1))
        ]
        return [(torch.as_tensor(i, dtype=torch.int64),
                 torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
Example #36
def min_cost_matching(
        distance_metric, max_distance, tracks, detections, track_indices=None,
        detection_indices=None):
    """Solve linear assignment problem.

    Parameters
    ----------
    distance_metric : Callable[List[Track], List[Detection], List[int], List[int]) -> ndarray
        The distance metric is given a list of tracks and detections as well as
        a list of N track indices and M detection indices. The metric should
        return the NxM dimensional cost matrix, where element (i, j) is the
        association cost between the i-th track in the given track indices and
        the j-th detection in the given detection_indices.
    max_distance : float
        Gating threshold. Associations with cost larger than this value are
        disregarded.
    tracks : List[track.Track]
        A list of predicted tracks at the current time step.
    detections : List[detection.Detection]
        A list of detections at the current time step.
    track_indices : List[int]
        List of track indices that maps rows in `cost_matrix` to tracks in
        `tracks` (see description above).
    detection_indices : List[int]
        List of detection indices that maps columns in `cost_matrix` to
        detections in `detections` (see description above).

    Returns
    -------
    (List[(int, int)], List[int], List[int])
        Returns a tuple with the following three entries:
        * A list of matched track and detection indices.
        * A list of unmatched track indices.
        * A list of unmatched detection indices.

    """
    if track_indices is None:
        track_indices = np.arange(len(tracks))
    if detection_indices is None:
        detection_indices = np.arange(len(detections))

    if len(detection_indices) == 0 or len(track_indices) == 0:
        return [], track_indices, detection_indices  # Nothing to match.

    cost_matrix = distance_metric(
        tracks, detections, track_indices, detection_indices)
    cost_matrix[cost_matrix > max_distance] = max_distance + 1e-5
    indices = linear_sum_assignment(cost_matrix)
    indices = np.asarray(indices)
    indices = np.transpose(indices)
    matches, unmatched_tracks, unmatched_detections = [], [], []
    for col, detection_idx in enumerate(detection_indices):
        if col not in indices[:, 1]:
            unmatched_detections.append(detection_idx)
    for row, track_idx in enumerate(track_indices):
        if row not in indices[:, 0]:
            unmatched_tracks.append(track_idx)
    for row, col in indices:
        track_idx = track_indices[row]
        detection_idx = detection_indices[col]
        if cost_matrix[row, col] > max_distance:
            unmatched_tracks.append(track_idx)
            unmatched_detections.append(detection_idx)
        else:
            matches.append((track_idx, detection_idx))
    return matches, unmatched_tracks, unmatched_detections
Example #37
def routing_algorithm(world, robots, mode="random", maximumSeconds=120):
    """
    Route the world using various algorithms.

    Parameters
    ----------
    world:          World object
        world object as defined in world.py
    robots:         list
        list of Robot objects that will be altered for the animation
    mode:           str
        a string that represents the mode we are going to use
    """
    assignments = [[] for _ in range(len(robots))]
    if mode == "random":
        """Assign each task to a random robot multiple times to find a distribution."""
        tasks = {}
        for i, hospital in enumerate(world.hospitals):
            if world.graph.nodes[hospital]['demand1'] != 0 or world.graph.nodes[
                    hospital]['demand2'] != 0:
                pointer = world.graph.nodes[hospital]
                tasks[hospital] = np.array(
                    [pointer['demand1'], pointer['demand2']], dtype=float)

        # Assign robots to tasks until there are none left
        while len(tasks) > 0:
            # print("Remaining tasks: ", tasks)
            for i, robot in enumerate(robots):
                if len(tasks) == 0:
                    break
                random_goal = random.choice(list(tasks.keys()))
                tasks[random_goal] -= np.array(
                    [robot.capacity, robot.capacity])
                assignments[i].append(
                    [random_goal,
                     np.array([robot.capacity, robot.capacity])])
                if np.all(tasks[random_goal] <= 0.):
                    del tasks[random_goal]

    elif mode == "hungarian":
        """ We want a cost matrix of size number_of_robots x no_of_tasks"""
        number_of_robots = len(robots)
        tasks = []

        for hospital in world.hospitals:
            if world.graph.nodes[hospital]['demand1'] != 0:
                tasks.append(
                    (hospital, 0, world.graph.nodes[hospital]['demand1']))
            if world.graph.nodes[hospital]['demand2'] != 0:
                tasks.append(
                    (hospital, 1, world.graph.nodes[hospital]['demand2']))

        cost_matrix = np.zeros((number_of_robots, len(tasks)))

        # So now we have a list of tasks. Each task is a tuple: (hospital, demand type, demand)
        # We also have a cost matrix of the correct size, now we fill it in with the time cost
        for i, robot in enumerate(robots):
            for j, task in enumerate(tasks):

                #print("robot: {}, task: {}".format(i,j))

                path_length = nx.shortest_path_length(world.graph,
                                                      robot.start_node,
                                                      task[0],
                                                      weight='length')

                # Calculate time taken to go to that hospital
                time_taken = path_length / robot.speed
                cost_matrix[i][j] = time_taken

        robot_ind, task_ind = linear_sum_assignment(cost_matrix)

        for i, index in enumerate(robot_ind):
            #print("robot_ind: ", i)

            task = tasks[task_ind[i]]
            assignments[i].append([
                task[0],
                np.array([(1 - task[1]) * robots[index].capacity,
                          task[1] * robots[index].capacity])
            ])

    elif mode == "linear_separate_tasks":
        # Takes all the tasks as separate tasks, i.e. does not join them together and the robot only delivers one type
        # of task.

        # Obtain all the demand and priority for goods 1 and 2
        demand = []
        tasks = []
        types = []
        priority = []
        for i, hospital in enumerate(world.hospitals):
            if world.graph.nodes[hospital]['demand1'] != 0:
                demand.append(world.graph.nodes[hospital]['demand1'])
                tasks.append(hospital)
                types.append(0)
                priority.append(world.graph.nodes[hospital]['priority'])
            if world.graph.nodes[hospital]['demand2'] != 0:
                demand.append(world.graph.nodes[hospital]['demand2'])
                tasks.append(hospital)
                types.append(1)
                priority.append(world.graph.nodes[hospital]['priority'])

        # Obtain the time cost matrix and the robots' capacity.
        T = np.zeros((len(robots), len(demand)))
        capacity = np.zeros(len(robots))
        for i, robot in enumerate(robots):
            capacity[i] = robot.capacity
            for j, task in enumerate(tasks):
                path_length = nx.shortest_path_length(world.graph,
                                                      robot.start_node,
                                                      task,
                                                      weight='length')
                time_taken = path_length / robot.speed
                T[i][j] = time_taken

        # Change the demand and priority arrays to numpy ones
        demand = np.array(demand)
        priority = np.array(priority) * 10000

        # Define the optimization problem
        x = cp.Variable((len(robots), len(demand)), boolean=True)
        cost = cp.sum(cp.multiply(T, x)) + cp.sum(
            cp.neg(cp.multiply(cp.matmul(capacity, x) - demand, priority)))
        objective = cp.Minimize(cost)
        inequality = [cp.sum(x, axis=1) <= 1]
        problem = cp.Problem(objective, inequality)
        problem.solve()

        # Assign the results to the robots and evaluate the costs
        nonzero = x.value.nonzero()
        for index in range(len(nonzero[0])):
            i = nonzero[1][index]
            j = nonzero[0][index]
            robot = robots[j]
            task = tasks[i]
            assignments[j].append([
                task,
                np.array([(1 - types[i]) * robot.capacity,
                          types[i] * robot.capacity])
            ])

    elif mode == "linear_joined_tasks":
        # As above but allows the robots to deliver two goods at the same time

        # Obtain all the demand and priority for goods 1 and 2
        demand = []
        tasks = []
        priority = []

        for i, hospital in enumerate(world.hospitals):
            if world.graph.nodes[hospital]['demand1'] != 0 or world.graph.nodes[
                    hospital]['demand2'] != 0:
                pointer = world.graph.nodes[hospital]
                demand.append([pointer['demand1'], pointer['demand2']])
                tasks.append(hospital)
                priority.append([pointer['priority'], pointer['priority']])

        # Obtain the time cost matrix and the robots' capacity
        T = np.zeros((len(robots), len(demand)))
        capacity = np.zeros((len(robots), 2))

        for i, robot in enumerate(robots):
            capacity[i, 0] = robot.capacity
            capacity[i, 1] = robot.capacity
            for j, task in enumerate(tasks):
                path_length = nx.shortest_path_length(world.graph,
                                                      robot.start_node,
                                                      task,
                                                      weight='length')
                time_taken = path_length / robot.speed
                T[i][j] = time_taken

        # Change the demand and priority arrays to numpy ones
        demand = np.array(demand)
        priority = np.array(priority) * 10000

        # Define the optimization problem
        x = cp.Variable((len(robots), len(demand)), boolean=True)
        cost = cp.sum(cp.multiply(T, x)) + cp.sum(
            cp.neg(
                cp.multiply(
                    cp.matmul(cp.transpose(x), capacity) - demand, priority)))
        objective = cp.Minimize(cost)
        inequality = [cp.sum(x, axis=1) <= 1]
        problem = cp.Problem(objective, inequality)
        problem.solve(verbose=True, tm_lim=60000)

        # Assign the results to the robots and evaluate the costs
        nonzero = x.value.nonzero()
        # print(nonzero)
        for index in range(len(nonzero[0])):
            i = nonzero[1][index]
            j = nonzero[0][index]
            task = tasks[i]
            assignments[j].append(
                [task, np.array([capacity[j][0], capacity[j][1]])])

    elif mode == "tsm":
        # Method based on the m-travelling salesmen problem

        # Obtain all the demand and priority for goods 1 and 2
        demand = []
        tasks = []
        priority = []

        for i, hospital in enumerate(world.hospitals):
            if world.graph.nodes[hospital]['demand1'] != 0 or world.graph.nodes[
                    hospital]['demand2'] != 0:
                pointer = world.graph.nodes[hospital]
                demand.append([pointer['demand1'], pointer['demand2']])
                tasks.append(hospital)
                priority.append([pointer['priority'], pointer['priority']])

        # Obtain the time cost matrix and the robots' capacity
        x = []
        v = []
        z = []
        capacities = []
        distances = []
        constraints = []
        costs = []
        for i, robot in enumerate(robots):
            # Create necessary variables
            x.append(
                cp.Variable((len(demand) + 1, len(demand) + 1), boolean=True))
            v.append(cp.Variable(len(demand) + 1, boolean=True))
            z.append(cp.Variable((len(demand), 1)))
            capacities.append(cp.Variable((len(demand), 2)))

            # Add constraints
            constraints.append(cp.sum(
                x[i], axis=0) == v[i])  # a city is entered iff it is marked as visited
            constraints.append(cp.sum(x[i], axis=0) == cp.sum(
                x[i], axis=1))  # every city that is entered is also left
            constraints.append(z[i] - cp.transpose(z[i]) +
                               len(demand) * x[i][1:, 1:] <=
                               len(demand) - 1)  # MTZ subtour elimination
            constraints.append(cp.sum(x[i], axis=1) <= cp.sum(
                x[i][0, :]))  # the tour starts from the 0th (depot) node
            constraints.append(
                cp.sum(capacities[i], axis=0) <=
                robot.capacity)  # make sure we are not over capacity
            constraints.append(
                cp.sum(capacities[i], axis=1) <=
                1000 * v[i][1:])  # only visited cities can receive deliveries (big-M)
            constraints.append(capacities[i] >= 0)

            # Fill in the distance matrix
            distances.append(np.zeros((len(demand) + 1, len(demand) + 1)))
            for j, task in enumerate([robot.start_node] + tasks):
                for k, other_task in enumerate([robot.start_node] + tasks):
                    if other_task != task:
                        path_length = nx.shortest_path_length(world.graph,
                                                              task,
                                                              other_task,
                                                              weight='length')
                        distances[i][j][k] = path_length / robot.speed
                    else:
                        distances[i][j][k] = 100000

            costs.append(cp.sum(cp.multiply(x[i], distances[i])))

        demand = np.array(demand)
        priority = np.array(priority) * 10000

        costs = cp.max(cp.hstack(costs)) + cp.sum(
            cp.multiply(cp.pos(demand - sum(capacities)), priority))
        objective = cp.Minimize(costs)
        problem = cp.Problem(objective, constraints)
        problem.solve(verbose=True,
                      solver=cp.CBC,
                      numberThreads=8,
                      logLevel=1,
                      maximumSeconds=maximumSeconds,
                      allowablePercentageGap=10)

        # Assign the results to the robots and evaluate the costs
        for i, x_i in enumerate(x):
            print(f"variable {i + 1}: {x_i.value}")
            print(f"variable {i + 1}: {np.sum(x_i.value * distances[i])}")
            print(f"variable {i + 1}: {capacities[i].value}")

        for i, robot in enumerate(robots):
            node = np.argmax(x[i].value[0, :])
            while node != 0:
                if np.any(capacities[i].value[node - 1] != 0):
                    task = tasks[node - 1]
                    assignments[i].append(
                        [task, capacities[i].value[node - 1]])
                node = np.argmax(x[i].value[node, :])

    elif mode == "home":
        # Home-made method that allows specifying the look-ahead depth N of the solution

        # Obtain all the demand and priority for goods 1 and 2
        demand = []
        tasks = []
        priority = []
        N = 2

        for i, hospital in enumerate(world.hospitals):
            if world.graph.nodes[hospital]['demand1'] != 0 or world.graph.nodes[
                    hospital]['demand2'] != 0:
                pointer = world.graph.nodes[hospital]
                demand.append([pointer['demand1'], pointer['demand2']])
                tasks.append(hospital)
                priority.append([pointer['priority'], pointer['priority']])

        # Obtain the time cost matrix and the robots' capacity
        x = [[] for _ in range(len(robots))]
        capacities = []
        constraints = []
        costs = []

        for i, robot in enumerate(robots):
            # Obtain the time matrix for the robot
            time_matrix = np.zeros((len(demand), len(demand)))
            original = np.zeros(len(demand))
            for j, task in enumerate(tasks):
                path_length = nx.shortest_path_length(world.graph,
                                                      robot.start_node,
                                                      task,
                                                      weight='length')
                original[j] = path_length / robot.speed

                for k, other_task in enumerate(tasks):
                    if other_task != task:
                        path_length = nx.shortest_path_length(world.graph,
                                                              task,
                                                              other_task,
                                                              weight='length')
                        time_matrix[j][k] = path_length / robot.speed
                    else:
                        time_matrix[j][k] = 0

            # Add the initial state
            current_costs = []
            x[i].append(cp.Variable((len(demand), 1), boolean=True))
            constraints.append(cp.sum(x[i][0]) <= 1)
            current_costs.append(cp.sum(cp.multiply(cp.vec(x[i][0]),
                                                    original)))
            capacities.append(cp.Variable((len(demand), 2), integer=True))

            # Loop over the other future states
            for n in range(1, N):
                x[i].append(cp.Variable((len(demand), 1), boolean=True))
                current_costs.append(
                    cp.sum(
                        cp.multiply(
                            cp.pos(x[i][n - 1] + cp.transpose(x[i][n]) - 1),
                            time_matrix)))
                constraints.append(cp.sum(x[i][n]) <= cp.sum(x[i][n - 1]))

            # Add the capacities constraints
            constraints.append(
                cp.sum(capacities[i], axis=0) <=
                robot.capacity)  # Make sure we are not over capacity
            constraints.append(
                cp.sum(capacities[i], axis=1) <= 1000 * cp.vec(sum(x[i])))
            constraints.append(capacities[i] >= 0)

            # Add the current costs to the total costs
            costs.append(sum(current_costs))

        demand = np.array(demand)
        priority = np.array(priority) * 10000

        costs = cp.max(cp.hstack(costs)) + cp.sum(
            cp.multiply(cp.pos(demand - sum(capacities)), priority))
        objective = cp.Minimize(costs)
        problem = cp.Problem(objective, constraints)
        problem.solve(verbose=True,
                      solver=cp.CBC,
                      logLevel=1,
                      numberThreads=4,
                      maximumSeconds=120,
                      allowablePercentageGap=5)

        # Assign the results to the robots and evaluate the costs
        for robot_i, robot in enumerate(x):
            print(f"ROBOT {robot_i}\n-------")
            print(f"delivered capacities: \n{capacities[robot_i].value}")
            print(f"step 0: {0}")
            for i, x_i in enumerate(robot):
                print(f"step {i + 1}: {x_i.value.T}")

        for i, robot in enumerate(robots):
            visited_tasks = set()
            robot._current_node = robot.start_node
            for j, x_i in enumerate(x[i]):
                index = np.argmax(x_i.value)
                task = tasks[index]
                if np.any(capacities[i].value[index] != 0
                          ) and task not in visited_tasks:
                    visited_tasks.add(task)
                    assignments[i].append([task, capacities[i].value[index]])
                    robot._current_node = task

    else:
        raise NotImplementedError

    return assignments
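The modes above all share the same shape: a travel-time matrix plus a heavily weighted penalty for unmet demand, solved as a boolean program. Below is a minimal, self-contained sketch of that formulation with made-up sizes and numbers (it mirrors the "linear_separate_tasks" cost, not any particular project file); cvxpy needs a mixed-integer-capable solver installed for the boolean variable.

import cvxpy as cp
import numpy as np

T = np.array([[3.0, 5.0], [4.0, 2.0]])         # travel times, robots x tasks (toy values)
capacity = np.array([10.0, 10.0])              # units each robot can carry
demand = np.array([8.0, 6.0])                  # units requested per task
priority = np.array([1.0, 2.0]) * 10000        # penalty weight for unmet demand

x = cp.Variable(T.shape, boolean=True)
unmet = cp.pos(demand - capacity @ x)          # demand left uncovered by the assignment
cost = cp.sum(cp.multiply(T, x)) + cp.sum(cp.multiply(unmet, priority))
problem = cp.Problem(cp.Minimize(cost), [cp.sum(x, axis=1) <= 1])
problem.solve()                                # requires a MIP-capable solver to be installed
print(x.value)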
Example #38
0
def correct_coords(img_, coords_, min_distance=3, max_dist=5):

    peaks = peak_local_max(img_, min_distance=min_distance)
    peaks = peaks[:, ::-1]

    # remove `peaks` that are not within `max_dist` of any `coord`
    D = cdist(coords_, peaks)
    good = (D <= max_dist).any(axis=0)
    D = D[:, good]
    valid_peaks = peaks[good]

    #find the closest peaks
    closest_indexes = np.argmin(D, axis=1)

    # we consider it an easy assignment if the closest peak is claimed by only one coord
    u_indexes = np.unique(closest_indexes)
    counts = np.bincount(closest_indexes)[u_indexes]
    easy_assigments = u_indexes[counts == 1]
    valid_pairs = [(ii, x) for ii, x in enumerate(closest_indexes)
                   if x in easy_assigments]

    easy_rows, easy_cols = map(np.array, zip(*valid_pairs))

    easy_cost = D[easy_rows, easy_cols]
    good = easy_cost < max_dist
    easy_rows = easy_rows[good]
    easy_cols = easy_cols[good]

    assert (D[easy_rows, easy_cols] <= max_dist).all()

    # the hard assignments are those where a peak is the closest peak of more than one coord
    ambigous_rows = np.ones(D.shape[0], bool)
    ambigous_rows[easy_rows] = False
    ambigous_rows, = np.where(ambigous_rows)

    ambigous_cols = np.ones(D.shape[1], bool)
    ambigous_cols[easy_cols] = False
    ambigous_cols, = np.where(ambigous_cols)

    D_r = D[ambigous_rows][:, ambigous_cols]
    good = (D_r <= max_dist).any(axis=0)
    D_r = D_r[:, good]
    ambigous_cols = ambigous_cols[good]

    # for these we use the Hungarian algorithm for the assignment; running it over the
    # whole matrix would be too slow, so it is restricted to the ambiguous rows/columns
    ri, ci = linear_sum_assignment(D_r)

    hard_rows, hard_cols = ambigous_rows[ri], ambigous_cols[ci]

    assert (D_r[ri, ci] == D[hard_rows, hard_cols]).all()

    hard_cost = D[hard_rows, hard_cols]
    good = hard_cost < max_dist
    hard_rows = hard_rows[good]
    hard_cols = hard_cols[good]

    #let's combine both and assign the corresponding peak
    rows = np.concatenate((easy_rows, hard_rows))
    cols = np.concatenate((easy_cols, hard_cols))

    new_coords = coords_.copy()
    new_coords[rows] = valid_peaks[
        cols]  # coords that do not satisfy the close-peak condition are left unchanged

    return new_coords
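A hypothetical usage sketch of correct_coords with synthetic data (the image, guesses and expected peak locations below are made up; it assumes the imports used by the function above, e.g. skimage's peak_local_max, scipy's cdist and linear_sum_assignment, are available):

import numpy as np

img = np.zeros((50, 50))
img[10, 12] = img[30, 31] = 1.0                # two synthetic single-pixel peaks
approx = np.array([[11, 11], [32, 29]])        # rough (x, y) guesses near the peaks

snapped = correct_coords(img, approx, min_distance=3, max_dist=5)
print(snapped)                                 # should snap to (12, 10) and (31, 30) in (x, y)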
Example #39
0
def test(args, seed):
    np.random.seed(seed)
    gmm = GaussianMixture(n_components=args.num, tol=1e-3, max_iter=200, n_init=1, verbose=1)

    if args.pca_num > 0:
        pca = PCA(n_components=args.pca_num)


    train_x = np.loadtxt(os.path.join(args.exp_dir, "train.vec"), delimiter="\t")
    valid_x = np.loadtxt(os.path.join(args.exp_dir, "val.vec"), delimiter="\t")
    test_x = np.loadtxt(os.path.join(args.exp_dir, "test.vec"), delimiter="\t")

    if args.pca_num > 0:
        pca.fit(train_x)

        train_x = pca.transform(train_x)
        valid_x = pca.transform(valid_x)
        test_x = pca.transform(test_x)

    print(train_x.shape)

    print("start fitting gmm on training data")
    gmm.fit(train_x)

    valid_pred_y = gmm.predict(valid_x)
    valid_true_y = np.loadtxt(os.path.join(args.exp_dir, "val.label"), dtype=int)

    if args.one2one:
        print("linear assignment")
        cost_matrix = np.zeros((args.num, args.num))

        for i, j in zip(valid_pred_y, valid_true_y):
            cost_matrix[i,j] -= 1

        row_ind, col_ind = linear_sum_assignment(cost_matrix)
    else:
        # (nsamples, ncomponents)
        valid_score = gmm.predict_proba(valid_x)
        valid_max_index = np.argmax(valid_score, axis=0)
        col_ind = {}
        for i in range(args.num):
            col_ind[i] = valid_true_y[valid_max_index[i]]

    print(col_ind)
    correct = 0.
    for i, j in zip(valid_pred_y, valid_true_y):
        if col_ind[i] == j:
            correct += 1
    print("validation acc {}".format(correct / len(valid_pred_y)))

    test_pred_y = gmm.predict(test_x)
    test_true_y = np.loadtxt(os.path.join(args.exp_dir, "test.label"), dtype=int)
    correct = 0.
    for i, j in zip(test_pred_y, test_true_y):
        if col_ind[i] == j:
            correct += 1
    print("test acc {}".format(correct / len(test_pred_y)))

    train_pred_y = gmm.predict(train_x)
    train_true_y = np.loadtxt(os.path.join(args.exp_dir, "train.label"), dtype=int)
    correct = 0.
    for i, j in zip(train_pred_y, train_true_y):
        if col_ind[i] == j:
            correct += 1
    print("train acc {}".format(correct / len(train_pred_y)))
    return correct / len(train_pred_y)
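The one2one branch above negates a (predicted cluster, true label) count matrix so that the Hungarian algorithm picks the label permutation maximising agreement. A minimal, self-contained sketch of the same idea on toy labels:

import numpy as np
from scipy.optimize import linear_sum_assignment

pred = np.array([0, 0, 1, 1, 2, 2])            # toy cluster assignments
true = np.array([2, 2, 0, 0, 1, 1])            # toy ground-truth labels

k = max(pred.max(), true.max()) + 1
contingency = np.zeros((k, k))
np.add.at(contingency, (pred, true), 1)        # counts of (cluster, label) pairs

_, mapping = linear_sum_assignment(-contingency)  # maximise matched counts
accuracy = (mapping[pred] == true).mean()
print(mapping, accuracy)                       # [2 0 1] 1.0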
obj_list = []
gt = []
for im, seq in test_loader:
    seq_pred = bc(im)
    obj_ids = np.round(
        np.matmul(seq_pred[0, :, :].cpu().detach().numpy(),
                  np.arange(6))).astype(int)

    obj_list.append(obj_ids)
    gt.append(seq[0, :].cpu().numpy())

np.savetxt('../../exps/perms/actions_tcn_%02d.txt' % args.seed,
           np.array(obj_list))
np.savetxt('../../exps/perms/gt_actions_tcn_%02d.txt' % args.seed,
           np.array(gt))

# Constrain using Hungarian algorithm

obj_list = []
for im, seq in test_loader:
    seq_pred = bc(im)
    _, obj_ids = linear_sum_assignment(
        1.0 - seq_pred[0, :, :].cpu().detach().numpy())
    #obj_ids = np.argmax(seq_pred[0,:,:].detach().numpy(),-1)

    obj_list.append(obj_ids)

np.savetxt('../../exps/perms/actions_tcn_hung_%02d.txt' % args.seed,
           np.array(obj_list))
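A small illustration with synthetic scores of what the Hungarian constraint above adds over a per-step argmax: the assignment forces every object id to be used exactly once across the sequence, which argmax does not guarantee.

import numpy as np
from scipy.optimize import linear_sum_assignment

scores = np.array([[0.6, 0.3, 0.1],
                   [0.5, 0.4, 0.1],
                   [0.2, 0.2, 0.6]])           # toy per-step object probabilities
print(np.argmax(scores, axis=-1))              # [0 0 2] -> object 0 is repeated
_, obj_ids = linear_sum_assignment(1.0 - scores)
print(obj_ids)                                 # [0 1 2] -> a valid permutation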
Example #41
0
    def permutations(p_atoms, p_centroid, p_weights, q_atoms, q_centroid,
                     q_weights):
        """Generates two possible permutations of atom order. This method uses the principle component
        of the inertia tensor to prealign the molecules and hungarian method to determine the order.
        There are always two possible permutation depending on the way to pre-aligning the molecules.

        Args:
            p_atoms: atom numbers
            p_centroid: array of atom positions
            p_weights: array of atom weights
            q_atoms: atom numbers
            q_centroid: array of atom positions
            q_weights: array of atom weights

        Yield:
            perm_inds: array of atoms' order
        """
        # get the principal axis of P and Q
        p_axis = HungarianOrderMatcher.get_principal_axis(
            p_centroid, p_weights)
        q_axis = HungarianOrderMatcher.get_principal_axis(
            q_centroid, q_weights)

        # rotate Q onto P assuming the principal axes are parallel
        U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, p_axis)
        p_centroid_test = np.dot(p_centroid, U)

        # generate full view from q shape to fill in atom view on the fly
        perm_inds = np.zeros(len(p_atoms), dtype=int)

        # Find unique atoms
        species = np.unique(p_atoms)

        for specie in species:
            p_atom_inds = np.where(p_atoms == specie)[0]
            q_atom_inds = np.where(q_atoms == specie)[0]
            A = q_centroid[q_atom_inds]
            B = p_centroid_test[p_atom_inds]

            # Perform Hungarian analysis on distance matrix between atoms of 1st
            # structure and trial structure
            distances = cdist(A, B, "euclidean")
            a_inds, b_inds = linear_sum_assignment(distances)

            perm_inds[q_atom_inds] = p_atom_inds[b_inds]

        yield perm_inds

        # rotate Q onto P assuming the principal axes are antiparallel
        U = HungarianOrderMatcher.rotation_matrix_vectors(q_axis, -p_axis)
        p_centroid_test = np.dot(p_centroid, U)

        # generate full view from q shape to fill in atom view on the fly
        perm_inds = np.zeros(len(p_atoms), dtype=int)

        # Find unique atoms
        species = np.unique(p_atoms)

        for specie in species:
            p_atom_inds = np.where(p_atoms == specie)[0]
            q_atom_inds = np.where(q_atoms == specie)[0]
            A = q_centroid[q_atom_inds]
            B = p_centroid_test[p_atom_inds]

            # Perform Hungarian analysis on distance matrix between atoms of 1st
            # structure and trial structure
            distances = cdist(A, B, "euclidean")
            a_inds, b_inds = linear_sum_assignment(distances)

            perm_inds[q_atom_inds] = p_atom_inds[b_inds]

        yield perm_inds
def label_remapper(labels1, labels2):
    conf_mat = confusion_matrix(labels1, labels2)
    row_inds, col_inds = linear_sum_assignment(conf_mat, maximize=True)
    remapper = dict(zip(col_inds, row_inds))
    return np.array(list(map(remapper.get, labels2)))
Example #43
0
File: model.py Project: zbxzc35/deepsvg
    def perfect_matching(self, command_logits, args_logits, hierarch_logits,
                         tgt_commands, tgt_args):
        with torch.no_grad():
            N, G, S, n_args = tgt_args.shape
            visibility_mask = _get_visibility_mask(tgt_commands, seq_dim=-1)
            padding_mask = _get_padding_mask(
                tgt_commands, seq_dim=-1,
                extended=True) * visibility_mask.unsqueeze(-1)

            # Unsqueeze
            tgt_commands, tgt_args, tgt_hierarch = tgt_commands.unsqueeze(
                2), tgt_args.unsqueeze(2), visibility_mask.unsqueeze(2)
            command_logits, args_logits, hierarch_logits = command_logits.unsqueeze(
                1), args_logits.unsqueeze(1), hierarch_logits.unsqueeze(
                    1).squeeze(-2)

            # Loss
            tgt_hierarch, hierarch_logits = tgt_hierarch.repeat(
                1, 1, self.cfg.num_groups_proposal), hierarch_logits.repeat(
                    1, G, 1, 1)
            tgt_commands, command_logits = tgt_commands.repeat(
                1, 1, self.cfg.num_groups_proposal,
                1), command_logits.repeat(1, G, 1, 1, 1)
            tgt_args, args_logits = tgt_args.repeat(
                1, 1, self.cfg.num_groups_proposal, 1,
                1), args_logits.repeat(1, G, 1, 1, 1, 1)

            padding_mask, mask = padding_mask.unsqueeze(2).repeat(
                1, 1, self.cfg.num_groups_proposal,
                1), self.cmd_args_mask[tgt_commands.long()]

            loss_args = F.cross_entropy(
                args_logits.reshape(-1, self.args_dim),
                tgt_args.reshape(-1).long() + 1,
                reduction="none").reshape(N, G, self.cfg.num_groups_proposal,
                                          S, n_args)  # shift due to -1 PAD_VAL
            loss_cmd = F.cross_entropy(
                command_logits.reshape(-1, self.cfg.n_commands),
                tgt_commands.reshape(-1).long(),
                reduction="none").reshape(N, G, self.cfg.num_groups_proposal,
                                          S)
            loss_hierarch = F.cross_entropy(hierarch_logits.reshape(-1, 2),
                                            tgt_hierarch.reshape(-1).long(),
                                            reduction="none").reshape(
                                                N, G,
                                                self.cfg.num_groups_proposal)

            loss_args = (loss_args *
                         mask).sum(dim=[-1, -2]) / mask.sum(dim=[-1, -2])
            loss_cmd = (loss_cmd *
                        padding_mask).sum(dim=-1) / padding_mask.sum(dim=-1)

            loss = 2.0 * loss_args + 1.0 * loss_cmd + 1.0 * loss_hierarch

        # Iterate over the batch-dimension
        assignment_list = []

        full_set = set(range(self.cfg.num_groups_proposal))
        for i in range(N):
            costs = loss[i]
            mask = visibility_mask[i]
            _, assign = linear_sum_assignment(costs[mask].cpu())
            assign = assign.tolist()
            assignment_list.append(assign + list(full_set - set(assign)))

        assignment = torch.tensor(assignment_list,
                                  device=command_logits.device)

        return assignment.unsqueeze(-1).unsqueeze(-1)
Example #44
0
    def deal(self):
        # Initialization
        max_mn = max(self.m, self.n)
        cost = np.zeros([max_mn, max_mn])
        for i in range(self.m):
            for j in range(self.n):
                cost[i][j] = np.linalg.norm(self.agent_pos[i]-self.task_pos[j])
        uavs_pos_record = []
        uavs_pos_record.append(self.agent_pos)

        # Start
        if self.m < self.n:
            while self.m < self.n:
                # add virtual agents to pad the cost matrix
                for i in range(self.m, self.n):
                    for j in range(self.n):
                        cost[i][j] = 0x3f3f3f3f

                row_ind, col_ind = linear_sum_assignment(cost)
                self.agent_pos = self.task_pos[col_ind[:self.m]]   # update agent positions
                self.task_pos = np.delete(
                    self.task_pos, col_ind[:self.m], axis=0)     # remove the assigned tasks
                self.n = self.n - self.m
                uavs_pos_record.append(self.agent_pos[:])

                # update the cost matrix
                max_mn = max(self.m, self.n)
                cost = np.zeros([max_mn, max_mn])
                for i in range(self.m):
                    for j in range(self.n):
                        cost[i][j] = np.linalg.norm(self.agent_pos[i]-self.task_pos[j])

            # add virtual tasks to pad the cost matrix
            for i in range(self.m):
                for j in range(self.n, self.m):
                    cost[i][j] = 0x3f3f3f3f

            row_ind, col_ind = linear_sum_assignment(cost)
            tmp = np.zeros(self.agent_pos.shape)
            for i in range(self.m):
                if col_ind[i] < self.n:
                    tmp[i] = self.task_pos[col_ind[i]]   # update agent position
                else:
                    tmp[i] = self.agent_pos[i]
            # self.agent_pos = self.task_pos[col_ind[:self.m]]
            uavs_pos_record.append(tmp)
        
        else:
            k = self.m // self.n
            tmp = np.zeros(self.agent_pos.shape)
            for t in range(k):
                row_ind, col_ind = linear_sum_assignment(cost[t*self.n:(t+1)*self.n,:self.n])
                tmp[t*self.n:(t+1)*self.n] = self.task_pos[col_ind[:]]

            if self.m%self.n != 0:
                cost_res = np.zeros([self.n, self.n])
                cost_res[:self.m%self.n] = cost[k*self.n:, :self.n]
                cost_res[self.m%self.n:] = 0x3f3f3f3f * np.ones([self.n-self.m%self.n, self.n])
                row_ind, col_ind = linear_sum_assignment(cost_res)
                tmp[k*self.n:] = self.task_pos[col_ind[:self.m%self.n]]

            self.agent_pos = tmp
            uavs_pos_record.append(tmp)

        return uavs_pos_record
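A minimal sketch of the padding trick used in deal above (toy cost values): when agents and tasks are unequal, the cost matrix is padded to a square with a large sentinel so dummy rows or columns absorb the surplus. Note that linear_sum_assignment also accepts rectangular matrices directly; the padding mainly makes the surplus explicit.

import numpy as np
from scipy.optimize import linear_sum_assignment

BIG = 0x3f3f3f3f
cost = np.array([[1.0, 4.0, 2.0],
                 [3.0, 1.0, 5.0]])             # 2 agents, 3 tasks (toy values)
padded = np.full((3, 3), float(BIG))
padded[:2, :] = cost
row_ind, col_ind = linear_sum_assignment(padded)
real = row_ind < 2                             # drop the virtual (padded) agent
print(col_ind[real])                           # tasks assigned to the real agents -> [0 1]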
Example #45
0
File: MOTM.py Project: jutanke/pak
def evaluate(Gt, Hy, T, calc_cost, debug_info=False):
    """ Runs the Multiple Object Tracking Metrics algorithm

        Gt: Ground-truth: [
            [frame, pid, ..DATA.. ],
            ...
        ]

        Hy: hypothesis: [
            [frame, pid, ..DATA.. ],
            ...
        ]

        T: cost threshold after which pairs cannot be
            connected anymore

        calc_cost: {function} that gets the ..DATA.. term
            as parameter: e.g. for points it could calculate
            the distance, for aabb's it could calculate IoU..

        debug_info: {boolean} if True we get the actual Gt-Hy pairs
            that created FP, FN and MME

        return:
            fp: List<Integer> of false-positives
            m: List<Integer> of misses
            mme: List<Integer> of mismatches
            c: List<Integer> of matches
            d: List<Double> of distances between o_i and h_i (summed)
            g: List<Integer> number of objects in t

            all lists have the same length of total number of
            frames
    """
    global HIGH_VALUE
    first_frame = np.min(Gt[:, 0])
    last_frame = np.max(Gt[:, 0])
    assert floor(first_frame) == ceil(first_frame)  # make sure the
    assert floor(last_frame) == ceil(last_frame)  # values are integers
    first_frame = int(first_frame)
    last_frame = int(last_frame)

    number_of_frames = last_frame - first_frame + 1

    fp = [0] * number_of_frames
    m = [0] * number_of_frames
    mme = [0] * number_of_frames
    c = [0] * number_of_frames
    d = [0] * number_of_frames
    g = [0] * number_of_frames

    M = MatchLookup(first_frame, last_frame)

    if debug_info:
        FP_pairs = []
        FN_pairs = []
        MME_pairs = []
        Gt_local2global = LocalFrameIdToGlobal(Gt)
        Hy_local2global = LocalFrameIdToGlobal(Hy)

    # ----------------------------
    # "t" is the true frame number that can be any integer
    # "t_pos" is the respective integer that starts at 0 ...
    for t_pos, t in enumerate(range(first_frame, last_frame + 1)):
        Ot = SingleFrameData(extract_eq(Gt, col=0, value=t))
        Ht = SingleFrameData(extract_eq(Hy, col=0, value=t))
        g[t_pos] = Ot.total_elements  # count number of objects in t

        # ----------------------------------
        # verify if old match is still valid!
        # ----------------------------------
        is_empty = True
        for (o, h) in M.get_matches(t - 1):
            oid, hid = o[0], h[0]
            if Ot.has(oid) and Ht.has(hid):
                o_cur = Ot.find(oid)
                h_cur = Ht.find_best(hid, o_cur, calc_cost)
                cost = calc_cost(o_cur[1:], h_cur[1:])
                if cost < T:
                    # the tracked object is still valid! :)
                    Ot.remove(o_cur)
                    Ht.remove(h_cur)
                    M.insert_match(t, o_cur, h_cur)
                    is_empty = False

        if is_empty:
            M.init(t)  # to allow frames with no matches!

        # ----------------------------------
        # match not-yet corresponding pairs
        # ----------------------------------
        Ot_ummatched = Ot.as_list()  # the already matched elements
        Ht_unmatched = Ht.as_list()  # were removed
        count_o, count_h = len(Ot_ummatched), len(Ht_unmatched)
        C = np.ones((count_o, count_h)) * HIGH_VALUE
        for i, o in enumerate(Ot_ummatched):
            for j, h in enumerate(Ht_unmatched):
                cost = calc_cost(o[1:], h[1:])
                C[i, j] = cost if cost < T else HIGH_VALUE

        row_ind, col_ind = linear_sum_assignment(C)

        for i, j in zip(row_ind, col_ind):
            o_cur, h_cur, cost = Ot_ummatched[i], Ht_unmatched[j], C[i, j]
            if cost < T:
                Ot.remove(o_cur)
                Ht.remove(h_cur)
                M.insert_match(t, o_cur, h_cur)
                if M.has_mismatch(t, o_cur, h_cur):
                    mme[t_pos] += 1
                    if debug_info:  # only do this if we want debug infos
                        MME_pairs.append((Gt_local2global.get_true_idx(t, i),
                                          Hy_local2global.get_true_idx(t, j)))

        # ----------------------------------
        # handle unmatched rest
        # ----------------------------------
        c[t_pos] = M.count_matches(t)
        fp[t_pos] = Ht.elements_left
        assert fp[t_pos] >= 0
        if debug_info:  # only do this if we want debug infos
            for _, leftovers in Ht.lookup.items():
                for leftover in leftovers:
                    item = [t]
                    item.extend(leftover)
                    FP_pairs.append(item)

        m[t_pos] = Ot.elements_left
        assert m[t_pos] >= 0
        if debug_info:  # only do this if we want debug infos
            for _, leftovers in Ot.lookup.items():
                for leftover in leftovers:
                    item = [t]
                    item.extend(leftover)
                    FN_pairs.append(item)

        # ----------------------------------
        # calculate cost between all matches
        # ----------------------------------
        cost_sum = 0
        for (o, h) in M.get_matches(t):
            cost_sum += calc_cost(o[1:], h[1:])
        d[t_pos] = cost_sum

    # ----------------------------

    if debug_info:
        return fp, m, mme, c, d, g, \
               np.array(FN_pairs), np.array(FP_pairs), np.array(MME_pairs)
    else:
        return fp, m, mme, c, d, g
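A hypothetical calc_cost for point annotations, matching the contract described in the docstring above (the name point_cost and the commented call are illustrative, not part of the original module):

import numpy as np

def point_cost(o_data, h_data):
    # Euclidean distance between a ground-truth point and a hypothesis point
    return float(np.linalg.norm(np.asarray(o_data, dtype=float) -
                                np.asarray(h_data, dtype=float)))

# hypothetical call, with Gt/Hy arrays shaped [frame, pid, x, y]:
# fp, m, mme, c, d, g = evaluate(Gt, Hy, T=1.0, calc_cost=point_cost)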
Example #46
0
def evaluate(net, loader, writer, epoch):
    """evaluates on provided data
    """

    net.eval()
    predicts = np.zeros(len(loader.dataset), dtype=np.int32)
    labels = np.zeros(len(loader.dataset), dtype=np.int32)
    intermediates = np.zeros((len(loader.dataset), 2048), dtype=np.float32)
    images = np.zeros((len(loader.dataset), 3, 64, 64), dtype=np.float32)

    print(f"Evaluating on {len(loader.dataset)} samples")

    with torch.no_grad():
        for batch_idx, (batch, targets) in enumerate(loader):
            # logger.progress('processing %d/%d batch' % (batch_idx, len(loader)))
            batch = batch.to(cfg.device, non_blocking=True)
            # assuming the last head is the main one
            # output dimension of the last head
            # should be consistent with the ground-truth
            logits = net(batch, -1)
            start = batch_idx * loader.batch_size
            end = start + loader.batch_size
            end = min(end, len(loader.dataset))
            labels[start:end] = targets.cpu().numpy()
            predicts[start:end] = logits.max(1)[1].cpu().numpy()

            if epoch % cfg.embedding_freq == 0:
                intermediates[start:end] = net(batch, -1, True).cpu().numpy()
                if not cfg.tfm_adaptive_thresholding:
                    for i in range(3):
                        batch[:, i] = (batch[:, i] *
                                       cfg.tfm_stds[i]) + cfg.tfm_means[i]
                images[start:end] = torch.nn.functional.interpolate(
                    batch, size=(64, 64), mode='bicubic',
                    align_corners=False).cpu().numpy()

    # TODO: Gather labels and predicts
    # compute accuracy
    num_classes = labels.max().item() + 1
    count_matrix = np.zeros((num_classes, num_classes), dtype=np.int32)
    for i in range(predicts.shape[0]):
        count_matrix[predicts[i], labels[i]] += 1
    reassignment = np.dstack(
        linear_sum_assignment(count_matrix.max() - count_matrix))[0]
    acc = count_matrix[reassignment[:, 0], reassignment[:, 1]].sum().astype(
        np.float32) / predicts.shape[0]
    nmi = NMI(labels, predicts)
    ari = ARI(labels, predicts)

    # compute f1 scores per class
    predicts_reassigned = reassignment[predicts, 1]
    precision = precision_score(labels,
                                predicts_reassigned,
                                average=None,
                                zero_division=0)
    recall = recall_score(labels,
                          predicts_reassigned,
                          average=None,
                          zero_division=0)
    f1 = f1_score(labels, predicts_reassigned, average=None, zero_division=0)

    logger.info('Evaluation results at epoch %d are: '
                'ACC: %.3f, NMI: %.3f, ARI: %.3f' % (epoch, acc, nmi, ari))
    if cfg.local_rank == 0:
        writer.add_scalar('Evaluate/ACC', acc, epoch)
        writer.add_scalar('Evaluate/NMI', nmi, epoch)
        writer.add_scalar('Evaluate/ARI', ari, epoch)

        for i in range(len(f1)):
            writer.add_scalar(f'Evaluate/f1_{i}', f1[i], epoch)
            writer.add_scalar(f'Evaluate/precision_{i}', precision[i], epoch)
            writer.add_scalar(f'Evaluate/recall_{i}', recall[i], epoch)

        if epoch % cfg.embedding_freq == 0 and cfg.embedding_freq != -1:
            writer.add_embedding(intermediates, labels, images, epoch,
                                 cfg.session)

    return acc
def evaluate_linear_sum_assignment(gt_labels, pred_labels, outFn,
                                   overlapping_inst=False, filterSz=None):
    if filterSz is not None:
        ls, cs = np.unique(pred_labels, return_counts=True)
        pred_labels2 = np.copy(pred_labels)
        print(sorted(zip(cs, ls)))
        for l, c in zip(ls, cs):
            if c < filterSz:
                pred_labels[pred_labels==l] = 0
            else:
                pred_labels2[pred_labels==l] = 0
        print(outFn)
        with h5py.File(outFn + ".hdf", 'w') as f:
            f.create_dataset(
                'volumes/small_inst',
                data=pred_labels2,
                compression='gzip')
    pred_labels_rel, _, _ = relabel_sequential(pred_labels)
    gt_labels_rel, _, _ = relabel_sequential(gt_labels)

    if overlapping_inst:
        pred_tile = [1, ] * pred_labels_rel.ndim
        pred_tile[0] = gt_labels_rel.shape[0]
        gt_tile = [1, ] * gt_labels_rel.ndim
        gt_tile[1] = pred_labels_rel.shape[0]
        pred_tiled = np.tile(pred_labels_rel, pred_tile).flatten()
        gt_tiled = np.tile(gt_labels_rel, gt_tile).flatten()
        mask = np.logical_or(pred_tiled > 0, gt_tiled > 0)
        overlay = np.array([
            pred_tiled[mask],
            gt_tiled[mask]
        ])
        overlay_labels, overlay_labels_counts = np.unique(
            overlay, return_counts=True, axis=1)
        overlay_labels = np.transpose(overlay_labels)
    else:
        overlay = np.array([pred_labels_rel.flatten(),
                            gt_labels_rel.flatten()])
        logger.debug("overlay shape relabeled %s", overlay.shape)
        # get overlaying cells and the size of the overlap
        overlay_labels, overlay_labels_counts = np.unique(
            overlay, return_counts=True, axis=1)
        overlay_labels = np.transpose(overlay_labels)

    # get gt cell ids and the size of the corresponding cell
    gt_labels_list, gt_counts = np.unique(gt_labels_rel, return_counts=True)
    gt_labels_count_dict = {}
    logger.debug("%s %s", gt_labels_list, gt_counts)
    for (l,c) in zip(gt_labels_list, gt_counts):
        gt_labels_count_dict[l] = c

    # get pred cell ids
    pred_labels_list, pred_counts = np.unique(pred_labels_rel,
                                              return_counts=True)
    logger.debug("%s %s", pred_labels_list, pred_counts)

    pred_labels_count_dict = {}
    for (l,c) in zip(pred_labels_list, pred_counts):
        pred_labels_count_dict[l] = c

    num_pred_labels = int(np.max(pred_labels_rel))
    num_gt_labels = int(np.max(gt_labels_rel))
    num_matches = min(num_gt_labels, num_pred_labels)
    iouMat = np.zeros((num_gt_labels+1, num_pred_labels+1),
                      dtype=np.float32)
    recallMat = np.zeros((num_gt_labels+1, num_pred_labels+1),
                         dtype=np.float32)
    precMat = np.zeros((num_gt_labels+1, num_pred_labels+1),
                       dtype=np.float32)
    fscoreMat = np.zeros((num_gt_labels+1, num_pred_labels+1),
                         dtype=np.float32)

    for (u,v), c in zip(overlay_labels, overlay_labels_counts):
        iou = c / (gt_labels_count_dict[v] + pred_labels_count_dict[u] - c)

        iouMat[v, u] = iou
        recallMat[v, u] = c / gt_labels_count_dict[v]
        precMat[v, u] = c / pred_labels_count_dict[u]
        fscoreMat[v, u] = 2 * (precMat[v, u] * recallMat[v, u]) / \
                              (precMat[v, u] + recallMat[v, u])
    iouMat = iouMat[1:, 1:]
    recallMat = recallMat[1:, 1:]
    precMat = precMat[1:, 1:]
    fscoreMat = fscoreMat[1:, 1:]

    metrics = Metrics(outFn)
    tblNameGen = "general"
    metrics.addTable(tblNameGen)
    metrics.addMetric(tblNameGen, "Num GT", num_gt_labels)
    metrics.addMetric(tblNameGen, "Num Pred", num_pred_labels)

    ths = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    aps = []
    metrics.addTable("confusion_matrix")
    for th in ths:
        tblname = "confusion_matrix.th_"+str(th).replace(".", "_")
        metrics.addTable(tblname)
        fscore = 0
        if num_matches > 0 and np.max(iouMat) > th:
            costs = -(iouMat >= th).astype(float) - iouMat / (2*num_matches)
            logger.info("start computing lin sum assign for th %s (%s)",
                        th, outFn)
            gt_ind, pred_ind = linear_sum_assignment(costs)
            assert num_matches == len(gt_ind) == len(pred_ind)
            match_ok = iouMat[gt_ind, pred_ind] >= th
            tp = np.count_nonzero(match_ok)
            fscore_cnt = 0
            for idx, match in enumerate(match_ok):
                if match:
                    fscore = fscoreMat[gt_ind[idx], pred_ind[idx]]
                    if fscore >= 0.8:
                        fscore_cnt += 1
        else:
            tp = 0
            fscore_cnt = 0
        metrics.addMetric(tblname, "Fscore_cnt", fscore_cnt)
        fp = num_pred_labels - tp
        fn = num_gt_labels - tp
        metrics.addMetric(tblname, "AP_TP", tp)
        metrics.addMetric(tblname, "AP_FP", fp)
        metrics.addMetric(tblname, "AP_FN", fn)
        ap = tp / max(1, tp + fn + fp)
        aps.append(ap)
        metrics.addMetric(tblname, "AP", ap)
        precision = tp / max(1, tp + fp)
        metrics.addMetric(tblname, "precision", precision)
        recall = tp / max(1, tp + fn)
        metrics.addMetric(tblname, "recall", recall)
        if (precision + recall) > 0:
            fscore = (2. * precision * recall) / (precision + recall)
        else:
            fscore = 0.0
        metrics.addMetric(tblname, 'fscore', fscore)

    avAP = np.mean(aps)
    metrics.addMetric("confusion_matrix", "avAP", avAP)

    metrics.save()
    return metrics.metricsDict
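A minimal, self-contained sketch (toy IoU values) of the per-threshold matching step above: the cost maximises the number of pairs with IoU >= th, while the small iouMat / (2*num_matches) term only breaks ties towards higher IoU and can never outweigh a whole matched pair.

import numpy as np
from scipy.optimize import linear_sum_assignment

iouMat = np.array([[0.9, 0.1],
                   [0.2, 0.55]])               # toy GT x prediction IoU matrix
th = 0.5
num_matches = min(iouMat.shape)
costs = -(iouMat >= th).astype(float) - iouMat / (2 * num_matches)
gt_ind, pred_ind = linear_sum_assignment(costs)
tp = int(np.count_nonzero(iouMat[gt_ind, pred_ind] >= th))
print(tp)                                      # 2 true positives at this threshold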
Example #48
0
    def _merge_paths(self, paths, start_frame, overlap):
        # 1. Split paths into two parts: new paths and paths which can intersect
        #    existing paths.
        new_paths = [
            path for path in paths if path.frame >= start_frame + overlap
        ]
        int_paths = [
            path for path in paths if path.frame < start_frame + overlap
        ]
        assert len(new_paths) + len(int_paths) == len(paths)

        # 2. Find old paths which intersect with int_paths
        old_paths = []
        for path in self.paths:
            box = path.get_interpolated_boxes()[-1]
            if box.frame >= start_frame:
                old_paths.append(path)

        # 3. Add new paths as is. It should be done only after old_paths
        # variable is initialized.
        self.paths.extend(new_paths)

        # Nothing to merge. Just add all int_paths if any.
        if not old_paths or not int_paths:
            self.paths.extend(int_paths)
            return

        # 4. Build cost matrix for each path and find correspondence using
        # Hungarian algorithm.
        min_cost_thresh = 0.5
        cost_matrix = np.empty(shape=(len(int_paths), len(old_paths)),
                               dtype=float)
        for i, int_path in enumerate(int_paths):
            for j, old_path in enumerate(old_paths):
                cost_matrix[i][j] = 1
                if int_path.label.id == old_path.label.id:
                    # Here start_frame is the start frame of next segment
                    # and stop_frame is the stop frame of current segment
                    stop_frame = start_frame + overlap - 1
                    int_boxes = int_path.get_interpolated_boxes()
                    old_boxes = old_path.get_interpolated_boxes()
                    int_boxes = {
                        box.frame: box
                        for box in int_boxes if box.frame <= stop_frame
                    }
                    old_boxes = {
                        box.frame: box
                        for box in old_boxes if box.frame >= start_frame
                    }
                    assert int_boxes and old_boxes

                    count, error = 0, 0
                    for frame in range(start_frame, stop_frame + 1):
                        box0, box1 = int_boxes.get(frame), old_boxes.get(frame)
                        if box0 and box1:
                            if box0.outside != box1.outside:
                                error += 1
                            else:
                                error += 1 - _calc_box_IoU(box0, box1)
                            count += 1
                        elif box0 or box1:
                            error += 1
                            count += 1

                    cost_matrix[i][j] = error / count

        # 5. Find the optimal solution using the Hungarian algorithm.
        row_ind, col_ind = linear_sum_assignment(cost_matrix)
        int_paths_indexes = list(range(0, len(int_paths)))
        for i, j in zip(row_ind, col_ind):
            # Reject the match if the cost is too high. Remember
            # inside int_paths_indexes which paths were handled.
            if cost_matrix[i][j] <= min_cost_thresh:
                old_paths[j].merge(int_paths[i])
                int_paths_indexes[i] = -1

        # 6. Add all paths which were not processed.
        for i in int_paths_indexes:
            if i != -1:
                self.paths.append(int_paths[i])
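The helper _calc_box_IoU is not shown in this excerpt; a plausible sketch, assuming the interpolated boxes expose xtl/ytl/xbr/ybr corner attributes (an assumption, not confirmed here), would be:

def _calc_box_IoU(box0, box1):
    # intersection rectangle (assumed corner attributes: xtl, ytl, xbr, ybr)
    x0 = max(box0.xtl, box1.xtl)
    y0 = max(box0.ytl, box1.ytl)
    x1 = min(box0.xbr, box1.xbr)
    y1 = min(box0.ybr, box1.ybr)
    inter = max(0, x1 - x0) * max(0, y1 - y0)
    area0 = (box0.xbr - box0.xtl) * (box0.ybr - box0.ytl)
    area1 = (box1.xbr - box1.xtl) * (box1.ybr - box1.ytl)
    union = area0 + area1 - inter
    return inter / union if union > 0 else 0.0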
Example #49
0
 def _assign_unique_in_plate(self, plate):
     preds = np.array(list(map(itemgetter(1), plate)))
     preds = np.vectorize(lambda x: x if x != -np.inf else -1e10)(preds)
     _, indices = linear_sum_assignment(-preds)
     return [(k, v.item()) for (k, _), v in zip(plate, indices)]
Example #50
0
    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.

        This method assign each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign -1 by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              -1,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(num_gts,
                                assigned_gt_inds,
                                None,
                                labels=assigned_labels)
        img_h, img_w, _ = img_meta['img_shape']
        factor = torch.Tensor([img_w, img_h, img_w,
                               img_h]).unsqueeze(0).to(gt_bboxes.device)

        # 2. compute the weighted costs
        # classification and bboxcost.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost
        normalize_gt_bboxes = gt_bboxes / factor
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # regression IoU cost; by default GIoU is used in the official DETR.
        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        # weighted sum of above three costs
        cost = cls_cost + reg_cost + iou_cost

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        if linear_sum_assignment is None:
            raise ImportError('Please run "pip install scipy" '
                              'to install scipy first.')
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(
            bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(
            bbox_pred.device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(num_gts,
                            assigned_gt_inds,
                            None,
                            labels=assigned_labels)
Example #51
0
    def learn_policy(self, game_env, context=None, time=None):
        # context is not used in ESE
        (nbPlayer, nbArm) = np.shape(game_env)
        assert nbPlayer == self.nbPlayer and nbArm == self.nbArm, "input does not match the stored environment parameters."
        assert nbPlayer <= nbArm, "the number of players should not exceed the number of arms."

        self.time = self.time + 1

        pulls = np.zeros((nbPlayer, nbArm))

        # there is a single random-hopping phase in the game; arm values are not learned in this phase
        if self.time <= self.Tr:
            for agentID in range(nbPlayer):
                armID = self.agents[agentID].random_hop(None, time)
                pulls[agentID][armID] = 1

            collisions = self.resolve_collision(pulls)

        elif self.time <= self.Tr + self.nbArm:
            # this phase is to simulate the process for players to estimate the number of players
            for agentID in range(nbPlayer):
                pulls[agentID][self.agents[agentID].current_arm] = 1

            collisions = self.resolve_collision(pulls)

        elif self.time - self.round_to_last_epoch <= self.nbArm * self.Ts:
            #exploration with sequential hopping
            for agentID in range(nbPlayer):
                armID = self.agents[agentID].sequential_hop(None, time)
                pulls[agentID][armID] = 1

            collisions = self.resolve_collision(pulls)
            for agentID in range(nbPlayer):
                self.agents[agentID].learn_arm_value(None,
                                                     game_env[agentID, :],
                                                     collisions)

        elif self.time - self.round_to_last_epoch - self.nbArm * self.Ts <= self.nbPlayer * self.nbArm * self.Tb:
            # signaling phase
            # we don't need to simulate the complete signaling process, but we do need to truncate the precision of each player's estimated arm values

            # the original signaling phase, according to [Tibrewal2019], does not contribute to the accumulated sum (experience) of arm evaluation,
            # since most of the time a player needs to either "observe" a certain arm without playing one, or stop playing to "broadcast" bit "0" to the other players.
            # However, this incurs a huge amount of regret since the signaling phase is too long to neglect.
            # This may be a flaw in the original design of the ESE algorithm.
            # In our implementation we assume that the players are still able to get a reward during the signaling phase.

            if self.time - self.round_to_last_epoch - self.nbArm * self.Ts == 1:
                arm_matrix = np.zeros((self.nbPlayer, self.nbArm))
                for agentID in range(nbPlayer):
                    arm_matrix[agentID, :] = self.agents[agentID].arm_score

                truncated_arm_matrix = numfi(
                    arm_matrix,
                    bits_frac=int(np.log2(4 * self.nbPlayer / self.epsilon)))

                for agentID in range(nbPlayer):
                    self.agents[
                        agentID].estimated_arm_matrix = truncated_arm_matrix
                    self.agents[agentID].estimated_arm_matrix[
                        agentID, :] = self.agents[agentID].arm_score

                    # each player performs a local Hungarian algorithm to derive its "optimal" policy
                    # the method requires the number of rows (jobs) to be larger than that of columns (workers)
                    cost_matrix = np.negative(
                        self.agents[agentID].estimated_arm_matrix.transpose())
                    # note that the cost_matrix is a transpose of the original matrix
                    col_ind, row_ind = linear_sum_assignment(cost_matrix)

                    # set player's policy
                    for ii in range(len(row_ind)):
                        playerID = row_ind[ii]
                        if playerID == agentID:
                            self.agents[agentID].policy = col_ind[ii]
                            pulls[agentID][col_ind[ii]] = 1

            for agentID in range(self.nbPlayer):
                armID = self.agents[agentID].policy
                pulls[agentID][armID] = 1

            collisions = self.resolve_collision(pulls)

        elif self.time - self.round_to_last_epoch - self.nbArm * self.Ts - self.nbPlayer * self.nbArm * self.Tb <= int(
                np.exp(self.current_epoch)):
            # exploitation phase
            for agentID in range(nbPlayer):
                armID = self.agents[agentID].exploit(context, self.time)
                pulls[agentID][armID] = 1

            collisions = self.resolve_collision(pulls)

            if self.time == self.round_to_last_epoch + self.nbArm * self.Ts + self.nbPlayer * self.nbArm * self.Tb + int(
                    np.exp(self.current_epoch)):
                #update round number
                self.round_to_last_epoch += self.nbArm * self.Ts + self.nbPlayer * self.nbArm * self.Tb + int(
                    np.exp(self.current_epoch))
                self.current_epoch = self.current_epoch + 1

                info_logger().log_info('ESE play epoch {}'.format(
                    self.current_epoch))  #debug

        current_rewards = self.observe_distributed_payoff(game_env, collisions)
        total_rewards = np.sum(current_rewards)
        return pulls, total_rewards, current_rewards
Example #52
0
File: tracker.py Project: bjuncek/detr
    def reid(self, blob, new_det_pos, new_det_scores):
        """Tries to ReID inactive tracks with new detections."""
        new_det_features = [
            torch.zeros(0).cuda() for _ in range(len(new_det_pos))
        ]

        if self.do_reid:
            new_det_features = self.reid_network.test_rois(
                blob['img'], new_det_pos).data

            if len(self.inactive_tracks) >= 1:
                # calculate appearance distances
                dist_mat, pos = [], []
                for t in self.inactive_tracks:
                    dist_mat.append(
                        torch.cat([
                            t.test_features(feat.view(1, -1))
                            for feat in new_det_features
                        ],
                                  dim=1))
                    pos.append(t.pos)
                if len(dist_mat) > 1:
                    dist_mat = torch.cat(dist_mat, 0)
                    pos = torch.cat(pos, 0)
                else:
                    dist_mat = dist_mat[0]
                    pos = pos[0]

                # calculate IoU distances
                iou = bbox_overlaps(pos, new_det_pos)
                iou_mask = torch.ge(iou, self.reid_iou_threshold)
                iou_neg_mask = ~iou_mask
                # add a big value to all impossible assignments so they are never chosen
                dist_mat = dist_mat * iou_mask.float() + iou_neg_mask.float(
                ) * 1000
                dist_mat = dist_mat.cpu().numpy()

                row_ind, col_ind = linear_sum_assignment(dist_mat)

                assigned = []
                remove_inactive = []
                for r, c in zip(row_ind, col_ind):
                    if dist_mat[r, c] <= self.reid_sim_threshold:
                        t = self.inactive_tracks[r]
                        self.tracks.append(t)
                        t.count_inactive = 0
                        t.pos = new_det_pos[c].view(1, -1)
                        t.reset_last_pos()
                        t.add_features(new_det_features[c].view(1, -1))
                        assigned.append(c)
                        remove_inactive.append(t)

                for t in remove_inactive:
                    self.inactive_tracks.remove(t)

                keep = torch.Tensor([
                    i for i in range(new_det_pos.size(0)) if i not in assigned
                ]).long().cuda()
                if keep.nelement() > 0:
                    new_det_pos = new_det_pos[keep]
                    new_det_scores = new_det_scores[keep]
                    new_det_features = new_det_features[keep]
                else:
                    new_det_pos = torch.zeros(0).cuda()
                    new_det_scores = torch.zeros(0).cuda()
                    new_det_features = torch.zeros(0).cuda()

        return new_det_pos, new_det_scores, new_det_features
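
In the reid method above, geometrically implausible pairs (IoU below the threshold) are pushed out of reach by adding a large constant to the appearance distance before the assignment is solved. Below is a small NumPy-only sketch of that gating step; the matrices and the 0.5 threshold are made up for illustration.

import numpy as np
from scipy.optimize import linear_sum_assignment

# Hypothetical appearance distances: rows = inactive tracks, cols = new detections.
dist_mat = np.array([[0.2, 0.9],
                     [0.8, 0.3]])
# Hypothetical IoU gate: True where a track/detection pairing is plausible.
iou_mask = np.array([[True, False],
                     [False, True]])

# Keep the matrix shape intact but make implausible pairs prohibitively expensive.
gated = dist_mat * iou_mask + (~iou_mask) * 1000.0

row_ind, col_ind = linear_sum_assignment(gated)
reid_sim_threshold = 0.5  # assumed appearance threshold, mirroring the method above
matches = [(int(r), int(c)) for r, c in zip(row_ind, col_ind)
           if gated[r, c] <= reid_sim_threshold]
print(matches)  # [(0, 0), (1, 1)]
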
Example #53
0
    def assign_prediction_to_detection(self, predictions, detections, eps, is_ball=False):
        """
        Match predictions to detections by
        looking at the predictions we generated
        at the previous time-step.
        Distance-based matching is performed with
        sensibility parameter eps.
        Matching detections to predictions is
        a minimum weight matching problem in a
        bi-partite graph.
        :param eps: max distance up to which we
        can consider 2 boxes to be matching.
        :param detections: bounding boxes coming
        from detector at current time-step.

        :return: 3 lists containing indices of detections and
        prev_predictions -matches (list of couples),
        unmatched_detections and unmatched_predictions.
        """
        # handle case in which either detections or predictions are missing
        if len(detections) == 0:
            return [], [], [i for i in range(len(predictions))]
        elif len(predictions) == 0:
            return [], [i for i in range(len(detections))], []

        # build matrices for simplifying computations (avoid last col)
        predm = np.array(predictions)
        detections = np.array(detections)

        # compute cost matrix
        sse = lambda x, y: min(np.sum((x - y) ** 2), eps)

        cost = np.zeros((len(detections), len(predm)))
        if not is_ball:
            for i, (x, y, w, h) in enumerate(detections):
                # dc = np.array( [x+w/2, y+h/2] )
                dc = np.array([x, y, x + w, y + h])
                for j, (x, y, w, h) in enumerate(predm):
                    # pc = np.array( [x + w / 2, y + h / 2] )
                    pc = np.array([x, y, x + w, y + h])
                    # cost[i, j] = cost_function(dc, pc)
                    cost[i, j] = -iou(dc, pc)
        else:
            # ball tracker will need a more 'forgiving'
            # metric since the speed is much higher wrt players
            for i, (x, y, w, h) in enumerate(detections):
                dc = np.array([x + w / 2, y + h / 2])
                for j, (x, y, w, h) in enumerate(predm):
                    pc = np.array([x + w / 2, y + h / 2])
                    cost[i, j] = sse(dc, pc)

        # solve minimum weight matching in bipartite graph problem
        # for assigning each detection to at most one prediction (cost matrix might be rectangular)
        # more here https://docs.scipy.org/doc/scipy-0.18.1/reference/generated/scipy.optimize.linear_sum_assignment.html
        row_ind, col_ind = linear_sum_assignment(cost)
        # print("indices", row_ind, col_ind)
        print("Cost matrix", cost[row_ind, col_ind])
        matches = []
        # linear sum assignment will exclude nodes if cost is not a square matrix
        unmatched_d = [i for i in range(len(detections)) if i not in row_ind]
        unmatched_p = [i for i in range(len(predictions)) if i not in col_ind]
        # filter out assignments with higher cost than eps
        for i, cost_val in enumerate(cost[row_ind, col_ind]):
            if cost_val < eps:
                # matches will contain indices
                matches.append((row_ind[i], col_ind[i]))
            else:
                # this detection-prediction match is undone
                unmatched_d.append(row_ind[i])
                unmatched_p.append(col_ind[i])

        if is_ball:
            print("Matches(d,p):", matches)
            print("Unmatched prediction:", unmatched_p)
            print("Unmatched detection:", unmatched_d)

        return matches, unmatched_d, unmatched_p
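
With a rectangular cost matrix, linear_sum_assignment only pairs min(#rows, #cols) items, so the leftovers have to be collected explicitly, as the method above does. A compact sketch of that bookkeeping; the cost values and the 0.5 gate are arbitrary.

import numpy as np
from scipy.optimize import linear_sum_assignment

# 3 detections (rows) but only 2 predictions (columns): one detection must stay unmatched.
cost = np.array([[0.1, 0.9],
                 [0.8, 0.2],
                 [0.7, 0.6]])

row_ind, col_ind = linear_sum_assignment(cost)
eps = 0.5  # assumed gating threshold, analogous to the eps parameter above

matches = [(int(r), int(c)) for r, c in zip(row_ind, col_ind) if cost[r, c] < eps]
unmatched_detections = [i for i in range(cost.shape[0]) if i not in row_ind]
unmatched_predictions = [j for j in range(cost.shape[1]) if j not in col_ind]

print(matches)                # [(0, 0), (1, 1)]
print(unmatched_detections)   # [2]
print(unmatched_predictions)  # []
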
Example #54
0
def assignDetectionsToTrackers(trackers, detections, ndThreshold,
                               threshold_flag, x_thresh, y_thresh,
                               distance_flag):
    # Build a cost matrix - all zeros (size determined by the number of trackers and detections)
    # Set it up as float to match our cost function
    costMatrix = np.zeros((len(trackers), len(detections)), dtype=np.float32)

    # Fill the cost matrix with 'prices' derived from the
    # cost term
    # A cost for every combination of tracker and new detection
    for t, trk in enumerate(trackers):
        for d, det in enumerate(detections):
            costMatrix[t, d] = boxCost(trk, det, threshold_flag, x_thresh,
                                       y_thresh, distance_flag)

    # Produce matches
    # Solve for the assignment that maximises the total cost using the
    # Hungarian algorithm (aka Munkres algorithm)
    matchedRowIdx, matchedColIdx = linear_sum_assignment(-costMatrix)

    # First of all find any tracker that didn't find a date
    # with a new detection at all
    # add it to the unmatchedTrackers list
    # Maybe that object has gone away ...
    unmatchedTrackers, unmatchedDetections = [], []
    for t, trk in enumerate(trackers):
        if (t not in matchedRowIdx):
            unmatchedTrackers.append(t)

    # Now find any detection that didn't find a date
    # with an old tracker at all
    # add it to the unmatchedDetections list
    # Maybe its a new object
    for d, det in enumerate(detections):
        if (d not in matchedColIdx):
            unmatchedDetections.append(d)

    # Now, look at the matches in more detail
    # Maybe there are a few matches that are not
    # going to work
    matches = []

    # If the cost is less than ndThreshold then
    # override the match - its not good enough
    # If you change the cost function, you'll probably
    # need to change ndThreshold as well
    for m, _ in enumerate(matchedRowIdx):
        if (costMatrix[matchedRowIdx[m], matchedColIdx[m]] < ndThreshold):
            # Nope, not really a match
            # add the detection to unmatched detections list
            # add the tracker to unmatched tracker list
            unmatchedTrackers.append(matchedRowIdx[m])
            unmatchedDetections.append(matchedColIdx[m])
        else:
            # It's a match
            # Record details of the match - tracker index and detection index
            match = np.empty((1, 2), dtype=int)
            match[0][0] = matchedRowIdx[m]
            match[0][1] = matchedColIdx[m]
            # Add to matches list
            matches.append(match)

    # Clean and return
    if (len(matches) == 0):
        matches = np.empty((0, 2), dtype=int)
    else:
        matches = np.concatenate(matches, axis=0)

    return matches, np.array(unmatchedDetections), np.array(unmatchedTrackers)
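
The function above maximises total match quality by negating the cost matrix; recent SciPy (1.4+) also accepts maximize=True directly and yields the same assignment. A tiny check of that equivalence with an arbitrary score matrix.

import numpy as np
from scipy.optimize import linear_sum_assignment

score = np.array([[0.9, 0.1, 0.4],
                  [0.3, 0.8, 0.2],
                  [0.5, 0.6, 0.7]])

rows_neg, cols_neg = linear_sum_assignment(-score)                 # maximise via negation
rows_max, cols_max = linear_sum_assignment(score, maximize=True)   # SciPy >= 1.4

# With a unique optimum the two formulations agree exactly.
assert (rows_neg == rows_max).all() and (cols_neg == cols_max).all()
print(score[rows_neg, cols_neg].sum())  # maximal total score: 0.9 + 0.8 + 0.7
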
Example #55
        cluster_boxes.append(((left, top, right - left, bottom - top), str(count), cluster)) # xywh, id
        cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 1)
    
    count += 1



# IOU MATCHING WITH YOLO
# objects = yolo(img, 'door_and_handle_custom', 0.2, 0.3).detected_objects
objects = yolo(img, 'costa', 0.5, 0.3).detected_objects
cost_matrix = np.zeros((len(objects), len(cluster_boxes)))
for i, obj in enumerate(objects):
    for j, (box, id, cluster) in enumerate(cluster_boxes):
        cost_matrix[i, j] = calcBoundingBoxIOU(obj.xywh, box)
cost_matrix = -cost_matrix
matches = linear_sum_assignment(cost_matrix)



selection = None

for i, cluster in enumerate(cluster_boxes):
    for j, index in enumerate(matches[1]):
        if i == index:
            cv2.putText(frame, 'cluster {}: {}'.format(cluster[1], objects[j].name), (cluster[0][0], cluster[0][1]-5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2)
            if objects[j].name == 'coffee':
                selection = cluster

if selection is not None:
    cluster = selection
Example #56
0
File: fcos.py  Project: zyg11/DeFCN
    def get_ground_truth(self, shifts, targets, box_cls, box_delta,
                         box_filter):
        """
        Args:
            shifts (list[list[Tensor]]): a list of N=#image elements. Each is a
                list of #feature level tensors. The tensors contains shifts of
                this image on the specific feature level.
            targets (list[Instances]): a list of N `Instances`s. The i-th
                `Instances` contains the ground-truth per-instance annotations
                for the i-th input image.  Specify `targets` during training only.

        Returns:
            gt_classes (Tensor):
                An integer tensor of shape (N, R) storing ground-truth
                labels for each shift.
                R is the total number of shifts, i.e. the sum of Hi x Wi for all levels.
                Shifts in the valid boxes are assigned their corresponding label in the
                [0, K-1] range. Shifts in the background are assigned the label "K".
                Shifts in the ignore areas are assigned a label "-1", i.e. ignore.
            gt_shifts_deltas (Tensor):
                Shape (N, R, 4).
                The last dimension represents ground-truth shift2box transform
                targets (dl, dt, dr, db) that map each shift to its matched ground-truth box.
                The values in the tensor are meaningful only when the corresponding
                shift is labeled as foreground.
        """
        gt_classes = []
        gt_shifts_deltas = []

        box_cls = torch.cat(
            [permute_to_N_HWA_K(x, self.num_classes) for x in box_cls], dim=1)
        box_delta = torch.cat([permute_to_N_HWA_K(x, 4) for x in box_delta],
                              dim=1)
        box_filter = torch.cat([permute_to_N_HWA_K(x, 1) for x in box_filter],
                               dim=1)
        box_cls = box_cls.sigmoid_() * box_filter.sigmoid_()

        num_fg = 0
        num_gt = 0

        for shifts_per_image, targets_per_image, box_cls_per_image, box_delta_per_image in zip(
                shifts, targets, box_cls, box_delta):
            shifts_over_all_feature_maps = torch.cat(shifts_per_image, dim=0)

            gt_boxes = targets_per_image.gt_boxes

            prob = box_cls_per_image[:, targets_per_image.gt_classes].t()
            boxes = self.shift2box_transform.apply_deltas(
                box_delta_per_image, shifts_over_all_feature_maps)
            iou = pairwise_iou(gt_boxes, Boxes(boxes))
            quality = prob**(1 - self.poto_alpha) * iou**self.poto_alpha

            deltas = self.shift2box_transform.get_deltas(
                shifts_over_all_feature_maps, gt_boxes.tensor.unsqueeze(1))

            if self.center_sampling_radius > 0:
                centers = gt_boxes.get_centers()
                is_in_boxes = []
                for stride, shifts_i in zip(self.fpn_strides,
                                            shifts_per_image):
                    radius = stride * self.center_sampling_radius
                    center_boxes = torch.cat((
                        torch.max(centers - radius, gt_boxes.tensor[:, :2]),
                        torch.min(centers + radius, gt_boxes.tensor[:, 2:]),
                    ),
                                             dim=-1)
                    center_deltas = self.shift2box_transform.get_deltas(
                        shifts_i, center_boxes.unsqueeze(1))
                    is_in_boxes.append(center_deltas.min(dim=-1).values > 0)
                is_in_boxes = torch.cat(is_in_boxes, dim=1)
            else:
                # no center sampling, it will use all the locations within a ground-truth box
                is_in_boxes = deltas.min(dim=-1).values > 0

            quality[~is_in_boxes] = -1

            gt_idxs, shift_idxs = linear_sum_assignment(quality.cpu().numpy(),
                                                        maximize=True)

            num_fg += len(shift_idxs)
            num_gt += len(targets_per_image)

            gt_classes_i = shifts_over_all_feature_maps.new_full(
                (len(shifts_over_all_feature_maps), ),
                self.num_classes,
                dtype=torch.long)
            gt_shifts_reg_deltas_i = shifts_over_all_feature_maps.new_zeros(
                len(shifts_over_all_feature_maps), 4)
            if len(targets_per_image) > 0:
                # ground truth classes
                gt_classes_i[shift_idxs] = targets_per_image.gt_classes[
                    gt_idxs]
                # ground truth box regression
                gt_shifts_reg_deltas_i[
                    shift_idxs] = self.shift2box_transform.get_deltas(
                        shifts_over_all_feature_maps[shift_idxs],
                        gt_boxes[gt_idxs].tensor)

            gt_classes.append(gt_classes_i)
            gt_shifts_deltas.append(gt_shifts_reg_deltas_i)

        get_event_storage().put_scalar("num_fg_per_gt", num_fg / num_gt)

        return torch.stack(gt_classes), torch.stack(gt_shifts_deltas)
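
The matching above blends classification probability and IoU into a single quality score per (ground-truth, location) pair and then runs a one-to-one maximising assignment. A stripped-down NumPy sketch of that idea; the numbers and the poto_alpha value are invented for illustration.

import numpy as np
from scipy.optimize import linear_sum_assignment

poto_alpha = 0.8  # assumed blending factor, in the spirit of self.poto_alpha above

# Hypothetical per-image inputs: 2 ground-truth boxes, 4 candidate locations.
prob = np.array([[0.7, 0.2, 0.1, 0.6],   # probability of each gt's class at each location
                 [0.1, 0.8, 0.3, 0.2]])
iou = np.array([[0.6, 0.1, 0.0, 0.5],    # IoU of the predicted box with each gt box
                [0.0, 0.7, 0.4, 0.1]])
is_in_boxes = np.array([[True, False, False, True],
                        [False, True, True, False]])

quality = prob ** (1 - poto_alpha) * iou ** poto_alpha
quality[~is_in_boxes] = -1  # forbid locations outside the (center-sampled) gt boxes

gt_idxs, loc_idxs = linear_sum_assignment(quality, maximize=True)
print([(int(g), int(s)) for g, s in zip(gt_idxs, loc_idxs)])  # [(0, 0), (1, 1)]: one foreground location per gt
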
Example #57
0
    def update(self, measurements, bbs_centers, bbs_sizes, time_step,
               empty_list):
        # calculate the time difference between two neighboured frames
        dt = time_step - self.last_time_step
        self.last_time_step = time_step
        # check to see if the list of input bounding box centroids
        # is empty.

        if empty_list == 1:
            # loop over any existing tracked pedestrians and mark them
            # as disappeared
            for pedestrianID in list(self.disappeared.keys()):
                self.disappeared[pedestrianID] += 1
                self.visibility[pedestrianID] = False

                # if we have reached a maximum number of consecutive
                # frames where a given pedestrian has been marked as
                # missing, deregister it
                if self.disappeared[pedestrianID] > self.maxDisappeared:
                    self.deregister(pedestrianID)
            # update the position of not disappeared pedestrians by prediction
            try:
                for pedestrianID in list(self.estimates.keys()):
                    self.kfs[pedestrianID].predict()
                    self.estimates[pedestrianID] = [
                        self.kfs[pedestrianID].x[0, 0],
                        self.kfs[pedestrianID].x[2, 0]
                    ]
                    self.direction[pedestrianID] = [
                        self.kfs[pedestrianID].x[1, 0] * dt,
                        self.kfs[pedestrianID].x[3, 0] * dt
                    ]
                    self.area_of_ellipse[
                        pedestrianID] = self.cal_size_of_ellipse(
                            self.kfs[pedestrianID].P)
            except AttributeError:
                print('no measurements!!')
            # terminate early as there are no centroids or tracking info to update
            return self.estimates, self.bbs_centers, self.bbs_sizes, self.visibility, self.direction, self.area_of_ellipse,\
                    self.sum_of_distance, self.num_of_matches
        # initialize an array of input centroids for the current frame
        newMeasurements = np.asarray(measurements)

        # if we are currently not tracking any pedestrians take the input
        # centroids and register each of them
        if len(self.estimates) == 0:
            for i in range(0, len(newMeasurements)):
                self.register(newMeasurements[i], bbs_centers[i], bbs_sizes[i],
                              dt)

        # otherwise, we are currently tracking pedestrians so we need to
        # try to match the measurements to existing pedestrians
        # centroids
        else:
            # grab the set of pedestrian IDs and corresponding centroids
            pedestrianIDs = list(self.estimates.keys())
            # predict next position firstly, then do the association part
            for pedestrianID in pedestrianIDs:
                self.kfs[pedestrianID].predict()
                self.estimates[pedestrianID] = [
                    self.kfs[pedestrianID].x[0, 0], self.kfs[pedestrianID].x[2,
                                                                             0]
                ]
            # get the predicted position of pedestrians
            pedestrianCentroids = list(self.estimates.values())

            D = dist.cdist(np.array(pedestrianCentroids), newMeasurements)
            rows, cols = linear_sum_assignment(D)
            '''
            rows = D.min(axis=1).argsort()
            cols = D.argmin(axis=1)[rows]
            '''
            usedRows = set()  # Unordered collections of unique elements
            usedCols = set()
            for (row, col) in zip(rows, cols):
                # if the distance between a matched prediction and measurement exceeds
                # the threshold, register the measurement as a new pedestrian and mark
                # the existing one as disappeared
                if D[row, col] > 4:
                    self.register(newMeasurements[col], bbs_centers[col],
                                  bbs_sizes[col], dt)
                    pedestrianID = pedestrianIDs[row]
                    self.visibility[pedestrianID] = False
                    self.disappeared[pedestrianID] += 1
                    # deregister the pedestrian if he has disappeared for long enough
                    if self.disappeared[pedestrianID] > self.maxDisappeared:
                        self.deregister(pedestrianID)
                else:
                    pedestrianID = pedestrianIDs[row]
                    self.kfs[pedestrianID].update(newMeasurements[col])
                    self.estimates[pedestrianID] = [
                        self.kfs[pedestrianID].x[0, 0],
                        self.kfs[pedestrianID].x[2, 0]
                    ]
                    self.area_of_ellipse[
                        pedestrianID] = self.cal_size_of_ellipse(
                            self.kfs[pedestrianID].P)
                    self.direction[pedestrianID] = [
                        self.kfs[pedestrianID].x[1, 0] * dt,
                        self.kfs[pedestrianID].x[3, 0] * dt
                    ]
                    self.bbs_centers[pedestrianID] = bbs_centers[col]
                    self.bbs_sizes[pedestrianID] = bbs_sizes[col]
                    self.disappeared[pedestrianID] = 0
                    self.visibility[pedestrianID] = True
                    self.num_of_matches += 1
                    self.sum_of_distance += D[row, col]

                usedRows.add(row)
                usedCols.add(col)

            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
            unusedCols = set(range(0, D.shape[1])).difference(usedCols)

            # if D.shape[0] >= D.shape[1]:
            # loop over the unused row indexes
            for row in unusedRows:
                # grab the pedestrian ID for the corresponding row
                # index and increment the disappeared counter
                pedestrianID = pedestrianIDs[row]
                self.visibility[pedestrianID] = False
                self.disappeared[pedestrianID] += 1
                # check to see if the number of consecutive
                # frames the pedestrian has been marked "disappeared"
                # for warrants deregistering the pedestrian
                if self.disappeared[pedestrianID] > self.maxDisappeared:
                    self.deregister(pedestrianID)

            for col in unusedCols:
                self.register(newMeasurements[col], bbs_centers[col],
                              bbs_sizes[col], dt)

            # otherwise, if the number of input centroids is greater
            # than the number of existing pedestrian centroids we need to
            # register each new input centroid as a trackable pedestrian
            '''
            else:
                for col in unusedCols:
                    self.register(newMeasurements[col], bbs_centers[col], bbs_sizes[col], dt)
            '''
        # return the set of trackable pedestrians
        # return self.pedestrians
        return self.estimates, self.bbs_centers, self.bbs_sizes, self.visibility, self.direction, self.area_of_ellipse,\
                self.num_of_matches, self.sum_of_distance
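
The tracker above associates Kalman-predicted positions with new measurements through a Euclidean cost matrix and a hard distance gate. A stripped-down sketch of that association step; the coordinates and the gate value of 4 are illustrative.

import numpy as np
from scipy.spatial import distance as dist
from scipy.optimize import linear_sum_assignment

predicted = np.array([[1.0, 1.0], [5.0, 5.0]])              # predicted pedestrian positions
measured = np.array([[1.2, 0.9], [9.0, 9.0], [5.1, 4.8]])   # measurements in the new frame

D = dist.cdist(predicted, measured)
rows, cols = linear_sum_assignment(D)

gate = 4.0  # assumed maximum association distance, as in update() above
for row, col in zip(rows, cols):
    if D[row, col] > gate:
        print("measurement %d starts a new track" % col)
    else:
        print("prediction %d is updated with measurement %d" % (row, col))

unmatched = set(range(len(measured))) - set(int(c) for c in cols)
print("unmatched measurements:", sorted(unmatched))  # [1]
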
Example #58
0
for i in range(len(pattern_coords)):
    #print("coordinate:",pattern_coords[i], "label:", labels[i])
    plt.plot(pattern_coords[i][0], pattern_coords[i][1], "g.", markersize=10)
    #cv2.circle(img,(pattern_coords[i][0], pattern_coords[i][1]), 10, (255,0,0), -1)

# for j in range(len(centroids)):
#     cv2.circle(frame,(int(centroids[j][0]), int(centroids[j][1])), 10, (255,0,0), 3)

#cv2.imshow('clusttered plot',frame)
#plt.scatter(centroids[:, 1],centroids[:, 0], marker = "x", s=150, linewidths = 5, zorder = 10)
#plt.scatter(centroids[:, 1],centroids[:, 0], marker = "x", s=150, linewidths = 5, zorder = 10)

robot_coords_initial = c_centre
C = cdist(robot_coords_initial, centroids)
_, assignment = linear_sum_assignment(C)

#print 'assignment : ',assignment

plt.axis([0, w, h, 0])

for p in range(max_robots):

    plt.plot(robot_coords_initial[p][0],
             robot_coords_initial[p][1],
             'bo',
             markersize=10)
    plt.plot(centroids[p][0], centroids[p][1], 'rs', markersize=7)
    plt.plot((robot_coords_initial[p][0], centroids[assignment[p]][0]),
             (robot_coords_initial[p][1], centroids[assignment[p]][1]), 'k')
Example #59
0
    def eval_sequence(self, data):
        """Returns J&F metrics for one sequence"""

        # Only loaded when run to reduce minimum requirements
        from pycocotools import mask as mask_utils

        num_timesteps = data['num_timesteps']
        num_tracker_ids = data['num_tracker_ids']
        num_gt_ids = data['num_gt_ids']
        gt_dets = data['gt_dets']
        tracker_dets = data['tracker_dets']
        gt_ids = data['gt_ids']
        tracker_ids = data['tracker_ids']

        # get shape of frames
        frame_shape = None
        if num_gt_ids > 0:
            for t in range(num_timesteps):
                if len(gt_ids[t]) > 0:
                    frame_shape = gt_dets[t][0]['size']
                    break
        elif num_tracker_ids > 0:
            for t in range(num_timesteps):
                if len(tracker_ids[t]) > 0:
                    frame_shape = tracker_dets[t][0]['size']
                    break

        if frame_shape:
            # append all zero masks for timesteps in which tracks do not have a detection
            zero_padding = np.zeros(frame_shape, order='F').astype(np.uint8)
            padding_mask = mask_utils.encode(zero_padding)
            for t in range(num_timesteps):
                gt_id_det_mapping = {gt_ids[t][i]: gt_dets[t][i] for i in range(len(gt_ids[t]))}
                gt_dets[t] = [gt_id_det_mapping[index] if index in gt_ids[t] else padding_mask for index
                              in range(num_gt_ids)]
                tracker_id_det_mapping = {tracker_ids[t][i]: tracker_dets[t][i] for i in range(len(tracker_ids[t]))}
                tracker_dets[t] = [tracker_id_det_mapping[index] if index in tracker_ids[t] else padding_mask for index
                                   in range(num_tracker_ids)]
            # also perform zero padding if number of tracker IDs < number of ground truth IDs
            if num_tracker_ids < num_gt_ids:
                diff = num_gt_ids - num_tracker_ids
                for t in range(num_timesteps):
                    tracker_dets[t] = tracker_dets[t] + [padding_mask for _ in range(diff)]
                num_tracker_ids += diff

        j = self._compute_j(gt_dets, tracker_dets, num_gt_ids, num_tracker_ids, num_timesteps)

        # boundary threshold for F computation
        bound_th = 0.008

        # perform matching
        if self.optim_type == 'J&F':
            f = np.zeros_like(j)
            for k in range(num_tracker_ids):
                for i in range(num_gt_ids):
                    f[k, i, :] = self._compute_f(gt_dets, tracker_dets, k, i, bound_th)
            optim_metrics = (np.mean(j, axis=2) + np.mean(f, axis=2)) / 2
            row_ind, col_ind = linear_sum_assignment(- optim_metrics)
            j_m = j[row_ind, col_ind, :]
            f_m = f[row_ind, col_ind, :]
        elif self.optim_type == 'J':
            optim_metrics = np.mean(j, axis=2)
            row_ind, col_ind = linear_sum_assignment(- optim_metrics)
            j_m = j[row_ind, col_ind, :]
            f_m = np.zeros_like(j_m)
            for i, (tr_ind, gt_ind) in enumerate(zip(row_ind, col_ind)):
                f_m[i] = self._compute_f(gt_dets, tracker_dets, tr_ind, gt_ind, bound_th)
        else:
            raise TrackEvalException('Unsupported optimization type %s for J&F metric.' % self.optim_type)

        # append zeros for false negatives
        if j_m.shape[0] < data['num_gt_ids']:
            diff = data['num_gt_ids'] - j_m.shape[0]
            j_m = np.concatenate((j_m, np.zeros((diff, j_m.shape[1]))), axis=0)
            f_m = np.concatenate((f_m, np.zeros((diff, f_m.shape[1]))), axis=0)

        # compute the metrics for each ground truth track
        res = {
            'J-Mean': [np.nanmean(j_m[i, :]) for i in range(j_m.shape[0])],
            'J-Recall': [np.nanmean(j_m[i, :] > 0.5 + np.finfo('float').eps) for i in range(j_m.shape[0])],
            'F-Mean': [np.nanmean(f_m[i, :]) for i in range(f_m.shape[0])],
            'F-Recall': [np.nanmean(f_m[i, :] > 0.5 + np.finfo('float').eps) for i in range(f_m.shape[0])],
            'J-Decay': [],
            'F-Decay': []
        }
        n_bins = 4
        ids = np.round(np.linspace(1, data['num_timesteps'], n_bins + 1) + 1e-10) - 1
        ids = ids.astype(np.uint8)

        for k in range(j_m.shape[0]):
            d_bins_j = [j_m[k][ids[i]:ids[i + 1] + 1] for i in range(0, n_bins)]
            res['J-Decay'].append(np.nanmean(d_bins_j[0]) - np.nanmean(d_bins_j[3]))
        for k in range(f_m.shape[0]):
            d_bins_f = [f_m[k][ids[i]:ids[i + 1] + 1] for i in range(0, n_bins)]
            res['F-Decay'].append(np.nanmean(d_bins_f[0]) - np.nanmean(d_bins_f[3]))

        # count number of tracks for weighting of the result
        res['num_gt_tracks'] = len(res['J-Mean'])
        for field in ['J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall', 'F-Decay']:
            res[field] = np.mean(res[field])
        res['J&F'] = (res['J-Mean'] + res['F-Mean']) / 2
        return res
Example #60
0
    def _merge_boxes(self, boxes, start_frame, overlap):
        # 1. Split boxes on two parts: new and which can be intersected
        # with existing boxes.
        new_boxes = [
            box for box in boxes if box.frame >= start_frame + overlap
        ]
        int_boxes = [box for box in boxes if box.frame < start_frame + overlap]
        assert len(new_boxes) + len(int_boxes) == len(boxes)

        # 2. Convert to more convenient data structure (boxes by frame)
        int_boxes_by_frame = {}
        for box in int_boxes:
            if box.frame in int_boxes_by_frame:
                int_boxes_by_frame[box.frame].append(box)
            else:
                int_boxes_by_frame[box.frame] = [box]

        old_boxes_by_frame = {}
        for box in self.boxes:
            if box.frame >= start_frame:
                if box.frame in old_boxes_by_frame:
                    old_boxes_by_frame[box.frame].append(box)
                else:
                    old_boxes_by_frame[box.frame] = [box]

        # 3. Add new boxes as is. It should be done only after old_boxes_by_frame
        # variable is initialized.
        self.boxes.extend(new_boxes)

        # Nothing to merge here. Just add all int_boxes if any.
        if not old_boxes_by_frame or not int_boxes_by_frame:
            self.boxes.extend(int_boxes)
            return

        # 4. Build cost matrix for each frame and find correspondence using
        # Hungarian algorithm. In this case min_cost_thresh is stronger
        # because we compare only on one frame.
        min_cost_thresh = 0.25
        for frame in int_boxes_by_frame:
            if frame in old_boxes_by_frame:
                int_boxes = int_boxes_by_frame[frame]
                old_boxes = old_boxes_by_frame[frame]
                cost_matrix = np.empty(shape=(len(int_boxes), len(old_boxes)),
                                       dtype=float)
                # 5.1 Construct cost matrix for the frame.
                for i, box0 in enumerate(int_boxes):
                    for j, box1 in enumerate(old_boxes):
                        if box0.label.id == box1.label.id:
                            cost_matrix[i][j] = 1 - _calc_box_IoU(box0, box1)
                        else:
                            cost_matrix[i][j] = 1

                # 6. Find optimal solution using Hungarian algorithm.
                row_ind, col_ind = linear_sum_assignment(cost_matrix)
                int_boxes_indexes = list(range(0, len(int_boxes)))
                for i, j in zip(row_ind, col_ind):
                    # Reject the solution if the cost is too high. Remember
                    # inside int_boxes_indexes boxes which were handled.
                    if cost_matrix[i][j] <= min_cost_thresh:
                        old_boxes[j].merge(int_boxes[i])
                        int_boxes_indexes[i] = -1

                # 7. Add all boxes which were not processed.
                for i in int_boxes_indexes:
                    if i != -1:
                        self.boxes.append(int_boxes[i])
            else:
                # We don't have old boxes on the frame. Let's add all new ones.
                self.boxes.extend(int_boxes_by_frame[frame])
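
The merge step above scores every (new box, old box) pair on a single frame with 1 - IoU and accepts only assignments below min_cost_thresh. A self-contained sketch of that pattern; toy_iou and the box coordinates are illustrative, not taken from the project.

import numpy as np
from scipy.optimize import linear_sum_assignment

def toy_iou(a, b):
    """IoU of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / union if union > 0 else 0.0

new_boxes = [(0, 0, 10, 9), (20, 20, 30, 30)]
old_boxes = [(0, 0, 10, 10), (100, 100, 110, 110)]

cost = np.array([[1.0 - toy_iou(n, o) for o in old_boxes] for n in new_boxes])
row_ind, col_ind = linear_sum_assignment(cost)

min_cost_thresh = 0.25  # same spirit as the threshold in _merge_boxes above
for i, j in zip(row_ind, col_ind):
    if cost[i, j] <= min_cost_thresh:
        print("merge new box %d into old box %d" % (i, j))  # overlap high enough
    else:
        print("keep new box %d as a separate box" % i)      # assignment rejected
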