Example #1
def test_small():
    costs = np.array([[6, 9, 1], [10, 3, 2], [8, 7, 4.]], dtype=np.float32)
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_allclose(r, expected)

    costs = np.array([[6, 9, 1], [10, 3, 2], [8, 7, 4.]], dtype=float)
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_allclose(r, expected)

    costs = np.array([[6, 9, 1], [10, 3, 2], [8, 7, 4.]], dtype=int)
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [2, 1, 0]])
    np.testing.assert_allclose(r, expected)
Example #2
    def _compute_distance(self, phi_a: pd.DataFrame, phi_b: pd.DataFrame) -> float:
        assert phi_a.shape[1] == phi_b.shape[1]

        num_topics = phi_a.shape[1]
        topic_distances = np.zeros(shape=(num_topics, num_topics))
        topic_indices = list(range(num_topics))

        if self._max_num_top_words is None:
            def col_to_topic(phi: pd.DataFrame, col: int) -> pd.Series:
                return phi.iloc[:, col]
        else:
            def col_to_topic(phi: pd.DataFrame, col: int) -> pd.Series:
                return phi.iloc[:, col].sort_values(ascending=False)[:self._max_num_top_words]

        topics_a = [col_to_topic(phi_a, phi_col) for phi_col in topic_indices]
        topics_b = [col_to_topic(phi_b, phi_col) for phi_col in topic_indices]

        for topic_index_a, topic_a in enumerate(topics_a):
            for topic_index_b, topic_b in enumerate(topics_b):
                topic_distance = self._compute_topic_distance(
                    topic_a, topic_b
                )
                topic_distances[topic_index_a, topic_index_b] = topic_distance

        row_ids, column_ids = solve_dense(topic_distances)

        return float(
            np.sum(
                topic_distances[row_ids, column_ids]
            ) / max(1.0, num_topics)
        )
Example #3
def compare_heatmap(y11, y12, y21, y22, mata, matb):

    from lapsolver import solve_dense
    from sklearn.metrics.pairwise import euclidean_distances as ed
    from natto.process.hungutil import make_canvas_and_spacemaps
    from natto.out.draw import quickdoubleheatmap
    distances = ed(mata, matb)
    hungmatch = solve_dense(distances)

    def prephtmap(y1, y2):
        # returns: canvas,y1map2,y2map2,row,col
        a, b, c = make_canvas_and_spacemaps(y1, y2, hungmatch, normalize=False)
        d, e = solve_dense(c)
        return c, a, b, d, e

    comp1 = prephtmap(y11, y12)
    comp2 = prephtmap(y21, y22)

    quickdoubleheatmap(comp1, comp2)

    def calcmissmatches(stuff):
        canv = stuff[0]
        r, c = stuff[-2:]
        for rr, cc in zip(r, c):
            canv[rr, cc] = 0
        return canv.sum()

    print("clust1 missplaced:", calcmissmatches(comp1))
    print("clust2 missplaced:", calcmissmatches(comp2))
    print("set1 randindex:", rand(y11, y21))
    print("set2 randindex:", rand(y12, y22))
Example #4
def matching_upd_j(weights_j, global_weights, sigma_inv_j, global_sigmas,
                   prior_mean_norm, prior_inv_sigma, popularity_counts, gamma,
                   J):

    L = global_weights.shape[0]

    full_cost = compute_cost(global_weights, weights_j, global_sigmas,
                             sigma_inv_j, prior_mean_norm, prior_inv_sigma,
                             popularity_counts, gamma, J)

    #row_ind, col_ind = linear_sum_assignment(-full_cost)
    # please note that this can not run on non-Linux systems
    row_ind, col_ind = solve_dense(-full_cost)

    assignment_j = []

    new_L = L

    for l, i in zip(row_ind, col_ind):
        if i < L:
            popularity_counts[i] += 1
            assignment_j.append(i)
            global_weights[i] += weights_j[l]
            global_sigmas[i] += sigma_inv_j
        else:  # new neuron
            popularity_counts += [1]
            assignment_j.append(new_L)
            new_L += 1
            global_weights = np.vstack(
                (global_weights, prior_mean_norm + weights_j[l]))
            global_sigmas = np.vstack(
                (global_sigmas, prior_inv_sigma + sigma_inv_j))

    return global_weights, global_sigmas, popularity_counts, assignment_j
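
In the loop above, a matched column index i >= L means the local neuron opens a new global neuron instead of merging into an existing one. Below is a minimal, self-contained sketch of just this index bookkeeping; the real cost matrix comes from compute_cost and is negated before solving, and the numbers and the number of extra columns here are made up:

import numpy as np
from lapsolver import solve_dense

L, K = 2, 3                      # 2 existing global neurons, 3 local neurons
cost = np.array([
    [0.1, 5.0, 0.3, 9.0, 9.0],   # local neuron 0: cheapest match is global neuron 0
    [4.0, 0.2, 9.0, 0.4, 9.0],   # local neuron 1: cheapest match is global neuron 1
    [6.0, 7.0, 9.0, 9.0, 0.5],   # local neuron 2: cheapest option is a "new neuron" column
])
rows, cols = solve_dense(cost)
for l, i in zip(rows, cols):
    print(l, "-> existing global neuron" if i < L else "-> new global neuron")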
Example #5
def matching_upd_j(atoms_j, global_atoms, global_atoms_squared, sigma, sigma0, mu0, popularity_counts, gamma, J):
    
    L = global_atoms.shape[0]
    
    compute_cost_start = time.time()
    full_cost = compute_cost(global_atoms.astype(np.float32), atoms_j.astype(np.float32), global_atoms_squared.astype(np.float32), sigma, sigma0, mu0.astype(np.float32), popularity_counts, gamma, J)
    compute_cost_dur = time.time() - compute_cost_start
    logger.info("Compute cost duration: {}".format(compute_cost_dur))

    #row_ind, col_ind = linear_sum_assignment(-full_cost)
    row_ind, col_ind = solve_dense(-full_cost)

    assignment_j = []
    
    new_L = L
    
    for l, i in zip(row_ind, col_ind):
        if i < L:
            popularity_counts[i] += 1
            assignment_j.append(i)
            global_atoms[i] += atoms_j[l]
            global_atoms_squared[i] += atoms_j[l]**2
        else:
            popularity_counts += [1]
            assignment_j.append(new_L)
            new_L += 1
            global_atoms = np.vstack((global_atoms,atoms_j[l]))
            global_atoms_squared = np.vstack((global_atoms_squared,atoms_j[l]**2))

    return global_atoms, global_atoms_squared, popularity_counts, assignment_j
Example #6
    def _assign_gt(self):
        """
        Assigns a GT identity to every detection in self.det_df, based on the ground truth boxes in self.gt_df.
        The assignment is done frame by frame via bipartite matching.
        """
        if self.det_df.seq_info_dict['has_gt'] and not self.det_df.seq_info_dict['is_gt']:
            print(f"Assigning ground truth identities to detections to sequence {self.seq_name}")
            for frame in self.det_df['frame'].unique():
                frame_detects = self.det_df[self.det_df.frame == frame]
                frame_gt = self.gt_df[self.gt_df.frame == frame]

                # Compute IoU for each pair of detected / GT bounding box
                iou_matrix = iou(frame_detects[['bb_top', 'bb_left', 'bb_bot', 'bb_right']].values,
                                 frame_gt[['bb_top', 'bb_left', 'bb_bot', 'bb_right']].values)

                iou_matrix[iou_matrix < self.dataset_params['gt_assign_min_iou']] = np.nan
                dist_matrix = 1 - iou_matrix
                assigned_detect_ixs, assigned_detect_ixs_ped_ids = solve_dense(dist_matrix)
                unassigned_detect_ixs = np.array(list(set(range(frame_detects.shape[0])) - set(assigned_detect_ixs)))

                assigned_detect_ixs_index = frame_detects.iloc[assigned_detect_ixs].index
                assigned_detect_ixs_ped_ids = frame_gt.iloc[assigned_detect_ixs_ped_ids]['id'].values
                unassigned_detect_ixs_index = frame_detects.iloc[unassigned_detect_ixs].index

                self.det_df.loc[assigned_detect_ixs_index, 'id'] = assigned_detect_ixs_ped_ids
                self.det_df.loc[unassigned_detect_ixs_index, 'id'] = -1  # False Positives
Example #7
def match_given_alpha(diff):
    n2, n1 = diff.shape
    P = np.zeros((n2, n1))
    msoln = solve_dense(diff.T)
    P[msoln[1], msoln[0]] = 1.0
    P = P[:, :n1]
    return P.copy()
Example #8
def test_missing_edge_negative():
    costs = np.array([[-1000, -1], [-1, np.nan]])
    r = lap.solve_dense(costs)
    # The optimal solution is (0, 1), (1, 0) with cost -1 + -1.
    # If the implementation does not use a large enough constant, it may choose
    # (0, 0), (1, 1) with cost -1000 + L.
    expected = np.array([[0, 1], [1, 0]])
    np.testing.assert_allclose(r, expected)
Example #9
def test_files_for_dense():
    files = glob.glob(os.path.join(DATA_DIR, 'dense', '*.npz'))
    print(DATA_DIR)
    assert len(files) > 0
    for f in files:
        data = np.load(f)
        rids, cids = lap.solve_dense(data['costs'])

        assert data['costs'][rids, cids].sum() == data['total_cost']
Example #10
def test_nonsquare():
    costs = np.array([[6, 9], [10, 3], [8, 7]], dtype=float)

    r = lap.solve_dense(costs)
    expected = np.array([[0, 1], [0, 1]])
    np.testing.assert_allclose(r, expected)

    r = lap.solve_dense(costs.T)  # view test
    expected = np.array([[0, 1], [0, 1]])
    np.testing.assert_allclose(r, expected)

    costs = np.array([[-17.13614455, -536.59009819],
                      [292.64662837, 187.49841358],
                      [664.70501771, 948.09658792]])

    expected = np.array([[0, 1], [1, 0]])
    r = lap.solve_dense(costs)
    np.testing.assert_allclose(r, expected)
Example #11
def test_plain_array_fractional():
    # Add fractional costs that change the solution.
    # Before: (1 + 3 + 8) = 12 < 13 = (6 + 5 + 2)
    # After: (1.4 + 3.4 + 8.4) = 13.2 > 13 = (6 + 5 + 2)
    # This confirms that pybind11 did not cast float to int.
    costs = [[6, 9, 1.4], [10, 3.4, 2], [8.4, 5, 4]]
    r = lap.solve_dense(costs)
    expected = np.array([[0, 1, 2], [0, 2, 1]])
    np.testing.assert_allclose(r, expected)
Example #12
def sum_assign_thres(dists, thres=None):
    # prev_ind, new_ind = optimize.linear_sum_assignment(dists)
    from lapsolver import solve_dense
    prev_ind, new_ind = solve_dense(dists)
    if thres is not None:
        picked_dists = dists[prev_ind, new_ind]
        good = picked_dists < thres
        prev_ind = prev_ind[good]
        new_ind = new_ind[good]
    return prev_ind, new_ind
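
A hypothetical usage of sum_assign_thres (assuming numpy is imported as np); pairs whose matched distance exceeds the threshold are dropped:

dists = np.array([[0.1, 0.9],
                  [0.8, 0.2]])
prev_ind, new_ind = sum_assign_thres(dists, thres=0.5)
# The solver matches 0 -> 0 and 1 -> 1 (total distance 0.3); both matched
# distances (0.1 and 0.2) pass the 0.5 cutoff, so nothing is filtered out.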
Example #13
def lsa_solve_lapsolver(costs):
    """Solves the LSA problem using the lapsolver library."""
    from lapsolver import solve_dense

    # Note that lapsolver will add expensive finite edges internally.
    # However, older versions did not add a large enough edge.
    finite_costs = add_expensive_edges(costs)
    rids, cids = solve_dense(finite_costs)
    rids, cids = _exclude_missing_edges(costs, rids, cids)
    return rids, cids
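
add_expensive_edges is a helper from the surrounding codebase and is not shown here. The technique it implements, replacing missing (non-finite) edges with a finite cost too large to ever be preferred over real edges, can be sketched as follows; this is only an illustration under that assumption, not the actual helper:

import numpy as np

def add_expensive_edges_sketch(costs):
    """Illustrative only: replace missing (non-finite) edges by a finite cost
    large enough that they are used only when unavoidable."""
    costs = np.asarray(costs, dtype=float)
    out = costs.copy()
    valid = np.isfinite(costs)
    if valid.all():
        return out
    if not valid.any():
        out[:] = 1.0
        return out
    span = costs[valid].max() - costs[valid].min()
    # One missing edge must cost more than any full assignment of real edges
    # (a generous, not necessarily tight, choice of constant).
    large = costs[valid].max() + (span + 1.0) * min(costs.shape)
    out[~valid] = large
    return out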
Example #14
def lapsolver_get_comparison_score(inv_matrix):
    rows, cols = solve_dense(inv_matrix)
    total = 0
    for row, col in zip(rows, cols):
        # print(row, col) # Row/column pairings
        val = inv_matrix[row][col]
        total += val
    comparison_score = total        # use this to print out just the comparison score and then populate the comparison score matrix
    # comparison_score = f'sum of scores: {total}'      # this prints out "Sum of Scores: total"
    return comparison_score
Example #15
    def define_matching(self, input, out):
        # The input needs to have at least 1.2 times as many points as the output
        L = np.random.choice(np.arange(input.shape[0]),
                             int(1.2 * out.shape[0]),
                             replace=False)
        input = input[L]

        dist = scipy.spatial.distance.cdist(out, input)
        rids, cids = solve_dense(dist)
        matched = input[cids]
        return matched
Example #16
def SIOU_matched_segments(target, pred_labels, primitives_pred, primitives,
                          weights):
    """
    Computes iou for segmentation performance and primitive type
    prediction performance.
    First it computes the matching using hungarian matching
    between predicted and ground truth labels.
    Then it computes the iou score, starting from matching pairs
    coming out from hungarian matching solver. Note that
    it is assumed that the iou is only computed over matched pairs.
    That is to say, if any column in the matched pair has zero
    number of points, that pair is not considered.
    
    It also computes the iou for primitive type prediction. In this case
    iou is computed only over the matched segments.
    """
    # 2 is open spline and 9 is close spline
    primitives[primitives == 0] = 9
    primitives[primitives == 6] = 9
    primitives[primitives == 7] = 9
    primitives[primitives == 8] = 2

    primitives_pred[primitives_pred == 0] = 9
    primitives_pred[primitives_pred == 6] = 9
    primitives_pred[primitives_pred == 7] = 9
    primitives_pred[primitives_pred == 8] = 2

    labels_one_hot = to_one_hot(target)
    cluster_ids_one_hot = to_one_hot(pred_labels)

    cost = relaxed_iou_fast(
        torch.unsqueeze(cluster_ids_one_hot, 0).float(),
        torch.unsqueeze(labels_one_hot, 0).float())
    cost_ = 1.0 - cost.data.cpu().numpy()
    matching = []

    for b in range(1):
        rids, cids = solve_dense(cost_[b])
        matching.append([rids, cids])

    primitives_pred_hot = to_one_hot(primitives_pred, 10,
                                     weights.device.index).float()

    # this gives you what primitive type the predicted segment has.
    prim_pred = primitive_type_segment_torch(primitives_pred_hot,
                                             weights).data.cpu().numpy()
    target = np.expand_dims(target, 0)
    pred_labels = np.expand_dims(pred_labels, 0)
    prim_pred = np.expand_dims(prim_pred, 0)
    primitives = np.expand_dims(primitives, 0)

    segment_iou, primitive_iou, iou_b_prims = mean_IOU_primitive_segment(
        matching, pred_labels, target, prim_pred, primitives)
    return segment_iou, primitive_iou, matching, iou_b_prims
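
The matching step the docstring describes (Hungarian matching on a 1 - IoU cost between predicted and ground-truth segments) can be reproduced with a small, self-contained numpy sketch; to_one_hot and relaxed_iou_fast from the original code are replaced here by explicit one-hot encoding and a plain IoU computation:

import numpy as np
from lapsolver import solve_dense

# Toy labels: 5 points, 2 ground-truth segments and 2 predicted segments.
target = np.array([0, 0, 1, 1, 1])
pred_labels = np.array([1, 1, 1, 0, 0])

# One-hot encode and build the IoU between every (predicted, ground-truth) segment pair.
gt_one_hot = np.eye(target.max() + 1)[target]              # (N, num_gt_segments)
pred_one_hot = np.eye(pred_labels.max() + 1)[pred_labels]  # (N, num_pred_segments)
intersection = pred_one_hot.T @ gt_one_hot
union = pred_one_hot.sum(0)[:, None] + gt_one_hot.sum(0)[None, :] - intersection
iou = intersection / np.maximum(union, 1e-8)

# Hungarian matching on the cost 1 - IoU, as in SIOU_matched_segments.
rids, cids = solve_dense(1.0 - iou)
matched_iou = iou[rids, cids]  # IoU of each matched (prediction, ground truth) pair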
Example #17
def symmetrized_norm(U, V):
    """
    Computes the Frobenius error between U and V while minimizing over all
    possible signed permutations of the columns of V by solving a linear assignment problem.

    Solves a linear assigment problem to find the best matching between columns of U and V.
    The cost for the problem is the squared 2-norm of the difference of the two respective columns.
    """
    C = build_cost_matrix(U, V)
    row_idx, col_idx = solve_dense(C)
    best_frobenius_norm = np.sqrt(C[row_idx, col_idx].sum())
    return best_frobenius_norm
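
build_cost_matrix is defined elsewhere in the source project. A minimal sketch consistent with the docstring (squared 2-norm between columns, minimised over the sign of the matched column of V so that signed permutations are covered) might look like this; it is purely illustrative and the real helper may differ:

import numpy as np

def build_cost_matrix_sketch(U, V):
    k = U.shape[1]
    C = np.empty((k, k))
    for i in range(k):
        for j in range(k):
            # Allow either sign for column j of V (signed permutation).
            C[i, j] = min(np.sum((U[:, i] - V[:, j]) ** 2),
                          np.sum((U[:, i] + V[:, j]) ** 2))
    return C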
Example #18
def hungarian(costs: np.ndarray, maximise=False, cutoff=None, row_labels=None, col_labels=None):
    """
    Solves the Assignment problem.

    This method is a wrapper lapsolver's solve_dense that:
     1. Can Threshold certain costs,
     2. Can Handle np.NaN (as np.Inf)
     3. Can deal with rows/columns of NaN
     4. Can keep track of labels, rather than just indices

    :param costs:      Cost Matrix to optimise
    :param maximise:   (default: False) - Calculates a maximum weight matching if true.
    :param cutoff:     If set, use this as a threshold. The cutoff range depends on whether
                       maximising (in which case lower-values are invalidated) or minimising
                       (higher values inadmissable).
    :param row_labels: Row-Labels (optional) - If None, using 0-based indices
    :param col_labels: Column-Labels (optional) - If None, using 0-based indices
    :return:
    """
    # Prepare
    _cost = costs.astype(float)

    # Handle Edge Cases
    _cost[~np.isfinite(_cost)] = np.NaN
    if cutoff is not None:
        if maximise:
            _cost[_cost < cutoff] = np.NaN
        else:
            _cost[_cost > cutoff] = np.NaN

    # Extract only valid rows/columns (i.e. those with at least one valid element)
    valid = np.isfinite(_cost)
    if ~valid.any():  # Guard against having no valid assignments
        return [], []
    val_r, val_c = valid.any(axis=1), valid.any(axis=0)
    _cost = _cost[val_r, :]
    _cost = _cost[:, val_c]

    # Perform Hungarian (but handle Maximisation)
    if maximise:
        finite = np.isfinite(_cost)
        _cost[finite] = np.max(_cost[finite]) - _cost[finite]
    r, c = solve_dense(_cost)

    # Map to original Indices
    r_ids, c_ids = np.where(val_r)[0][r], np.where(val_c)[0][c]
    if row_labels is not None:
        r_ids = np.asarray(row_labels)[r_ids]
    if col_labels is not None:
        c_ids = np.asarray(col_labels)[c_ids]

    # Return
    return r_ids, c_ids
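
A hypothetical call to the wrapper above (assuming numpy is imported as np; the labels and costs are made up): with maximise=True and cutoff=0.3, entries below 0.3 and non-finite entries are treated as forbidden, and labels are returned instead of indices:

costs = np.array([[0.9, 0.2, np.nan],
                  [0.4, np.inf, 0.8]])
rows, cols = hungarian(costs, maximise=True, cutoff=0.3,
                       row_labels=['track_a', 'track_b'],
                       col_labels=['det_1', 'det_2', 'det_3'])
# rows -> ['track_a', 'track_b'], cols -> ['det_1', 'det_3']:
# 'det_2' is never assigned because its only finite cost (0.2) falls below the cutoff.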
Example #19
    def _merge_subseq_dfs(self, subseq_dfs):
        seq_df = subseq_dfs[0]
        for subseq_df in subseq_dfs[1:]:
            # Make sure that ped_ids in subseq_df are new and all greater than the ones in seq_df:
            subseq_df['ped_id'] += seq_df['ped_id'].max() + 1

            """
            # Match ids from both sides
            # TODO: We could do this based on bipartite matching based on common detection_ids per ped_id in the
            # TODO: overlap section
            left_ped_ids = seq_df.groupby('ped_id')['detection_id'].max().reset_index().rename(
                columns={'ped_id': 'left_ped_id'})
            right_ped_ids = subseq_df.groupby('ped_id')['detection_id'].min().reset_index().rename(
                columns={'ped_id': 'right_ped_id'})
            matched_ids = pd.merge(left_ped_ids, right_ped_ids, on='detection_id', how='inner')[
                ['left_ped_id', 'right_ped_id']]
            

            """
            intersect_frames = np.intersect1d(seq_df.frame, subseq_df.frame)
            left_df = seq_df[['detection_id', 'ped_id']][seq_df.frame.isin(intersect_frames)]
            left_ids_pos = left_df[['ped_id']].drop_duplicates()
            left_ids_pos['ped_id_pos'] = np.arange(left_ids_pos.shape[0])
            left_df = left_df.merge(left_ids_pos, on='ped_id').set_index('detection_id')

            #left_df['left_ped_id_pos'] = np.arange(left_df.shape[0])
            right_df = subseq_df[['detection_id', 'ped_id']][subseq_df.frame.isin(intersect_frames)]
            right_ids_pos = right_df[['ped_id']].drop_duplicates()
            right_ids_pos['ped_id_pos'] = np.arange(right_ids_pos.shape[0])
            right_df = right_df.merge(right_ids_pos, on='ped_id').set_index('detection_id')


            #right_df['right_ped_id_pos'] = np.arange(right_df.shape[0])
            # Build the linear assignment matrix
            common_boxes = (left_df[['ped_id_pos']]
                            .join(right_df['ped_id_pos'], lsuffix='_left', rsuffix='_right')
                            .dropna(thresh=2)
                            .reset_index()
                            .groupby(['ped_id_pos_left', 'ped_id_pos_right'])['detection_id']
                            .count())
            common_boxes = common_boxes.reset_index().astype(int)

            cost_mat = np.full((common_boxes['ped_id_pos_left'].max() + 1, common_boxes['ped_id_pos_right'].max() + 1),
                               fill_value=np.nan)
            cost_mat[common_boxes['ped_id_pos_left'].values, common_boxes['ped_id_pos_right'].values] = -common_boxes['detection_id'].values
            matched_left_ids_pos, matched_right_ids_pos = solve_dense(cost_mat)
            matched_ids = pd.DataFrame(data = np.stack((left_ids_pos['ped_id'].values[matched_left_ids_pos],
                                                       right_ids_pos['ped_id'].values[matched_right_ids_pos])).T,
                                       columns = ['left_ped_id', 'right_ped_id'])


            # Assign the ids matched to subseq_df
            subseq_df = pd.merge(subseq_df, matched_ids, how='outer', left_on='ped_id', right_on='right_ped_id')
            subseq_df['left_ped_id'].fillna(np.inf, inplace=True)
            subseq_df['ped_id'] = np.minimum(subseq_df['left_ped_id'], subseq_df['ped_id'])

            # Update seq_df
            seq_df = pd.concat([seq_df, subseq_df[subseq_df['frame'] > seq_df['frame'].max()]])

        return seq_df
Example #20
def match_given_alpha(diff):
    n2, n1 = diff.shape
    if n1 == n2:
        return np.eye(n1)
    P = np.zeros((n2, n1))
    am = np.argmin(diff, axis=0)
    if np.unique(am).shape[0] == n1 :
        msoln = (np.arange(n1), am)
    else:    
        msoln = solve_dense(diff.T)
    P[msoln[1], msoln[0]] = 1.0
    P = P[:,:n1]
    return P.copy() 
Example #21
def test_missing_edge_positive():
    costs = np.array([
        [np.nan, 1000, np.nan],
        [np.nan, 1, 1000],
        [1000, np.nan, 1],
    ])
    costs_copy = costs.copy()
    r = lap.solve_dense(costs)
    # The optimal solution is (0, 1), (1, 2), (2, 0) with cost 1000 + 1000 + 1000.
    # If the implementation does not use a large enough constant, it may choose
    # (0, 0), (1, 1), (2, 2) with cost (L + 1 + 1) instead.
    expected = np.array([[0, 1, 2], [1, 2, 0]])
    np.testing.assert_allclose(r, expected)
Example #22
def find_search_direction(grad, k):
    """Find the search direction by finding the direction in the doubly
    stochastic matrices where grad is minimised."""
    # Minimise the negative of the grad.
    #row_ind, col_ind = sp.optimize.linear_sum_assignment(grad)
    row_ind, col_ind = solve_dense(grad)
    #m = Munkres()
    #indices = np.array(m.compute(grad))
    #row_ind = indices[:,0]
    #col_ind = indices[:,1]
    W = np.zeros([k, k])
    W[row_ind, col_ind] = 1

    return W
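
A small illustration of the helper above (assuming numpy and solve_dense are imported as in the original module); the result is the permutation matrix minimising the total matched gradient:

grad = np.array([[0.2, 0.9, 0.5],
                 [0.8, 0.1, 0.7],
                 [0.6, 0.4, 0.3]])
W = find_search_direction(grad, 3)
# W has ones exactly at (0, 0), (1, 1) and (2, 2), since that assignment
# gives the smallest total gradient mass: 0.2 + 0.1 + 0.3 = 0.6.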
Example #23
def match(target, pred_labels):
    labels_one_hot = to_one_hot(target)
    cluster_ids_one_hot = to_one_hot(pred_labels)

    # cost = relaxed_iou(torch.unsqueeze(cluster_ids_one_hot, 0).float(), torch.unsqueeze(labels_one_hot, 0).float())
    # cost_ = 1.0 - torch.as_tensor(cost)
    cost = relaxed_iou_fast(torch.unsqueeze(cluster_ids_one_hot, 0).float(), torch.unsqueeze(labels_one_hot, 0).float())

    # cost_ = 1.0 - torch.as_tensor(cost)
    cost_ = 1.0 - cost.data.cpu().numpy()
    rids, cids = solve_dense(cost_[0])

    unique_target = np.unique(target)
    unique_pred = np.unique(pred_labels)
    return rids, cids, unique_target, unique_pred
Example #24
 def update(self, images, bboxs):
     self.frame_count += 1
     if self.device:
         images = images.to(self.device)
         bboxs = bboxs.to(self.device)
     if len(self.trackers) == 0:
         for (img, bbox) in zip(images, bboxs):
             trk = Tracker(img, bbox)
             self.trackers.append(trk)
         assert len(self.trackers) == 5
     else:
         inp = torch.zeros((5, 2, 28, 28), device=self.device)
         inp[:images.size(0), 0, :, :] = images
         temp = []
         for trk in self.trackers:
             state = trk.predict()
             temp.append(state.view(1, 28, 28))
         temp = torch.cat(temp, dim=0)
         assert temp.size(0) == 5
         inp[:temp.size(0), 1, :, :] = temp
         with torch.no_grad():
             matrix = self.internet(inp.unsqueeze(0), self.rel_rec,
                                    self.rel_send)
             matrix = self.softmax(matrix)
         matrix = matrix[0].to('cpu').detach().numpy()
         # print(np.round(matrix, 3))
         # assert 2 == 1
         rids, cids = solve_dense(-matrix)
         matched_indices = np.array([rids, cids]).T
         matched_indices = matched_indices[:images.size(0)]
         for (d, t) in matched_indices:
             self.trackers[t].update(images[d], bboxs[d])
     ret = []
     i = len(self.trackers)
     for trk in reversed(self.trackers):
         d = trk.get_state().to('cpu').detach().numpy()[:4]
         if ((trk.time_since_update < 1)
                 and (trk.hit_streak >= self.min_hits
                      or self.frame_count <= self.min_hits)):
             ret.append(np.concatenate((d, [trk.id + 1])).reshape(
                 1, -1))  # +1 as MOT benchmark requires positive
         i -= 1
         #remove dead tracklet
         if (trk.time_since_update > self.max_age):
             self.trackers.pop(i)
     if (len(ret) > 0):
         return np.concatenate(ret)
     return np.empty((0, 5))
Example #25
def findMatch(pdAugOne, pdAugTwo, numPtsOne, numPtsTwo, homDim, solver="scipy"):
#pdAugOne and pdAugTwo are persistence diagrams, which are augmented by projections on the diagonal
#of points from the other PD, for the given homology dimension. They are usually created by the function augmentPd.  

    #numPtsOne is the number of points in pdOne for given homology dimension before augmentation.
    #numPtsTwo is the number of points in pdTwo for given homology dimension before augmentation.
    totPts = numPtsOne + numPtsTwo

    #the main step is defining the cost matrix

    costMat = np.zeros((totPts, totPts),dtype=np.float64)

    pdAugOnePick = [x for x in pdAugOne if (x[0] == homDim)]
    pdAugTwoPick = [x for x in pdAugTwo if (x[0] == homDim)] 

    for iPt in range(totPts):
        for jPt in range(totPts):
            #pairwise costs between original (not augmented) points 
            if (iPt < numPtsOne) and (jPt < numPtsTwo):
                costMat[iPt,jPt] = (pdAugOnePick[iPt][1][0] - pdAugTwoPick[jPt][1][0])**2 + (pdAugOnePick[iPt][1][1] - pdAugTwoPick[jPt][1][1])**2

            #for a given original point in a PD, the pairwise cost w.r.t. all the augmented points in the other PD
            #is the same, and it is equal to the cost w.r.t. that point's projection on the diagonal
            elif (iPt < numPtsOne) and (jPt >= numPtsTwo):
                costMat[iPt,jPt] = (pdAugOnePick[iPt][1][0] - pdAugTwoPick[numPtsTwo + iPt][1][0])**2 + (pdAugOnePick[iPt][1][1] - pdAugTwoPick[numPtsTwo + iPt][1][1])**2 

            elif (iPt >= numPtsOne) and (jPt < numPtsTwo):
                costMat[iPt,jPt] = (pdAugOnePick[numPtsOne + jPt][1][0] - pdAugTwoPick[jPt][1][0])**2 + (pdAugOnePick[numPtsOne + jPt][1][1] - pdAugTwoPick[jPt][1][1])**2 

            #the last case is (iPt >= numPtsOne) and (jPt >= numPtsTwo), for which cost between projected points on the diagonal is zero

    if (solver == "scipy"): 
        srcInd, tgtInd = spopt.linear_sum_assignment(costMat)
    elif (solver == "solve_dense"):
        srcInd, tgtInd = solve_dense(costMat)  

    #if an original point is mapped to one of the projections on the diagonal,
    #then that point is mapped to its projection on the diagonal. 
    for iPt in range(numPtsOne):
        if (tgtInd[iPt] >= numPtsTwo):
            tgtInd[iPt] = numPtsTwo+iPt  

    for iPt in range(numPtsTwo):
        if (srcInd[iPt] >= numPtsOne):
            tgtInd[iPt] = numPtsTwo+iPt  

    return tgtInd
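
A hypothetical call with two tiny diagrams, each holding a single H0 point. The augmented inputs are built by hand here under the assumption that augmentPd appends to each diagram the diagonal projections ((b + d) / 2, (b + d) / 2) of the other diagram's points, which is what the cost-matrix indexing above implies; scipy.optimize is assumed to be imported as spopt:

# PD one has the point (1.0, 3.0); PD two has the point (1.5, 2.0).
pdAugOne = [(0, (1.0, 3.0)),    # original point of PD one
            (0, (1.75, 1.75))]  # diagonal projection of PD two's point
pdAugTwo = [(0, (1.5, 2.0)),    # original point of PD two
            (0, (2.0, 2.0))]    # diagonal projection of PD one's point

tgtInd = findMatch(pdAugOne, pdAugTwo, numPtsOne=1, numPtsTwo=1, homDim=0)
# Matching the two off-diagonal points (cost 1.25) is cheaper than sending both
# to the diagonal (cost 2.0 + 0.125), so tgtInd[0] == 0.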
Example #26
def optimize_open_spline(reconstructed_points, input_points_):
    """
    Assuming that initial point cloud size is greater than or equal to
    400.
    """
    out = reconstructed_points[0]
    out = out.data.cpu().numpy()
    out = out.reshape((30, 30, 3))
    out = out.reshape((900, 3))

    input = input_points_[0]
    N = input.shape[0]
    input = up_sample_points_torch_in_range(input, 1200, 1300)
    input = input.data.cpu().numpy()

    dist = np.linalg.norm(np.expand_dims(out, 1) - np.expand_dims(input, 0),
                          axis=2)

    rids, cids = solve_dense(dist)
    matched = input[cids]
    size_u = 30
    size_v = 30
    degree_u = 2
    degree_v = 2

    # Do global surface approximation
    try:
        surf = geomdl_fitting.approximate_surface(
            matched.tolist(),
            size_u,
            size_v,
            degree_u,
            degree_v,
            ctrlpts_size_u=10,
            ctrlpts_size_v=10,
        )
    except:
        print("open spline, smaller than 400")
        return reconstructed_points

    regular_parameters = draw_surf.regular_parameterization(30, 30)
    optimized_points = surf.evaluate_list(regular_parameters)
    optimized_points = torch.from_numpy(
        np.array(optimized_points).astype(np.float32)).cuda()
    optimized_points = torch.unsqueeze(optimized_points, 0)
    return optimized_points
Example #27
def run_lapsolver(matrix, printlowestcost):
    temp = inspect.stack()[0][3]
    method_name = temp[4:]
    print(" %s" % (method_name), end=' ')

    t_start = time.time()
    row_ind, column_ind = solve_dense(matrix)
    t_end = time.time()

    if printlowestcost:
        lowest_cost = 0.00
        lowest_cost = matrix[row_ind, column_ind].sum()
        print("  %12s %s %5.3f" % (method_name, "minimum cost", lowest_cost))

    del row_ind
    del column_ind
    return t_end - t_start
Example #28
def optimize_close_spline(reconstructed_points, input_points_):
    """
    Assuming that initial point cloud size is greater than or equal to
    400.
    """
    out = reconstructed_points[0]
    out = out.data.cpu().numpy()
    out = out.reshape((31, 30, 3))
    out = out[np.arange(0, 31, 1.5).astype(
        np.int32)][:, np.arange(0, 30, 1.5).astype(np.int32).tolist()]
    out = out.reshape((20 * 21, 3))

    input = input_points_[0]
    N = input.shape[0]
    input = up_sample_points_torch_in_range(input, 2000, 2100)
    # L = np.random.choice(np.arange(N), 30 * 31, replace=False)
    input = input.data.cpu().numpy()

    dist = np.linalg.norm(np.expand_dims(out, 1) - np.expand_dims(input, 0),
                          axis=2)

    rids, cids = solve_dense(dist)
    matched = input[cids]
    size_u = 21
    size_v = 20
    degree_u = 3
    degree_v = 3

    # Do global surface approximation
    surf = geomdl_fitting.approximate_surface(
        matched.tolist(),
        size_u,
        size_v,
        degree_u,
        degree_v,
        ctrlpts_size_u=10,
        ctrlpts_size_v=10,
    )

    regular_parameters = draw_surf.regular_parameterization(31, 30)
    optimized_points = surf.evaluate_list(regular_parameters)
    optimized_points = torch.from_numpy(
        np.array(optimized_points).astype(np.float32)).cuda()
    optimized_points = torch.unsqueeze(optimized_points, 0)
    return optimized_points
Example #29
    def reinitialize(self, frame, obstacles):
        if self._trackers == []:
            self.initialize(frame, obstacles)
        # Create matrix of similarities between detection and tracker bboxes.
        cost_matrix = self._create_hungarian_cost_matrix(
            frame.frame, obstacles)
        # Run linear assignment (Hungarian Algo) with matrix
        row_ids, col_ids = solve_dense(cost_matrix)
        matched_obstacle_indices, matched_tracker_indices = set(row_ids), set(
            col_ids)

        updated_trackers = []
        # Separate matched and unmatched tracks
        unmatched_tracker_indices = \
            set(range(len(self._trackers))) - matched_tracker_indices
        matched_trackers = [self._trackers[i] for i in matched_tracker_indices]
        unmatched_trackers = [
            self._trackers[i] for i in unmatched_tracker_indices
        ]
        # Separate matched and unmatched detections
        unmatched_obstacle_indices = \
            set(range(len(obstacles))) - matched_obstacle_indices
        matched_obstacles = [obstacles[i] for i in matched_obstacle_indices]
        unmatched_obstacles = [
            obstacles[i] for i in unmatched_obstacle_indices
        ]

        # Add successfully matched trackers to updated_trackers
        for tracker in matched_trackers:
            tracker.missed_det_updates = 0
            updated_trackers.append(tracker)
        # Add 1 to age of any unmatched trackers, filter old ones
        for tracker in unmatched_trackers:
            tracker.missed_det_updates += 1
            if tracker.missed_det_updates < MAX_MISSED_DETECTIONS:
                updated_trackers.append(tracker)
            else:
                self._logger.debug("Dropping tracker with id {}".format(
                    tracker.obstacle.id))

        for obstacle in unmatched_obstacles:
            updated_trackers.append(
                SingleObjectDaSiamRPNTracker(frame, obstacle, self._siam_net))

        self._trackers = updated_trackers
Example #30
def bbox2d_alignment(curr_bboxes, prev_bboxes, curr_image, prev_image):
    iou = pairwise_iou(curr_bboxes, prev_bboxes)
    vis = np.zeros_like(iou)
    for i, curr_box in enumerate(curr_bboxes):
        for j, prev_box in enumerate(prev_bboxes):
            curr_visual = np.array(curr_image.crop(curr_box).resize(
                (32, 32))).astype(float) / 255
            prev_visual = np.array(prev_image.crop(prev_box).resize(
                (32, 32))).astype(float) / 255

            vis_error = np.absolute(curr_visual - prev_visual).sum() / (32 * 32)
            vis[i, j] = vis_error

    iou[iou < 0.2] = np.nan
    cost = (1 - iou) + 10 * vis
    rids, cids = lapsolver.solve_dense(cost)
    return rids, cids