Example 1
def matching(L1, L2):
    sim_matrix = np.zeros((len(L1), len(L2)))
    for i, w1 in enumerate(L1):
        for j, w2 in enumerate(L2):
            sim_matrix[i][j] = viwordnet.path(w1, w2)

    row_ind, col_ind = lsa(-sim_matrix)
    return sim_matrix[row_ind, col_ind].tolist(), row_ind.tolist(), col_ind.tolist()
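A minimal standalone sketch (not from the original source) of the trick used above: linear_sum_assignment minimizes cost, so negating a similarity matrix turns the call into similarity maximization; a toy matrix stands in for the viwordnet path similarities.

import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

# Toy similarity matrix: rows are words of L1, columns are words of L2.
sim_matrix = np.array([[0.9, 0.1, 0.3],
                       [0.2, 0.8, 0.4]])

# Negating the matrix makes the minimizer maximize total similarity.
row_ind, col_ind = lsa(-sim_matrix)
print(row_ind, col_ind)                    # [0 1] [0 1]
print(sim_matrix[row_ind, col_ind].sum())  # total similarity of the matching (0.9 + 0.8)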
Example 2
def get_results(filename):
    print(filename)
    _dir, _, _file = filename.rpartition('/')
    _image, _, _ext = _file.rpartition('.')
    _annots, _, _im_ext = _image.rpartition('.')

    files.append(_image)
    # print _image

    # grab the train and test annotations
    trains = np.load(os.path.join(args.train_dir, _annots + _ + _ext)).item()
    tests = np.load(filename)
    #print filename, maps_dir+_image, args.train_dir+_annots+_+_ext

    # data structure for saving the IoU values
    IoU_vals = np.zeros((len(tests), len(trains.keys())))

    # save the train anots
    train_polys = []
    for i in trains.keys():
        # print(trains[i]['vertices'])
        train_polys.append(Polygon(trains[i]['vertices']))
        pass
    s = STRtree(train_polys)

    # save the test annots
    test_polys = []
    for i in range(len(tests)):
        poly = tests[i]
        poly = poly.tolist()
        # poly.append(poly[0])
        test_poly = Polygon(poly)
        if not test_poly.is_valid:
            continue
        try:
            results = s.query(test_poly)
            for j in range(len(results)):
                _id = train_polys.index(results[j])
                _intersection = train_polys[_id].intersection(test_poly).area
                _union = train_polys[_id].union(test_poly).area
                IoU_vals[i, _id] = _intersection / _union
            test_polys.append(test_poly)
        except Exception:
            continue

    # do the linear sum assignment
    _row, _col = lsa(1 - IoU_vals)
    assignment_matrix = IoU_vals[_row, _col]

    # compute the numbers
    TP = (assignment_matrix >= IoU_thresh).sum()
    FP = (assignment_matrix < IoU_thresh).sum()
    FN = len(trains.keys()) - TP
    return [TP, FP, FN]
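A hedged toy illustration (not part of the original script, which loads its polygon annotations with np.load and shapely) of the counting logic above: matches are found by minimizing 1 - IoU and then thresholded into true/false positives.

import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

IoU_thresh = 0.5                       # assumed threshold; defined globally in the original script
IoU_vals = np.array([[0.8, 0.1, 0.0],  # rows: test polygons, columns: train polygons
                     [0.2, 0.6, 0.1]])

_row, _col = lsa(1 - IoU_vals)          # minimizing (1 - IoU) maximizes total IoU
assignment = IoU_vals[_row, _col]       # IoU of each matched pair

TP = (assignment >= IoU_thresh).sum()   # matched pairs above the threshold
FP = (assignment < IoU_thresh).sum()    # matched test polygons without a good counterpart
FN = IoU_vals.shape[1] - TP             # train polygons left without a good match
print(TP, FP, FN)                       # 2 0 1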
Example 3
def match_atom(x, D):
    '''Find least cost assignment from x to an atom of D.'''

    cost = np.zeros(D.shape[1])
    assignment = dict()
    for ii in trange(D.shape[1], leave=False):
        C = cdist(x[:, None], D[:, ii][:, None])
        rows, cols = lsa(C)
        assignment[ii] = cols
        cost[ii] = C[rows, cols].sum()
    jj = np.argmin(cost)
    return (jj, assignment[jj])
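A small call sketch, assuming the snippet above and its imports (numpy, scipy.spatial.distance.cdist, tqdm.trange and the lsa alias) are in scope; x is compared against every column (atom) of D, and the index of the cheapest atom is returned together with its element-wise assignment.

import numpy as np

x = np.array([0.1, 0.9, 0.5])
D = np.array([[0.0, 0.5],
              [1.0, 0.5],
              [0.5, 0.5]])   # two atoms stored as columns

jj, cols = match_atom(x, D)
print(jj)     # 0: the first atom [0, 1, 0.5] is the closer match to x
print(cols)   # column indices assigning each entry of x to an entry of that atom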
Example 4
def obj1(c0, x, lam, norm):
    '''Find the cost between current xhat and x using lsa.

    Notes
    -----
    This bakes the cost of linear sum assignment right into the
    objective function and adds a regularizing l1 term to encourage
    the solution to be sparse.
    '''
    xhat = make_xhat(c0, norm)
    C = cdist(xhat[:, None], x[:, None])
    row, col = lsa(C)
    return C[row, col].sum() + lam * np.linalg.norm(c0, ord=1)
Example 5
def acc(targets, predictions, k=None):
    assert len(targets) == len(predictions)
    # targets = torch.tensor(targets)
    # predictions = torch.tensor(predictions)
    n = len(targets)
    if k is None:
        k = targets.max() + 1
    cost = torch.zeros(k, k)
    for i in range(n):
        cost[targets[i].item(), predictions[i].item()] -= 1
    stuff = -cost[lsa(cost)]
    total = stuff.sum().item() / n
    for i in range(k):
        stuff[i] /= -cost[i].sum()
    return total, stuff
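A hedged usage sketch for acc() above (torch, numpy and the lsa alias are assumed to be imported; k is passed explicitly here): clustering accuracy where predicted labels are only correct up to a permutation.

import torch

targets = torch.tensor([0, 0, 1, 1, 2, 2])
predictions = torch.tensor([1, 1, 0, 0, 2, 2])   # same clustering, labels permuted

total, per_class = acc(targets, predictions, k=3)
print(total)       # 1.0: every point is matched correctly after the optimal relabeling
print(per_class)   # per-class recall under that relabeling (all ones here)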
Example 6
def accuracy_score(y_act, y_pred):
    Cluster_Matrix = np.zeros((max(y_act) + 1, max(y_pred) + 1))
    print("Cluster_Matrix shape:", Cluster_Matrix.shape)
    for i in range(len(y_act)):
        Cluster_Matrix[y_act[i], y_pred[i]] += 1.0
    Inv_Cluster_Matrix = -1 * Cluster_Matrix
    row, col = lsa(Inv_Cluster_Matrix)
    accu = np.sum(Cluster_Matrix[row, col]) / float(
        np.sum(np.sum(Cluster_Matrix)))
    for i, j in zip(row, col):
        print(i, j,
              np.sum(Cluster_Matrix[i, :]) / np.sum(np.sum(Cluster_Matrix)),
              Cluster_Matrix[i, j] / float(np.sum(Cluster_Matrix[:, j])))
    print(Cluster_Matrix.astype(int))
    print(row)
    print(col)
    return accu
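A minimal invocation sketch (numpy and the lsa alias are assumed to be imported as in the other snippets): accuracy_score builds a contingency matrix between true and predicted cluster labels and scores them under the optimal cluster relabeling.

y_act = [0, 0, 1, 1, 2, 2]
y_pred = [1, 1, 0, 0, 2, 2]             # same partition, cluster ids permuted
print(accuracy_score(y_act, y_pred))    # prints the matching details, then 1.0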
Example 7
def get_assignment_matrix(distance_matrix):
    """
    Get the Hungarian assignment using scipy's linear_sum_assignment.
    Parameters:
    ----------
    distance_matrix: `numpy.ndarray`
        N x M array with distances of N trackers and M detections

    Returns:
    --------
    rows: `numpy.ndarray`
        sorted list of indices of trackers (starting with 0,1,2...)
    cols: `numpy.ndarray`
        array of indices of respective assigned boxes to the trackers
    """
    rows, cols = lsa(distance_matrix)
    return rows, cols
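A short usage sketch with toy distances (not from the source): each tracker row is paired with the detection column that minimizes the total distance.

import numpy as np

distance_matrix = np.array([[0.2, 5.0],    # tracker 0 is close to detection 0
                            [4.0, 0.5]])   # tracker 1 is close to detection 1
rows, cols = get_assignment_matrix(distance_matrix)
print(rows, cols)   # [0 1] [0 1]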
Example 8
def maxcost(u, v):
    r"""
    Solves the maximum bipartite matching problem for the bipartite graph with
    vertex sets :math:`A=\{a_1,\dots,a_k\}` and :math:`B=\{b_1,\dots,b_k\}`
    such that the edge :math:`(a_i, b_j)` has weight :math:`|u_i-v_j|`, with
    :math:`u,v` distribution vectors in the unit :math:`k`-cube. Tells us the
    maximum sum of squared differences over all permutations of coordinates in
    :math:`u` and :math:`v`.

    :param list u: Vector.
    :param list v: Vector.
    :return: Maximum sum of squared differences.
    :rtype: float
    """
    # Create a cost matrix for the linear sum assignment method.
    C = np.array([[(u[i] - v[j])**2 for j in range(len(v))]
                  for i in range(len(u))])

    return np.sqrt(C[lsa(C, maximize=True)].sum())
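A quick call sketch with toy vectors (numpy and the lsa alias are assumed to be imported):

u = [0.1, 0.4, 0.5]
v = [0.3, 0.3, 0.4]
print(maxcost(u, v))   # square root of the total squared difference under the optimal pairing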
Example 9
def agree(dict1, dict2):
    """
    :param dict1: is a dict containing a set of lists for every key
    :param dict2: is a dict containing a set of lists for every key
    :return: an agreement score indicating how similar the two sets are
    """
    M = np.empty((len(dict1), len(dict2)))
    M[:] = np.nan

    for k_i, v_i in dict1.items():
        for k_j, v_j in dict2.items():
            M[k_i, k_j] = jaccardSim_avg(v_i, v_j)

    # M is a 'profit' matrix. It has to be inverted to a 'cost' matrix to find the min
    M_invert = 1 - M
    # Extract the index of the minimum values (hence max value) using the Hungarian Method
    rowIndex, colIndex = lsa(M_invert)
    jSim_max = M[rowIndex, colIndex]

    return sum(jSim_max) / len(dict1)
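A hedged sketch of calling agree(): jaccardSim_avg is defined elsewhere in the original source, so a plain Jaccard similarity over the flattened element sets is used here as a stand-in, and keys are 0-based integers as the indexing requires.

import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

def jaccardSim_avg(lists_a, lists_b):
    # Stand-in only: Jaccard similarity of the flattened element sets.
    a = set(x for lst in lists_a for x in lst)
    b = set(x for lst in lists_b for x in lst)
    return len(a & b) / len(a | b)

dict1 = {0: [[1, 2]], 1: [[3, 4]]}
dict2 = {0: [[3, 4]], 1: [[1, 2]]}
print(agree(dict1, dict2))   # 1.0: the groupings match perfectly after reordering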
Example 10
def matchBbox(bbox0, bbox1):
    # Hungarian matching of the bipartite graph between two sets of bounding boxes
    # weight (cost) is the distance between bounding boxes
    # linear_sum_assignment returns min(#rows, #cols) matched index pairs

    cost_matrix = np.zeros((len(bbox0), len(bbox1)))
    for row, bbox_gt_ in enumerate(bbox0):
        center_x_gt = bbox_gt_[0] + bbox_gt_[2] / 2
        center_y_gt = bbox_gt_[1] + bbox_gt_[3] / 2

        for col, bbox_eff_ in enumerate(bbox1):
            center_x_eff = bbox_eff_[0] + bbox_eff_[2] / 2
            center_y_eff = bbox_eff_[1] + bbox_eff_[3] / 2

            cost_matrix[row, col] = ((center_x_gt - center_x_eff)**2 +
                                     (center_y_gt - center_y_eff)**2)**0.5

    row_ind, col_ind = lsa(cost_matrix)

    return row_ind, col_ind
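A toy call (boxes given as x, y, w, h, as the center computation implies; numpy and the lsa alias are assumed to be imported):

bbox0 = [[0, 0, 10, 10], [100, 100, 10, 10]]   # e.g. ground-truth boxes
bbox1 = [[101, 99, 10, 10], [1, 1, 10, 10]]    # detections listed in a different order

row_ind, col_ind = matchBbox(bbox0, bbox1)
print(row_ind, col_ind)   # [0 1] [1 0]: box 0 pairs with detection 1, box 1 with detection 0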
Example 11
    def fit(self, X, Y):
        """
        Fits transformation matrices (Wx, Wy) and, if
        center_columns == True, bias terms (mx_, my_).

        Parameters
        ----------
        X : ndarray
            (num_samples x num_neurons) matrix of activations.
        Y : ndarray
            (num_samples x num_neurons) matrix of activations.
        """

        X, Y = check_equal_shapes(X, Y, nd=2, zero_pad=self.zero_pad)

        if self.center_columns:
            self.mx_ = mx = np.mean(X, axis=0)
            self.my_ = my = np.mean(Y, axis=0)
            X = X - mx[None, :]
            Y = Y - my[None, :]

        self.Px_, self.Py_ = lsa(X.T @ Y, maximize=True)

        return self
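The core of fit() is a permutation alignment: lsa on the cross-covariance X.T @ Y with maximize=True pairs each column of X with its best-matching column of Y. A standalone sketch of just that step (toy data, not the full class):

import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 4))
perm = np.array([2, 0, 3, 1])
Y = X[:, perm]                        # Y is X with its columns permuted

Px, Py = lsa(X.T @ Y, maximize=True)
print(Px)   # [0 1 2 3]
print(Py)   # recovers the inverse of perm: [1 3 0 2]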
Example 12
def correlate_clusters(c1, c2, and_mapping=False, mean_only=True):
    from scipy.optimize import linear_sum_assignment as lsa
    K = c1.shape[0]

    #get the correlation between all the clusters
    #in a matrix
    corrs = np.zeros([K, K])
    weights = cosine_latitude_weights(c1[0])
    for i, j in itertools.product(np.arange(K), repeat=2):
        corrs[i, j] = ipearsonr(c1[i], c2[j], weights=weights).data
    order = lsa(-corrs)
    mapping = [(a, b) for a, b in zip(order[0], order[1])]

    pattern_corrs = np.array([corrs[map_] for map_ in mapping])
    mean_corrs = pattern_corrs.mean()
    if mean_only:
        corrs = mean_corrs
    else:
        corrs = (mean_corrs, pattern_corrs)

    if and_mapping:
        return corrs, mapping
    else:
        return corrs
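correlate_clusters matches cluster patterns by maximizing the summed correlation. A stripped-down, self-contained sketch of the same idea, using plain Pearson correlations (np.corrcoef) in place of the latitude-weighted ipearsonr used above:

import itertools
import numpy as np
from scipy.optimize import linear_sum_assignment as lsa

rng = np.random.default_rng(1)
K, N = 3, 50
c1 = rng.standard_normal((K, N))
c2 = c1[[1, 2, 0]] + 0.1 * rng.standard_normal((K, N))   # same patterns, shuffled plus noise

corrs = np.zeros((K, K))
for i, j in itertools.product(range(K), repeat=2):
    corrs[i, j] = np.corrcoef(c1[i], c2[j])[0, 1]

order = lsa(-corrs)                                 # negate to maximize total correlation
mapping = list(zip(order[0].tolist(), order[1].tolist()))
print(mapping)                                      # [(0, 2), (1, 0), (2, 1)]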
Example 13
def linear_assignment(row_list, col_list, costMatrix):
    """
    Compute the linear assignment between two label sets.

    Parameters:
    - - - - -
        row_list : list of label values in non-moving brain
        col_list : list of label values in moving brain
        costMatrix : matrix of costs between labels in each brain
    """

    # Compute linear assignment
    ar, ac = lsa(costMatrix)

    rows = np.asarray(row_list)[ar]
    cols = np.asarray(col_list)[ac]

    non_mapped = set(col_list).difference(set(cols))

    # Remap assignment indices to true label values
    remapped = dict(zip(rows, cols))
    unmapped = list(non_mapped)

    return remapped, unmapped
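A toy invocation, assuming numpy and the lsa alias are imported:

import numpy as np

row_list = np.array([10, 11, 12])        # labels in the non-moving brain
col_list = np.array([20, 21, 22, 23])    # labels in the moving brain
costMatrix = np.array([[0.1, 0.9, 0.8, 0.7],
                       [0.9, 0.2, 0.8, 0.7],
                       [0.9, 0.8, 0.1, 0.7]])

remapped, unmapped = linear_assignment(row_list, col_list, costMatrix)
print(remapped)   # maps 10 -> 20, 11 -> 21, 12 -> 22
print(unmapped)   # the moving-brain label left unassigned (23)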
Example 14
	global_IoU_vals.append(IoU_vals)
	trains_all.append(train_polys)
	test_all.append(test_polys)
	#break

print("done computing the IoUs")

#'''
for IoU_thresh in np.arange(0, 1+step, step):
	print(IoU_thresh)
	stats = []
	for i in range(len(global_IoU_vals)):
		IoU_vals = np.copy(global_IoU_vals[i])
		_id = IoU_vals < IoU_thresh
		IoU_vals[_id] = 0
		_row, _col = lsa(1-IoU_vals)
		
		TP = (IoU_vals[_row, _col] > 0).sum()
		FP = (IoU_vals[_row, _col] == 0).sum()
		# can't have true negative
		FN = len(trains_all[i]) - TP
		
		stats.append([TP, FP, FN])

	global_stats.append(stats)
	
	stats = np.asarray(stats)
	avg_TP = float(stats[:,0].sum()) / float(stats.shape[0])
	avg_FP = float(stats[:,1].sum()) / float(stats.shape[0])
	avg_FN = float(stats[:,2].sum()) / float(stats.shape[0])
	
Example 15
def ordinator1d(prior,
                k,
                forward,
                inverse,
                chunksize=10,
                pdf=None,
                pdf_metric=None,
                sparse_metric=None,
                disp=False):
    '''Find permutation that maximizes sparsity of 1d signal.

    Parameters
    ----------
    prior : array_like
        Prior signal estimate to base ordering.
    k : int
        Desired sparsity level.
    forward : callable
        Sparsifying transform.
    inverse : callable
        Inverse sparsifying transform.
    chunksize : int, optional
        Chunk size for parallel processing pool.
    pdf : callable, optional
        Function that estimates pixel intensity distribution.
    pdf_metric : callable, optional
        Function that returns the distance between pdfs.
    sparse_metric : callable, optional
        Metric to use to measure sparsity.  Uses l1 norm by default.
    disp : bool, optional
        Whether or not to display coefficient plots at the end.

    Returns
    -------
    array_like
        Reordering indices.

    Raises
    ------
    ValueError
        If disp=True and forward function is not provided.

    Notes
    -----
    pdf_method=None uses histogram.  pdf_metric=None uses l2 norm. If
    disp=True then forward transform function must be provided.
    Otherwise, forward is not required, only inverse.

    pdf_method should assume the signal will be bounded between
    (-1, 1).  We do this by always normalizing a signal before
    computing pdf or comparing.
    '''

    # # Make sure we have the forward transform if we want to display
    # if disp and forward is None:
    #     raise ValueError(
    #         'Must provide forward transform for display!')

    # Make sure we have a sparsity metric
    if sparse_metric is None:
        sparse_metric = lambda x0: np.linalg.norm(x0, ord=1)

    # Make sure we do in fact have a 1d signal
    if prior.ndim > 1:
        logging.warning('Prior is not 1d! Flattening!')
        prior = prior.flatten()
    N = prior.size

    # Go ahead and normalize the signal so we don't have to keep
    # track of the limits of the pdfs we want to compare, always
    # between (-1, 1).
    prior /= np.max(np.abs(prior)) + np.finfo('float').eps

    # Default to histogram
    if pdf is None:
        pdf_object = pdf_default(prior)
        pdf = pdf_object.pdf
        pdf_ref = pdf_object.pdf_ref
    else:
        # Get reference pdf
        pdf_ref = pdf(prior)

    # Default to l2 metric
    if pdf_metric is None:
        pdf_metric = pdf_metric_default

    # Let's try to do things in parallel -- more than twice as fast!
    search_fun_partial = partial(search_fun,
                                 N=N,
                                 k=k,
                                 inverse=inverse,
                                 pdf_ref=pdf_ref,
                                 pdf_metric=pdf_metric,
                                 pdf=pdf)

    t0 = time()  # start the timer
    with Pool() as pool:
        res = list(
            tqdm(pool.imap(search_fun_partial, combinations(range(N), k),
                           chunksize),
                 total=comb(N, k, exact=True),
                 leave=False))
    res = np.array(res)

    # Choose the winner
    winner_idx = np.where(res[:, -1] == res[:, -1].min())[0]
    potentials = []
    for idx0 in winner_idx:
        potentials.append(res[idx0, :])
        print('potential:', potentials[-1][0])
    print('Found %d out of %d (%%%g) potentials in %d seconds!' %
          (len(potentials), res.shape[0], len(potentials) / res.shape[0] * 100,
           time() - t0))

    # Now solve the assignment problem, we only need one of the
    # potentials, so look at all of them and choose the one that is
    # most sparse
    if disp:
        import matplotlib.pyplot as plt

    winner_sparse = np.inf
    cur_win = None
    for potential in potentials:
        c = np.zeros(N)
        idx_proposed = potential[0]
        c[idx_proposed] = potential[1]
        xhat = inverse(c)
        xhat /= np.max(np.abs(xhat)) + np.finfo('float').eps
        C = cdist(xhat[:, None], prior[:, None])
        _rows, cols = lsa(C)

        cur_sparse = sparse_metric(forward(prior[cols]))
        if cur_sparse < winner_sparse:
            winner_sparse = cur_sparse
            print('New win: %g' % cur_sparse)
            cur_win = cols

        if disp:
            tcoeffs = np.abs(forward(prior[cols]))
            # tcoeffs /= np.max(tcoeffs)
            plt.plot(-np.sort(-tcoeffs), '--', label='xpi')

    # Show reference coefficients
    if disp:
        # plt.plot(-np.sort(-np.abs(forward(xhat))), label='xhat')
        tcoeffs = np.abs(forward(np.sort(prior)))
        # tcoeffs /= np.max(tcoeffs)
        plt.plot(-np.sort(-tcoeffs), ':', label='sort(x)')
        plt.legend()
        plt.title('Sorted, Normalized Transform Coefficients')
        plt.show()

    return cur_win
Example 16
def hung_dedup(df, return_dups=False, dups=False):

    if not isinstance(df, pd.DataFrame):
        df = pd.read_csv(prediction_file)
    if dups:
        just_dups = df
    else:
        a = df[df.columns[0]]
        b = df[df.columns[1]]
        g = df[a.isin(a[a.duplicated(keep=False)])]
        h = df[b.isin(b[b.duplicated(keep=False)])]
        just_dups = pd.concat([g, h], ignore_index=True).drop_duplicates(
            subset=[df.columns[0], df.columns[1]])
        just_dups = just_dups.loc[:,
                                  ~just_dups.columns.str.contains('^Unnamed')]

    index1, index2 = list(set(just_dups.iloc[:, 0])), list(
        set(just_dups.iloc[:, 1]))
    col1, col2, col3 = list(just_dups.iloc[:, 0]), list(
        just_dups.iloc[:, 1]), list(just_dups.iloc[:, 2])

    k = len(index1)
    ind_ind1 = dict(zip(index1, list(range(k))))
    j = k + len(index2)
    ind_ind2 = dict(zip(index2, list(range(k, j))))

    inv_ind1 = {v: k for k, v in ind_ind1.items()}
    inv_ind2 = {v: k for k, v in ind_ind2.items()}

    row, col, data = [], [], []

    row = [ind_ind1[x] for x in col1]
    col = [ind_ind2[x] for x in col2]
    data = list(-1 * np.array(just_dups.iloc[:, 2]))
    A = csc_matrix((data, (row, col)), shape=(j, j))

    jl = list(range(j))
    n, f = csgraph.connected_components(A, directed=False, connection='weak')
    l = {
        k: set(map(lambda x: x[1], v))
        for k, v in itertools.groupby(sorted(zip(f, jl)), key=lambda x: x[0])
    }

    pairs = list(zip(row, col))
    get_prob = dict(zip(pairs, data))
    num_connected_sets = len(l)
    y1ind, y2ind = set(range(k)), set(range(k, j))
    pairs = set(pairs)
    year1, year2, prob = [], [], []

    for i in range(num_connected_sets):
        s = l[i]
        y1 = list(s & y1ind)
        y2 = list(s & y2ind)
        mat = np.zeros((len(y1), len(y2)))
        q = itertools.product(y1, y2)
        for a in q:
            if a in pairs:
                one, two = a
                mat[y1.index(one), y2.index(two)] = get_prob[a]
        r, c = lsa(mat)
        for j in range(len(c)):
            a = (y1[r[j]], y2[c[j]])
            if a in pairs:
                year1 += [inv_ind1[y1[r[j]]]]
                year2 += [inv_ind2[y2[c[j]]]]
                prob += [get_prob[a]]
    prob = list(-1 * np.array(prob))
    final = pd.DataFrame(
        list(zip(year1, year2, prob)),
        columns=[just_dups.columns[0], just_dups.columns[1], 'link_prob'])
    if return_dups:
        return final, just_dups
    else:
        return final
Example 17
    def get_action(self, agent_list, target_list):
        agent_pos_list = np.array([agent.pos for agent in agent_list])
        target_pos_list = np.array([target.pos for target in target_list])
        target_time_list = np.array([target.time for target in target_list])

        if len(target_pos_list) == 0:
            return [0] * len(agent_pos_list)

        agent2target_list = []

        for i, agent_pos in enumerate(agent_pos_list):
            agent2target_list.append([])
            for target_pos, target_time in zip(target_pos_list,
                                               target_time_list):
                distVtime = np.sum(np.abs(target_pos - agent_pos))
                agent2target_list[i].append(distVtime)

        target_for_agent = []

        cost = np.array(agent2target_list)

        if len(target_list) >= 2:
            _, target_for_agent = lsa(cost)
        elif 0 < len(target_list) < len(agent_pos_list):
            cost = np.hstack((cost, 1000 * np.ones((len(agent_pos_list), 1))))
            _, target_for_agent = lsa(cost)
        else:
            target_for_agent = [-1] * len(agent_pos_list)

        action_list = []
        for agent_idx, target_idx in enumerate(target_for_agent):
            if (target_idx == -1 or target_idx > len(target_list) - 1
                    or np.sum(
                        np.abs(target_pos_list[target_idx] -
                               agent_pos_list[agent_idx])) >
                    target_time_list[target_idx]):
                action_list.append(0)
            else:
                x, y = target_pos_list[target_idx] - agent_pos_list[agent_idx]

                if x == 0 and y == 0:
                    action_list.append(0)

                elif x == 0 or y == 0:
                    if not x == 0:
                        if x > 0:
                            action_list.append(1)
                        else:
                            action_list.append(2)
                    elif not y == 0:
                        if y > 0:
                            action_list.append(4)
                        else:
                            action_list.append(3)
                else:  # both are non zero
                    choice = np.random.randint(0, 2)
                    if choice == 0:
                        if x > 0:
                            action_list.append(1)
                        else:
                            action_list.append(2)
                    else:
                        if y > 0:
                            action_list.append(4)
                        else:
                            action_list.append(3)

        return action_list
Example 18
def relaxed_ordinator(x,
                      lam,
                      k,
                      unsparsify,
                      norm=False,
                      warm=False,
                      transform_shape=None,
                      disp=False):
    '''Find ordering pi that makes x[pi] sparse.

    Parameters
    ----------
    x : array_like
        Signal to find ordering of.
    lam : float
        Lagrangian weight on l1 term of objective function.
    k : int
        Expected sparsity level (number of nonzero coefficients) of
        ordered signal, x[pi].
    unsparsify : callable
        Function that computes inverse sparsifying transform.
    norm : bool, optional
        Normalize xhat at each step (probably don't do this.)
    warm : bool
        Whether to look for a warm start file and save intermediate
        results.
    transform_shape : int
        Shape of transform coefficients (if different than x.shape).
        None will use x.shape.
    disp : bool
        Display progress messages.

    Returns
    -------
    pi : array_like
        Flattened ordering array (like the one returned by numpy.argsort).

    Notes
    -----
    `transform_shape` will be x.size - 1 for the finite differences
    transform.
    '''

    # If size of coefficients is different than x.shape, make note
    if transform_shape is None:
        transform_shape = x.shape

    # Check to see if we can warm start
    c0 = load_intermediate()
    if not warm or c0 is None:
        c0 = np.ones(transform_shape).flatten()
    else:
        print('WARM START')

    pobj = partial(obj,
                   x=x,
                   lam=lam,
                   unsparsify=unsparsify,
                   norm=norm,
                   transform_shape=transform_shape)
    res = minimize(pobj,
                   c0,
                   callback=lambda x: save_intermediate(x, pobj(x), disp))
    # print(res)

    # Go ahead and hard threshold here
    c_est = res['x']
    c_est[np.abs(c_est) < np.sort(np.abs(c_est))[-k]] = 0

    # plt.plot(res['x'])
    # plt.plot(c_true)
    # plt.show()

    # Do the assignment and try to recover x
    xhat = make_xhat(c_est.reshape(transform_shape), unsparsify, norm)
    C = cdist(xhat.flatten()[:, None], x.flatten()[:, None])
    _row, pi = lsa(C)

    return pi
Example 19
def hungarian(A):
    B = A.T
    rows, cols = lsa(B)
    return cols, sum(B[rows, cols])
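A quick sketch (numpy and the lsa alias are assumed to be imported). Because the function assigns on the transposed matrix, the returned indices give, for each column of A, the row assigned to it:

import numpy as np

A = np.array([[4, 1, 3],
              [2, 0, 5],
              [3, 2, 2]])

assignment, total = hungarian(A)
print(assignment)   # [1 0 2]: column 0 gets row 1, column 1 gets row 0, column 2 gets row 2
print(total)        # 5, the minimum total cost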
Example 20
    def error(self, variables):
        ########## Expectation ##########
        pose = variables.at(self.keys()[0])
        location = np.append(pose.translation(), self.z)  # append z
        orientation = np.array([0, 0, pose.so2().theta()])

        if self._first_time:
            # Store the initially guessed pose when computing error the first time
            self._init_tform = Transform.from_conventional(
                location, orientation)
            self._init_orientation = orientation

        if self.static:
            # Static mode
            if self._first_time:
                # First time extracting expected land boundaries
                fbumper_location = get_fbumper_location(
                    location, orientation, self.px)
                self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                    fbumper_location, orientation)

                expected_coeffs_list = [expected.get_c0c1_list()
                                        for expected in self.me_format_expected_markings]
                expected_type_list = [expected.type
                                      for expected in self.me_format_expected_markings]

                # The snapshot is stored in their normal forms; i.e. a, b, c, and alpha describing the lines
                self._init_normal_forms = [compute_normal_form_line_coeffs(self.px, c[0], c[1])
                                           for c in expected_coeffs_list]
                # Snapshot of lane boundary types
                self._init_types = expected_type_list

                self._first_time = False
            else:
                # Not first time, use snapshot of lane boundaries extracted the first time to compute error
                # Pose difference is wrt local frame
                pose_diff = self._get_pose_diff(location, orientation)

                # Compute expected lane boundary coefficients using the snapshot
                expected_coeffs_list = []
                for normal_form in self._init_normal_forms:
                    c0c1 = self._compute_expected_c0c1(normal_form, pose_diff)
                    expected_coeffs_list.append(c0c1)
                # Retrieve lane boundary types from snapshot
                expected_type_list = self._init_types
        else:
            # Not static mode
            # Extract ground truth from the Carla server
            fbumper_location = get_fbumper_location(
                location, orientation, self.px)
            self.in_junction, self.into_junction, self.me_format_expected_markings = self.expected_lane_extractor.extract(
                fbumper_location, orientation)

            # List of expected markings' coefficients
            expected_coeffs_list = [expected.get_c0c1_list()
                                    for expected in self.me_format_expected_markings]

            # List of expected markings' type
            expected_type_list = [expected.type
                                  for expected in self.me_format_expected_markings]

        ########## Measurement ##########
        if self.left_marking:
            measured_coeffs_left = np.asarray(
                self.left_marking.get_c0c1_list()).reshape(2, -1)
            measured_type_left = self.left_marking.type

        if self.right_marking:
            measured_coeffs_right = np.asarray(
                self.right_marking.get_c0c1_list()).reshape(2, -1)
            measured_type_right = self.right_marking.type

        # Null hypothesis
        # Use the measurement itself at every optimization iteration as the null hypothesis.
        # This is, of course, just a trick.
        # This means the error for null hypothesis is always zeros.
        null_expected_c0c1_left = self.left_marking.get_c0c1_list()
        null_expected_c0c1_right = self.right_marking.get_c0c1_list()
        null_error = np.zeros((2, 1))   # same for both left and right

        # Compute innovation matrix for the null hypo
        null_noise_cov = self.noise_cov * self.null_std_scale**2

        # Compute measurement likelihood weighted by null probability
        null_weighted_meas_likelihood = self.prob_null * \
            multivariate_normal.pdf(null_error.squeeze(), cov=null_noise_cov)

        # In this implementation, scaling down error and jacobian is done to achieve
        # the same effect of tuning the information matrix online.
        # Here, however, scale down error for null hypo; i.e.
        # null_error /= self.null_std_scale
        # is not necessary, since it's always zero.
        # Zero error and jacobian effectively result in zero information matrix as well.

        if self.ignore_junction and (self.in_junction or self.into_junction):
            self._null_hypo_left = True
            self._null_hypo_right = True
        elif not expected_coeffs_list:
            self._null_hypo_left = True
            self._null_hypo_right = True
        else:
            # Data association
            num_expected_markings = len(self.me_format_expected_markings)
            asso_table = np.zeros((2, num_expected_markings+2))

            # Left lane marking
            errors_left = []
            asso_probs = []
            meas_likelihoods = []
            for exp_coeffs, exp_type in zip(expected_coeffs_list, expected_type_list):
                # Compute innovation matrix
                expected_c0, expected_c1 = exp_coeffs
                H = compute_H(self.px, expected_c0, expected_c1)
                innov = H @ self.pose_uncert @ H.T + self.noise_cov

                # Compute squared mahalanobis distance
                error = np.asarray(exp_coeffs).reshape(
                    2, -1) - measured_coeffs_left
                squared_mahala_dist = error.T @ np.linalg.inv(
                    innov) @ error

                # Semantic likelihood
                if self.semantic:
                    # Conditional probability on type
                    sem_likelihood = self._conditional_prob_type(
                        exp_type, measured_type_left)
                else:
                    # Turning off semantic association is equivalent to always
                    # setting the semantic likelihood to 1.0
                    sem_likelihood = 1.0

                # Gating (geometric and semantic)
                # Reject both geometrically and semantically unlikely associations
                # Note:
                # Since the location distribution across lanes is essentially multimodal,
                # geometric gating that assumes a unimodal location is not very
                # reasonable and will inevitably reject possible associations.
                # Here the geometric gate is set very large so we can preserve associations that
                # are a bit far from the current mode (the only mode that exists in the optimization)
                # but still possible when the multimodal nature is concerned.
                # Or, we can simply give up geometric gating and use semantic gating only.
                # The large geometric gate is an inelegant remedy after all.
                # if squared_mahala_dist <= self.geo_gate and sem_likelihood > self.sem_gate:
                if sem_likelihood > self.sem_gate:
                    errors_left.append(error)

                    # Measurement likelihood (based on noise cov)
                    meas_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=self.noise_cov)

                    # Geometric likelihood (based on innov)
                    geo_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=innov)

                    meas_likelihoods.append(meas_likelihood)
                    asso_prob = geo_likelihood * sem_likelihood
                    asso_probs.append(asso_prob)
                else:
                    errors_left.append(None)
                    meas_likelihoods.append(0)
                    asso_probs.append(0)

            asso_probs = np.asarray(asso_probs)
            meas_likelihoods = np.asarray(meas_likelihoods)

            # Compute weights based on total probability theorem
            if asso_probs.sum():
                weights_left = (1-self.prob_null) * \
                    (asso_probs/np.sum(asso_probs))
            else:
                weights_left = np.zeros(asso_probs.shape)

            # Weight measurement likelihoods
            weighted_meas_likelihood = weights_left*meas_likelihoods

            # Add weight and weighted likelihood of null hypothesis
            weights_left = np.insert(weights_left, 0, [self.prob_null, 0])
            weighted_meas_likelihood = np.insert(
                weighted_meas_likelihood, 0, [null_weighted_meas_likelihood, 0])

            asso_table[0, :] = weighted_meas_likelihood

            # Right marking
            errors_right = []
            asso_probs = []
            meas_likelihoods = []
            for exp_coeffs, exp_type in zip(expected_coeffs_list, expected_type_list):
                # Compute innovation matrix
                expected_c0, expected_c1 = exp_coeffs
                H = compute_H(self.px, expected_c0, expected_c1)
                innov = H @ self.pose_uncert @ H.T + self.noise_cov

                # Compute squared mahalanobis distance
                error = np.asarray(exp_coeffs).reshape(
                    2, -1) - measured_coeffs_right
                squared_mahala_dist = error.T @ np.linalg.inv(
                    innov) @ error

                # Semantic likelihood
                if self.semantic:
                    # Conditional probability on type
                    sem_likelihood = self._conditional_prob_type(
                        exp_type, measured_type_right)
                else:
                    # Turning off semantic association is equivalent to always
                    # setting the semantic likelihood to 1.0
                    sem_likelihood = 1.0

                # Gating (geometric and semantic)
                # Reject both geometrically and semantically unlikely associations
                # Note:
                # Since the location distribution across lanes is essentially multimodal,
                # geometric gating that assumes a unimodal location is not very
                # reasonable and will inevitably reject possible associations.
                # Here the geometric gate is set very large so we can preserve associations that
                # are a bit far from the current mode (the only mode that exists in the optimization)
                # but still possible when the multimodal nature is concerned.
                # Or, we can simply give up geometric gating and use semantic gating only.
                # The large geometric gate is an inelegant remedy after all.
                # if squared_mahala_dist <= self.geo_gate and sem_likelihood > self.sem_gate:
                if sem_likelihood > self.sem_gate:
                    errors_right.append(error)

                    # Measurement likelihood (based on noise cov)
                    meas_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=self.noise_cov)

                    # Geometric likelihood (based on innov)
                    geo_likelihood = multivariate_normal.pdf(
                        error.reshape(-1), cov=innov)

                    meas_likelihoods.append(meas_likelihood)
                    asso_prob = geo_likelihood * sem_likelihood
                    asso_probs.append(asso_prob)
                else:
                    errors_right.append(None)
                    meas_likelihoods.append(0)
                    asso_probs.append(0)

            asso_probs = np.asarray(asso_probs)
            meas_likelihoods = np.asarray(meas_likelihoods)

            # Compute weights based on total probability theorem
            if asso_probs.sum():
                weights_right = (1-self.prob_null) * \
                    (asso_probs/np.sum(asso_probs))
            else:
                weights_right = np.zeros(asso_probs.shape)

            # Weight measurement likelihoods
            weighted_meas_likelihood = weights_right*meas_likelihoods

            # Add weight and weighted likelihood of null hypothesis
            weights_right = np.insert(weights_right, 0, [0., self.prob_null])
            weighted_meas_likelihood = np.insert(
                weighted_meas_likelihood, 0, [0., null_weighted_meas_likelihood, ])

            asso_table[1, :] = weighted_meas_likelihood

            # GNN association
            # This is performed at every optimization step, so this factor is essentially
            # a max-mixture factor. It's just now the max operation is replaced by the
            # linear sum assignment operation.

            # Take log so the association result maximizes the product of likelihoods
            # of the associations of the both sides
            log_asso_table = np.log(asso_table)
            _, col_idc = lsa(log_asso_table, maximize=True)

            asso_idx_left = col_idc[0]
            asso_idx_right = col_idc[1]

            # Left association
            if asso_idx_left == 0:
                # Null hypothesis
                self._null_hypo_left = True
            else:
                self._null_hypo_left = False
                chosen_error_left = errors_left[asso_idx_left-2]
                self.chosen_expected_coeffs_left = expected_coeffs_list[asso_idx_left-2]
                self._scale_left = weights_left[asso_idx_left]

            # Right association
            if asso_idx_right == 1:
                # Null hypothesis
                self._null_hypo_right = True
            else:
                self._null_hypo_right = False
                chosen_error_right = errors_right[asso_idx_right-2]
                self.chosen_expected_coeffs_right = expected_coeffs_list[asso_idx_right-2]
                self._scale_right = weights_right[asso_idx_right]

        if self._null_hypo_left:
            chosen_error_left = null_error
            self.chosen_expected_coeffs_left = null_expected_c0c1_left
        if self._null_hypo_right:
            chosen_error_right = null_error
            self.chosen_expected_coeffs_right = null_expected_c0c1_right

        chosen_error = np.concatenate((chosen_error_left, chosen_error_right))

        return chosen_error
Example 21
    # # Make constraints at each xk
    # c_eq = np.zeros(N)
    # for ii, xk in np.ndenumerate(x):
    #     c_eq[ii] = count(x, xk)

    # Assuming we've guessed the right indices, can we find the right values?
    c0 = np.ones(k)
    res = basinhopping(obj, c0, minimizer_kwargs={'args': (
        idx_true,
        x,
    )})
    print(res)

    print(res['x'], c_true[idx_true])

    # Now solve the assignment problem
    c = np.zeros(N)
    c[idx_true] = res['x']
    xhat = inverse(c)
    C = cdist(xhat[:, None], x[:, None])
    rows, cols = lsa(C)

    # Show the people the coefficients!
    plt.plot(-np.sort(-np.abs(forward(xhat))), label='xhat')
    plt.plot(-np.sort(-np.abs(forward(x[cols]))), '--', label='An x_pi')
    plt.plot(-np.sort(-np.abs(forward(np.sort(x)))), ':', label='sort(x)')
    plt.legend()
    plt.title('Sorted DCT Coefficients')
    plt.show()