import itertools
import math


def generate_intersections(lines):
    """
    Runs extract_intersections on all combinations of lines
    Writes the resulting intersections to file as well as returning

    Args:
        lines: the lines from the shapefile

    Returns:
        inters: intersections - a list of point, dict tuples
            the dict contains the newly created ids of the
            intersecting segments
    """
    inters = []
    i = 0

    # Total combinations of two road segments
    def nCr(n, r):
        f = math.factorial
        return f(n) // f(r) // f(n - r)

    tot = nCr(len(lines), 2)
    # Look at all pairs of segments to extract intersections
    for segment1, segment2 in itertools.combinations(lines, 2):
        track(i, 10000, tot)
        if segment1[1].intersects(segment2[1]):
            inter = segment1[1].intersection(segment2[1])
            inters.extend(
                extract_intersections(inter, {
                    'id_1': segment1[0],
                    'id_2': segment2[0]
                }))
        i += 1

    return inters
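
# For reference, a minimal, self-contained sketch of the pairwise check the
# function above performs, assuming lines are (id, shapely geometry) tuples
# as the indexing suggests; extract_intersections and track are project
# helpers that are not shown in this snippet.
import itertools
from shapely.geometry import LineString

lines = [
    (1, LineString([(0, 0), (2, 2)])),
    (2, LineString([(0, 2), (2, 0)])),
    (3, LineString([(5, 5), (6, 6)])),
]
for (id1, geom1), (id2, geom2) in itertools.combinations(lines, 2):
    if geom1.intersects(geom2):
        print(id1, id2, geom1.intersection(geom2))  # POINT (1 1) for pair 1, 2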
Example #2
def get_candidates(buffered, buffered_index, lines):
    """
    Gets candidate matches: lines that overlap the buffer of
    lines from the other map
    Args:
        buffered - a list of tuples containing the buffer,
            the linestring, and the properties for the lines for one map
        buffered_index - the rtree index
        lines - the lines for the other map
    Returns:
        a list of dicts containing a line, the properties,
        and the candidate overlapping lines
    """
    results = []

    print "Getting candidate overlapping lines"

    # Go through each line from the osm map
    for i, line in enumerate(lines):

        overlapping = []
        util.track(i, 1000, len(lines))

        # First, get candidates from new map that overlap the buffer
        # from the original map
        for idx in buffered_index.intersection(line[0].bounds):
            buffer = buffered[idx][0]

            # If the buffer from the original map intersects the new line,
            # record the line as a candidate match
            if buffer.intersects(line[0]):

                # Add the linestring and the features to the overlap list
                overlapping.append((buffered[idx][1], buffered[idx][2]))

        results.append({
            'line': line[0],
            'properties': line[1],
            'candidates': overlapping,
        })

    return results
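
# A sketch of how the buffered inputs to get_candidates might be prepared,
# assuming each map line is a (geometry, properties) tuple; the 5-unit
# buffer width is illustrative and not taken from the source.
from rtree import index
from shapely.geometry import LineString

map_lines = [(LineString([(0, 0), (1, 1)]), {'name': 'a'})]
buffered = [(geom.buffer(5), geom, props) for geom, props in map_lines]

buffered_index = index.Index()
for idx, (buf, geom, props) in enumerate(buffered):
    buffered_index.insert(idx, buf.bounds)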
Example #3
def get_int_mapping(lines, buffered, buffered_index):
    """
    Gets the mappings between intersections
    Args:
        lines - the set of lines in an intersection
        buffered - the buffered lines for the intersections in the other map
        buffered_index - the rtree index
    """
    print "Getting intersection mappings"

    line_results = []
    # Go through each line from the osm map
    for i, line in enumerate(lines):
        util.track(i, 1000, len(lines))
        line_buffer = line[0].buffer(10)

        best_match = {}
        best_overlap = 0
        for idx in buffered_index.intersection(line_buffer.bounds):
            buffer = buffered[idx][0]
            # If the new buffered intersection intersects the old one,
            # figure out how much they overlap and take the best one
            if buffer.intersects(line[0]):
                total_area = unary_union([line_buffer, buffer]).area
                overlap = max(line_buffer.area / total_area,
                              buffer.area / total_area)

                if overlap > best_overlap and overlap > .20:
                    best_overlap = overlap
                    best_match = buffered[idx][2]

        line_results.append([line[0], line[1], best_match])

    total = len(line_results)
    unmatched = len([x for x in line_results if not x[2]])
    percent_matched = 100 - 100 * float(unmatched) / float(total)
    print("Found matches for " + str(percent_matched) + "% of intersections")
    return line_results
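
# A small worked example of the overlap score used above: the larger of
# the two buffer areas divided by the area of their union, so identical
# shapes score 1.0. The geometries here are illustrative.
from shapely.geometry import Point
from shapely.ops import unary_union

a = Point(0, 0).buffer(10)
b = Point(5, 0).buffer(10)
total_area = unary_union([a, b]).area
overlap = max(a.area / total_area, b.area / total_area)
print(round(overlap, 2))  # about 0.76, comfortably above the 0.20 threshold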
Example #4
    def process_frame(self, im_gray):

        tracked_keypoints, _ = util.track(self.im_prev, im_gray,
                                          self.active_keypoints)
        (center, scale_estimate, rotation_estimate,
         tracked_keypoints) = self.estimate(tracked_keypoints)

        # Detect keypoints, compute descriptors
        keypoints_cv = self.detector.detect(im_gray)
        keypoints_cv, features = self.descriptor.compute(im_gray, keypoints_cv)

        # Create list of active keypoints
        active_keypoints = zeros((0, 3))

        # Get the best two matches for each feature
        matches_all = self.matcher.knnMatch(features, self.features_database,
                                            2)
        # Get all matches for selected features
        if not any(isnan(center)):
            selected_matches_all = self.matcher.knnMatch(
                features, self.selected_features, len(self.selected_features))

        # For each keypoint and its descriptor
        if len(keypoints_cv) > 0:
            transformed_springs = scale_estimate * util.rotate(
                self.springs, -rotation_estimate)
            for i in range(len(keypoints_cv)):

                # Retrieve keypoint location
                location = np.array(keypoints_cv[i].pt)

                # First: Match over whole image
                # Compute distances to all descriptors
                matches = matches_all[i]
                distances = np.array([m.distance for m in matches])

                # Convert distances to confidences, do not weight
                combined = 1 - distances / self.DESC_LENGTH

                classes = self.database_classes

                # Get best and second best index
                bestInd = matches[0].trainIdx
                secondBestInd = matches[1].trainIdx

                # Compute distance ratio according to Lowe
                ratio = (1 - combined[0]) / (1 - combined[1])

                # Extract class of best match
                keypoint_class = classes[bestInd]

                # If distance ratio is ok and absolute distance is ok and keypoint class is not background
                if (ratio < self.THR_RATIO and combined[0] > self.THR_CONF
                        and keypoint_class != 0):
                    # Add keypoint to active keypoints
                    new_kpt = append(location, keypoint_class)
                    active_keypoints = append(active_keypoints,
                                              array([new_kpt]),
                                              axis=0)

                # In a second step, try to match difficult keypoints
                # If structural constraints are applicable
                if not any(isnan(center)):

                    # Compute distances to initial descriptors
                    matches = selected_matches_all[i]
                    distances = np.array([m.distance for m in matches])
                    # Re-order the distances based on indexing
                    idxs = np.argsort(np.array([m.trainIdx for m in matches]))
                    distances = distances[idxs]

                    # Convert distances to confidences
                    confidences = 1 - distances / self.DESC_LENGTH

                    # Compute the keypoint location relative to the object center
                    relative_location = location - center

                    # Compute the distances to all springs
                    displacements = util.L2norm(transformed_springs -
                                                relative_location)

                    # For each spring, calculate weight
                    weight = displacements < self.THR_OUTLIER  # Could be smooth function

                    combined = weight * confidences

                    classes = self.selected_classes

                    # Sort in descending order
                    sorted_conf = argsort(combined)[::-1]  # reverse

                    # Get best and second best index
                    bestInd = sorted_conf[0]
                    secondBestInd = sorted_conf[1]

                    # Compute distance ratio according to Lowe
                    ratio = (1 - combined[bestInd]) / (1 -
                                                       combined[secondBestInd])

                    # Extract class of best match
                    keypoint_class = classes[bestInd]

                    # If distance ratio is ok and absolute distance is ok and keypoint class is not background
                    if (ratio < self.THR_RATIO
                            and combined[bestInd] > self.THR_CONF
                            and keypoint_class != 0):

                        # Add keypoint to active keypoints
                        new_kpt = append(location, keypoint_class)

                        # Check whether same class already exists
                        if active_keypoints.size > 0:
                            same_class = np.nonzero(
                                active_keypoints[:, 2] == keypoint_class)
                            active_keypoints = np.delete(active_keypoints,
                                                         same_class,
                                                         axis=0)

                        active_keypoints = append(active_keypoints,
                                                  array([new_kpt]),
                                                  axis=0)

        # If some keypoints have been tracked
        if tracked_keypoints.size > 0:

            # Extract the keypoint classes
            tracked_classes = tracked_keypoints[:, 2]

            # If there already are some active keypoints
            if active_keypoints.size > 0:

                # Add all tracked keypoints that have not been matched
                associated_classes = active_keypoints[:, 2]
                missing = ~np.in1d(tracked_classes, associated_classes)
                active_keypoints = append(active_keypoints,
                                          tracked_keypoints[missing, :],
                                          axis=0)

            # Else use all tracked keypoints
            else:
                active_keypoints = tracked_keypoints

        # Update object state estimate
        self.center = center
        self.scale_estimate = scale_estimate
        self.rotation_estimate = rotation_estimate
        self.tracked_keypoints = tracked_keypoints
        self.active_keypoints = active_keypoints
        self.im_prev = im_gray
        self.keypoints_cv = keypoints_cv

        self.tl = (nan, nan)
        self.tr = (nan, nan)
        self.br = (nan, nan)
        self.bl = (nan, nan)

        self.bb = array([nan, nan, nan, nan])

        self.has_result = False
        if (not any(isnan(self.center)) and self.active_keypoints.shape[0] >
                self.num_initial_keypoints / 10):
            self.has_result = True

            tl = util.array_to_int_tuple(center + scale_estimate * util.rotate(
                self.center_to_tl[None, :], rotation_estimate).squeeze())
            tr = util.array_to_int_tuple(center + scale_estimate * util.rotate(
                self.center_to_tr[None, :], rotation_estimate).squeeze())
            br = util.array_to_int_tuple(center + scale_estimate * util.rotate(
                self.center_to_br[None, :], rotation_estimate).squeeze())
            bl = util.array_to_int_tuple(center + scale_estimate * util.rotate(
                self.center_to_bl[None, :], rotation_estimate).squeeze())

            min_x = min((tl[0], tr[0], br[0], bl[0]))
            min_y = min((tl[1], tr[1], br[1], bl[1]))
            max_x = max((tl[0], tr[0], br[0], bl[0]))
            max_y = max((tl[1], tr[1], br[1], bl[1]))

            self.tl = tl
            self.tr = tr
            self.bl = bl
            self.br = br

            self.bb = np.array([min_x, min_y, max_x - min_x, max_y - min_y])
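
# A distilled sketch of the acceptance rule in the matching loop above:
# descriptor distances become confidences, and a candidate is kept only if
# the best match is confident and clearly better than the second best
# (Lowe's ratio test). The threshold values here are illustrative.
import numpy as np

DESC_LENGTH = 512.0
THR_CONF, THR_RATIO = 0.75, 0.8

distances = np.array([96.0, 160.0])            # best and second-best distances
combined = 1 - distances / DESC_LENGTH         # confidences: [0.8125, 0.6875]
ratio = (1 - combined[0]) / (1 - combined[1])  # 0.1875 / 0.3125 = 0.6
print(ratio < THR_RATIO and combined[0] > THR_CONF)  # True: keypoint accepted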
Example #5
	def process_frame(self, im_gray):

		tracked_keypoints, status = util.track(self.im_prev, im_gray, self.active_keypoints)
		(center, scale_estimate, rotation_estimate, tracked_keypoints) = self.estimate(tracked_keypoints)

		#Detect keypoints, compute descriptors
		keypoints_cv = self.detector.detect(im_gray) 
		keypoints_cv, features = self.descriptor.compute(im_gray, keypoints_cv)

		#Create list of active keypoints
		active_keypoints = zeros((0,3)) 

		#For each keypoint and its descriptor
		if len(keypoints_cv) > 0:
			for (keypoint_cv, feature) in zip(keypoints_cv, features):

				#Retrieve keypoint location
				location = np.array(keypoint_cv.pt)

				#First: Match over whole image
				#Compute distances to all descriptors
				matches = self.matcher.match(self.features_database, feature[None,:])
				distances = np.array([m.distance for m in matches])

				#Convert distances to confidences, do not weight
				combined = 1 - distances / self.DESC_LENGTH

				classes = self.database_classes

				#Sort in descending order
				sorted_conf = argsort(combined)[::-1] #reverse

				#Get best and second best index
				bestInd = sorted_conf[0]
				secondBestInd = sorted_conf[1]

				#Compute distance ratio according to Lowe
				ratio = (1-combined[bestInd]) / (1-combined[secondBestInd])

				#Extract class of best match
				keypoint_class = classes[bestInd]

				#If distance ratio is ok and absolute distance is ok and keypoint class is not background
				if ratio < self.THR_RATIO and combined[bestInd] > self.THR_CONF and keypoint_class != 0:

					#Add keypoint to active keypoints
					new_kpt = append(location, keypoint_class)
					active_keypoints = append(active_keypoints, array([new_kpt]), axis=0)

				#In a second step, try to match difficult keypoints
				#If structural constraints are applicable
				if not any(isnan(center)):

					#Compute distances to initial descriptors
					matches = self.matcher.match(self.selected_features, feature[None,:])
					distances = np.array([m.distance for m in matches])

					#Convert distances to confidences
					confidences = 1 - distances / self.DESC_LENGTH

					#Compute the keypoint location relative to the object center
					relative_location = location - center

					#Compute the distances to all springs
					displacements = util.L2norm(scale_estimate * util.rotate(self.springs, -rotation_estimate) - relative_location)

					#For each spring, calculate weight
					weight = displacements < self.THR_OUTLIER #Could be smooth function

					combined = weight * confidences

					classes = self.selected_classes

					#Sort in descending order
					sorted_conf = argsort(combined)[::-1] #reverse

					#Get best and second best index
					bestInd = sorted_conf[0]
					secondBestInd = sorted_conf[1]

					#Compute distance ratio according to Lowe
					ratio = (1-combined[bestInd]) / (1-combined[secondBestInd])

					#Extract class of best match
					keypoint_class = classes[bestInd]

					#If distance ratio is ok and absolute distance is ok and keypoint class is not background
					if ratio < self.THR_RATIO and combined[bestInd] > self.THR_CONF and keypoint_class != 0:

						#Add keypoint to active keypoints
						new_kpt = append(location, keypoint_class)

						#Check whether same class already exists
						if active_keypoints.size > 0:
							same_class = np.nonzero(active_keypoints[:,2] == keypoint_class)
							active_keypoints = np.delete(active_keypoints, same_class, axis=0)

						active_keypoints = append(active_keypoints, array([new_kpt]), axis=0)

		#If some keypoints have been tracked
		if tracked_keypoints.size > 0:

			#Extract the keypoint classes
			tracked_classes = tracked_keypoints[:,2]

			#If there already are some active keypoints
			if active_keypoints.size > 0:

				#Add all tracked keypoints that have not been matched
				associated_classes = active_keypoints[:,2]
				missing = ~np.in1d(tracked_classes, associated_classes)
				active_keypoints = append(active_keypoints, tracked_keypoints[missing,:], axis=0)

			#Else use all tracked keypoints
			else:
				active_keypoints = tracked_keypoints

		#Update object state estimate
		active_keypoints_before = active_keypoints
		self.center = center
		self.scale_estimate = scale_estimate
		self.rotation_estimate = rotation_estimate
		self.tracked_keypoints = tracked_keypoints
		self.active_keypoints = active_keypoints
		self.im_prev = im_gray
		self.keypoints_cv = keypoints_cv
		toc = time.time()

		self.tl = (nan,nan)
		self.tr = (nan,nan)
		self.br = (nan,nan)
		self.bl = (nan,nan)

		self.bb = array([nan,nan,nan,nan])

		self.has_result = False
		if not any(isnan(self.center)) and self.active_keypoints.shape[0] > self.num_initial_keypoints / 10:
			self.has_result = True

			tl = util.array_to_int_tuple(center + scale_estimate*util.rotate(self.center_to_tl[None,:], rotation_estimate).squeeze())
			tr = util.array_to_int_tuple(center + scale_estimate*util.rotate(self.center_to_tr[None,:], rotation_estimate).squeeze())
			br = util.array_to_int_tuple(center + scale_estimate*util.rotate(self.center_to_br[None,:], rotation_estimate).squeeze())
			bl = util.array_to_int_tuple(center + scale_estimate*util.rotate(self.center_to_bl[None,:], rotation_estimate).squeeze())

			min_x = min((tl[0],tr[0],br[0],bl[0]))
			min_y = min((tl[1],tr[1],br[1],bl[1]))
			max_x = max((tl[0],tr[0],br[0],bl[0]))
			max_y = max((tl[1],tr[1],br[1],bl[1]))

			self.tl = tl
			self.tr = tr
			self.bl = bl
			self.br = br

			self.bb = np.array([min_x, min_y, max_x - min_x, max_y - min_y])
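
# util.rotate is used throughout these snippets but not shown; this is a
# hypothetical minimal version, assuming it rotates an (n, 2) array of
# points counterclockwise by an angle given in radians.
import numpy as np

def rotate(points, angle):
    c, s = np.cos(angle), np.sin(angle)
    R = np.array([[c, -s], [s, c]])  # 2D rotation matrix
    return points.dot(R.T)

print(rotate(np.array([[1.0, 0.0]]), np.pi / 2))  # ~[[0., 1.]]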
Example #6
    def track_update(self, t, dt, z_k, size_k):
        # Arguments:
        # t: time of receiving the observations
        # dt: time elapsed since the last observations were received
        # z_k: a list of position data, expressed as [[x1, y1], [x2, y2], ...]

        # FoV analysis: classify observations into two parts,
        # inside the FoV or outside
        z_k, outside_z_k, size_k = self.obs_fov(z_k, size_k)

        # adaptive motion model here
        self.F = np.matrix([[1, 0, dt, 0], [0, 1, 0, dt], [0, 0, 1, 0],
                            [0, 0, 0, 1]])

        next_index_list = []

        ellips_inputs_k = []
        bb_output_k = []

        # 1. extract all obs inside the ellipsoidal gates of initialized and confirmed tracks

        obs_matrix = np.zeros((len(self.track_list_next_index), len(z_k)))

        for i in range(len(self.track_list_next_index)):
            k = self.track_list_next_index[i]
            kf = self.track_list[k].kf
            S = np.dot(np.dot(kf.H, kf.P_k_k_min), kf.H.T) + kf.R
            pred_z = np.dot(kf.H, kf.x_k_k_min)

            # stored for plotting the gating ellipse
            self.track_list[k].S = S
            self.track_list[k].pred_z = pred_z

            for j in range(len(z_k)):
                z = z_k[j]
                z_til = pred_z - np.matrix(z).T

                obs_matrix[i][j] = self.elliposidualGating(z_til, S)

        # use obs_matrix to check the relationship between observations and tracks

        obs_sum = obs_matrix.sum(axis=0)
        # matrix operation: exclude all observations whose column sum is 0,
        # meaning they fall in no gate
        index = np.where(obs_sum > 0)[0]
        obs_outside_index = np.where(obs_sum == 0)[0]

        # obs matrix inside the gate -- obs_matrix_gate
        obs_matrix_gate = obs_matrix[:, index]

        # 2. deal with observations inside all gates
        # i. initialize each observation, find out how many gates each observation is in

        obs_class = []
        # each element of obs_class is a measurement object whose .track lists
        # all tracks (0 - m_k) whose gates contain it. We then analyze each
        # gate: for the jth obs z_j inside its ith gate we calculate
        # g_ij = N(z_j - z_ij | 0, S_ij), where z_ij and S_ij come from the
        # KF updated with observation z_j

        for i in range(len(z_k)):

            if i in index:
                try:
                    a_mea = measurement(z_k[i], i)
                except Exception:
                    print(i, z_k)
                for j in np.where(obs_matrix[:, i] != 0)[0]:
                    # track_id identifies each track whose gate contains obs i
                    # (nonzero entries in column i of obs_matrix)
                    track_id = self.track_list_next_index[j]
                    a_mea.inside_track(track_id)
                    temp_kf = copy.deepcopy(self.track_list[track_id].kf)
                    temp_kf.update(z_k[i])
                    var = multivariate_normal(mean=np.squeeze(
                        temp_kf.z_bar.reshape(2).tolist()[0]),
                                              cov=temp_kf.S_k)
                    a_mea.g_ij.append(var.pdf(z_k[i]))
                obs_class.append(a_mea)
            else:
                obs_class.append([])

        # ii. for each gate/track, analyze whether any observations are shared
        # between different tracks, and find the relation between those tracks

        track_class = []
        for i in range(len(self.track_list_next_index)):
            k = self.track_list_next_index[i]
            a_track = self.track_list[k]  # pointer to track_list class
            a_track.get_measurement(np.where(obs_matrix[i] != 0)[0])
            a_track.measurement.append(-1)
            track_class.append(a_track)

        for i in range(len(track_class)):

            # Case 1. if there are no obs inside the track gate,
            # make the Kalman update x_k_k = x_k_k_min, P_k_k = P_k_k_min
            if track_class[i].deleted:
                continue

            if len(track_class[i].measurement) == 1:
                # only the false-alarm hypothesis remains, so only do prediction

                kf = track_class[i].kf
                #  NOTE: arguably wrong - we should feed x_k_k_min back as an
                #  observation in the Kalman update, then update the covariance
                kf.x_k_k = kf.x_k_k_min
                kf.P_k_k = kf.P_k_k_min
                #             kf.update([kf.x_k_k_min[0, 0], kf.x_k_k_min[1, 0]])

                track_class[i].update(t, kf, False)

                # only keep the confirmed ones
                if track_class[i].confirmed:
                    # ellips_inputs_k.append([track_class[i].S, track_class[i].pred_z])
                    ellips_inputs_k.append(track_class[i])
                    # ellips_inputs_k.append([track_class[i].id, track_class[i].kf.x_k_k, track_class[i].kf.P_k_k])
                    bb_output_k.append(track_class[i].bb_box_size)

                if not (track_class[i].deleted or track_class[i].abandoned):
                    next_index_list.append(self.track_list_next_index[i])
                continue

            # Case 2. there are obs inside the track gate
            # calculate the beta for ith track
            # need the number of measurements inside the gate
            beta = self.cal_beta(len(track_class[i].measurement) - 1)

            table_key = [self.track_list_next_index[i]]

            # find all observations inside this gate that are also related
            # to other gates (joint observations)

            for obs_id in track_class[i].measurement:
                if obs_id != -1:
                    obs = obs_class[obs_id]
                    table_key += obs.track

            table_key = list(set(table_key))

            # for each track, figure out how many observations fall inside its gate

            # invert the table
            table_key_inv = []
            for j in table_key:
                table_key_inv.append(self.track_list_next_index.index(j))

            table_key_matrix = obs_matrix[table_key_inv]

            # if several tracks see exactly the same observations, they
            # overlap; keep only the one with the oldest history
            if ((table_key_matrix == table_key_matrix[0]).all()
                    and len(table_key_matrix) > 2):
                seed = min(table_key)
                for key in table_key:
                    if key != seed:
                        self.track_list[key].deletion(t)

            # NOTE: to avoid multiple tracks tracking the same target,
            # we need to analyze the obs_matrix

            obs_num_tracks = obs_matrix.sum(axis=1)[table_key_inv]

            # number of joint tracks
            N_T = len(table_key)
            # number of observations total
            total_obs = []
            for track_id in table_key:
                a_track = self.track_list[track_id]
                total_obs += a_track.measurement

            total_obs = list(set(total_obs))
            N_o = len(total_obs) - 1

            common_factor = common_fact(beta, self.P_D, N_T, N_o)

            # iii. after merging all related tracks, generate a hypothesis
            # matrix/table based on the table generated by table_key
            obs_num_tracks_ = obs_num_tracks + 1
            total_row = int(obs_num_tracks_.prod())

            # create title for the table

            hyp_matrix = {}
            for a_key in table_key:
                hyp_matrix[str(a_key)] = []
            hyp_matrix["p"] = []

            for row_num in range(total_row):
                key_num = len(table_key)
                col_num = 0
                # build one row of hypothesis
                while key_num > 0:
                    if col_num == len(table_key) - 1:
                        obs_id = int(row_num)
                        product = 1
                    else:
                        product = obs_num_tracks_[(col_num + 1):].prod()
                        obs_id = int(row_num // product)

                    value = self.track_list[
                        table_key[col_num]].measurement[obs_id]

                    key = str(table_key[col_num])
                    hyp_matrix[key].append(value)
                    row_num = row_num % product
                    col_num += 1
                    key_num -= 1

                # now we want to calculate the probability of this row's hypothesis
                hyp_list = []

                prob = common_factor
                for key in hyp_matrix.keys():
                    if key != 'p':
                        hyp_list.append(hyp_matrix[key][-1])

                # print('hyp_list, ', hyp_list)
                # calculate the prob of this hypothesis
                if checkIfDuplicates(hyp_list):
                    # this is not a valid hypothesis
                    prob = 0
                else:
                    # this is a valid hypothesis, so calculate its probability
                    for key in hyp_matrix.keys():
                        if key != 'p':
                            track_id = int(key)
                            obs_id = hyp_matrix[key][-1]
                            # print('obs id ', obs_id)
                            if obs_id == -1:
                                prob *= (1 - self.P_D) * beta
                            else:
                                # print(obs_class[obs_id].table, print(obs_class[obs_id].id))
                                index = obs_class[obs_id].track.index(track_id)
                                prob *= self.P_D * obs_class[obs_id].g_ij[index]
                hyp_matrix['p'].append(prob)

            # iv. gather the probabilities for this track, and update the KF
            obs_in_i_track = track_class[i].measurement
            obs_in_i_track_prob = []
            hyp_in_i_track = np.array(hyp_matrix[str(
                self.track_list_next_index[i])])
            hyp_in_i_track_prob = np.array(hyp_matrix['p'])
            for obs in obs_in_i_track:
                index_ = np.where(hyp_in_i_track == obs)
                w_ij_list = hyp_in_i_track_prob[index_]
                obs_in_i_track_prob.append(w_ij_list.sum())

            # then normalize all the w_ij values
            obs_in_i_track_prob_norm = normalize(obs_in_i_track_prob)

            # then update the KF of the ith track
            kf = track_class[i].kf
            x_k = []
            P_k = []
            bb_k = []
            for obs in obs_in_i_track:
                if obs == -1:
                    x_k.append(kf.x_k_k)
                    P_k.append(kf.P_k_k)
                else:

                    z = np.array(z_k[obs]).T

                    # update the kf
                    temp_kf = copy.deepcopy(kf)

                    temp_kf.update(z)

                    x_k.append(temp_kf.x_k_k)
                    P_k.append(temp_kf.P_k_k)
                    bb_k.append(size_k[obs])

            x_k_pda = 0 * temp_kf.x_k_k
            P_k_pda = 0 * temp_kf.P_k_k
            for j in range(len(obs_in_i_track_prob_norm)):
                x_k_pda += obs_in_i_track_prob_norm[j] * x_k[j]

            for j in range(len(obs_in_i_track_prob_norm)):
                P_k_pda += obs_in_i_track_prob_norm[j] * (
                    P_k[j] + np.dot(x_k_pda - x_k[j], x_k_pda.T - x_k[j].T))

            # write the PDA result back into the kf
            kf.x_k_k = x_k_pda
            kf.P_k_k = P_k_pda

            if np.linalg.det(kf.P_k_k[0:2, 0:2]) > self.common_P:
                # print("track get deleted here~~")
                track_class[i].update(t, kf, False)
                track_class[i].deleted = True
                continue

            track_class[i].update(t, kf, True)

            # only keep the confirmed ones
            if track_class[i].confirmed:

                # ellips_inputs_k.append([track_class[i].id, track_class[i].kf.x_k_k, track_class[i].kf.P_k_k])
                ellips_inputs_k.append(track_class[i])
                # pick the bounding box closest to the associated x_k_k
                bb_output_k.append(bb_k[self.find_cloest(x_k_pda, x_k[1:])])
                track_class[i].bb_box_size = bb_output_k[-1]
            # save the active tracks for the next iteration
            if not (track_class[i].deleted or track_class[i].abandoned):
                next_index_list.append(self.track_list_next_index[i])

        # 3. deal with observations outside all gates

        # now initialize all observations outside of the gate
        for i in obs_outside_index:
            z = z_k[i] + [0, 0]  # initial state [x, y, vx=0, vy=0]
            x0 = np.matrix(z).T
            kf = EKFcontrol(self.F, self.H, x0, self.P, self.Q, self.R)
            id_ = len(self.track_list)
            new_track = track(t, id_, kf, self.DeletionThreshold,
                              self.ConfirmationThreshold)
            new_track.kf.predict()
            new_track.bb_box_size = size_k[i]  # bb size initialization
            self.track_list.append(new_track)
            next_index_list.append(id_)

        # swap in the new track index list
        self.track_list_next_index = next_index_list

        return ellips_inputs_k, bb_output_k
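
# elliposidualGating above is project code that is not shown; this is a
# hypothetical minimal version, assuming it gates the innovation z_til by
# its squared Mahalanobis distance under the innovation covariance S
# (gamma = 9.21 is roughly the 99% chi-square gate for 2 dof).
import numpy as np

def ellipsoidal_gating(z_til, S, gamma=9.21):
    z = np.asarray(z_til).reshape(-1, 1)
    d2 = float(z.T @ np.linalg.inv(S) @ z)  # squared Mahalanobis distance
    return 1.0 if d2 <= gamma else 0.0

S = np.eye(2) * 4.0
print(ellipsoidal_gating([2.0, 2.0], S))  # d2 = 2.0 -> inside the gate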
Example #7
    def process_frame(self, im_gray):
        tracked_keypoints, status, flow = util.track(self.im_prev, im_gray,
                                                     self.active_keypoints,
                                                     1.0)

        (center, scale_estimate, rotation_estimate, tracked_keypoints,
         flow) = self.estimate(tracked_keypoints, flow)

        # create a mask around the currently estimated object region
        mask = np.zeros_like(im_gray, dtype=np.uint8)
        if tracked_keypoints.size > 0 and not any(isnan(center)):
            tl = util.array_to_float_tuple(
                center + scale_estimate * util.rotate(
                    self.center_to_tl[None, :], rotation_estimate).squeeze())
            tr = util.array_to_float_tuple(
                center + scale_estimate * util.rotate(
                    self.center_to_tr[None, :], rotation_estimate).squeeze())
            br = util.array_to_float_tuple(
                center + scale_estimate * util.rotate(
                    self.center_to_br[None, :], rotation_estimate).squeeze())
            bl = util.array_to_float_tuple(
                center + scale_estimate * util.rotate(
                    self.center_to_bl[None, :], rotation_estimate).squeeze())

            cv.fillPoly(
                mask,
                np.array([tl, tr, br, bl], dtype=np.int32).reshape(-1, 4, 2),
                (255, 255, 255))
            mask = cv.dilate(mask, np.ones((19, 19), np.uint8), iterations=2)

            # Detect keypoints, compute descriptors
            keypoints_cv = self.detector.detect(im_gray, mask=mask)
        else:
            # Detect keypoints, compute descriptors
            keypoints_cv = self.detector.detect(im_gray)
        keypoints_cv, features = self.descriptor.compute(im_gray, keypoints_cv)

        # # double check for transformation estimation
        # mask = np.zeros_like(self.im_prev, dtype=np.uint8)
        # cv.fillPoly(mask, np.array([self.tl, self.tr, self.br, self.bl], dtype=np.int32).reshape(-1, 4, 2), (255, 255, 255))
        # keypoints_cv_prev = self.detector.detect(self.im_prev, mask=mask)
        # keypoints_cv_prev, features_prev = self.descriptor.compute(self.im_prev, keypoints_cv_prev)

        # H, status = self.estimate_homography(keypoints_cv_prev, features_prev, keypoints_cv, features)

        # if H is not None:
        #     tl = util.array_to_float_tuple(cv.perspectiveTransform(np.float32(self.tl).reshape(1, 1, -1), H).reshape(-1))
        #     tr = util.array_to_float_tuple(cv.perspectiveTransform(np.float32(self.tr).reshape(1, 1, -1), H).reshape(-1))
        #     br = util.array_to_float_tuple(cv.perspectiveTransform(np.float32(self.br).reshape(1, 1, -1), H).reshape(-1))
        #     bl = util.array_to_float_tuple(cv.perspectiveTransform(np.float32(self.bl).reshape(1, 1, -1), H).reshape(-1))

        # Create list of active keypoints
        active_keypoints = zeros((0, 3))

        # Get all matches for selected features
        if not any(isnan(center)):
            selected_matches_all = self.matcher.knnMatch(
                features, self.selected_features, len(self.selected_features))

        # For each keypoint and its descriptor
        matched_ratio = 0.0
        if len(keypoints_cv) > 0:
            transformed_springs = scale_estimate * util.rotate(
                self.springs, -rotation_estimate)
            for i in range(len(keypoints_cv)):

                # Retrieve keypoint location
                location = np.array(keypoints_cv[i].pt)

                # If structural constraints are applicable
                if not any(isnan(center)):

                    # Compute distances to initial descriptors
                    matches = selected_matches_all[i]
                    distances = np.array([m.distance for m in matches])
                    # Re-order the distances based on indexing
                    idxs = np.argsort(np.array([m.trainIdx for m in matches]))
                    distances = distances[idxs]

                    # Convert distances to confidences
                    confidences = 1 - distances / self.DESC_LENGTH

                    # Compute the keypoint location relative to the object center
                    relative_location = location - center

                    # Compute the distances to all springs
                    displacements = util.L2norm(transformed_springs -
                                                relative_location)

                    # For each spring, calculate weight
                    weight = displacements < self.THR_OUTLIER  # Could be smooth function

                    combined = weight * confidences

                    classes = self.selected_classes

                    # Sort in descending order
                    sorted_conf = argsort(combined)[::-1]  # reverse

                    # Get best and second best index
                    bestInd = sorted_conf[0]
                    secondBestInd = sorted_conf[1]

                    # Compute distance ratio according to Lowe
                    ratio = (1 - combined[bestInd] +
                             1e-8) / (1 - combined[secondBestInd] + 1e-8)

                    # Extract class of best match
                    keypoint_class = classes[bestInd]

                    # If distance ratio is ok and absolute distance is ok and keypoint class is not background
                    if (ratio < self.THR_RATIO
                            and combined[bestInd] > self.THR_CONF
                            and keypoint_class != 0):
                        matched_ratio += 1
                        # Add keypoint to active keypoints
                        new_kpt = append(location, keypoint_class)

                        # Check whether same class already exists
                        if active_keypoints.size > 0:
                            same_class = np.nonzero(
                                active_keypoints[:, 2] == keypoint_class)
                            active_keypoints = np.delete(active_keypoints,
                                                         same_class,
                                                         axis=0)

                        active_keypoints = append(active_keypoints,
                                                  array([new_kpt]),
                                                  axis=0)

        # If some keypoints have been tracked
        if tracked_keypoints.size > 0:

            # Extract the keypoint classes
            tracked_classes = tracked_keypoints[:, 2]

            # If there already are some active keypoints
            if active_keypoints.size > 0:

                # Add all tracked keypoints that have not been matched
                associated_classes = active_keypoints[:, 2]
                missing = ~np.in1d(tracked_classes, associated_classes)
                active_keypoints = append(active_keypoints,
                                          tracked_keypoints[missing, :],
                                          axis=0)

            # Else use all tracked keypoints
            else:
                active_keypoints = tracked_keypoints

        # Update object state estimate
        self.center = center
        self.scale_estimate = scale_estimate
        self.rotation_estimate = rotation_estimate
        self.tracked_keypoints = tracked_keypoints
        self.active_keypoints = active_keypoints
        self.keypoints_cv = keypoints_cv

        self.bb = array([nan, nan, nan, nan])

        self.has_result = False
        if (not any(isnan(self.center)) and self.active_keypoints.shape[0] >
                self.num_initial_keypoints / 10):
            self.has_result = True

            # tl, tr, br, bl were computed above when the tracking mask was built
            min_x = min((tl[0], tr[0], br[0], bl[0]))
            min_y = min((tl[1], tr[1], br[1], bl[1]))
            max_x = max((tl[0], tr[0], br[0], bl[0]))
            max_y = max((tl[1], tr[1], br[1], bl[1]))

            self.tl = tl
            self.tr = tr
            self.bl = bl
            self.br = br

            self.bb = np.array([min_x, min_y, max_x - min_x, max_y - min_y])

        self.matched_ratio = matched_ratio / len(self.selected_features)
        self.im_prev = im_gray
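
# A self-contained sketch of the masking step used above: fill the
# estimated object quadrilateral, dilate it, and restrict keypoint
# detection to that region. The image and corner values are illustrative.
import cv2 as cv
import numpy as np

im_gray = np.zeros((240, 320), dtype=np.uint8)
tl, tr, br, bl = (50, 50), (150, 50), (150, 120), (50, 120)

mask = np.zeros_like(im_gray, dtype=np.uint8)
cv.fillPoly(mask,
            np.array([tl, tr, br, bl], dtype=np.int32).reshape(-1, 4, 2),
            255)
mask = cv.dilate(mask, np.ones((19, 19), np.uint8), iterations=2)

detector = cv.ORB_create()  # any cv2 detector with mask support works
keypoints = detector.detect(im_gray, mask=mask)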
Example #8
def get_mapping(lines, features):
    """
    Attempts to map one or more segments of the second map to the first map
    Args:
        lines - a list of dicts containing the line from the first map,
            the properties, the candidate overlapping lines from the new map,
            and nearby segments on the original map because we may want to
            combine two for the purposes of mapping
    """
    print(len(lines))
    result_counts = [0, 0]
    buff = 5

    # keep track of which new segments matched at which size buffer
    buff_match = {}

    new_id = 0
    while buff <= 20:
        print "Looking at buffer " + str(buff)
        for i in range(len(lines)):
            util.track(i, 1000, len(lines))
            if 'matches' in lines[i].keys():
                continue

            matched_candidates = []

            for j, candidate in enumerate(lines[i]['candidates']):
                if 'id' not in lines[i]['candidates'][j][1].keys():
                    lines[i]['candidates'][j][1]['id'] = new_id
                    new_id += 1
                match = True

                for coord in candidate[0].coords:
                    if not Point(coord).within(lines[i]['line'].buffer(buff)):
                        match = False
                if match:
                    matched_candidates.append((candidate, buff))

                    if lines[i]['candidates'][j][1]['id'] \
                       not in buff_match.keys():
                        buff_match[lines[i]['candidates'][j][1]['id']] = buff

            if matched_candidates:
                lines[i]['matches'] = matched_candidates

        buff *= 2

    # Now go through the lines that still aren't matched
    # this time, see if they are a subset of any of their candidates
    for i in range(len(lines)):
        matched_candidates = []
        if 'matches' in lines[i].keys():
            continue
        for j, candidate in enumerate(lines[i]['candidates']):
            if 'id' not in lines[i]['candidates'][j][1].keys():
                lines[i]['candidates'][j][1]['id'] = new_id
                new_id += 1

            match = True
            for coord in lines[i]['line'].coords:
                if not Point(coord).within(candidate[0].buffer(20)):
                    match = False
            if match:
                matched_candidates.append((candidate, 20))
                if lines[i]['candidates'][j][1]['id'] not in buff_match.keys():
                    # record the fixed 20-unit buffer used by this pass
                    # (buff has already grown past 20 in the loop above)
                    buff_match[lines[i]['candidates'][j][1]['id']] = 20
        if matched_candidates:
            lines[i]['matches'] = matched_candidates

    # Remove matches that matched better on a different segment
    # But only if there's a match for that segment already
    for i in range(len(lines)):
        if 'matches' in lines[i].keys():
            matches = lines[i]['matches']
            new_matches = []
            for (m, buff) in matches:
                if buff_match[m[1]['id']] == buff:
                    new_matches.append(m)
            if new_matches:
                lines[i]['matches'] = new_matches
            else:
                # Remove buffer info
                lines[i]['matches'] = [m[0] for m in lines[i]['matches']]

    orig = []
    matched = []
    for i, line in enumerate(lines):
        if 'matches' in line.keys() and line['matches']:
            result_counts[0] += 1

            orig.append((line['line'], line['properties']))

            # Every single match for this line
            add_match_features(line, features)

            # Add each matching line
            # Only used for debugging purposes, take out eventually
            for m in line['matches']:
                matched.append((m[0], m[1]))

        else:
            for f in features:
                line['properties'][f] = 0
            result_counts[1] += 1

    percent_matched = float(result_counts[0]) / float(
        result_counts[0] + result_counts[1]) * 100
    print('Found matches for ' + str(percent_matched) + '% of segments')

    print(result_counts)
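
# A minimal sketch of the matching rule used above: a candidate line
# matches when every one of its coordinates falls inside the buffered
# original line. The geometries here are illustrative.
from shapely.geometry import LineString, Point

original = LineString([(0, 0), (10, 0)])
candidate = LineString([(0, 1), (10, 1)])

buff = 5
match = all(
    Point(coord).within(original.buffer(buff)) for coord in candidate.coords)
print(match)  # True: the candidate stays within 5 units of the original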