def main(): if len(sys.argv) != 2: print "Error!" print "Correct usage is:" print "\tpython neighborhood_point_density.py test_case_id" return test_case_id = int(sys.argv[1]) track_data = "data/beijing_test_road_types/track_%d.dat"%test_case_id center = const.TEST_CENTER[test_case_id-1] sigma_gps_point = 10.0 sigma_neighborhood = 100.0 n_bin = 20 tracks = gps_track.load_tracks(track_data) n_angle_bin = 32 angles, angle_results = angle_histogram(tracks, n_angle_bin) fig = plt.figure(figsize=(16,9)) ax = fig.add_subplot(111) ax.plot(angles, angle_results, '.-') plt.show()
def main(): if len(sys.argv) != 2: print "Error! Correct usage is:" print "\tpython road_probability_estimation.py [input traj file]" return tracks = gps_track.load_tracks(sys.argv[1]) # Rasterization N = 5000 img, display_img = image_array_from_points(tracks, N, [const.RANGE_SW, const.RANGE_NE]) new_img = skimage.filter.gaussian_filter(img, sigma=3) print "new max = ", np.amax(new_img) peaks = skimage.feature.peak_local_max(img, min_distance = 20) fig = plt.figure(figsize=(9,9)) ax = fig.add_subplot(111) ax.imshow(display_img.T, cmap='gray') ax.plot(peaks[:,0], peaks[:,1], '+r') ax.set_xlim([0,N]) ax.set_ylim([0,N]) plt.show()
def main():
    """Visualize the loaded tracks clipped to a fixed bounding box.

    Everything after the first ``return`` is dead code from an earlier
    experiment (directed-point extraction around CENTER_PT plus pickling).
    NOTE(review): CENTER_PT only exists in the commented examples, so the
    dead section would raise NameError if re-enabled as-is.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    bound_box = [(446057, 4423750), (447057, 4424750)]
    gps_track.visualize_tracks(tracks, bound_box=bound_box, style='.')
    return
    #CENTER_PT = (447820, 4423040) # example 1
    #CENTER_PT = (446557, 4424250) #example 2
    #CENTER_PT = (447379, 4422790) #example 3
    #CENTER_PT = (449765, 4424340) #example 4
    BOX_SIZE = 1000
    data = extract_GPS_point_in_region_with_direction(tracks, CENTER_PT, BOX_SIZE)
    fig = plt.figure(figsize=(9, 9))
    ax = fig.add_subplot(111, aspect='equal')
    ax.plot(data[:, 0], data[:, 1], '.', color='gray')
    plt.show()
    with open("test_data/point_collection_with_direction/example_4.dat", "w") as fout:
        cPickle.dump(data, fout, protocol=2)
    return
def main():
    """Extract, preview and save a point cloud around a chosen test region."""
    parser = OptionParser()
    parser.add_option("-t", "--track", dest="track_file",
                      help="GPS track file name",
                      metavar="TRACK_FILE", type="string")
    parser.add_option("-o", "--output", dest="output_filename",
                      help="Output point cloud file name, e.g., output_point_cloud.dat",
                      type="string")
    parser.add_option("--test_case", dest="test_case", type="int",
                      help="Test cases: 0: region-0; 1: region-1; 2: SF-region.",
                      default=0)
    options, args = parser.parse_args()

    if not options.track_file:
        parser.error("Track file not given.")
    if not options.output_filename:
        parser.error("Output pointcloud filename not given.")

    R = const.R
    # Map each supported test-case index to its region center.
    region_locs = {0: const.Region_0_LOC,
                   1: const.Region_1_LOC,
                   2: const.SF_LOC}
    if options.test_case not in region_locs:
        parser.error("Test case indexed %d not supported!" % options.test_case)
    LOC = region_locs[options.test_case]

    tracks = gps_track.load_tracks(options.track_file)
    point_cloud = PointCloud(np.array([]), np.array([]))
    point_cloud.extract_point_cloud(tracks, LOC, R)
    point_cloud.visualize_point_cloud(LOC, R)
    point_cloud.save(options.output_filename)
def main():
    """Show tracks inside a fixed box; the remainder of the body is dead.

    NOTE(review): in the unreachable section CENTER_PT is defined only in
    the commented-out example lines, so re-enabling it would raise NameError.
    """
    all_tracks = gps_track.load_tracks(sys.argv[1])
    box = [(446057, 4423750), (447057, 4424750)]
    gps_track.visualize_tracks(all_tracks, bound_box=box, style='.')
    return
    #CENTER_PT = (447820, 4423040) # example 1
    #CENTER_PT = (446557, 4424250) #example 2
    #CENTER_PT = (447379, 4422790) #example 3
    #CENTER_PT = (449765, 4424340) #example 4
    BOX_SIZE = 1000
    data = extract_GPS_point_in_region_with_direction(all_tracks, CENTER_PT, BOX_SIZE)
    fig = plt.figure(figsize=(9, 9))
    axis = fig.add_subplot(111, aspect='equal')
    axis.plot(data[:, 0], data[:, 1], '.', color='gray')
    plt.show()
    with open("test_data/point_collection_with_direction/example_4.dat", "w") as fout:
        cPickle.dump(data, fout, protocol=2)
    return
def main(): tracks = gps_track.load_tracks(sys.argv[1]) track = tracks[2] segments, seg_from = dp_segmentation([track], 15) print "There are %d segments."%(len(segments)) color_strings = ['b', 'r', 'c', 'm', 'y', 'g'] colors = [] for i in range(0, len(segments)): colors.append(color_strings[i%6]) fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(111, aspect='equal') ax.plot([pt[0] for pt in track.utm], [pt[1] for pt in track.utm], 'k+-') collection = LineCollection(segments, colors=colors, linewidth=3) ax.add_collection(collection) ax.set_xlim([const.SF_small_RANGE_SW[0], const.SF_small_RANGE_NE[0]]) ax.set_ylim([const.SF_small_RANGE_SW[1], const.SF_small_RANGE_NE[1]]) #ax.set_xlim([const.RANGE_SW[0], const.RANGE_NE[0]]) #ax.set_ylim([const.RANGE_SW[1], const.RANGE_NE[1]]) plt.show()
def main(): tracks = gps_track.load_tracks(sys.argv[1]) track = tracks[2] segments, seg_from = dp_segmentation([track], 5) print "There are %d segments."%(len(segments)) color_strings = ['b', 'r', 'c', 'm', 'y', 'g'] colors = [] for i in range(0, len(segments)): colors.append(color_strings[i%6]) fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(111, aspect='equal') ax.plot([pt[0] for pt in track.utm], [pt[1] for pt in track.utm], 'k+-') collection = LineCollection(segments, colors=colors, linewidth=3) ax.add_collection(collection) #ax.set_xlim([const.SF_small_RANGE_SW[0], const.SF_small_RANGE_NE[0]]) #ax.set_ylim([const.SF_small_RANGE_SW[1], const.SF_small_RANGE_NE[1]]) #ax.set_xlim([const.RANGE_SW[0], const.RANGE_NE[0]]) #ax.set_ylim([const.RANGE_SW[1], const.RANGE_NE[1]]) plt.show()
def main(): if len(sys.argv) != 2: print "Error! Correct usage is:" print "\tpython road_probability_estimation.py [input traj file]" return tracks = gps_track.load_tracks(sys.argv[1]) # Rasterization N = 5000 img, display_img = image_array_from_points( tracks, N, [const.RANGE_SW, const.RANGE_NE]) new_img = skimage.filter.gaussian_filter(img, sigma=3) print "new max = ", np.amax(new_img) peaks = skimage.feature.peak_local_max(img, min_distance=20) fig = plt.figure(figsize=(9, 9)) ax = fig.add_subplot(111) ax.imshow(display_img.T, cmap='gray') ax.plot(peaks[:, 0], peaks[:, 1], '+r') ax.set_xlim([0, N]) ax.set_ylim([0, N]) plt.show()
def extract_tracks_from_file(input_filename, center, R, BBOX_WIDTH, BBOX_SW, BBOX_NE, MIN_PT_COUNT): files = [input_filename] extracted_tracks = [] count = 0 for filename in files: print "Now processing ",filename input_tracks = gps_track.load_tracks(filename) for track in input_tracks: # Iterate over its point to_record = False for pt_idx in range(0, len(track.utm)): # Check if the point falls inside the bounding box delta_e = track.utm[pt_idx][0] - center[0] delta_n = track.utm[pt_idx][1] - center[1] dist = math.sqrt(delta_e**2 + delta_n**2) if dist <= R: to_record = True break if not to_record: continue recorded_track = gps_track.Track() is_recording = False for pt_idx in range(0, len(track.utm)): # Check if the point falls inside the bounding box if track.utm[pt_idx][0] >= BBOX_SW[0] and \ track.utm[pt_idx][0] <= BBOX_NE[0] and \ track.utm[pt_idx][1] >= BBOX_SW[1] and \ track.utm[pt_idx][1] <= BBOX_NE[1]: if not is_recording: # Start recording is_recording = True recorded_track.car_id = track.car_id if pt_idx > 0: recorded_track.add_point(track.utm[pt_idx-1]) recorded_track.add_point(track.utm[pt_idx]) else: # Append point recorded_track.add_point(track.utm[pt_idx]) else: # Point is outside the bounding box if is_recording: # Stop recording is_recording = False recorded_track.add_point(track.utm[pt_idx]) if len(recorded_track.utm) >= MIN_PT_COUNT: # Save the recorded track extracted_tracks.append(recorded_track) recorded_track = gps_track.Track() count += 1 return extracted_tracks
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython extract_test_tracks.py [input_track_file] [out_track_file]" return input_tracks = gps_track.load_tracks(sys.argv[1]) output_tracks = gps_track.remove_gaps_in_tracks(input_tracks, 10, 300, 20, (const.SF_RANGE_SW, const.SF_RANGE_NE)) print "There are %d extracted tracks."%len(output_tracks) gps_track.save_tracks(output_tracks, sys.argv[2])
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython extract_test_tracks.py [input_track_file] [out_track_file]" return input_tracks = gps_track.load_tracks(sys.argv[1]) output_tracks = gps_track.remove_gaps_in_tracks( input_tracks, 10, 300, 20, (const.SF_RANGE_SW, const.SF_RANGE_NE)) print "There are %d extracted tracks." % len(output_tracks) gps_track.save_tracks(output_tracks, sys.argv[2])
def main(): parser = OptionParser() parser.add_option("-t","--tracks", dest="track_data", help="Input GPS track filename.", type="string", metavar="Tracks") parser.add_option("-o","--output_dir", dest="output_dir", help="Output directory.", type="string", metavar="Output_dir") parser.add_option("-m", "--mode", dest="output_mode", type="int", help="Output mode: 0: default output mode, which agrees with mapconstructionportal.org trip format; 1: output agrees with James2012 data format.", default=0) (options, args) = parser.parse_args() if not options.track_data: parser.error("No input track data file not found!") if not options.output_dir: parser.error("Output directory is not specified!") if not os.path.exists(options.output_dir): parser.error("Output directory does not exist! Please create it first!") if options.output_mode != 0 and options.output_mode != 1: parser.error("Unsupported output mode. (Output mode has to be 0 or 1)") if options.output_mode == 1: print "WARNING: please check if you are using the correct utm_projector." output_directory = re.sub('\/$', '', options.output_dir) output_directory += "/" tracks = gps_track.load_tracks(options.track_data) # Write to file for i in range(0, len(tracks)): output_filename = output_directory + "trip_%d.txt"%i track = tracks[i] if len(track.utm) <2: continue if options.output_mode == 0: with open(output_filename, "w") as fout: for utm in track.utm: utm_time = utm[2] / 1e6 fout.write("%.2f %.2f %.2f\n"%(utm[0], utm[1], utm_time)) else: with open(output_filename, "w") as fout: pt_id = 0 for utm in track.utm: lon, lat = const.SF_utm_projector(utm[0], utm[1], inverse=True) utm_time = utm[2] / 1e6 if pt_id == 0: fout.write("%d,%.6f,%.6f,%.1f,None,%d\n"%(pt_id, lat, lon, utm_time, pt_id+1)) elif pt_id < len(track.utm) - 1: fout.write("%d,%.6f,%.6f,%.1f,%d,%d\n"%(pt_id, lat, lon, utm_time, pt_id-1, pt_id+1)) else: fout.write("%d,%.6f,%.6f,%.1f,%d,None\n"%(pt_id, lat, lon, utm_time, pt_id-1)) pt_id += 1
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython extract_test_tracks.py [input_track_file] [out_track_file]" return input_tracks = gps_track.load_tracks(sys.argv[1]) output_tracks = gps_track.extract_tracks_by_region( input_tracks, sys.argv[2], (const.SF_small_RANGE_SW, const.SF_small_RANGE_NE)) # Visualization fig = plt.figure(figsize=(16, 16)) ax = fig.add_subplot(111, aspect='equal') for track in output_tracks: ax.plot([pt[0] for pt in track.utm], [pt[1] for pt in track.utm], '.-') plt.show()
def main():
    """Build and persist a PointCloud extracted around the chosen test region."""
    parser = OptionParser()
    parser.add_option("-t", "--track", dest="track_file",
                      help="GPS track file name", metavar="TRACK_FILE",
                      type="string")
    parser.add_option("-o", "--output", dest="output_filename",
                      help="Output point cloud file name, e.g., output_point_cloud.dat",
                      type="string")
    parser.add_option("--test_case", dest="test_case", type="int",
                      help="Test cases: 0: region-0; 1: region-1; 2: SF-region.",
                      default=0)
    (options, args) = parser.parse_args()

    if not options.track_file:
        parser.error("Track file not given.")
    if not options.output_filename:
        parser.error("Output pointcloud filename not given.")

    R = const.R
    # Pick the region center for the requested test case.
    if options.test_case == 0:
        LOC = const.Region_0_LOC
    elif options.test_case == 1:
        LOC = const.Region_1_LOC
    elif options.test_case == 2:
        LOC = const.SF_LOC
    else:
        parser.error("Test case indexed %d not supported!" % options.test_case)

    tracks = gps_track.load_tracks(options.track_file)
    cloud = PointCloud(np.array([]), np.array([]))
    cloud.extract_point_cloud(tracks, LOC, R)
    cloud.visualize_point_cloud(LOC, R)
    cloud.save(options.output_filename)
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython extract_test_tracks.py [input_track_file] [out_track_file]" return input_tracks = gps_track.load_tracks(sys.argv[1]) output_tracks = gps_track.extract_tracks_by_region(input_tracks, sys.argv[2], (const.SF_small_RANGE_SW, const.SF_small_RANGE_NE)) # Visualization fig = plt.figure(figsize=(16,16)) ax = fig.add_subplot(111, aspect='equal') for track in output_tracks: ax.plot([pt[0] for pt in track.utm], [pt[1] for pt in track.utm], '.-' ) plt.show()
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython track_converter.py [input_track_file] [output_directory_name]." return output_directory = sys.argv[2] output_directory = re.sub('\/$', '', output_directory) output_directory += '/' utm_projector = pyproj.Proj(proj='utm', zone=10, south=False, ellps='WGS84') tracks = gps_track.load_tracks(sys.argv[1]) count = 0 loc_id = 0 for track in tracks: output_filename = output_directory + "trip_%d.txt" % (count) f = open(output_filename, 'w') for pt_idx in range(0, len(track.utm)): pt = track.utm[pt_idx] lon, lat = utm_projector(pt[0], pt[1], inverse=True) time = pt[2] / 1e6 prev_id = 'None' next_id = 'None' cur_id = "%d" % loc_id if pt_idx > 0: prev_id = "%d" % (loc_id - 1) if pt_idx < len(track.utm) - 1: next_id = "%d" % (loc_id + 1) f.write("%s,%.6f,%.6f,%.1f,%s,%s\n" % (cur_id, lat, lon, time, prev_id, next_id)) loc_id += 1 f.close() count += 1
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython track_converter.py [input_track_file] [output_directory_name]." return output_directory = sys.argv[2] output_directory = re.sub('\/$', '', output_directory) output_directory += '/' utm_projector = pyproj.Proj(proj='utm', zone=10, south=False, ellps='WGS84') tracks = gps_track.load_tracks(sys.argv[1]) count = 0 loc_id = 0 for track in tracks: output_filename = output_directory + "trip_%d.txt"%(count) f = open(output_filename, 'w') for pt_idx in range(0, len(track.utm)): pt = track.utm[pt_idx] lon, lat = utm_projector(pt[0], pt[1], inverse=True) time = pt[2] / 1e6 prev_id = 'None' next_id = 'None' cur_id = "%d"%loc_id if pt_idx > 0: prev_id = "%d"%(loc_id - 1) if pt_idx < len(track.utm) - 1: next_id = "%d"%(loc_id + 1) f.write("%s,%.6f,%.6f,%.1f,%s,%s\n"%(cur_id, lat, lon, time, prev_id, next_id)) loc_id += 1 f.close() count += 1
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython road_detector.py [input_track.dat]" return tracks = gps_track.load_tracks(sys.argv[1]) tracks1 = gps_track.load_tracks(sys.argv[2]) tracks.extend(tracks1) RANGE_SW = (446000, 4421450) RANGE_NE = (451000, 4426450) point_collection = [] for track in tracks: for pt in track.utm: if pt[0] <= RANGE_NE[0] and pt[0] >= RANGE_SW[0]: if pt[1] <= RANGE_NE[1] and pt[1] >= RANGE_SW[1]: point_collection.append((pt[0], pt[1])) print "There are %d GPS points." % len(point_collection) qtree = scipy.spatial.KDTree(point_collection) print "Quad tree completed." training_loc = [] training_feature = [] count = 0 query_radius = [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500 ] while True: #rand_easting = random.randint(RANGE_SW[0]+500, RANGE_NE[0]-500) #rand_northing = random.randint(RANGE_SW[1]+500, RANGE_NE[1]-500) #query_loc = (rand_easting, rand_northing) while True: ind = random.randint(0, len(point_collection) - 1) query_loc = point_collection[ind] if query_loc[0] <= RANGE_NE[0] - 500 and query_loc[ 0] >= RANGE_SW[0] + 500: if query_loc[1] <= RANGE_NE[1] - 500 and query_loc[ 1] >= RANGE_SW[1] + 500: break training_loc.append(query_loc) print "Query location is: ", query_loc fig = plt.figure(figsize=(16, 16)) ax1 = fig.add_subplot(111, aspect='equal') ax1.plot([p[0] for p in point_collection], [p[1] for p in point_collection], '.', color='gray') ax1.plot(query_loc[0], query_loc[1], 'r+', markersize=12) ax1.set_xlim([RANGE_SW[0], RANGE_NE[0]]) ax1.set_ylim([RANGE_SW[1], RANGE_NE[1]]) hist_result = compute_hist(query_loc, query_radius, qtree) training_feature.append(list(hist_result)) out_filename = "tmp\\" + "%d" % count + ".png" plt.savefig(out_filename) plt.close(fig) count += 1 if count == 100: break with open("training_loc.dat", "wb") as fout: cPickle.dump(training_loc, fout, protocol=2) with open("training_feature.dat", "wb") as fout: cPickle.dump(training_feature, fout, 
protocol=2)
def main():
    """Map construction driver: load a sample point cloud, road segments and
    GPS tracks, attach sample points to their road segments, then build a
    map via segment_graph_to_map().

    NOTE(review): the function returns immediately after
    segment_graph_to_map(); everything below that first ``return`` is
    unreachable experimental code (road-patch generation and segment-graph
    visualization) kept for reference.
    """
    parser = OptionParser()
    parser.add_option("-s", "--sample_point_cloud", dest="sample_point_cloud", help="Input sample point cloud filename", metavar="SAMPLE_POINT_CLOUD", type="string")
    parser.add_option("-r", "--road_segment", dest="road_segment", help="Input road segment filename", metavar="ROAD_SEGMENT", type="string")
    parser.add_option("-t", "--track", dest="tracks", help="Input GPS track file", metavar="TRACK_FILE", type="string")
    parser.add_option("--test_case", dest="test_case", type="int", help="Test cases: 0: region-0; 1: region-1; 2: SF-region.", default=0)
    (options, args) = parser.parse_args()
    if not options.sample_point_cloud:
        parser.error("Input sample_point_cloud filename not found!")
    if not options.road_segment:
        parser.error("Input road segment file not found!")
    if not options.tracks:
        parser.error("Input GPS Track file not specified.")
    R = const.R
    # Pick the region center for the requested test case.
    if options.test_case == 0:
        LOC = const.Region_0_LOC
    elif options.test_case == 1:
        LOC = const.Region_1_LOC
    elif options.test_case == 2:
        LOC = const.SF_LOC
    else:
        parser.error("Test case indexed %d not supported!" % options.test_case)
    # Load the pickled inputs.
    with open(options.sample_point_cloud, 'rb') as fin:
        sample_point_cloud = cPickle.load(fin)
    with open(options.road_segment, 'rb') as fin:
        road_segments = cPickle.load(fin)
    tracks = gps_track.load_tracks(options.tracks)
    # Compute points on road segments: for each segment, collect indices of
    # samples inside its rectangle whose direction agrees with the segment's
    # (within ANGLE_THRESHOLD).
    sample_idx_on_roads = {}
    sample_pt_kdtree = spatial.cKDTree(sample_point_cloud.locations)
    SEARCH_RADIUS = 30.0
    ANGLE_THRESHOLD = np.pi / 6.0
    for seg_idx in range(0, len(road_segments)):
        segment = road_segments[seg_idx]
        sample_idx_on_roads[seg_idx] = set([])
        start_pt = segment.center - segment.half_length * segment.direction
        end_pt = segment.center + segment.half_length * segment.direction
        # Sample the center line densely enough that the ball queries
        # overlap and cover the full rectangle.
        n_pt_to_add = int(1.5 * segment.half_length / SEARCH_RADIUS + 0.5)
        px = np.linspace(start_pt[0], end_pt[0], n_pt_to_add)
        py = np.linspace(start_pt[1], end_pt[1], n_pt_to_add)
        nearby_sample_idxs = []
        for i in range(0, n_pt_to_add):
            pt = np.array([px[i], py[i]])
            tmp_idxs = sample_pt_kdtree.query_ball_point(pt, SEARCH_RADIUS)
            nearby_sample_idxs.extend(tmp_idxs)
        nearby_sample_idxs = set(nearby_sample_idxs)
        for sample_idx in nearby_sample_idxs:
            # Direction filter: keep samples roughly parallel to the segment.
            if np.dot(sample_point_cloud.directions[sample_idx], segment.direction) < np.cos(ANGLE_THRESHOLD):
                continue
            # Lateral filter: keep samples within the segment's half width.
            vec = sample_point_cloud.locations[sample_idx] - segment.center
            if abs(np.dot(vec, segment.norm_dir)) <= segment.half_width:
                sample_idx_on_roads[seg_idx].add(sample_idx)
    segment_graph_to_map(tracks, road_segments, sample_point_cloud, LOC, R)
    return
    # ------------------------------------------------------------------
    # Unreachable from here on (see docstring): road-patch experiment.
    # ------------------------------------------------------------------
    all_road_patches = []
    for selected_track_idx in range(0, 10):
        print selected_track_idx
        road_patches = generate_road_patch_from_track(tracks[selected_track_idx], road_segments, sample_point_cloud, sample_idx_on_roads)
        all_road_patches.extend(road_patches)
    arrow_params = const.arrow_params
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    #ax.plot(sample_point_cloud.locations[:,0],
    #        sample_point_cloud.locations[:,1],
    #        '.', color='gray')
    count = 0
    for road_patch in all_road_patches:
        color = const.colors[count % 7]
        count += 1
        polygon = road_patch.road_polygon()
        # Draw a small direction arrow for each center-line point.
        for i in np.arange(len(road_patch.center_line) - 1):
            if np.linalg.norm(road_patch.directions[i]) < 0.1:
                continue
            ax.arrow(road_patch.center_line[i, 0], road_patch.center_line[i, 1], 10 * road_patch.directions[i, 0], 10 * road_patch.directions[i, 1], width=0.5, head_width=4, fc=color, ec=color, head_length=6, overhang=0.5, **arrow_params)
        patch = PolygonPatch(polygon, facecolor=color, edgecolor=color, alpha=0.5, zorder=0)
        ax.add_patch(patch)
    ax.set_xlim([LOC[0] - R, LOC[0] + R])
    ax.set_ylim([LOC[1] - R, LOC[1] + R])
    plt.show()
    return
    # Second unreachable section: segment-graph visualization.
    track_on_road = project_tracks_to_road(tracks, road_segments)
    compute_segment_graph = False
    if compute_segment_graph:
        segment_graph = track_induced_segment_graph(tracks, road_segments)
        nx.write_gpickle(segment_graph, "test_segment_graph.gpickle")
    else:
        segment_graph = nx.read_gpickle("test_segment_graph.gpickle")
    # Find the node whose outgoing edge 'count' weights sum to the maximum.
    max_node_count = -np.inf
    max_node = -1
    for node in segment_graph.nodes():
        out_edges = segment_graph.out_edges(node)
        sum_val = 0.0
        for edge in out_edges:
            sum_val += segment_graph[edge[0]][edge[1]]['count']
        if sum_val > max_node_count:
            max_node_count = sum_val
            max_node = node
    print "Totally %d edges." % (len(segment_graph.edges()))
    #segment_graph = generate_segment_graph(road_segments)
    arrow_params = const.arrow_params
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    ax.plot(sample_point_cloud.locations[:, 0], sample_point_cloud.locations[:, 1], '.', color='gray')
    #ax.plot([pt[0] for pt in selected_track.utm],
    #        [pt[1] for pt in selected_track.utm], 'r.-')
    #for track_idx in track_on_road[selected_seg_idx]:
    #    ax.plot([pt[0] for pt in tracks[track_idx].utm],
    #            [pt[1] for pt in tracks[track_idx].utm], '.')
    # Draw the max-count segment's rectangle in red with a direction arrow.
    segment = road_segments[max_node]
    p0 = segment.center - segment.half_length * segment.direction + segment.half_width * segment.norm_dir
    p1 = segment.center + segment.half_length * segment.direction + segment.half_width * segment.norm_dir
    p2 = segment.center + segment.half_length * segment.direction - segment.half_width * segment.norm_dir
    p3 = segment.center - segment.half_length * segment.direction - segment.half_width * segment.norm_dir
    ax.plot([p0[0], p1[0]], [p0[1], p1[1]], 'r-')
    ax.plot([p1[0], p2[0]], [p1[1], p2[1]], 'r-')
    ax.plot([p2[0], p3[0]], [p2[1], p3[1]], 'r-')
    ax.plot([p3[0], p0[0]], [p3[1], p0[1]], 'r-')
    arrow_p0 = segment.center - segment.half_length * segment.direction
    ax.arrow(arrow_p0[0], arrow_p0[1], 2 * segment.half_length * segment.direction[0], 2 * segment.half_length * segment.direction[1], width=4, head_width=20, fc='r', ec='r', head_length=40, overhang=0.5, **arrow_params)
    # Draw each successor segment in blue.
    for seg_idx in segment_graph.successors(max_node):
        segment = road_segments[seg_idx]
        p0 = segment.center - segment.half_length * segment.direction + segment.half_width * segment.norm_dir
        p1 = segment.center + segment.half_length * segment.direction + segment.half_width * segment.norm_dir
        p2 = segment.center + segment.half_length * segment.direction - segment.half_width * segment.norm_dir
        p3 = segment.center - segment.half_length * segment.direction - segment.half_width * segment.norm_dir
        ax.plot([p0[0], p1[0]], [p0[1], p1[1]], 'b-')
        ax.plot([p1[0], p2[0]], [p1[1], p2[1]], 'b-')
        ax.plot([p2[0], p3[0]], [p2[1], p3[1]], 'b-')
        ax.plot([p3[0], p0[0]], [p3[1], p0[1]], 'b-')
        arrow_p0 = segment.center - segment.half_length * segment.direction
        ax.arrow(arrow_p0[0], arrow_p0[1], 2 * segment.half_length * segment.direction[0], 2 * segment.half_length * segment.direction[1], width=4, head_width=20, fc='b', ec='b', head_length=40, overhang=0.5, **arrow_params)
    # (A further commented-out nearby-segment arrow-plotting experiment was
    # condensed here for readability.)
    ax.set_xlim([LOC[0] - R, LOC[0] + R])
    ax.set_ylim([LOC[1] - R, LOC[1] + R])
    plt.show()
    return
def main():
    """Spectral clustering experiment on GPS points inside a 500 m window.

    Builds a Gaussian similarity matrix over the points (with a large bonus
    for consecutive points of the same track), row-normalizes it into a
    Markov/transition matrix, embeds the points with powered leading
    eigenvectors (diffusion-map style), and clusters the embedding with
    k-means, plotting the clusters at the original locations.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    #gps_track.visualize_tracks(tracks, style='k.')
    #return
    window_size = 250
    window_center = (447240, 4424780)
    window_SW = (window_center[0] - window_size, window_center[1] - window_size)
    window_NE = (window_center[0] + window_size, window_center[1] + window_size)
    # Extract GPS points in window
    GPS_points = []
    point_idxs = []  # (track_idx, pt_idx) provenance of each kept point
    for track_idx in range(0, len(tracks)):
        track = tracks[track_idx]
        for pt_idx in range(0, len(track.utm)):
            pt = (track.utm[pt_idx][0], track.utm[pt_idx][1])
            if pt[0]>=window_SW[0] and pt[0]<=window_NE[0] and \
               pt[1]>=window_SW[1] and pt[1]<=window_NE[1]:
                GPS_points.append(pt)
                point_idxs.append((track_idx, pt_idx))
    print "In total %d points" % len(GPS_points)
    GPS_points = np.array(GPS_points)
    n_points = len(GPS_points)
    # Compute similarity matrix
    L = np.zeros((n_points, n_points))
    in_track_bonus = 100  # shrinks effective distance for in-track neighbors
    dist_sigma = 20 # in meters
    for i in range(0, n_points):
        L[i, i] = 1.0
        track_i_idxs = point_idxs[i]
        for j in range(0, n_points):
            # NOTE(review): no `continue` here, so the j == i entry is also
            # recomputed by the general formula below (still yields 1.0).
            if i == j:
                L[i, i] = 1.0
            track_j_idxs = point_idxs[j]
            bonus = 1.0
            # NOTE(review): the bonus test is asymmetric (j - i <= 1 rather
            # than abs(j - i) <= 1), so L is not symmetric -- confirm intended.
            if track_i_idxs[0] == track_j_idxs[0]:
                if track_j_idxs[1] - track_i_idxs[1] <= 1:
                    bonus *= in_track_bonus
            dist = np.linalg.norm(GPS_points[i] - GPS_points[j]) / bonus
            L[i, j] = np.exp(-1 * dist * dist / 2.0 / dist_sigma / dist_sigma)
            if L[i, j] < 0.01:
                L[i, j] = 0.0  # sparsify negligible similarities
    # Row-normalize into a random-walk transition matrix M.
    M = np.array(L, copy=True)
    for i in range(0, L.shape[0]):
        M[i, :] /= sum(M[i, :])
    # (A large commented-out synthetic two-blob dataset used to debug the
    # embedding was condensed here for readability.)
    # Compute Eigen Vectors of M
    print "Start eigen."
    S = np.array(M, copy=True)
    eigs, v = np.linalg.eig(S)
    # Sanity check: residual of the eigen-equation for the second pair.
    print "test = ", np.linalg.norm(np.dot(M, v[:, 1]) - eigs[1] * v[:, 1])
    sorted_idxs = np.argsort(eigs)[::-1]  # eigenvalue indices, largest first
    s = 1
    index = sorted_idxs[s]
    # Compute New embedding: Y[i, s] = eig_s^t * v[i, s] over the top-k
    # eigenvectors (powered eigenvalues damp the less-significant modes).
    k = 200
    t = 10
    Y = np.zeros((n_points, k))
    print "New projection."
    for i in range(0, n_points):
        for s in range(0, k):
            Y[i, s] = (eigs[sorted_idxs[s]]**t) * v[i, sorted_idxs[s]]
    # Clustering
    print "Start clustering."
    kmeans = KMeans(init='k-means++', n_clusters=8, n_init=10)
    kmeans.fit(Y)
    labels = kmeans.labels_
    cluster_centers = kmeans.cluster_centers_
    unique_labels = np.unique(labels)
    print unique_labels
    # (A commented-out DBSCAN alternative was condensed here.)
    # Plot each cluster in its own color at the original point locations.
    colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    for k, col in zip(unique_labels, colors):
        my_members = labels == k
        ax.plot(GPS_points[my_members, 0], GPS_points[my_members, 1], '.', color=col, markersize=10)
    plt.show()
def main():
    """Visualize GPS tracks; dead code below experiments with lifting points
    near the window center onto a 3-D Gaussian "bump".

    NOTE(review): the function returns right after visualize_tracks(); all
    deformation/plot code after the first ``return`` is unreachable and kept
    for reference.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    window_size = 250.0
    window_center = (447217, 4424780)
    window_SW = (window_center[0]-window_size, window_center[1]-window_size)
    window_NE = (window_center[0]+window_size, window_center[1]+window_size)
    #gps_track.visualize_tracks(tracks, bound_box=[window_SW, window_NE], style='k.')
    gps_track.visualize_tracks(tracks, style='k.')
    return
    # ---------------- unreachable experimental code ----------------
    # Extract GPS points in window
    GPS_points = []
    point_idxs = []
    for track_idx in range(0, len(tracks)):
        track = tracks[track_idx]
        for pt_idx in range(0, len(track.utm)):
            pt = (track.utm[pt_idx][0], track.utm[pt_idx][1])
            if pt[0]>=window_SW[0] and pt[0]<=window_NE[0] and \
               pt[1]>=window_SW[1] and pt[1]<=window_NE[1]:
                # Re-center coordinates so the window center is the origin.
                new_pt = (pt[0]-window_center[0], pt[1]-window_center[1])
                GPS_points.append(new_pt)
                point_idxs.append((track_idx, pt_idx))
    print "In total %d points"%len(GPS_points)
    GPS_points = np.array(GPS_points)
    # Bump parameters: t scales the radial stretch, h the lift height,
    # sigma the bump's Gaussian width.
    t = 5.0
    h = 10.0
    sigma = 10.0
    deformed_points = []
    for pt in GPS_points:
        r_sum = pt[0]**2 + pt[1]**2
        ratio = np.exp(-1.0*r_sum/2.0/sigma/sigma)
        new_e = pt[0]*(1 + t*ratio)
        new_n = pt[1]*(1 + t*ratio)
        new_z = h*ratio
        deformed_points.append((new_e, new_n, new_z))
    deformed_points = np.array(deformed_points)
    # Wireframe of the deformed plane (z offset slightly below the points).
    N = 100
    x = np.linspace(-window_size, window_size, N)
    y = np.linspace(-window_size, window_size, N)
    #xx, yy = np.meshgrid(x, y)
    xx = np.zeros((N,N))
    yy = np.zeros((N,N))
    zz = np.zeros((N,N))
    for i in np.arange(N):
        for j in np.arange(N):
            r_sum = x[i]**2 + y[j]**2
            ratio = np.exp(-1*r_sum/2/sigma/sigma)
            xx[i,j] = x[i]*(1 + t*ratio)
            yy[i,j] = y[j]*(1 + t*ratio)
            zz[i,j] = h*ratio - 0.3
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_wireframe(xx, yy, zz, color='gray')
    ax.scatter(deformed_points[:,0], deformed_points[:,1], deformed_points[:,2], 'r')
    plt.show()
    return
    # Second dead section: standalone surface demo of the same bump shape.
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, projection='3d')
    N = 50
    x = np.linspace(-5.0, 5.0, N)
    y = np.linspace(-5.0, 5.0, N)
    #xx, yy = np.meshgrid(x, y)
    xx = np.zeros((N,N))
    yy = np.zeros((N,N))
    zz = np.zeros((N,N))
    sigma = 1.0
    h = 5.0
    t = 5.0
    for i in np.arange(N):
        for j in np.arange(N):
            r_sum = x[i]**2 + y[j]**2
            ratio = np.exp(-1*r_sum/2/sigma/sigma)
            xx[i,j] = x[i]*(1 + t*ratio)
            yy[i,j] = y[j]*(1 + t*ratio)
            zz[i,j] = h*ratio
    ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap=cm.jet, linewidth=0, antialiased=False)
    plt.show()
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython road_detector.py [input_track.dat]" return tracks = gps_track.load_tracks(sys.argv[1]) tracks1 = gps_track.load_tracks(sys.argv[2]) tracks.extend(tracks1) RANGE_SW = (446000, 4421450) RANGE_NE = (451000, 4426450) point_collection = [] for track in tracks: for pt in track.utm: if pt[0] <= RANGE_NE[0] and pt[0] >= RANGE_SW[0]: if pt[1] <= RANGE_NE[1] and pt[1] >= RANGE_SW[1]: point_collection.append((pt[0], pt[1])) print "There are %d GPS points."%len(point_collection) qtree = scipy.spatial.KDTree(point_collection) print "Quad tree completed." training_loc = [] training_feature = [] count = 0 query_radius = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500] while True: #rand_easting = random.randint(RANGE_SW[0]+500, RANGE_NE[0]-500) #rand_northing = random.randint(RANGE_SW[1]+500, RANGE_NE[1]-500) #query_loc = (rand_easting, rand_northing) while True: ind = random.randint(0, len(point_collection)-1) query_loc = point_collection[ind] if query_loc[0] <= RANGE_NE[0]-500 and query_loc[0] >= RANGE_SW[0]+500: if query_loc[1] <= RANGE_NE[1]-500 and query_loc[1] >= RANGE_SW[1]+500: break training_loc.append(query_loc) print "Query location is: ", query_loc fig = plt.figure(figsize=(16,16)) ax1 = fig.add_subplot(111, aspect='equal') ax1.plot([p[0] for p in point_collection], [p[1] for p in point_collection], '.', color='gray') ax1.plot(query_loc[0], query_loc[1], 'r+', markersize=12) ax1.set_xlim([RANGE_SW[0], RANGE_NE[0]]) ax1.set_ylim([RANGE_SW[1], RANGE_NE[1]]) hist_result = compute_hist(query_loc, query_radius, qtree) training_feature.append(list(hist_result)) out_filename = "tmp\\"+"%d"%count + ".png" plt.savefig(out_filename) plt.close(fig) count += 1 if count == 100: break with open("training_loc.dat", "wb") as fout: cPickle.dump(training_loc, fout, protocol=2) with open("training_feature.dat", "wb") as fout: cPickle.dump(training_feature, fout, protocol=2)
def main(): parser = OptionParser() parser.add_option("-t", "--tracks", dest="track_data", help="Input GPS track filename.", type="string", metavar="Tracks") parser.add_option("-o", "--output_dir", dest="output_dir", help="Output directory.", type="string", metavar="Output_dir") parser.add_option( "-m", "--mode", dest="output_mode", type="int", help= "Output mode: 0: default output mode, which agrees with mapconstructionportal.org trip format; 1: output agrees with James2012 data format.", default=0) (options, args) = parser.parse_args() if not options.track_data: parser.error("No input track data file not found!") if not options.output_dir: parser.error("Output directory is not specified!") if not os.path.exists(options.output_dir): parser.error( "Output directory does not exist! Please create it first!") if options.output_mode != 0 and options.output_mode != 1: parser.error("Unsupported output mode. (Output mode has to be 0 or 1)") if options.output_mode == 1: print "WARNING: please check if you are using the correct utm_projector." 
output_directory = re.sub('\/$', '', options.output_dir) output_directory += "/" tracks = gps_track.load_tracks(options.track_data) # Write to file for i in range(0, len(tracks)): output_filename = output_directory + "trip_%d.txt" % i track = tracks[i] if len(track.utm) < 2: continue if options.output_mode == 0: with open(output_filename, "w") as fout: for utm in track.utm: utm_time = utm[2] / 1e6 fout.write("%.2f %.2f %.2f\n" % (utm[0], utm[1], utm_time)) else: with open(output_filename, "w") as fout: pt_id = 0 for utm in track.utm: lon, lat = const.SF_utm_projector(utm[0], utm[1], inverse=True) utm_time = utm[2] / 1e6 if pt_id == 0: fout.write("%d,%.6f,%.6f,%.1f,None,%d\n" % (pt_id, lat, lon, utm_time, pt_id + 1)) elif pt_id < len(track.utm) - 1: fout.write( "%d,%.6f,%.6f,%.1f,%d,%d\n" % (pt_id, lat, lon, utm_time, pt_id - 1, pt_id + 1)) else: fout.write("%d,%.6f,%.6f,%.1f,%d,None\n" % (pt_id, lat, lon, utm_time, pt_id - 1)) pt_id += 1
def main(): if len(sys.argv) != 2: print "Error! Correct usage is:" print "\tpython ordered_statistics.py [input_tracks]" return tracks = gps_track.load_tracks(sys.argv[1]) GRID_SIZE = 1000 MAX_ORDER = 6 TIME_STEP = 30 # Zero-th order rastered_samples = np.zeros((GRID_SIZE, GRID_SIZE)) for track in tracks: for pt in track.utm: (i,j) = point_to_ij(pt, (const.RANGE_SW, const.RANGE_NE), GRID_SIZE) rastered_samples[i,j] += 1 display_rastered_samples = np.log10(rastered_samples) feature = [] for order in range(1, MAX_ORDER+1): print "Now in order ", order TIME_THRES_HIGH = TIME_STEP * order TIME_THRES_LOW = TIME_STEP * (order - 1) this_order_feature = {} for track in tracks: for pt_ind in range(0, len(track.utm)): nxt_pt_ind = pt_ind + 1 while nxt_pt_ind < len(track.utm): # Check time diff time_diff = track.utm[nxt_pt_ind][2] - track.utm[pt_ind][2] if time_diff <= TIME_THRES_HIGH: if time_diff >= TIME_THRES_LOW: (from_i, from_j) = point_to_ij(track.utm[pt_ind], (const.RANGE_SW, const.RANGE_NE), GRID_SIZE) (to_i, to_j) = point_to_ij(track.utm[nxt_pt_ind], (const.RANGE_SW, const.RANGE_NE), GRID_SIZE) key = "%d,%d,%d,%d"%(from_i, from_j, to_i, to_j) if this_order_feature.has_key(key): this_order_feature[key] += 1 else: this_order_feature[key] = 1 else: break nxt_pt_ind += 1 feature.append(this_order_feature) print "Calculation completed." 
# Pick a point WINDOW_SIZE = 40 while True: ind_i = random.randint(WINDOW_SIZE, GRID_SIZE-WINDOW_SIZE) ind_j = random.randint(WINDOW_SIZE, GRID_SIZE-WINDOW_SIZE) if rastered_samples[ind_i, ind_j] >= 20: break point_feature = [] key_prefix = "%d,%d,"%(ind_i, ind_j) key_postfix = ",%d,%d"%(ind_i, ind_j) for order in range(0, MAX_ORDER): f = feature[order] this_point_feature = np.zeros((2*WINDOW_SIZE+1, 2*WINDOW_SIZE+1)) for key in f.keys(): if key.startswith(key_prefix): ind_str = key.split(',') to_i = int(ind_str[2]) - ind_i + WINDOW_SIZE to_j = int(ind_str[3]) - ind_j + WINDOW_SIZE if to_i == WINDOW_SIZE and to_j == WINDOW_SIZE: continue if to_i >=0 and to_i < 2*WINDOW_SIZE+1 and\ to_j >= 0 and to_j < 2*WINDOW_SIZE+1: this_point_feature[to_i, to_j] += 1 if key.endswith(key_postfix): ind_str = key.split(',') from_i = int(ind_str[2]) - ind_i + WINDOW_SIZE from_j = int(ind_str[3]) - ind_j + WINDOW_SIZE if from_i == WINDOW_SIZE and from_j == WINDOW_SIZE: continue if from_i >=0 and from_i < 2*WINDOW_SIZE+1 and\ from_j >= 0 and from_j < 2*WINDOW_SIZE+1: this_point_feature[from_i, from_j] += 1 point_feature.append(this_point_feature) # Visualization # fig = plt.figure() # ax = fig.add_subplot(231, aspect='equal') # ax.imshow(display_rastered_samples) # ax.plot(ind_j, ind_i, 'r+', markersize=12) # ax.set_xlim([0,GRID_SIZE]) # ax.set_ylim([0,GRID_SIZE]) # # ax.set_xlim([ind_j-WINDOW_SIZE,ind_j+WINDOW_SIZE]) # # ax.set_ylim([ind_i-WINDOW_SIZE,ind_i+WINDOW_SIZE]) # # ax = fig.add_subplot(232, aspect='equal') # ax.imshow(point_feature[0]) # ax.set_xlim([0,2*WINDOW_SIZE]) # ax.set_ylim([0,2*WINDOW_SIZE]) # # ax = fig.add_subplot(233, aspect='equal') # ax.imshow(point_feature[1]) # ax.set_xlim([0,2*WINDOW_SIZE]) # ax.set_ylim([0,2*WINDOW_SIZE]) # # ax = fig.add_subplot(234, aspect='equal') # ax.imshow(point_feature[2]) # ax.set_xlim([0,2*WINDOW_SIZE]) # ax.set_ylim([0,2*WINDOW_SIZE]) # # ax = fig.add_subplot(235, aspect='equal') # ax.imshow(point_feature[3]) # 
ax.set_xlim([0,2*WINDOW_SIZE]) # ax.set_ylim([0,2*WINDOW_SIZE]) # # ax = fig.add_subplot(236, aspect='equal') # ax.imshow(point_feature[4]) # ax.set_xlim([0,2*WINDOW_SIZE]) # ax.set_ylim([0,2*WINDOW_SIZE]) # Visualization fig = plt.figure(figsize=(32,16)) ax = fig.add_subplot(231, aspect='equal') ax.imshow(display_rastered_samples) ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) PLOT_THRES = 2 ax = fig.add_subplot(232, aspect='equal') count = 0 x_ind = [] y_ind = [] for key in feature[0]: if feature[0][key] < PLOT_THRES: continue count += 1 # if count == 1000: # break ind_str = key.split(',') from_i = int(ind_str[0]) from_j = int(ind_str[1]) to_i = int(ind_str[2]) to_j = int(ind_str[3]) x_ind.append(from_i) x_ind.append(to_i) x_ind.append(None) y_ind.append(from_j) y_ind.append(to_j) y_ind.append(None) ax.plot(y_ind, x_ind, 'r-') ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) print "order 0, count = ",count ax = fig.add_subplot(233, aspect='equal') ax.imshow(rastered_samples, cmap=CM.gray_r) x_ind = [] y_ind = [] count = 0 for key in feature[1]: if feature[1][key] < PLOT_THRES: continue count += 1 ind_str = key.split(',') from_i = int(ind_str[0]) from_j = int(ind_str[1]) to_i = int(ind_str[2]) to_j = int(ind_str[3]) x_ind.append(from_i) x_ind.append(to_i) x_ind.append(None) y_ind.append(from_j) y_ind.append(to_j) y_ind.append(None) ax.plot(y_ind, x_ind, 'r-') ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) print "order 1, count = ",count ax = fig.add_subplot(234, aspect='equal') ax.imshow(rastered_samples, cmap=CM.gray_r) x_ind = [] y_ind = [] count = 0 for key in feature[2]: if feature[2][key] < PLOT_THRES: continue count += 1 ind_str = key.split(',') from_i = int(ind_str[0]) from_j = int(ind_str[1]) to_i = int(ind_str[2]) to_j = int(ind_str[3]) x_ind.append(from_i) x_ind.append(to_i) x_ind.append(None) y_ind.append(from_j) y_ind.append(to_j) y_ind.append(None) ax.plot(y_ind, x_ind, 'r-') ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) 
print "order 2, count = ",count ax = fig.add_subplot(235, aspect='equal') ax.imshow(rastered_samples, cmap=CM.gray_r) x_ind = [] y_ind = [] count = 0 for key in feature[3]: if feature[3][key] < PLOT_THRES: continue count += 1 ind_str = key.split(',') from_i = int(ind_str[0]) from_j = int(ind_str[1]) to_i = int(ind_str[2]) to_j = int(ind_str[3]) x_ind.append(from_i) x_ind.append(to_i) x_ind.append(None) y_ind.append(from_j) y_ind.append(to_j) y_ind.append(None) ax.plot(y_ind, x_ind, 'r-') ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) print "order 3, count = ",count ax = fig.add_subplot(236, aspect='equal') ax.imshow(rastered_samples, cmap=CM.gray_r) x_ind = [] y_ind = [] count = 0 for key in feature[4]: if feature[4][key] < PLOT_THRES: continue count += 1 ind_str = key.split(',') from_i = int(ind_str[0]) from_j = int(ind_str[1]) to_i = int(ind_str[2]) to_j = int(ind_str[3]) x_ind.append(from_i) x_ind.append(to_i) x_ind.append(None) y_ind.append(from_j) y_ind.append(to_j) y_ind.append(None) ax.plot(y_ind, x_ind, 'r-') ax.set_xlim([0,GRID_SIZE]) ax.set_ylim([0,GRID_SIZE]) print "order 4, count = ",count plt.show()
def main(): if len(sys.argv) != 3: print "Error! Correct usage is:" print "\tpython extract_tracks_in_region.py [input_directory] [out_track_file]" return # Index for the test region, 0..9 index = 4 BBOX_SW = const.BB_SW[index] BBOX_NE = const.BB_NE[index] input_directory = re.sub('\/$', '', sys.argv[1]) input_directory += '/' files = glob.glob(input_directory + '*.dat') if len(files) == 0: print "Error! Empty input directory: %s" % input_directory extracted_tracks = [] count = 0 MIN_PT_COUNT = 4 for filename in files: print "Now processing ", filename input_tracks = gps_track.load_tracks(filename) for track in input_tracks: # Iterate over its point is_recording = False recorded_track = gps_track.Track() for pt_idx in range(0, len(track.utm)): # Check if the point falls inside the bounding box if track.utm[pt_idx][0] >= BBOX_SW[0] and \ track.utm[pt_idx][0] <= BBOX_NE[0] and \ track.utm[pt_idx][1] >= BBOX_SW[1] and \ track.utm[pt_idx][1] <= BBOX_NE[1]: if not is_recording: # Start recording is_recording = True recorded_track.car_id = track.car_id if pt_idx > 0: recorded_track.add_point(track.utm[pt_idx - 1]) recorded_track.add_point(track.utm[pt_idx]) else: # Append point recorded_track.add_point(track.utm[pt_idx]) else: # Point is outside the bounding box if is_recording: # Stop recording is_recording = False recorded_track.add_point(track.utm[pt_idx]) if len(recorded_track.utm) >= MIN_PT_COUNT: # Save the recorded track extracted_tracks.append(recorded_track) recorded_track = gps_track.Track() count += 1 if count == 4: break # Visualize extracted GPS tracks print "%d tracks extracted" % len(extracted_tracks) gps_track.visualize_tracks(extracted_tracks, bound_box=[BBOX_SW, BBOX_NE], style='.') gps_track.save_tracks(extracted_tracks, sys.argv[2]) return input_tracks = gps_track.load_tracks(sys.argv[1]) output_tracks = gps_track.extract_tracks_by_region( input_tracks, sys.argv[2], (const.SF_small_RANGE_SW, const.SF_small_RANGE_NE)) # Visualization fig = 
plt.figure(figsize=(16, 16)) ax = fig.add_subplot(111, aspect='equal') for track in output_tracks: ax.plot([pt[0] for pt in track.utm], [pt[1] for pt in track.utm], '.-') plt.show()
def main():
    """Road-structure experiments on rasterized GPS tracks.

    Only the first stage runs: rasterize tracks, detect straight lines with a
    probabilistic Hough transform and plot them.  The later stages
    (line-intersection peaks, HOG patches around peaks, shortest-path graph
    reinforcement) are all dead code behind unconditional `return`s --
    leftovers from earlier experiments, kept for reference.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    # Write 3D for Yangyan: skeleton
    #delta_easting = const.RANGE_NE[0] - const.RANGE_SW[0]
    #delta_northing = const.RANGE_NE[1] - const.RANGE_SW[1]
    #f = open("test_region_3D.txt", "w")
    #for track in tracks:
    #    for pt in track.utm:
    #        #if pt[0]<=const.RANGE_SW[0]+3000 and pt[0]>=const.RANGE_SW[0]+2000:
    #        #    if pt[1]<=const.RANGE_SW[1]+3000 and pt[1]>=const.RANGE_SW[1]+2000:
    #        pe = (pt[0] - const.RANGE_SW[0]) / delta_easting * 10
    #        pn = (pt[1] - const.RANGE_SW[1]) / delta_northing * 10
    #        f.write("%.6f %.6f %.6f\n"%(pe,pn,0.01*np.random.rand()))
    #f.close()
    #return
    N_ROW = 1000  # Divide northing
    N_COL = 1000  # Divide southing
    """ test case index: 0: Beijing 1: SF small """
    test_case = 0
    if test_case == 0:
        rasterized_tracks, point_count_array, track_indexing_hash =\
            rasterize_tracks(tracks, [const.RANGE_SW, const.RANGE_NE],
                             (N_ROW, N_COL))
    else:
        rasterized_tracks, point_count_array, track_indexing_hash =\
            rasterize_tracks(tracks,
                             [const.SF_small_RANGE_SW, const.SF_small_RANGE_NE],
                             (N_ROW, N_COL))
    # point_count_array is sparse; densify for image operations.
    dense_array = np.array(point_count_array.todense())
    lines = probabilistic_hough_line(dense_array, line_length=50)
    print len(lines)
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    ax.imshow(dense_array>0, cmap='gray')
    for line in lines:
        ax.plot([line[0][0],line[1][0]], [line[0][1],line[1][1]],'-r')
    ax.set_xlim([0, N_ROW])
    ax.set_ylim([N_COL, 0])  # flipped y so image and plot coordinates agree
    plt.show()
    return
    # ---- Dead code below: intersection peaks of non-parallel Hough lines ----
    intersections = []
    angle_threshold = 0.71  # |cos| below this => segments treated as crossing
    for line_i in range(0, len(lines)):
        for line_j in range(line_i+1, len(lines)):
            line1 = lines[line_i]
            line2 = lines[line_j]
            vec1 = 1.0*np.array([line1[1][0]-line1[0][0],
                                 line1[1][1]-line1[0][1]])
            vec2 = 1.0*np.array([line2[1][0]-line2[0][0],
                                 line2[1][1]-line2[0][1]])
            vec1 /= np.linalg.norm(vec1)
            vec2 /= np.linalg.norm(vec2)
            if abs(np.dot(vec1, vec2)) < angle_threshold:
                pc = line_segment_intersection(line1, line2)
                if pc[0] != np.inf:  # inf marks "no intersection"
                    intersections.append(pc)
    new_img = np.zeros((N_ROW, N_COL))
    for itr in intersections:
        new_img[itr[0],itr[1]] += 1
    peaks = corner_peaks(new_img, min_distance=20)
    ax = fig.add_subplot(122, aspect='equal')  # NOTE(review): reuses the fig
    ax.imshow(dense_array>0, cmap='gray')      # already shown above
    for peak in peaks:
        ax.plot(peak[0], peak[1], '.r', markersize=12)
    ax.set_xlim([0, N_ROW])
    ax.set_ylim([N_COL, 0])
    plt.show()
    return
    # ---- Dead code: HOG features of windows around intersection peaks ----
    window_size = 40
    i = 0
    img_data = []
    hog_features = []
    hog_imgs = []
    for peak in peaks:
        # Skip peaks whose window would fall off the raster.
        if peak[0]-window_size<0 or peak[0]+window_size>N_ROW or\
           peak[1]-window_size<0 or peak[1]+window_size>N_COL:
            continue
        fig = plt.figure(figsize=const.figsize)
        ax = fig.add_subplot(111, aspect='equal')
        ax.imshow(dense_array>0, cmap='gray')
        ax.plot(peak[0], peak[1], 'rx', markersize=12)
        ax.set_xlim([peak[0]-window_size, peak[0]+window_size])
        ax.set_ylim([peak[1]+window_size, peak[1]-window_size])
        plt.savefig("test_fig/fig_%d.png"%i)
        plt.close()
        new_img = dense_array[(peak[1]-window_size):(peak[1]+window_size), \
                              (peak[0]-window_size):(peak[0]+window_size)]
        img_data.append(new_img)
        hog_array, hog_image = hog(np.array(new_img>0),
                                   pixels_per_cell=(4, 4), visualise=True)
        hog_features.append(hog_array)
        hog_imgs.append(hog_image)
        i += 1
    with open("test_fig/img_data.dat", "wb") as fout:
        cPickle.dump(img_data, fout, protocol=2)
    with open("test_fig/hog_features.dat", 'wb') as fout:
        cPickle.dump(hog_features, fout, protocol=2)
    with open("test_fig/hog_imgs.dat", 'wb') as fout:
        cPickle.dump(hog_imgs, fout, protocol=2)
    return
    # ---- Dead code: graph over occupied cells, reinforced by track paths ----
    colors = cycle('bgrcmybgrcmybgrcmykbgrcmy')  # NOTE(review): unused
    #ax.imshow(dense_array>0, cmap='gray')
    window_size = 80
    chosen_ij = (752, 814)
    #chosen_ij = (370, 408)
    search_range = 4  # cell neighborhood radius for graph edges
    G = nx.Graph()
    active_track_idxs = {}
    for i in range(chosen_ij[1]-window_size, chosen_ij[1]+window_size):
        for j in range(chosen_ij[0]-window_size, chosen_ij[0]+window_size):
            if dense_array[i,j] > 0:
                # Add graph nodes
                G.add_node((i,j))
                # Add edge to nearby nodes
                for k in range(i-search_range, i+search_range+1):
                    if k < chosen_ij[1]-window_size or k >= chosen_ij[1]+window_size:
                        continue
                    for l in range(j-search_range, j+search_range+1):
                        if l < chosen_ij[0]-window_size or l >= chosen_ij[0]+window_size:
                            continue
                        if dense_array[k,l] > 0:
                            G.add_edge((i,j),(k,l),{'w':1.0})
                for idx in track_indexing_hash[(i,j)].keys():
                    active_track_idxs[idx] = 1
    ax.set_xlim([chosen_ij[0]-window_size, chosen_ij[0]+window_size])
    ax.set_ylim([chosen_ij[1]-window_size, chosen_ij[1]+window_size])
    # Iterate over active tracks to add constraints
    to_break = False
    count = 0
    preserved_edges = {}
    for idx in active_track_idxs.keys():
        # Iterate over it's nodes
        for loc_idx in range(0, len(rasterized_tracks[idx])-1):
            cur_loc = rasterized_tracks[idx][loc_idx]
            nxt_loc = rasterized_tracks[idx][loc_idx+1]
            # Only constrain pairs far enough apart to span several cells.
            if abs(cur_loc[0]-nxt_loc[0])+abs(cur_loc[1]-nxt_loc[1])<2*search_range:
                continue
            cur_loc_ok = False
            nxt_loc_ok = False
            if cur_loc[0]>=chosen_ij[1]-window_size and cur_loc[0]<chosen_ij[1]+window_size:
                if cur_loc[1]>=chosen_ij[0]-window_size and\
                   cur_loc[1]<chosen_ij[0]+window_size:
                    cur_loc_ok = True
            if nxt_loc[0]>=chosen_ij[1]-window_size and nxt_loc[0]<chosen_ij[1]+window_size:
                if nxt_loc[1]>=chosen_ij[0]-window_size and\
                   nxt_loc[1]<chosen_ij[0]+window_size:
                    nxt_loc_ok = True
            if cur_loc_ok and nxt_loc_ok:
                can_be_connected = nx.algorithms.has_path(G, cur_loc, nxt_loc)
                if can_be_connected:
                    count += 1
                    path = nx.shortest_path(G, source=cur_loc,
                                            target=nxt_loc, weight='w')
                    # Record edge
                    for node_idx in range(0,len(path)-1):
                        edge = (path[node_idx],path[node_idx+1])
                        preserved_edges[edge] = 1
                        # Reduce edge weight so future shortest paths prefer
                        # already-used edges.
                        G[path[node_idx]][path[node_idx+1]]['w'] /= 1.05
                    #ax.plot([cur_loc[1],nxt_loc[1]],
                    #        [cur_loc[0],nxt_loc[0]],
                    #        'rx-')
        if to_break:
            break
    print "Count = ",count
    for edge in preserved_edges.keys():
        ax.plot([edge[0][1],edge[1][1]], [edge[0][0],edge[1][0]],'-r')
    plt.show()
    return
    # ---- Dead code: show one binary window of the raster ----
    chosen_i = np.random.randint(window_size, N_ROW-window_size)  # unused
    chosen_j = np.random.randint(window_size, N_COL-window_size)  # unused
    test_image = np.array(dense_array[chosen_ij[1]-window_size:chosen_ij[1]+window_size,\
                                      chosen_ij[0]-window_size:chosen_ij[0]+window_size] > 0)
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    ax.imshow(test_image>0, cmap='gray')
    plt.show()
def main():
    """Visualize GPS tracks; the active code only calls
    gps_track.visualize_tracks and returns.

    Everything after the first `return` is dead experiment code that lifts
    the GPS points around a window center onto a 3-D Gaussian bump
    ("deformation") and renders them against a deformed wireframe grid.
    Kept for reference.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    window_size = 250.0  # half-width of the extraction window, in meters
    window_center = (447217, 4424780)
    window_SW = (window_center[0] - window_size, window_center[1] - window_size)
    window_NE = (window_center[0] + window_size, window_center[1] + window_size)
    #gps_track.visualize_tracks(tracks, bound_box=[window_SW, window_NE], style='.')
    gps_track.visualize_tracks(tracks, style='k.')
    return
    # ---- Dead code below this point ----
    # Extract GPS points in window, re-centered on window_center.
    GPS_points = []
    point_idxs = []  # (track_idx, pt_idx) provenance per kept point
    for track_idx in range(0, len(tracks)):
        track = tracks[track_idx]
        for pt_idx in range(0, len(track.utm)):
            pt = (track.utm[pt_idx][0], track.utm[pt_idx][1])
            if pt[0]>=window_SW[0] and pt[0]<=window_NE[0] and \
               pt[1]>=window_SW[1] and pt[1]<=window_NE[1]:
                new_pt = (pt[0] - window_center[0], pt[1] - window_center[1])
                GPS_points.append(new_pt)
                point_idxs.append((track_idx, pt_idx))
    print "In total %d points" % len(GPS_points)
    GPS_points = np.array(GPS_points)
    # Gaussian bump parameters: t scales radial stretch, h the bump height,
    # sigma the bump width (meters).
    t = 5.0
    h = 10.0
    sigma = 10.0
    deformed_points = []
    for pt in GPS_points:
        r_sum = pt[0]**2 + pt[1]**2
        ratio = np.exp(-1.0 * r_sum / 2.0 / sigma / sigma)
        new_e = pt[0] * (1 + t * ratio)
        new_n = pt[1] * (1 + t * ratio)
        new_z = h * ratio
        deformed_points.append((new_e, new_n, new_z))
    deformed_points = np.array(deformed_points)
    # Deform a regular grid the same way for the wireframe backdrop.
    N = 100
    x = np.linspace(-window_size, window_size, N)
    y = np.linspace(-window_size, window_size, N)
    #xx, yy = np.meshgrid(x, y)
    xx = np.zeros((N, N))
    yy = np.zeros((N, N))
    zz = np.zeros((N, N))
    for i in np.arange(N):
        for j in np.arange(N):
            r_sum = x[i]**2 + y[j]**2
            ratio = np.exp(-1 * r_sum / 2 / sigma / sigma)
            xx[i, j] = x[i] * (1 + t * ratio)
            yy[i, j] = y[j] * (1 + t * ratio)
            zz[i, j] = h * ratio - 0.3  # lowered slightly below the points
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_wireframe(xx, yy, zz, color='gray')
    ax.scatter(deformed_points[:, 0], deformed_points[:, 1],
               deformed_points[:, 2], 'r')
    plt.show()
    return
    # ---- Dead code: standalone demo surface of the same bump shape ----
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, projection='3d')
    N = 50
    x = np.linspace(-5.0, 5.0, N)
    y = np.linspace(-5.0, 5.0, N)
    #xx, yy = np.meshgrid(x, y)
    xx = np.zeros((N, N))
    yy = np.zeros((N, N))
    zz = np.zeros((N, N))
    sigma = 1.0
    h = 5.0
    t = 5.0
    for i in np.arange(N):
        for j in np.arange(N):
            r_sum = x[i]**2 + y[j]**2
            ratio = np.exp(-1 * r_sum / 2 / sigma / sigma)
            xx[i, j] = x[i] * (1 + t * ratio)
            yy[i, j] = y[j] * (1 + t * ratio)
            zz[i, j] = h * ratio
    ax.plot_surface(xx, yy, zz, rstride=1, cstride=1, cmap=cm.jet,
                    linewidth=0, antialiased=False)
    plt.show()
def main():
    """Diffusion-map style clustering of the GPS points inside a fixed window.

    Pipeline: extract points in a 500x500 m window, build a Gaussian
    similarity matrix (with a bonus for consecutive points of the same
    track), row-normalize it into a Markov matrix, embed the points with the
    top-k eigenvectors scaled by eigenvalue^t, k-means the embedding, and
    plot the clusters in map coordinates.
    """
    tracks = gps_track.load_tracks(sys.argv[1])
    #gps_track.visualize_tracks(tracks, style='k.')
    #return
    window_size = 250  # half-width in meters
    window_center = (447240, 4424780)
    window_SW = (window_center[0]-window_size, window_center[1]-window_size)
    window_NE = (window_center[0]+window_size, window_center[1]+window_size)
    # Extract GPS points in window
    GPS_points = []
    point_idxs = []  # (track_idx, pt_idx) provenance per kept point
    for track_idx in range(0, len(tracks)):
        track = tracks[track_idx]
        for pt_idx in range(0, len(track.utm)):
            pt = (track.utm[pt_idx][0], track.utm[pt_idx][1])
            if pt[0]>=window_SW[0] and pt[0]<=window_NE[0] and \
               pt[1]>=window_SW[1] and pt[1]<=window_NE[1]:
                GPS_points.append(pt)
                point_idxs.append((track_idx, pt_idx))
    print "In total %d points"%len(GPS_points)
    GPS_points = np.array(GPS_points)
    n_points = len(GPS_points)
    # Compute similarity matrix: Gaussian in distance, with the distance
    # shrunk by in_track_bonus for (near-)consecutive points of one track.
    L = np.zeros((n_points,n_points))
    in_track_bonus = 100
    dist_sigma = 20 # in meters
    for i in range(0, n_points):
        L[i,i] = 1.0
        track_i_idxs = point_idxs[i]
        for j in range(0, n_points):
            if i == j:
                L[i,i] = 1.0
            track_j_idxs = point_idxs[j]
            bonus = 1.0
            if track_i_idxs[0] == track_j_idxs[0]:
                # NOTE(review): this condition also matches points arbitrarily
                # far *behind* i on the same track (the difference may be any
                # negative number) -- presumably abs(...) <= 1 was intended;
                # confirm before relying on it.
                if track_j_idxs[1]-track_i_idxs[1]<=1:
                    bonus *= in_track_bonus
            dist = np.linalg.norm(GPS_points[i]-GPS_points[j]) / bonus
            L[i,j] = np.exp(-1*dist*dist/2.0/dist_sigma/dist_sigma)
            if L[i,j] < 0.01:
                L[i,j] = 0.0  # sparsify negligible similarities
    # Row-normalize into a Markov (random-walk) transition matrix.
    M = np.array(L, copy=True)
    for i in range(0, L.shape[0]):
        M[i,:] /= sum(M[i,:])
    # Synthetic dataset
    #n_points = 1000
    #synthetic_points = np.zeros((n_points,2))
    #d = 5
    #for i in range(0, int(0.4*n_points)):
    #    theta = np.random.rand()*2*np.pi
    #    r = np.random.rand()
    #    synthetic_points[i,0] = -1.0*d + r*np.cos(theta)
    #    synthetic_points[i,1] = r*np.sin(theta)
    #for i in range(int(0.4*n_points), int(0.8*n_points)):
    #    theta = np.random.rand()*2*np.pi
    #    r = np.random.rand()
    #    synthetic_points[i,0] = d + r*np.cos(theta)
    #    synthetic_points[i,1] = r*np.sin(theta)
    #for i in range(int(0.8*n_points), n_points):
    #    synthetic_points[i,0] = 2*d*(np.random.rand()-0.5)
    #    synthetic_points[i,1] = 0.2*(np.random.rand()-0.5)
    #fig = plt.figure(figsize=const.figsize)
    #ax = fig.add_subplot(111, aspect='equal')
    #ax.plot(synthetic_points[:,0], synthetic_points[:,1], '.')
    #plt.show()
    #return
    #L = np.zeros((n_points,n_points))
    #dist_sigma = 2 # in meters
    #for i in range(0, n_points):
    #    L[i,i] = 1.0
    #    for j in range(i+1, n_points):
    #        dist = np.linalg.norm(synthetic_points[i,:]-synthetic_points[j,:])
    #        L[i,j] = np.exp(-1*dist*dist/2.0/dist_sigma/dist_sigma)
    #        if L[i,j] < 0.1:
    #            L[i,j] = 0
    #        L[j,i] = L[i,j]
    #M = np.array(L, copy=True)
    #for i in range(0, L.shape[0]):
    #    M[i,:] /= sum(M[i,:])
    # Compute Eigen Vectors of M
    print "Start eigen."
    S = np.array(M, copy=True)
    eigs, v = np.linalg.eig(S)
    # Sanity check that (eigs[1], v[:,1]) is an eigenpair of M.
    print "test = ",np.linalg.norm(np.dot(M, v[:,1])-eigs[1]*v[:,1])
    sorted_idxs = np.argsort(eigs)[::-1]  # eigenvalue indices, largest first
    s = 1
    index = sorted_idxs[s]  # NOTE(review): only used by the commented check
    #test = np.dot(M, v[:,index]) - eigs[index]*v[:,index]
    #print "test norm = ", np.amax(test)
    # Compute new embedding: diffusion-map coordinates
    # Y[i, s] = eigs_s**t * v_s[i] over the top-k eigenvectors.
    k = 200
    t = 10
    Y = np.zeros((n_points, k))
    print "New projection."
    for i in range(0, n_points):
        for s in range(0, k):
            Y[i, s] = (eigs[sorted_idxs[s]]**t) * v[i, sorted_idxs[s]]
    #fig = plt.figure(figsize=const.figsize)
    #ax = fig.add_subplot(111, aspect='equal')
    #ax.plot(Y[:,0], Y[:,1], 'b.')
    #plt.show()
    # Clustering
    print "Start clustering."
    kmeans = KMeans(init='k-means++', n_clusters=8, n_init=10)
    kmeans.fit(Y)
    labels = kmeans.labels_
    cluster_centers = kmeans.cluster_centers_
    unique_labels = np.unique(labels)
    print unique_labels
    #db = DBSCAN(eps=0.5, min_samples=10).fit(Y)
    #core_samples = db.core_sample_indices_
    #labels = db.labels_
    #n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    #print "Estimated number of clusters: %d"%n_clusters_
    #unique_labels = set(labels)
    # Plot each cluster in its own color, back in map coordinates.
    colors = plt.cm.Spectral(np.linspace(0,1, len(unique_labels)))
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    for k, col in zip(unique_labels, colors):
        my_members = labels == k
        ax.plot(GPS_points[my_members, 0], GPS_points[my_members, 1], '.',
                color=col, markersize=10)
    plt.show()
def main():
    """Canonical road-direction pipeline over a pickled point cloud.

    With the current flags (compute_canonical_dir=True) only the first stage
    runs: load the point cloud and tracks, extract line segments around the
    hard-coded location LOC, visualize them, and return.  The remaining
    stages (per-point direction assignment, grid sampling, vote-based
    direction revision, track-driven sample clustering, DBSCAN merging and
    plotting) are flag-gated or sit behind unconditional `return`s and are
    effectively dead code, kept for reference.

    NOTE(review): the argv check requires exactly 4 arguments (len == 5) yet
    the dead clustering branches read sys.argv[5]; also `arrow_params`, `ax`
    and `point_cloud_kdtree` used in dead sections are never defined on any
    live path -- those sections would raise if re-enabled.  Confirm before
    reviving them.
    """
    if len(sys.argv) != 5:
        print "ERROR! Correct usage is:"
        return
    GRID_SIZE = 2.5 # in meters
    # Target location and radius
    # test_point_cloud.dat
    LOC = (447772, 4424300)
    R = 500
    # test_point_cloud1.dat
    #LOC = (446458, 4422150)
    #R = 500
    # San Francisco
    #LOC = (551281, 4180430)
    #R = 500
    with open(sys.argv[1], "rb") as fin:
        point_cloud = cPickle.load(fin)
    print "there are %d points in the point cloud." % point_cloud.locations.shape[
        0]
    tracks = gps_track.load_tracks(sys.argv[2])
    compute_canonical_dir = True
    if compute_canonical_dir:
        LINE_GAP = 40
        SEARCH_RANGE = 5
        P_REMOVAL = 0.1
        lines = extract_line_segments(point_cloud, GRID_SIZE, LOC, R,
                                      LINE_GAP, SEARCH_RANGE, P_REMOVAL)
        visualize_extracted_lines(point_cloud, lines, LOC, R)
        return
        # ---- Dead code: unit direction / normal per extracted segment ----
        line_vecs = []
        line_norms = []
        for line in lines:
            line_vec = np.array(
                (line[1][0] - line[0][0], line[1][1] - line[0][1]))
            line_vec_norm = np.linalg.norm(line_vec)
            line_vec /= line_vec_norm
            line_vecs.append(line_vec)
            line_norm = np.array((-1 * line_vec[1], line_vec[0]))
            line_norms.append(line_norm)
        line_vecs = np.array(line_vecs)
        line_norms = np.array(line_norms)
        #angle_distance = 1.1 - np.dot(line_vecs, line_vecs.T)
        dist_threshold = 5  # max perpendicular distance point-to-segment
        point_directions = []
        print "start computing"
        for pt_idx in range(0, point_cloud.locations.shape[0]):
            pt = point_cloud.locations[pt_idx]
            # search nearby lines: signs < 0 means the point projects inside
            # the segment; dist is perpendicular distance to the line.
            vec1s = pt - lines[:, 0]
            vec2s = pt - lines[:, 1]
            signs = np.einsum('ij,ij->i', vec1s, vec2s)
            dist = np.abs(np.einsum('ij,ij->i', vec1s, line_norms))
            nearby_segments = []  # NOTE(review): unused
            directions = []
            for j in np.arange(len(signs)):
                if signs[j] < 0.0:
                    if dist[j] < dist_threshold:
                        if len(directions) == 0:
                            directions.append(line_vecs[j])
                        else:
                            # Only keep directions not nearly parallel
                            # (|cos| <= 0.91) to one already collected.
                            find_match = False
                            for dir_idx in range(0, len(directions)):
                                normalized_vec = directions[
                                    dir_idx] / np.linalg.norm(
                                        directions[dir_idx])
                                dot_value = np.dot(line_vecs[j],
                                                   normalized_vec)
                                if abs(dot_value) > 0.91:
                                    find_match = True
                                    break
                            if not find_match:
                                directions.append(line_vecs[j])
            normalized_dirs = []
            for ind in range(0, len(directions)):
                vec = directions[ind] / np.linalg.norm(directions[ind])
                normalized_dirs.append(vec)
            point_directions.append(normalized_dirs)
        # Grid sample
        sample_point_cloud, sample_canonical_directions =\
            filter_point_cloud_using_grid(point_cloud, point_directions,
                                          10, LOC, R)
        # Correct direction using tracks
        # build sample point kdtree
        sample_point_kdtree = spatial.cKDTree(sample_point_cloud.locations)
        # Expand each canonical direction into +/- candidates with a vote
        # counter each.
        expanded_directions = []
        votes_directions = []
        for i in range(0, len(sample_canonical_directions)):
            directions = []
            votes = []
            for direction in sample_canonical_directions[i]:
                directions.append(direction)
                votes.append(0)
                directions.append(-1 * direction)
                votes.append(0)
            expanded_directions.append(directions)
            votes_directions.append(votes)
        # Each raw point votes for the candidate directions (within 30 deg)
        # of its nearest sample point.
        for i in range(0, point_cloud.locations.shape[0]):
            # find nearby sample point
            dist, sample_idx = sample_point_kdtree.query(
                point_cloud.locations[i])
            for direction_idx in range(0, len(expanded_directions[sample_idx])):
                direction = expanded_directions[sample_idx][direction_idx]
                dot_product = np.dot(direction, point_cloud.directions[i])
                if dot_product >= 0.866:
                    votes_directions[sample_idx][direction_idx] += 1
        # Keep only candidate directions with at least `threshold` votes.
        threshold = 1
        revised_canonical_directions = []
        for i in range(0, len(expanded_directions)):
            revised_dir = []
            for dir_idx in range(0, len(expanded_directions[i])):
                if votes_directions[i][dir_idx] >= threshold:
                    revised_dir.append(expanded_directions[i][dir_idx])
            revised_canonical_directions.append(revised_dir)
        print "end computing"
        with open(sys.argv[3], 'wb') as fout:
            cPickle.dump(sample_point_cloud, fout, protocol=2)
        with open(sys.argv[4], 'wb') as fout:
            cPickle.dump(revised_canonical_directions, fout, protocol=2)
        return
    else:
        # Reload a previously computed sample cloud and directions.
        with open(sys.argv[3], 'rb') as fin:
            sample_point_cloud = cPickle.load(fin)
        with open(sys.argv[4], 'rb') as fin:
            revised_canonical_directions = cPickle.load(fin)
    visualize_sample_point_cloud(sample_point_cloud,
                                 revised_canonical_directions,
                                 point_cloud, LOC, R)
    #track_idx = 0
    #arrow_params = {'length_includes_head':True, 'shape':'full', 'head_starts_at_zero':False}
    ##track = tracks[track_idx]
    #fig = plt.figure(figsize=const.figsize)
    #ax = fig.add_subplot(111, aspect='equal')
    #for i in range(0, sample_point_cloud.locations.shape[0]):
    #    for direction in revised_canonical_directions[i]:
    #        ax.arrow(sample_point_cloud.locations[i][0],
    #                 sample_point_cloud.locations[i][1],
    #                 20*direction[0],
    #                 20*direction[1],
    #                 width=0.5, head_width=5, fc='gray', ec='gray',
    #                 head_length=10, overhang=0.5, **arrow_params)
    #plt.show()
    return
    # ---- Dead code: build SampleCluster objects from track neighborhoods ----
    count = 0
    track = tracks[track_idx]
    query_distance = 50 # in meter
    clusters = []
    count = 0
    compute_sample_cluster = False
    if compute_sample_cluster:
        sample_clusters = []
        for track_idx in range(0, 1000):
            track = tracks[track_idx]
            for pt_idx in range(0, len(track.utm)):
                if len(track.utm) <= 1:
                    continue
                pt = np.array((track.utm[pt_idx][0], track.utm[pt_idx][1]))
                #ax.plot(pt[0], pt[1], 'or')
                #if pt_idx < len(track.utm) - 1:
                #    u = track.utm[pt_idx+1][0]-pt[0]
                #    v = track.utm[pt_idx+1][1]-pt[1]
                #    if abs(u) + abs(v) > 2:
                #        ax.arrow(pt[0], pt[1], u,
                #                 v, width=0.5, head_width=5, fc='r', ec='r',
                #                 head_length=10, overhang=0.5, **arrow_params)
                # Unit heading into and out of this point; zeroed when the
                # step is shorter than 1 m.
                in_dir = np.array([0.0, 0.0])
                out_dir = np.array([0.0, 0.0])
                if pt_idx < len(track.utm) - 1:
                    out_dir = np.array((track.utm[pt_idx + 1][0] - pt[0],
                                        track.utm[pt_idx + 1][1] - pt[1]))
                    vec_norm = np.linalg.norm(out_dir)
                    if vec_norm < 1:
                        out_dir = np.array([0.0, 0.0])
                    else:
                        out_dir /= vec_norm
                if pt_idx >= 1:
                    in_dir = np.array((pt[0] - track.utm[pt_idx - 1][0],
                                       pt[1] - track.utm[pt_idx - 1][1]))
                    vec_norm = np.linalg.norm(in_dir)
                    if vec_norm < 1:
                        in_dir = np.array([0.0, 0.0])
                    else:
                        in_dir /= vec_norm
                # search nearby sample points
                neighbor_idxs = sample_point_kdtree.query_ball_point(
                    pt, query_distance)
                # Filter sample by angle (within ~37 deg of either heading).
                filtered_sample_by_angle = []
                filtered_sample_by_angle_directions = []
                for sample_idx in neighbor_idxs:
                    for direction in revised_canonical_directions[sample_idx]:
                        if np.dot(direction, in_dir) >= 0.8:
                            filtered_sample_by_angle.append(sample_idx)
                            filtered_sample_by_angle_directions.append(
                                np.copy(direction))
                            break
                        if np.dot(direction, out_dir) >= 0.8:
                            filtered_sample_by_angle.append(sample_idx)
                            filtered_sample_by_angle_directions.append(
                                np.copy(direction))
                            break
                # Filter sample by distance (lateral offset from either
                # heading under 40% of the query radius).
                filtered_sample = []
                filtered_sample_directions = []
                pt_in_norm = np.array([-1 * in_dir[1], in_dir[0]])
                pt_out_norm = np.array([-1 * out_dir[1], out_dir[0]])
                for s in range(0, len(filtered_sample_by_angle)):
                    sample_idx = filtered_sample_by_angle[s]
                    vec = sample_point_cloud.locations[sample_idx] - pt
                    if abs(np.dot(vec, pt_in_norm)) < query_distance * 0.4:
                        filtered_sample.append(sample_idx)
                        filtered_sample_directions.append(
                            filtered_sample_by_angle_directions[s])
                        continue
                    if abs(np.dot(vec, pt_out_norm)) < query_distance * 0.4:
                        filtered_sample.append(sample_idx)
                        filtered_sample_directions.append(
                            filtered_sample_by_angle_directions[s])
                        continue
                if len(filtered_sample) == 0:
                    continue
                new_cluster = SampleCluster(filtered_sample,
                                            filtered_sample_directions,
                                            sample_point_cloud)
                # Check with existing clusters; merge into the first similar
                # one, otherwise start a new cluster.
                found_merge = False
                for cluster in sample_clusters:
                    similarity = cluster.compute_similarity(new_cluster)
                    if similarity >= 0.5:
                        cluster.merge_cluster(new_cluster)
                        found_merge = True
                        break
                if not found_merge:
                    sample_clusters.append(new_cluster)
                #for sample_idx in filtered_sample:
                #    ax.plot(sample_point_cloud.locations[sample_idx][0],
                #            sample_point_cloud.locations[sample_idx][1],
                #            '.', color=const.colors[pt_idx%7])
        with open(sys.argv[4], 'wb') as fout:
            cPickle.dump(sample_clusters, fout, protocol=2)
        return
    else:
        with open(sys.argv[4], 'rb') as fin:
            sample_clusters = cPickle.load(fin)
    # cluster sample_clusters with DBSCAN over 1/similarity distances.
    compute_dbscan = False
    if compute_dbscan:
        N = len(sample_clusters)
        distance_matrix = np.zeros((N, N))
        for i in range(0, N):
            for j in range(i + 1, N):
                cluster1 = sample_clusters[i]
                cluster2 = sample_clusters[j]
                similarity = cluster1.compute_similarity(cluster2)
                if similarity < 1e-3:
                    similarity = 1e-3  # cap distances at 1000
                distance_matrix[i, j] = 1.0 / similarity
                distance_matrix[j, i] = 1.0 / similarity
        print "max=", np.amax(distance_matrix)
        print "min=", np.amin(distance_matrix)
        print "DBSCAN started."
        t_start = time.time()
        db = DBSCAN(eps=2, min_samples=1,
                    metric='precomputed').fit(distance_matrix)
        print "DBSCAN took %d sec." % (int(time.time() - t_start))
        core_samples = db.core_sample_indices_
        labels = db.labels_
        n_cluster = len(set(labels)) - (1 if -1 in labels else 0)
        print "There are %d clusters." % n_cluster
        unique_labels = set(labels)
        # Merge each DBSCAN label group into its first member cluster.
        new_clusters = []
        for k in unique_labels:
            if k == -1:
                continue  # noise label
            class_members = [index[0] for index in np.argwhere(labels == k)]
            starting_cluster = sample_clusters[class_members[0]]
            for j in range(1, len(class_members)):
                starting_cluster.merge_cluster(
                    sample_clusters[class_members[j]])
            new_clusters.append(starting_cluster)
        with open(sys.argv[5], "wb") as fout:
            cPickle.dump(new_clusters, fout, protocol=2)
        return
    else:
        with open(sys.argv[5], "rb") as fin:
            new_clusters = cPickle.load(fin)
    # Plot each merged cluster, its mass center and mean direction arrow.
    for cluster_idx in range(0, len(new_clusters)):
        cluster = new_clusters[cluster_idx]
        color = const.colors[cluster_idx % 7]
        ax.plot(sample_point_cloud.locations[cluster.member_samples.keys(), 0],
                sample_point_cloud.locations[cluster.member_samples.keys(), 1],
                '.', color=color)
        ax.plot(cluster.mass_center[0], cluster.mass_center[1],
                'o', color=color)
        if np.linalg.norm(cluster.direction) > 0.1:
            ax.arrow(cluster.mass_center[0], cluster.mass_center[1],
                     100 * cluster.direction[0], 100 * cluster.direction[1],
                     width=3, head_width=20, fc=color, ec=color,
                     head_length=20, overhang=0.5, **arrow_params)
    #for track in tracks:
    #    count += 1
    #    if count == 10:
    #        break
    #    ax.plot([pt[0] for pt in track.utm],
    #            [pt[1] for pt in track.utm],
    #            'r.', markersize=12)
    #    for i in range(1, len(track.utm)):
    #        vec_e = track.utm[i][0] - track.utm[i-1][0]
    #        vec_n = track.utm[i][1] - track.utm[i-1][1]
    #        if abs(vec_e) + abs(vec_n) < 1.0:
    #            continue
    #        ax.arrow(track.utm[i-1][0], track.utm[i-1][1],
    #                 vec_e, vec_n,
    #                 width=1, head_width=10, fc='b', ec='b',
    #                 head_length=20, overhang=0.5, **arrow_params)
    ax.set_xlim([LOC[0] - R, LOC[0] + R])
    ax.set_ylim([LOC[1] - R, LOC[1] + R])
    print "# clusters:", len(sample_clusters)
    plt.show()
    return
    # ---- Dead code: arrows for directions near one track's points ----
    for pt in track.utm:
        if pt[0]>=LOC[0]-R and pt[0]<=LOC[0]+R \
                and pt[1]>=LOC[1]-R and pt[1]<=LOC[1]+R:
            # Search point
            dist, nearby_idx = point_cloud_kdtree.query(
                np.array([pt[0], pt[1]]))
            for j in range(0, len(point_directions[nearby_idx])):
                direction = point_directions[nearby_idx][j]
                ax.plot(pt[0], pt[1], 'or')
                ax.arrow(point_cloud.locations[nearby_idx][0],
                         point_cloud.locations[nearby_idx][1],
                         20 * direction[0], 20 * direction[1],
                         fc='r', ec='r', width=0.5, head_width=5,
                         head_length=10, overhang=0.5, **arrow_params)
            if np.linalg.norm(point_cloud.directions[nearby_idx]) > 0.1:
                ax.arrow(point_cloud.locations[nearby_idx][0],
                         point_cloud.locations[nearby_idx][1],
                         20 * point_cloud.directions[nearby_idx][0],
                         20 * point_cloud.directions[nearby_idx][1],
                         width=0.5, head_width=5, fc='b', ec='b',
                         head_length=10, overhang=0.5, **arrow_params)
    #ax.set_xlim([LOC[0]-R, LOC[0]+R])
    #ax.set_ylim([LOC[1]-R, LOC[1]+R])
    plt.show()
    return
    #track = tracks[track_idx]
    #count = 0
    #for pt in track.utm:
    #    if pt[0]>=LOC[0]-R and pt[0]<=LOC[0]+R \
    #            and pt[1]>=LOC[1]-R and pt[1]<=LOC[1]+R:
    #        count += 1
    #        # search nearby lines
    #        for line in lines:
    #            vec = np.array((line[1][0]-line[0][0], line[1][1]-line[0][1]))
    #            vec_norm = np.linalg.norm(vec)
    #            vec /= vec_norm
    #            vec1 = np.array((pt[0]-line[0][0], pt[1]-line[0][1]))
    #            vec2 = np.array((pt[0]-line[1][0], pt[1]-line[1][1]))
    #            if np.dot(vec1, vec2) < 0:
    #                norm_vec = np.array((-1.0*vec[1], vec[0]))
    #                dist = abs(np.dot(vec1, norm_vec))
    #                if dist < 10.0:
    #                    ax.plot([line[0][0], line[1][0]],
    #                            [line[0][1], line[1][1]],
    #                            '-', color=const.colors[count%7])
    ax.plot([pt[0] for pt in track.utm],
            [pt[1] for pt in track.utm],
            '.-r', linewidth=3)
    ax.plot(track.utm[0][0], track.utm[0][1], 'or')
    #for line in lines:
    #    ax.plot([line[0][0], line[1][0]],
    #            [line[0][1], line[1][1]],
    #            'r-')
    ax.set_xlim([LOC[0] - R, LOC[0] + R])
    ax.set_ylim([LOC[1] - R, LOC[1] + R])
    plt.show()
    return
def main():
    """Classify sampled locations in a fixed UTM region as road / non-road.

    Loads two track files (merged into one set), collects every GPS point
    inside a hard-coded bounding box into a KD-tree, trains an SVM from
    pickled training features/labels, then predicts labels for randomly
    sampled locations and plots the result (gray = all points, red = road,
    blue = non-road).
    """
    if len(sys.argv) != 3:
        # NOTE(review): two track files are required (argv[1], argv[2]) but
        # the usage text lists only one -- message looks stale; confirm.
        print "Error! Correct usage is:"
        print "\tpython road_detector.py [input_track.dat]"
        return
    tracks = gps_track.load_tracks(sys.argv[1])
    tracks1 = gps_track.load_tracks(sys.argv[2])
    tracks.extend(tracks1)
    # Hard-coded UTM bounding box of the study region (easting, northing).
    RANGE_SW = (446000, 4421450)
    RANGE_NE = (451000, 4426450)
    # Collect every GPS point falling inside the bounding box.
    point_collection = []
    for track in tracks:
        for pt in track.utm:
            if pt[0] <= RANGE_NE[0] and pt[0] >= RANGE_SW[0]:
                if pt[1] <= RANGE_NE[1] and pt[1] >= RANGE_SW[1]:
                    point_collection.append((pt[0], pt[1]))
    print "There are %d GPS points." % len(point_collection)
    qtree = scipy.spatial.KDTree(point_collection)
    print "Quad tree completed."
    # Training SVM: features/labels come from pre-pickled files in the CWD.
    with open("training_loc_0.dat", "rb") as fin:
        training_loc = cPickle.load(fin)
    with open("training_feature_0.dat", "rb") as fin:
        training_feature = cPickle.load(fin)
    with open("training_loc_1.dat", "rb") as fin:
        training_loc1 = cPickle.load(fin)
    with open("training_feature_1.dat", "rb") as fin:
        training_feature1 = cPickle.load(fin)
    training_loc.extend(training_loc1)
    training_feature.extend(training_feature1)
    with open("training_label.dat", "rb") as fin:
        training_label = cPickle.load(fin)
    svc = sklearn.svm.SVC(kernel='sigmoid').fit(training_feature, training_label)
    # Make Prediction
    road_point = []
    non_road_point = []
    # Radii (presumably meters) of the neighborhoods used as histogram
    # features by road_detector.compute_hist -- TODO confirm units.
    query_radius = [ 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 200, 300, 400, 500 ]
    count = 0
    # Start Prediction
    print "Start prediction!"
    for pt in point_collection:
        # The loop variable is immediately overwritten: each iteration tests a
        # random location (kept 500 m inside the box), not an actual GPS point.
        rand_easting = random.randint(RANGE_SW[0] + 500, RANGE_NE[0] - 500)
        rand_northing = random.randint(RANGE_SW[1] + 500, RANGE_NE[1] - 500)
        pt = (rand_easting, rand_northing)
        # if pt[0] <= RANGE_NE[0]-500 and pt[0] >= RANGE_SW[0]+500 \
        #    and pt[1] <= RANGE_NE[1]-500 and pt[1] >= RANGE_SW[1]+500:
        # print count
        hist_result = road_detector.compute_hist(pt, query_radius, qtree)
        prediction = svc.predict([hist_result])
        if prediction[0] == 0:
            non_road_point.append(pt)
        elif prediction[0] == 1:
            road_point.append(pt)
        else:
            print "Warning! Invalid prediction!"
        count += 1
        if count % 500 == 0:
            print "Now at ", count
            # NOTE(review): stops after the first 500 samples -- presumably a
            # debugging cap; confirm intended loop bound.
            break
    print "There are %d road point, %d non-road point." % (len(road_point), len(non_road_point))
    # Plot all collected points (gray) under the predictions (red/blue).
    fig = plt.figure(figsize=(16, 16))
    ax = fig.add_subplot(111, aspect='equal')
    ax.plot([p[0] for p in point_collection],
            [p[1] for p in point_collection],
            '.', color='gray')
    ax.plot([p[0] for p in road_point],
            [p[1] for p in road_point],
            '.', color='red')
    ax.plot([p[0] for p in non_road_point],
            [p[1] for p in non_road_point],
            '.', color='blue')
    ax.set_xlim([RANGE_SW[0], RANGE_NE[0]])
    ax.set_ylim([RANGE_SW[1], RANGE_NE[1]])
    plt.show()
def main():
    """Extract and visualize canonical road directions from a GPS point cloud.

    argv: [1] pickled point cloud, [2] GPS track file, [3] sample point cloud
    (written or read depending on the hard-coded switch), [4] canonical
    directions (written or read).

    NOTE(review): the function contains several unconditional returns followed
    by large blocks of dead experimental code (some referencing undefined
    names such as ax, track_idx, point_cloud_kdtree, and sys.argv[5] even
    though len(sys.argv) == 5 is enforced). The dead code is preserved below
    as-is; only the path up to the first return in the
    compute_canonical_dir branch actually executes.
    """
    if len(sys.argv) != 5:
        print "ERROR! Correct usage is:"
        return
    GRID_SIZE = 2.5 # in meters
    # Target location and radius
    # test_point_cloud.dat
    LOC = (447772, 4424300)
    R = 500
    # test_point_cloud1.dat
    #LOC = (446458, 4422150)
    #R = 500
    # San Francisco
    #LOC = (551281, 4180430)
    #R = 500
    with open(sys.argv[1], "rb") as fin:
        point_cloud = cPickle.load(fin)
    print "there are %d points in the point cloud."%point_cloud.locations.shape[0]
    tracks = gps_track.load_tracks(sys.argv[2])
    # Hard-coded switch: True = extract line segments and visualize, then
    # return; False = load previously saved sample cloud + directions.
    compute_canonical_dir = True
    if compute_canonical_dir:
        LINE_GAP = 40
        SEARCH_RANGE = 5
        P_REMOVAL = 0.1
        lines = extract_line_segments(point_cloud, GRID_SIZE, LOC, R, LINE_GAP, SEARCH_RANGE, P_REMOVAL)
        visualize_extracted_lines(point_cloud, lines, LOC, R)
        return
        # ---- dead code below (unreachable after the return above) ----
        # Build unit direction vectors and left normals for each line segment.
        line_vecs = []
        line_norms = []
        for line in lines:
            line_vec = np.array((line[1][0]-line[0][0], line[1][1]-line[0][1]))
            line_vec_norm = np.linalg.norm(line_vec)
            line_vec /= line_vec_norm
            line_vecs.append(line_vec)
            line_norm = np.array((-1*line_vec[1], line_vec[0]))
            line_norms.append(line_norm)
        line_vecs = np.array(line_vecs)
        line_norms = np.array(line_norms)
        #angle_distance = 1.1 - np.dot(line_vecs, line_vecs.T)
        dist_threshold = 5
        point_directions = []
        print "start computing"
        for pt_idx in range(0, point_cloud.locations.shape[0]):
            pt = point_cloud.locations[pt_idx]
            # search nearby lines
            vec1s = pt - lines[:,0]
            vec2s = pt - lines[:,1]
            # signs < 0 means the point projects onto the segment's interior.
            signs = np.einsum('ij,ij->i', vec1s, vec2s)
            # Perpendicular distance of the point to each segment's line.
            dist = np.abs(np.einsum('ij,ij->i', vec1s, line_norms))
            nearby_segments = []
            directions = []
            for j in np.arange(len(signs)):
                if signs[j] < 0.0:
                    if dist[j] < dist_threshold:
                        if len(directions) == 0:
                            directions.append(line_vecs[j])
                        else:
                            # Deduplicate near-parallel directions (|cos| > 0.91).
                            find_match = False
                            for dir_idx in range(0, len(directions)):
                                normalized_vec = directions[dir_idx] / np.linalg.norm(directions[dir_idx])
                                dot_value = np.dot(line_vecs[j], normalized_vec)
                                if abs(dot_value) > 0.91:
                                    find_match = True
                                    break
                            if not find_match:
                                directions.append(line_vecs[j])
            normalized_dirs = []
            for ind in range(0, len(directions)):
                vec = directions[ind] / np.linalg.norm(directions[ind])
                normalized_dirs.append(vec)
            point_directions.append(normalized_dirs)
        # Grid sample
        sample_point_cloud, sample_canonical_directions =\
            filter_point_cloud_using_grid(point_cloud, point_directions, 10, LOC, R)
        # Correct direction using tracks
        # build sample point kdtree
        sample_point_kdtree = spatial.cKDTree(sample_point_cloud.locations)
        # Expand each canonical direction into +/- pair with a vote counter.
        expanded_directions = []
        votes_directions = []
        for i in range(0, len(sample_canonical_directions)):
            directions = []
            votes = []
            for direction in sample_canonical_directions[i]:
                directions.append(direction)
                votes.append(0)
                directions.append(-1*direction)
                votes.append(0)
            expanded_directions.append(directions)
            votes_directions.append(votes)
        # Each point votes for the expanded directions within 30 degrees
        # (cos >= 0.866) of its own direction.
        for i in range(0, point_cloud.locations.shape[0]):
            # find nearby sample point
            dist, sample_idx = sample_point_kdtree.query(point_cloud.locations[i])
            for direction_idx in range(0, len(expanded_directions[sample_idx])):
                direction = expanded_directions[sample_idx][direction_idx]
                dot_product = np.dot(direction, point_cloud.directions[i])
                if dot_product >= 0.866:
                    votes_directions[sample_idx][direction_idx] += 1
        # Keep only directions that received at least `threshold` votes.
        threshold = 1
        revised_canonical_directions = []
        for i in range(0, len(expanded_directions)):
            revised_dir = []
            for dir_idx in range(0, len(expanded_directions[i])):
                if votes_directions[i][dir_idx] >= threshold:
                    revised_dir.append(expanded_directions[i][dir_idx])
            revised_canonical_directions.append(revised_dir)
        print "end computing"
        with open(sys.argv[3], 'wb') as fout:
            cPickle.dump(sample_point_cloud, fout, protocol=2)
        with open(sys.argv[4], 'wb') as fout:
            cPickle.dump(revised_canonical_directions, fout, protocol=2)
        return
    else:
        # Load previously computed sample cloud and directions.
        with open(sys.argv[3], 'rb') as fin:
            sample_point_cloud = cPickle.load(fin)
        with open(sys.argv[4], 'rb') as fin:
            revised_canonical_directions = cPickle.load(fin)
    visualize_sample_point_cloud(sample_point_cloud, revised_canonical_directions, point_cloud, LOC, R)
    #track_idx = 0
    #arrow_params = {'length_includes_head':True, 'shape':'full', 'head_starts_at_zero':False}
    ##track = tracks[track_idx]
    #fig = plt.figure(figsize=const.figsize)
    #ax = fig.add_subplot(111, aspect='equal')
    #for i in range(0, sample_point_cloud.locations.shape[0]):
    #    for direction in revised_canonical_directions[i]:
    #        ax.arrow(sample_point_cloud.locations[i][0],
    #                 sample_point_cloud.locations[i][1],
    #                 20*direction[0],
    #                 20*direction[1],
    #                 width=0.5, head_width=5, fc='gray', ec='gray',
    #                 head_length=10, overhang=0.5, **arrow_params)
    #plt.show()
    return
    # ---- dead code below (unreachable; references undefined names) ----
    count = 0
    track = tracks[track_idx]
    query_distance = 50 # in meter
    clusters = []
    count = 0
    compute_sample_cluster = False
    if compute_sample_cluster:
        sample_clusters = []
        for track_idx in range(0, 1000):
            track = tracks[track_idx]
            for pt_idx in range(0, len(track.utm)):
                if len(track.utm) <= 1:
                    continue
                pt = np.array((track.utm[pt_idx][0], track.utm[pt_idx][1]))
                #ax.plot(pt[0], pt[1], 'or')
                #if pt_idx < len(track.utm) - 1:
                #    u = track.utm[pt_idx+1][0]-pt[0]
                #    v = track.utm[pt_idx+1][1]-pt[1]
                #    if abs(u) + abs(v) > 2:
                #        ax.arrow(pt[0], pt[1], u, v,
                #                 width=0.5, head_width=5, fc='r', ec='r',
                #                 head_length=10, overhang=0.5, **arrow_params)
                # In/out headings at this point (zeroed when the step is < 1 m).
                in_dir = np.array([0.0, 0.0])
                out_dir = np.array([0.0, 0.0])
                if pt_idx < len(track.utm) - 1:
                    out_dir = np.array((track.utm[pt_idx+1][0]-pt[0], track.utm[pt_idx+1][1]-pt[1]))
                    vec_norm = np.linalg.norm(out_dir)
                    if vec_norm < 1:
                        out_dir = np.array([0.0, 0.0])
                    else:
                        out_dir /= vec_norm
                if pt_idx >= 1:
                    in_dir = np.array((pt[0]-track.utm[pt_idx-1][0], pt[1]-track.utm[pt_idx-1][1]))
                    vec_norm = np.linalg.norm(in_dir)
                    if vec_norm < 1:
                        in_dir = np.array([0.0, 0.0])
                    else:
                        in_dir /= vec_norm
                # search nearby sample points
                neighbor_idxs = sample_point_kdtree.query_ball_point(pt, query_distance)
                # Filter sample by angle
                filtered_sample_by_angle = []
                filtered_sample_by_angle_directions = []
                for sample_idx in neighbor_idxs:
                    for direction in revised_canonical_directions[sample_idx]:
                        if np.dot(direction, in_dir) >= 0.8:
                            filtered_sample_by_angle.append(sample_idx)
                            filtered_sample_by_angle_directions.append(np.copy(direction))
                            break
                        if np.dot(direction, out_dir) >= 0.8:
                            filtered_sample_by_angle.append(sample_idx)
                            filtered_sample_by_angle_directions.append(np.copy(direction))
                            break
                # Filter sample by distance
                filtered_sample = []
                filtered_sample_directions = []
                pt_in_norm = np.array([-1*in_dir[1], in_dir[0]])
                pt_out_norm = np.array([-1*out_dir[1], out_dir[0]])
                for s in range(0, len(filtered_sample_by_angle)):
                    sample_idx = filtered_sample_by_angle[s]
                    vec = sample_point_cloud.locations[sample_idx] - pt
                    if abs(np.dot(vec, pt_in_norm)) < query_distance*0.4:
                        filtered_sample.append(sample_idx)
                        filtered_sample_directions.append(filtered_sample_by_angle_directions[s])
                        continue
                    if abs(np.dot(vec, pt_out_norm)) < query_distance*0.4:
                        filtered_sample.append(sample_idx)
                        filtered_sample_directions.append(filtered_sample_by_angle_directions[s])
                        continue
                if len(filtered_sample) == 0:
                    continue
                new_cluster = SampleCluster(filtered_sample, filtered_sample_directions, sample_point_cloud)
                # Check with existing clusters
                found_merge = False
                for cluster in sample_clusters:
                    similarity = cluster.compute_similarity(new_cluster)
                    if similarity >= 0.5:
                        cluster.merge_cluster(new_cluster)
                        found_merge = True
                        break
                if not found_merge:
                    sample_clusters.append(new_cluster)
                #for sample_idx in filtered_sample:
                #    ax.plot(sample_point_cloud.locations[sample_idx][0],
                #            sample_point_cloud.locations[sample_idx][1],
                #            '.', color=const.colors[pt_idx%7])
        with open(sys.argv[4], 'wb') as fout:
            cPickle.dump(sample_clusters, fout, protocol=2)
        return
    else:
        with open(sys.argv[4], 'rb') as fin:
            sample_clusters = cPickle.load(fin)
    # cluster sample_clusters
    compute_dbscan = False
    if compute_dbscan:
        # Pairwise distance = 1/similarity (clamped), fed to precomputed DBSCAN.
        N = len(sample_clusters)
        distance_matrix = np.zeros((N, N))
        for i in range(0, N):
            for j in range(i+1, N):
                cluster1 = sample_clusters[i]
                cluster2 = sample_clusters[j]
                similarity = cluster1.compute_similarity(cluster2)
                if similarity < 1e-3:
                    similarity = 1e-3
                distance_matrix[i,j] = 1.0 / similarity
                distance_matrix[j,i] = 1.0 / similarity
        print "max=",np.amax(distance_matrix)
        print "min=",np.amin(distance_matrix)
        print "DBSCAN started."
        t_start = time.time()
        db = DBSCAN(eps=2, min_samples=1, metric='precomputed').fit(distance_matrix)
        print "DBSCAN took %d sec."%(int(time.time() - t_start))
        core_samples = db.core_sample_indices_
        labels = db.labels_
        n_cluster = len(set(labels)) - (1 if -1 in labels else 0)
        print "There are %d clusters."%n_cluster
        unique_labels = set(labels)
        new_clusters = []
        for k in unique_labels:
            if k==-1:
                continue
            # Merge all members of DBSCAN cluster k into its first member.
            class_members = [index[0] for index in np.argwhere(labels==k)]
            starting_cluster = sample_clusters[class_members[0]]
            for j in range(1, len(class_members)):
                starting_cluster.merge_cluster(sample_clusters[class_members[j]])
            new_clusters.append(starting_cluster)
        # NOTE(review): sys.argv[5] is out of range given the argv check above.
        with open(sys.argv[5], "wb") as fout:
            cPickle.dump(new_clusters, fout, protocol=2)
        return
    else:
        with open(sys.argv[5], "rb") as fin:
            new_clusters = cPickle.load(fin)
    for cluster_idx in range(0, len(new_clusters)):
        cluster = new_clusters[cluster_idx]
        color = const.colors[cluster_idx%7]
        ax.plot(sample_point_cloud.locations[cluster.member_samples.keys(), 0],
                sample_point_cloud.locations[cluster.member_samples.keys(), 1],
                '.', color=color)
        ax.plot(cluster.mass_center[0], cluster.mass_center[1], 'o', color=color)
        if np.linalg.norm(cluster.direction) > 0.1:
            ax.arrow(cluster.mass_center[0],
                     cluster.mass_center[1],
                     100*cluster.direction[0],
                     100*cluster.direction[1],
                     width=3, head_width=20, fc=color, ec=color,
                     head_length=20, overhang=0.5, **arrow_params)
    #for track in tracks:
    #    count += 1
    #    if count == 10:
    #        break
    #    ax.plot([pt[0] for pt in track.utm],
    #            [pt[1] for pt in track.utm],
    #            'r.', markersize=12)
    #    for i in range(1, len(track.utm)):
    #        vec_e = track.utm[i][0] - track.utm[i-1][0]
    #        vec_n = track.utm[i][1] - track.utm[i-1][1]
    #        if abs(vec_e) + abs(vec_n) < 1.0:
    #            continue
    #        ax.arrow(track.utm[i-1][0], track.utm[i-1][1],
    #                 vec_e, vec_n,
    #                 width=1, head_width=10, fc='b', ec='b',
    #                 head_length=20, overhang=0.5, **arrow_params)
    ax.set_xlim([LOC[0]-R, LOC[0]+R])
    ax.set_ylim([LOC[1]-R, LOC[1]+R])
    print "# clusters:",len(sample_clusters)
    plt.show()
    return
    for pt in track.utm:
        if pt[0]>=LOC[0]-R and pt[0]<=LOC[0]+R \
           and pt[1]>=LOC[1]-R and pt[1]<=LOC[1]+R:
            # Search point
            dist, nearby_idx = point_cloud_kdtree.query(np.array([pt[0], pt[1]]))
            for j in range(0, len(point_directions[nearby_idx])):
                direction = point_directions[nearby_idx][j]
                ax.plot(pt[0], pt[1], 'or')
                ax.arrow(point_cloud.locations[nearby_idx][0],
                         point_cloud.locations[nearby_idx][1],
                         20*direction[0],
                         20*direction[1],
                         fc='r', ec='r', width=0.5, head_width=5,
                         head_length=10, overhang=0.5, **arrow_params)
            if np.linalg.norm(point_cloud.directions[nearby_idx]) > 0.1:
                ax.arrow(point_cloud.locations[nearby_idx][0],
                         point_cloud.locations[nearby_idx][1],
                         20*point_cloud.directions[nearby_idx][0],
                         20*point_cloud.directions[nearby_idx][1],
                         width=0.5, head_width=5, fc='b', ec='b',
                         head_length=10, overhang=0.5, **arrow_params)
    #ax.set_xlim([LOC[0]-R, LOC[0]+R])
    #ax.set_ylim([LOC[1]-R, LOC[1]+R])
    plt.show()
    return
    #track = tracks[track_idx]
    #count = 0
    #for pt in track.utm:
    #    if pt[0]>=LOC[0]-R and pt[0]<=LOC[0]+R \
    #       and pt[1]>=LOC[1]-R and pt[1]<=LOC[1]+R:
    #        count += 1
    #        # search nearby lines
    #        for line in lines:
    #            vec = np.array((line[1][0]-line[0][0], line[1][1]-line[0][1]))
    #            vec_norm = np.linalg.norm(vec)
    #            vec /= vec_norm
    #            vec1 = np.array((pt[0]-line[0][0], pt[1]-line[0][1]))
    #            vec2 = np.array((pt[0]-line[1][0], pt[1]-line[1][1]))
    #            if np.dot(vec1, vec2) < 0:
    #                norm_vec = np.array((-1.0*vec[1], vec[0]))
    #                dist = abs(np.dot(vec1, norm_vec))
    #                if dist < 10.0:
    #                    ax.plot([line[0][0], line[1][0]],
    #                            [line[0][1], line[1][1]],
    #                            '-', color=const.colors[count%7])
    ax.plot([pt[0] for pt in track.utm],
            [pt[1] for pt in track.utm],
            '.-r', linewidth=3)
    ax.plot(track.utm[0][0], track.utm[0][1], 'or')
    #for line in lines:
    #    ax.plot([line[0][0], line[1][0]],
    #            [line[0][1], line[1][1]],
    #            'r-')
    ax.set_xlim([LOC[0]-R, LOC[0]+R])
    ax.set_ylim([LOC[1]-R, LOC[1]+R])
    plt.show()
    return
def extract_tracks(input_directory, center, R, BBOX_WIDTH, BBOX_SW, BBOX_NE, MIN_PT_COUNT, N_DAY): input_directory = re.sub('\/$', '', input_directory) input_directory += '/' files = glob.glob(input_directory+'*.dat') if len(files) == 0: print "Error! Empty input directory: %s"%input_directory extracted_tracks = [] count = 0 for filename in files: print "Now processing ",filename input_tracks = gps_track.load_tracks(filename) for track in input_tracks: # Iterate over its point to_record = False for pt_idx in range(0, len(track.utm)): # Check if the point falls inside the bounding box delta_e = track.utm[pt_idx][0] - center[0] delta_n = track.utm[pt_idx][1] - center[1] dist = math.sqrt(delta_e**2 + delta_n**2) if dist <= R: to_record = True break if not to_record: continue recorded_track = gps_track.Track() is_recording = False for pt_idx in range(0, len(track.utm)): # Check if the point falls inside the bounding box if track.utm[pt_idx][0] >= BBOX_SW[0] and \ track.utm[pt_idx][0] <= BBOX_NE[0] and \ track.utm[pt_idx][1] >= BBOX_SW[1] and \ track.utm[pt_idx][1] <= BBOX_NE[1]: if not is_recording: # Start recording is_recording = True recorded_track.car_id = track.car_id if pt_idx > 0: recorded_track.add_point(track.utm[pt_idx-1]) recorded_track.add_point(track.utm[pt_idx]) else: # Append point recorded_track.add_point(track.utm[pt_idx]) else: # Point is outside the bounding box if is_recording: # Stop recording is_recording = False recorded_track.add_point(track.utm[pt_idx]) if len(recorded_track.utm) >= MIN_PT_COUNT: # Save the recorded track extracted_tracks.append(recorded_track) recorded_track = gps_track.Track() count += 1 if count == N_DAY: break return extracted_tracks
def main():
    """Clip tracks from daily files to a fixed test-region bounding box.

    argv: [1] input directory of '*.dat' track files, [2] output track file.
    Uses the hard-coded test-region index below; visualizes then saves the
    extracted tracks. Code after the first `return` is dead (kept from an
    earlier experiment).
    """
    if len(sys.argv) != 3:
        print "Error! Correct usage is:"
        print "\tpython extract_tracks_in_region.py [input_directory] [out_track_file]"
        return
    # Index for the test region, 0..9
    index = 4
    BBOX_SW = const.BB_SW[index]
    BBOX_NE = const.BB_NE[index]
    input_directory = re.sub('\/$', '', sys.argv[1])
    input_directory += '/'
    files = glob.glob(input_directory+'*.dat')
    if len(files) == 0:
        # NOTE(review): no return here -- execution continues with an empty
        # file list; confirm whether aborting was intended.
        print "Error! Empty input directory: %s"%input_directory
    extracted_tracks = []
    count = 0
    MIN_PT_COUNT = 4
    for filename in files:
        print "Now processing ",filename
        input_tracks = gps_track.load_tracks(filename)
        for track in input_tracks:
            # Iterate over its point
            is_recording = False
            recorded_track = gps_track.Track()
            for pt_idx in range(0, len(track.utm)):
                # Check if the point falls inside the bounding box
                if track.utm[pt_idx][0] >= BBOX_SW[0] and \
                   track.utm[pt_idx][0] <= BBOX_NE[0] and \
                   track.utm[pt_idx][1] >= BBOX_SW[1] and \
                   track.utm[pt_idx][1] <= BBOX_NE[1]:
                    if not is_recording:
                        # Start recording; keep one point of context before
                        # the box entry when available.
                        is_recording = True
                        recorded_track.car_id = track.car_id
                        if pt_idx > 0:
                            recorded_track.add_point(track.utm[pt_idx-1])
                        recorded_track.add_point(track.utm[pt_idx])
                    else:
                        # Append point
                        recorded_track.add_point(track.utm[pt_idx])
                else:
                    # Point is outside the bounding box
                    if is_recording:
                        # Stop recording; keep the first outside point as
                        # trailing context, save if long enough.
                        is_recording = False
                        recorded_track.add_point(track.utm[pt_idx])
                        if len(recorded_track.utm) >= MIN_PT_COUNT:
                            # Save the recorded track
                            extracted_tracks.append(recorded_track)
                        recorded_track = gps_track.Track()
        # Process at most 4 daily files -- presumably a debugging cap;
        # TODO confirm.
        count += 1
        if count == 4:
            break
    # Visualize extracted GPS tracks
    print "%d tracks extracted"%len(extracted_tracks)
    gps_track.visualize_tracks(extracted_tracks, bound_box = [BBOX_SW, BBOX_NE], style='.')
    gps_track.save_tracks(extracted_tracks, sys.argv[2])
    return
    # ---- dead code below (unreachable after the return above) ----
    input_tracks = gps_track.load_tracks(sys.argv[1])
    output_tracks = gps_track.extract_tracks_by_region(input_tracks, sys.argv[2], (const.SF_small_RANGE_SW, const.SF_small_RANGE_NE))
    # Visualization
    fig = plt.figure(figsize=(16,16))
    ax = fig.add_subplot(111, aspect='equal')
    for track in output_tracks:
        ax.plot([pt[0] for pt in track.utm],
                [pt[1] for pt in track.utm],
                '.-' )
    plt.show()
def main():
    """Map road segments to nearby sample points, then build a map from them.

    Options: -s pickled sample point cloud, -r pickled road segments,
    -t GPS track file, --test_case selects the hard-coded region center.
    Computes, for each road segment, the set of sample-point indices lying on
    it (direction within 30 degrees, perpendicular offset within the segment
    half-width), calls segment_graph_to_map, and returns. Everything after
    the first `return` is dead experimental code, preserved as-is.
    """
    parser = OptionParser()
    parser.add_option("-s", "--sample_point_cloud", dest="sample_point_cloud", help="Input sample point cloud filename", metavar="SAMPLE_POINT_CLOUD", type="string")
    parser.add_option("-r", "--road_segment", dest="road_segment", help="Input road segment filename", metavar="ROAD_SEGMENT", type="string")
    parser.add_option("-t", "--track", dest="tracks", help="Input GPS track file", metavar="TRACK_FILE", type="string")
    parser.add_option("--test_case", dest="test_case", type="int", help="Test cases: 0: region-0; 1: region-1; 2: SF-region.", default=0)
    (options, args) = parser.parse_args()
    if not options.sample_point_cloud:
        parser.error("Input sample_point_cloud filename not found!")
    if not options.road_segment:
        parser.error("Input road segment file not found!")
    if not options.tracks:
        parser.error("Input GPS Track file not specified.")
    R = const.R
    # Pick the region center for the requested test case.
    if options.test_case == 0:
        LOC = const.Region_0_LOC
    elif options.test_case == 1:
        LOC = const.Region_1_LOC
    elif options.test_case == 2:
        LOC = const.SF_LOC
    else:
        parser.error("Test case indexed %d not supported!"%options.test_case)
    with open(options.sample_point_cloud, 'rb') as fin:
        sample_point_cloud = cPickle.load(fin)
    with open(options.road_segment, 'rb') as fin:
        road_segments = cPickle.load(fin)
    tracks = gps_track.load_tracks(options.tracks)
    # Compute points on road segments
    sample_idx_on_roads = {}
    sample_pt_kdtree = spatial.cKDTree(sample_point_cloud.locations)
    SEARCH_RADIUS = 30.0
    ANGLE_THRESHOLD = np.pi / 6.0
    for seg_idx in range(0, len(road_segments)):
        segment = road_segments[seg_idx]
        sample_idx_on_roads[seg_idx] = set([])
        # Sample the segment's center line and gather KD-tree neighbors of
        # each sampled point within SEARCH_RADIUS.
        start_pt = segment.center - segment.half_length*segment.direction
        end_pt = segment.center + segment.half_length*segment.direction
        n_pt_to_add = int(1.5 * segment.half_length / SEARCH_RADIUS + 0.5)
        px = np.linspace(start_pt[0], end_pt[0], n_pt_to_add)
        py = np.linspace(start_pt[1], end_pt[1], n_pt_to_add)
        nearby_sample_idxs = []
        for i in range(0, n_pt_to_add):
            pt = np.array([px[i], py[i]])
            tmp_idxs = sample_pt_kdtree.query_ball_point(pt, SEARCH_RADIUS)
            nearby_sample_idxs.extend(tmp_idxs)
        nearby_sample_idxs = set(nearby_sample_idxs)
        for sample_idx in nearby_sample_idxs:
            # Reject samples whose direction deviates more than
            # ANGLE_THRESHOLD from the segment direction.
            if np.dot(sample_point_cloud.directions[sample_idx], segment.direction) < np.cos(ANGLE_THRESHOLD):
                continue
            # Keep samples whose perpendicular offset from the segment center
            # line is within the segment's half-width.
            vec = sample_point_cloud.locations[sample_idx] - segment.center
            if abs(np.dot(vec, segment.norm_dir)) <= segment.half_width:
                sample_idx_on_roads[seg_idx].add(sample_idx)
    segment_graph_to_map(tracks, road_segments, sample_point_cloud)
    return
    # ---- dead code below (unreachable after the return above) ----
    all_road_patches = []
    for selected_track_idx in range(0,100):
        print selected_track_idx
        road_patches = generate_road_patch_from_track(tracks[selected_track_idx], road_segments, sample_point_cloud, sample_idx_on_roads)
        all_road_patches.extend(road_patches)
    arrow_params = const.arrow_params
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    #ax.plot(sample_point_cloud.locations[:,0],
    #        sample_point_cloud.locations[:,1],
    #        '.', color='gray')
    count = 0
    for road_patch in all_road_patches:
        color = const.colors[count%7]
        count += 1
        polygon = road_patch.road_polygon()
        for i in np.arange(len(road_patch.center_line)-1):
            if np.linalg.norm(road_patch.directions[i]) < 0.1:
                continue
            ax.arrow(road_patch.center_line[i,0],
                     road_patch.center_line[i,1],
                     10*road_patch.directions[i,0],
                     10*road_patch.directions[i,1],
                     width=0.5, head_width=4, fc=color, ec=color,
                     head_length=6, overhang=0.5, **arrow_params)
        patch = PolygonPatch(polygon, facecolor=color, edgecolor=color, alpha=0.5, zorder=4)
        ax.add_patch(patch)
    ax.set_xlim([LOC[0]-R, LOC[0]+R])
    ax.set_ylim([LOC[1]-R, LOC[1]+R])
    plt.show()
    return
    track_on_road = project_tracks_to_road(tracks, road_segments)
    compute_segment_graph = False
    if compute_segment_graph:
        segment_graph = track_induced_segment_graph(tracks, road_segments)
        nx.write_gpickle(segment_graph, "test_segment_graph.gpickle")
    else:
        segment_graph = nx.read_gpickle("test_segment_graph.gpickle")
    # Find the node whose outgoing edges carry the highest total count.
    max_node_count = -np.inf
    max_node = -1
    for node in segment_graph.nodes():
        out_edges = segment_graph.out_edges(node)
        sum_val = 0.0
        for edge in out_edges:
            sum_val += segment_graph[edge[0]][edge[1]]['count']
        if sum_val > max_node_count:
            max_node_count = sum_val
            max_node = node
    print "Totally %d edges."%(len(segment_graph.edges()))
    #segment_graph = generate_segment_graph(road_segments)
    arrow_params = const.arrow_params
    fig = plt.figure(figsize=const.figsize)
    ax = fig.add_subplot(111, aspect='equal')
    ax.plot(sample_point_cloud.locations[:,0],
            sample_point_cloud.locations[:,1],
            '.', color='gray')
    #ax.plot([pt[0] for pt in selected_track.utm],
    #        [pt[1] for pt in selected_track.utm], 'r.-')
    #for track_idx in track_on_road[selected_seg_idx]:
    #    ax.plot([pt[0] for pt in tracks[track_idx].utm],
    #            [pt[1] for pt in tracks[track_idx].utm], '.')
    # Draw the max-count segment (red rectangle + direction arrow).
    segment = road_segments[max_node]
    p0 = segment.center - segment.half_length*segment.direction + segment.half_width*segment.norm_dir
    p1 = segment.center + segment.half_length*segment.direction + segment.half_width*segment.norm_dir
    p2 = segment.center + segment.half_length*segment.direction - segment.half_width*segment.norm_dir
    p3 = segment.center - segment.half_length*segment.direction - segment.half_width*segment.norm_dir
    ax.plot([p0[0], p1[0]], [p0[1],p1[1]], 'r-')
    ax.plot([p1[0], p2[0]], [p1[1],p2[1]], 'r-')
    ax.plot([p2[0], p3[0]], [p2[1],p3[1]], 'r-')
    ax.plot([p3[0], p0[0]], [p3[1],p0[1]], 'r-')
    arrow_p0 = segment.center - segment.half_length*segment.direction
    ax.arrow(arrow_p0[0],
             arrow_p0[1],
             2*segment.half_length*segment.direction[0],
             2*segment.half_length*segment.direction[1],
             width=4, head_width=20, fc='r', ec='r',
             head_length=40, overhang=0.5, **arrow_params)
    # Draw its successor segments (blue rectangles + direction arrows).
    for seg_idx in segment_graph.successors(max_node):
        segment = road_segments[seg_idx]
        p0 = segment.center - segment.half_length*segment.direction + segment.half_width*segment.norm_dir
        p1 = segment.center + segment.half_length*segment.direction + segment.half_width*segment.norm_dir
        p2 = segment.center + segment.half_length*segment.direction - segment.half_width*segment.norm_dir
        p3 = segment.center - segment.half_length*segment.direction - segment.half_width*segment.norm_dir
        ax.plot([p0[0], p1[0]], [p0[1],p1[1]], 'b-')
        ax.plot([p1[0], p2[0]], [p1[1],p2[1]], 'b-')
        ax.plot([p2[0], p3[0]], [p2[1],p3[1]], 'b-')
        ax.plot([p3[0], p0[0]], [p3[1],p0[1]], 'b-')
        arrow_p0 = segment.center - segment.half_length*segment.direction
        ax.arrow(arrow_p0[0],
                 arrow_p0[1],
                 2*segment.half_length*segment.direction[0],
                 2*segment.half_length*segment.direction[1],
                 width=4, head_width=20, fc='b', ec='b',
                 head_length=40, overhang=0.5, **arrow_params)
    #for i in np.arange(len(nearby_seg_idxs)):
    #    seg_idx = nearby_seg_idxs[i]
    #    segment = road_segments[seg_idx]
    #    arrow_p0 = segment.center - segment.half_length*segment.direction
    #    color = const.colors[i%7]
    #    #if segment_graph[selected_seg_idx][seg_idx]['weight'] == -1.0:
    #    #    color = 'g'
    #    ax.arrow(arrow_p0[0],
    #             arrow_p0[1],
    #             2*segment.half_length*segment.direction[0],
    #             2*segment.half_length*segment.direction[1],
    #             width=2, head_width=10, fc=color, ec=color,
    #             head_length=20, overhang=0.5, **arrow_params)
    ax.set_xlim([LOC[0]-R, LOC[0]+R])
    ax.set_ylim([LOC[1]-R, LOC[1]+R])
    plt.show()
    return