def process_commandline():
    """Parse command-line flags and create the requested dataset images.

    Reads the dataset (JSON) via cmdline.read_json() and, for each image
    flag passed, writes a PNG and prints its file name so shell pipelines
    can pick it up.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-ap', '--all-positions-image', action='store_true',
                        help='create image of all vehicle positions in the dataset')
    # Fix: the two trip flags previously shared an identical help string,
    # making --help output ambiguous. Differentiate them to match the
    # graph functions they invoke.
    parser.add_argument('-atl', '--all-trips-lines-image', action='store_true',
                        help='create image of lines of all trips in the dataset')
    parser.add_argument('-atp', '--all-trips-points-image', action='store_true',
                        help='create image of origin and destination points'
                             ' of all trips in the dataset')
    parser.add_argument('--symbol', type=str, default='.',
                        help='matplotlib symbol to indicate vehicles on the images'
                             ' (default \'.\', larger \'o\')')
    args = parser.parse_args()

    result_dict = cmdline.read_json()

    if args.all_positions_image:
        output_file = output_file_name('all_positions', 'png')
        graph.make_positions_graph(result_dict, output_file, args.symbol)
        print(output_file)

    if args.all_trips_lines_image:
        output_file = output_file_name('all_trips', 'png')
        graph.make_trips_graph(result_dict, output_file)
        print(output_file)

    if args.all_trips_points_image:
        output_file = output_file_name('all_trips_points', 'png')
        graph.make_trip_origin_destination_graph(result_dict, output_file,
                                                 args.symbol)
        print(output_file)
def process_commandline():
    """Parse flags, render per-timestep map frames, and print how to animate them.

    Reads the dataset (JSON) via cmdline.read_json(), generates one image
    per time step with video.make_video_frames, then prints the command
    needed to stitch the frames into an animation.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-tz', '--tz-offset', type=float, default=0,
                        help='offset times by TZ_OFFSET hours')
    # default=False acts as a falsy "not requested" sentinel for a float flag
    parser.add_argument('-d', '--distance', type=float, default=False,
                        help='highlight DISTANCE meters around each car on map')
    parser.add_argument('--trips', action='store_true',
                        help='show lines indicating vehicles\' trips')
    parser.add_argument('--speeds', action='store_true',
                        help='show vehicles\' speeds in addition to locations')
    parser.add_argument('--symbol', type=str, default='.',
                        help='matplotlib symbol to indicate vehicles on the images'
                             ' (default \'.\', larger \'o\')')
    args = parser.parse_args()

    dataset = cmdline.read_json()
    metadata = dataset['metadata']
    prefix = output_file_name(metadata['city'])

    frame_iter = video.make_video_frames(
        dataset, prefix, args.distance, args.trips, args.speeds,
        args.symbol, args.tz_offset)

    # make_video_frames is lazy: consuming the generator is what actually
    # writes the images. Wrap it in tqdm for a progress bar sized from the
    # expected frame count.
    expected_span = metadata['ending_time'] - metadata['starting_time']
    expected_frames = expected_span.total_seconds() / metadata['time_step']
    frames = list(tqdm(frame_iter, total=expected_frames, leave=False))

    # Tell the user how to turn the generated frames into a video.
    animate_command = video.make_animate_command(dataset, prefix, len(frames))
    print('\nto animate:')
    print(animate_command)
def process_commandline():
    """Parse flags, compute dataset statistics, and write them to a CSV file.

    Reads the dataset (JSON) via cmdline.read_json() and delegates the
    actual computation to stats.stats.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-tz', '--tz-offset', type=float, default=0,
                        help='offset times when days are split by TZ_OFFSET hours')
    args = parser.parse_args()

    dataset = cmdline.read_json()
    csv_name = output_file_name('stats', 'csv')
    stats.stats(dataset, csv_name, args.tz_offset)
    # Echo the output file name so callers can easily reuse it.
    print(csv_name)
def process_commandline():
    """Filter a dataset down to a single vehicle and write the result back out.

    Reads the dataset (JSON) via cmdline.read_json(), applies by_vehicle
    with the requested selector, and emits the filtered dataset with
    cmdline.write_json().
    """
    parser = argparse.ArgumentParser()
    # default=False acts as a falsy "no filter requested" sentinel
    parser.add_argument(
        '-v', '--by-vehicle', type=str, default=False,
        help='filter all results to only include data for one vehicle; '
             'accepts VINs, "random", "most_trips", "most_distance", and "most_duration".')
    args = parser.parse_args()

    source = cmdline.read_json()
    # TODO: add more filters, like filtering by timeframe, latlng, etc
    # though it might be easier to provide a harness and have the filter
    # functions be per-analysis-project - at least for now until I figure
    # out what the most used filters are
    filtered = by_vehicle(source, args.by_vehicle)
    cmdline.write_json(filtered)
def process_commandline():
    """Parse command-line flags and create the requested dataset images.

    Reads the dataset (JSON) via cmdline.read_json() and, for each image
    flag passed, writes a PNG and prints its file name so shell pipelines
    can pick it up.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-ap', '--all-positions-image', action='store_true',
        help='create image of all vehicle positions in the dataset')
    # Fix: the two trip flags previously shared an identical help string,
    # making --help output ambiguous. Differentiate them to match the
    # graph functions they invoke.
    parser.add_argument(
        '-atl', '--all-trips-lines-image', action='store_true',
        help='create image of lines of all trips in the dataset')
    parser.add_argument(
        '-atp', '--all-trips-points-image', action='store_true',
        help='create image of origin and destination points'
             ' of all trips in the dataset')
    parser.add_argument(
        '--symbol', type=str, default='.',
        help='matplotlib symbol to indicate vehicles on the images'
             ' (default \'.\', larger \'o\')')
    args = parser.parse_args()

    result_dict = cmdline.read_json()

    if args.all_positions_image:
        output_file = output_file_name('all_positions', 'png')
        graph.make_positions_graph(result_dict, output_file, args.symbol)
        print(output_file)

    if args.all_trips_lines_image:
        output_file = output_file_name('all_trips', 'png')
        graph.make_trips_graph(result_dict, output_file)
        print(output_file)

    if args.all_trips_points_image:
        output_file = output_file_name('all_trips_points', 'png')
        graph.make_trip_origin_destination_graph(result_dict, output_file,
                                                 args.symbol)
        print(output_file)
def process_commandline():
    """Parse flags, generate map frames for each time step, and print the animate command.

    Reads the dataset (JSON) via cmdline.read_json(). Frame generation is
    delegated to video.make_video_frames; the resulting generator is fully
    consumed here so the images are actually written to disk.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-tz', '--tz-offset', type=float, default=0,
                        help='offset times by TZ_OFFSET hours')
    # default=False is a falsy "not requested" sentinel for this float flag
    parser.add_argument('-d', '--distance', type=float, default=False,
                        help='highlight DISTANCE meters around each car on map')
    parser.add_argument('--trips', action='store_true',
                        help='show lines indicating vehicles\' trips')
    parser.add_argument('--speeds', action='store_true',
                        help='show vehicles\' speeds in addition to locations')
    parser.add_argument('--symbol', type=str, default='.',
                        help='matplotlib symbol to indicate vehicles on the images' +
                             ' (default \'.\', larger \'o\')')
    args = parser.parse_args()

    data = cmdline.read_json()
    meta = data['metadata']
    filename_prefix = output_file_name(meta['city'])

    generator = video.make_video_frames(
        data, filename_prefix, args.distance, args.trips,
        args.speeds, args.symbol, args.tz_offset)

    # Estimate the frame count from the dataset's time span so tqdm can
    # show a meaningful progress bar while the generator is evaluated.
    span = meta['ending_time'] - meta['starting_time']
    estimated_total = span.total_seconds() / meta['time_step']
    images = list(tqdm(generator, total=estimated_total, leave=False))

    # Print the command the user should run to animate the frames.
    command = video.make_animate_command(data, filename_prefix, len(images))
    print('\nto animate:')
    print(command)
def process_commandline():
    """Filter the input dataset to a single vehicle and emit the result.

    Reads JSON via cmdline.read_json(), selects one vehicle's data with
    by_vehicle, and writes the filtered dataset with cmdline.write_json().
    """
    parser = argparse.ArgumentParser()
    # default=False is a falsy "no filter requested" sentinel
    parser.add_argument(
        '-v', '--by-vehicle', type=str, default=False,
        help='filter all results to only include data for one vehicle; '
             'accepts VINs, "random", "most_trips", "most_distance", and "most_duration".')
    args = parser.parse_args()

    full_data = cmdline.read_json()

    # TODO: add more filters, like filtering by timeframe, latlng, etc
    # though it might be easier to provide a harness and have the filter
    # functions be per-analysis-project - at least for now until I figure
    # out what the most used filters are
    selected = by_vehicle(full_data, args.by_vehicle)
    cmdline.write_json(selected)