return result


def parse_args():
    """
    :returns: arguments
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('image_dir_path', help='Path to directory that contains images')
    parser.add_argument('chunk_count', help='Number of chunks to split input paths into', type=int)
    parser.add_argument('process_chunk', help='Chunk that will be processed', type=int)
    parser.add_argument('output_name', help='Custom name added to the output')
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    image_dir_path = args.image_dir_path
    chunk_count = args.chunk_count
    process_chunk = args.process_chunk
    output_name = args.output_name
    debug_output_path = './output/detection_debug'

    detections = detect_traffic_signs(image_dir_path,
                                      chunk_count=chunk_count,
                                      process_chunk=process_chunk,
                                      debug_output_path=debug_output_path)

    detections_output_path = './detections_{}_chunk_{}_of_{}.pickle'.format(output_name, process_chunk, chunk_count)
    util.pickle_save(detections_output_path, detections)
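
# Hypothetical invocation (the script name is an assumption; it is not given
# in the example): detect signs in ./images, splitting the work into 4 chunks
# and processing chunk 0, with "run1" added to the output filename:
#
#   python detect_signs.py ./images 4 0 run1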
Example 2

import glob
from os.path import join, basename

import util


def get_pickle_files(dir_path):
    return sorted(glob.glob(join(dir_path, '*.pickle')))


DATASET_NAME = '10_right'
DIR_PATH = join('./output/scores/', DATASET_NAME)
OUTPUT_PATH = join('./output/scores/merged', DATASET_NAME + '.pickle')

pickle_files = get_pickle_files(DIR_PATH)

result = {}

for pickle_file in pickle_files:
    image_name = basename(pickle_file)[:-len('.pickle')]
    scores = util.pickle_load(pickle_file)
    result[image_name] = scores

#print(result[list(result.keys())[0]].shape)

util.pickle_save(OUTPUT_PATH, result)
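
# The util module itself is not shown in these examples. Below is a minimal
# sketch of the pickle helpers the two scripts above rely on, assuming the
# (path, obj) argument order they use; note that the examples further down
# pass (obj, path) instead, so each project's util module defines its own
# convention.
import pickle


def pickle_save(path, obj):
    # Serialize obj to path with the standard pickle module
    with open(path, 'wb') as f:
        pickle.dump(obj, f)


def pickle_load(path):
    # Load and return the object stored at path
    with open(path, 'rb') as f:
        return pickle.load(f)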
Example 3

#!/usr/bin/python3
# -*- coding: utf-8 -*-

from util import pickle_save
from database import ParkrunDB

#norwich_db = ParkrunDB("norwich", 440, 441)
#norwich_db.update()
#pickle_save(norwich_db, 'norwich_parkrun_2019.db')

colneylane_db = ParkrunDB("colneylane", 56, 57)
colneylane_db.update()
pickle_save(colneylane_db, 'colneylane_parkrun_2019.db')
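
# A sketch of reading the saved database back later, assuming util also
# provides a matching pickle_load (not shown in this example):
#
#   colneylane_db = pickle_load('colneylane_parkrun_2019.db')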
Example 4

# read_csv below comes from pandas; util provides the pickle helpers
from pandas import read_csv

import util


def gettripIds(index=0):
    tripIDs = []
    # For each route,
    for routeID, routeTrips in weekdayTrips.groupby('route_id'):
        # Pick a trip
        tripIDs.append(routeTrips.trip_id.values[index])
    return tripIDs
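

# make_graph is not defined in this example. A minimal sketch of what it
# might do, assuming the intent is a directed graph whose edges link
# consecutive stops of each selected trip (networkx and the
# stop_sequence/stop_id columns of GTFS stop_times are assumptions; this is
# not the original implementation):
def make_graph(tripIDs):
    import networkx as nx
    graph = nx.DiGraph()
    for tripID in tripIDs:
        # Order this trip's stops by their sequence number
        stopSeq = weekdayTimes[weekdayTimes.trip_id == tripID].sort_values('stop_sequence')
        stopIDs = stopSeq.stop_id.tolist()
        # Connect each stop to the next one on the trip
        graph.add_edges_from(zip(stopIDs, stopIDs[1:]))
    return graph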

if __name__ == "__main__":

    calendar = read_csv('subwaydata/google_transit/calendar.txt')
    routes = read_csv('subwaydata/google_transit/routes.txt')
    trips = read_csv('subwaydata/google_transit/trips.txt')
    times = read_csv('subwaydata/google_transit/stop_times.txt')
    stops = read_csv('subwaydata/google_transit/stops.txt')

    # A list (not a lazy filter object) so it can be reused safely with isin
    weekdayServiceIDs = [x for x in calendar.service_id
                         if x.endswith('WKD')]
    routeNameByID = {x['route_id']:
                     x['route_long_name'] for index, x in routes.iterrows()}
    weekdayTrips = trips[trips.service_id.isin(weekdayServiceIDs)]
    print(len(weekdayTrips))
    weekdayTimes = times[times.trip_id.isin(weekdayTrips.trip_id.unique())]

    tripIDs = gettripIds()
    graph = make_graph(tripIDs)
    util.pickle_save(graph, 'subwaydata/NYCsubway_network_graph.pkl')
Example 5

import pandas as pd
import util
from matplotlib.pyplot import plot_date, show

# neighborhooddict (a mapping of neighborhood name -> list of zipcodes) is
# assumed to be defined earlier in the original script.

totalzipcodes = 0
missing = 0
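

def getData(zipcode):
    # Hypothetical stub: the original getData is not shown here. It is
    # assumed to return a time-indexed pandas Series/DataFrame of values for
    # the given zipcode, or None when no data is available.
    raise NotImplementedError('getData is defined elsewhere in the original script')
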
fulldata = None
for neighborhood in neighborhooddict:
    for zipcode in neighborhooddict[neighborhood]:
        totalzipcodes += 1
        tmpdata = getData(zipcode)
        if tmpdata is None:
            missing += 1
            continue
        # Save the per-zipcode data, then fold it into the combined frame
        filename = 'data/%s_%s.pkl' % (neighborhood, zipcode)
        util.pickle_save(tmpdata, filename)
        if fulldata is None:
            # The first zipcode with data seeds the combined DataFrame
            fulldata = tmpdata
        else:
            fulldata = pd.concat([fulldata, tmpdata], axis=1)

print('Missing %d zipcodes of %d total' % (missing, totalzipcodes))

# Plot each zipcode's series, then the cross-zipcode mean on top
for zipcode in fulldata.keys():
    plot_date(fulldata[zipcode].index,
              fulldata[zipcode], alpha=0.7, fmt='.')

plot_date(fulldata.index, fulldata.mean(axis=1))
show()