from csv import reader
from re import compile, search
from time import sleep

# BuildURL, scrape_headers, and require_api are defined elsewhere in this project.


def parse_csv(csv_file, verbose):
    """Builds urls based on street address and city.

    Args:
        csv_file: A csv file passed in as a cli argument.
        verbose: Print each url as it is built.

    Returns:
        A list of properly formatted urls for querying lat/lng using Google
        Geocode, and the index of the census tract header.
    """
    get_url = BuildURL("https://maps.googleapis.com/maps/api/geocode/json?address=", state="GA")
    street = compile(r"\d+\s\w.*")
    msg = ("\nWould you like to use an api key this time? If not you will be prompted to add one as the query limit\n"
           "is reached. 2500 queries will run without one just fine: y/n ")

    with open(csv_file, 'r') as open_csv:
        csv_stream_object = reader(open_csv)
        headers = next(csv_stream_object, None)
        city_header_index, tract_header_index = scrape_headers(headers)
        google_api_key = require_api(msg)
        print("\nPlease wait, generating coordinate urls...\n")
        sleep(1)
        address = {'street': None, 'city': None}
        list_geocode_urls, list_of_cities = [], []
        for row in csv_stream_object:
            try:
                # Build a list of possible serving territories from the city
                # found at the index position of the 'Tax City' header.
                list_of_cities.append(row[city_header_index])
            except (IndexError, TypeError) as err:
                print("Error: {}. You must have at least 'Tax City', 'Census Tract'"
                      " and 'Service Address' headers in your file.".format(err))
            for field in row:
                if search(street, field):  # a field starting with digits is the street address
                    address['street'] = field
            for city in set(list_of_cities):  # assign a city for the query from the dynamic list
                if city in row:
                    address['city'] = city
            list_geocode_urls.append(get_url.geo_url(address, verbose, api_key=google_api_key))
    print("Done...\n")
    return list_geocode_urls, tract_header_index
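# A minimal usage sketch, not part of the project (the filename and flag are
# hypothetical; BuildURL, scrape_headers, and require_api must already be
# importable for this to run):
#
#     geocode_urls, tract_index = parse_csv("service_addresses.csv", verbose=True)
#     for url in geocode_urls:
#         print(url)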
import argparse
import sys

import drive_quick_look
import scrape_headers


def parse_args():
    parser = argparse.ArgumentParser(
        description="scrapes headers and makes quicklooks for everything in 'targets.list' file")
    parser.add_argument('targets', metavar='list', type=str, action='store',
                        help="""targets.list is the file to be read in;
                                first column = flag (0,1) if target is to be used,
                                second column = target/directory name""")
    parser.add_argument('--clobber', dest='clobber', action='store_true')
    parser.add_argument('--no-clobber', dest='clobber', action='store_false',
                        help="default is no clobbering")
    parser.add_argument('--altnames', metavar='altnames', type=str, default='blank',
                        action='store',
                        help="altnames is the file containing the NED names list")
    # A plain flag: 'type=bool' with action='store' would parse any non-empty
    # string, even "False", as True.
    parser.add_argument('--redshifts', dest='redshifts', action='store_true',
                        help="include redshifts; default is not to")
    parser.set_defaults(clobber=False, redshifts=False)
    args = parser.parse_args()
    return args

#-----------------------------------------------------------------------------------------------------

if __name__ == "__main__":
    args = parse_args()
    targets = (args.targets, args.clobber)
    scrape_headers.scrape_headers(args.targets, args.altnames, args.redshifts)
    drive_quick_look.drive_quick_look(targets)
    sys.exit("""
    ~~~~~~~*~*~*~*~~~~~~~~
    ~*~*~*~*~~~~~~~~~~~~~
    ~~~~~~~*~*~*~*~~~~~~
    all done!!!! spectra are fun!
    ~~~~~~~~~~~~~~~~~~~
    ~~~~~~~*~*~*~*~~~~~~~~
    ~*~*~*~*~~~~~~~~~~~~~
    """)
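# Example invocation (the script and list filenames are illustrative, not
# from the project):
#
#     python make_quicklooks.py targets.list --altnames ned_names.list --redshifts --clobber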
"scrapes headers and makes quicklooks for everything in 'targets.list' file") parser.add_argument('targets', metavar='list', type=str, action='store', help="""targets.list is the file to be read in; first column = flag (0,1) if target is to be used, second column = target/directory name""") parser.add_argument('--clobber', dest='clobber', action='store_true') parser.add_argument('--no-clobber', dest='clobber', action='store_false', help="default is no clobbering") parser.set_defaults(clobber=False) args = parser.parse_args() return args #----------------------------------------------------------------------------------------------------- if __name__ == "__main__": args = parse_args() targets = (args.targets, args.clobber) scrape_headers.scrape_headers(args.targets) drive_quick_look.drive_quick_look(targets) sys.exit(""" ~~~~~~~*~*~*~*~~~~~~~~ ~*~*~*~*~~~~~~~~~~~~~ ~~~~~~~*~*~*~*~~~~~~ all done!!!! spectra are fun! ~~~~~~~~~~~~~~~~~~~ ~~~~~~~*~*~*~*~~~~~~~~ ~*~*~*~*~~~~~~~~~~~~~ """)