def processCommuteData(chunk_size=100):
    """Fetch commute data for every MA town and write it to the master file.

    Towns are requested in batches of ``chunk_size`` origins per call
    (presumably an API per-request origin limit -- confirm against the
    Distance Matrix docs), each response is parsed into a DataFrame, and
    the concatenated result is stored under the 'Commute-Data' key in the
    master spreadsheet at ``commuteDataLocation``.

    :param chunk_size: maximum number of towns per getCommuteData call
        (default 100, matching the original hard-coded batch size).
    """
    print(' Started Processing Commute Data')
    fileName = "Master-Commute_Data-2015"
    entries = OrderedDict()
    towns = getMATowns()
    frames = []
    # Need to separate the list of towns into chunks of chunk_size.
    for start in range(0, len(towns), chunk_size):
        batch = towns[start:start + chunk_size]
        # Pipe-delimited origins string, e.g. "Boston, MA|Salem, MA".
        # join() avoids the quadratic +=-then-trim pattern.
        origins = '|'.join(town + ', MA' for town in batch)
        commuteData = getCommuteData(origins)
        frames.append(parseCommuteData(commuteData))
    entries['Commute-Data'] = pd.concat(frames, ignore_index=True)
    populateMaster(os.path.join(commuteDataLocation, fileName + ext), entries)
    print(' Done Processing Commute Data\n')
else: currMin = str(currMin) # # # Add a leading 0 when hours is < 10 # if currHour < 10: # currHour = '0'+str(currHour) # else: # currHour = str(currHour) currTime = currHour+':'+currMin if currTime in morningTimes or currTime in afternoonTimes: print 'Processing for time ', currTime date = str(datetime.datetime.now().date()) if currTime in morningTimes: commuteData = getCommuteData(towns, 'now', work) for row in range(len(commuteData["rows"])): origin = str(commuteData['origin_addresses'][row]).split(',')[0] destination = commuteData['destination_addresses'][0] duration = convertToMin(str(commuteData['rows'][row]['elements'][0]['duration']['text'])) duration_in_traffic = convertToMin(str(commuteData['rows'][row]['elements'][0]['duration_in_traffic']['text'])) distance = str(commuteData['rows'][row]['elements'][0]['distance']['text']).split(' ')[0] data[origin]['date'] = date data[origin]['day'] = weekDay data[origin]['times'][currTime]['dur'] = duration_in_traffic data[origin]['times'][currTime]['dist'] = distance if duration_in_traffic > data[origin]['max']: data[origin]['max'] = duration_in_traffic if duration_in_traffic < data[origin]['min']: