def fetch_info_and_write(rs, db1, db2, output_csv, lvl3_at, lvl3_bt, lvl3_ad, lvl3_bd, lvl3_speed):
    for each_ozcta in rs.data:
        temp_t = tc.timer()
        n0 = len(rs.data[each_ozcta])
        # Level 1 lookup: pairs not found in db1 are returned as leftover.
        lv11_leftover = get_data_from_lv1(each_ozcta, rs.data[each_ozcta], db1, output_csv)
        n1 = n0 - len(lv11_leftover)
        n10 = len(lv11_leftover)
        temp_t.lap()
        # Level 2 lookup on the level-1 leftovers (db2 is accepted by this function but not used here).
        lvl2_leftover = get_data_from_lv2(each_ozcta, lv11_leftover, db1, output_csv)
        n3 = len(lvl2_leftover)
        n2 = n10 - n3
        temp_t.lap()
        # Level 3: estimate whatever is still missing from the time/distance parameters.
        cal_data_lv3(each_ozcta, lvl2_leftover, lvl3_at, lvl3_bt, lvl3_ad, lvl3_bd, output_csv, lvl3_speed)
        temp_t.lap()
        print("lvl1:{0}s - {3}\nlvl2:{1}s - {4}\nlvl3:{2}s - {5}\n\n".format(
            temp_t.time_int[0], temp_t.time_int[1], temp_t.time_int[2], n1, n2, n3))
    rs.reset()
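# The timing above relies on tc.timer() from timer_class, whose source is not shown
# in this chunk. Below is a minimal sketch of a compatible class, assuming only the
# interface actually used here (lap() records per-lap durations into time_int, in
# seconds); everything beyond those two names is an assumption, not the real module.
import time

class timer:
    def __init__(self):
        self._last = time.time()
        self.time_int = []  # elapsed seconds for each completed lap

    def lap(self):
        # Append the seconds elapsed since the previous lap (or since construction).
        now = time.time()
        self.time_int.append(now - self._last)
        self._last = now
        return self.time_int[-1]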
arcpy.env.workspace = inputfolder
allshps = arcpy.ListFeatureClasses()
usable_shp = []
for each_shp in allshps:
    usable_shp.append(inputfolder + '\\' + each_shp)

# Create the output geodatabases if they do not exist yet.
if not os.path.exists(ODfolder + '\\error_shp.gdb'):
    error_gdb = arcpy.CreateFileGDB_management(ODfolder, 'error_shp.gdb')
if not os.path.exists(ODfolder + '\\OD_cost_out.gdb'):
    arcpy.CreateFileGDB_management(ODfolder, 'OD_cost_out.gdb')

arcpy.SetProgressor("step", "Generating OD cost matrix...")
totaln = len(usable_shp)
watch = tc.timer()
n = 0
while n < totaln:
    u_shp = usable_shp[n]
    # for i in range(0, 6):
    #     if n < len(shps_list_list[i]):
    #         u_shp.append(shps_list_list[i][n])
    arcpy.AddMessage("Working on " + str(u_shp) + "...")
    progress = int(float(n) / totaln * 100)
    arcpy.SetProgressorPosition(progress)
    # arcpy.AddMessage(str(argv))
    # get_ODCost(provider_file, u_shp, n, na_layer, ODfolder, smp, tempfolder, log_file, orgID, desID, cost, accost, restrict, hier, max_time_limit)
"\\" + dzcta2 + ".data", "a") fout.write(current_io[ozcta][dzcta2]) fout.close() f = open(input_csv, "r") field_name = f.readline()[0:-1].split(",") current_io = {} previous_ozcta = "" if lvl == 2: previous_dzcta2 = "" out_io = "" i = 0 t = timer.timer() row = f.readline() while row: row_content = row[0:-1].split(",") ozcta = row_content[field_name.index("OZCTA")] dzcta = row_content[field_name.index("DZCTA")] if ozcta == dzcta: row = f.readline() continue if lvl == 1: if previous_ozcta != ozcta: previous_ozcta = ozcta if sys.getsizeof(current_io) > 300 * 1000: write_current_io(current_io, lvl) del current_io current_io = {}
import os
import sys

import arcpy

sys.path.append(os.path.split(os.path.realpath(__file__))[0])
import timer_class as tc

zcta_file = arcpy.GetParameterAsText(0)
selected_states = arcpy.GetParameterAsText(1)
output_folder = arcpy.GetParameterAsText(2)

# Build the output file name from the selected state names.
output_file = output_folder + "\\"
selected_states = selected_states.replace("\'", "").split(";")
for each_state in selected_states:
    output_file += each_state.replace(" ", "_") + "_"
output_file += "2010_zctas_pairs.csv"
arcpy.AddMessage(output_file)

t1 = tc.timer()
arcpy.AddMessage("Gathering all zctas from the selected states...")
zctas = {}
states = {}
sc = arcpy.SearchCursor(zcta_file)
row = sc.next()
while row:
    state = row.getValue("NAME10")
    if state in selected_states:
        # Keep the centroid coordinates of every ZCTA in a selected state.
        zctas[row.getValue("ZCTA5CE10")] = [
            row.getValue("POINT_X"),
            row.getValue("POINT_Y")
        ]
        if state not in states:
            states[state] = 1
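# The gather loop above uses the classic arcpy.SearchCursor / row.getValue interface.
# On ArcGIS 10.1+ the same dictionaries can be built with arcpy.da.SearchCursor,
# which is typically faster; a sketch, assuming the same field names as the script:
fields = ["NAME10", "ZCTA5CE10", "POINT_X", "POINT_Y"]
with arcpy.da.SearchCursor(zcta_file, fields) as cursor:
    for state, zcta_id, x, y in cursor:
        if state in selected_states:
            zctas[zcta_id] = [x, y]
            states.setdefault(state, 1)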
sys.path.append(r"C:\Users\lirui\OneDrive\Desktop\OD_git\LargeODcost") import timer_class as timer #input_csv = r"D:\US_Estimated_ODMatrix\US_Estimated_ODMatrix\US_ODMatrix_3_3-6hours_Division\NA_OD_3hours.csv" input_csv = r"D:\US_Estimated_ODMatrix\US_Estimated_ODMatrix\US_ODMatrix_3_3-6hours_Division\NA_OD_3-6hours.csv" #output_folder = r"D:\US_Estimated_ODMatrix\US_OD_data_plain\hour03" output_folder = r"D:\US_Estimated_ODMatrix\US_OD_data_plain\hour36" lvl = 2 f = open(input_csv, "r") field_name = f.readline()[0:-1].split(",") on = 0 ot = 0 t = timer.timer() row = f.readline() while row: row_content = row[0:-1].split(",") ozcta = row_content[field_name.index("OZCTA")] dzcta = row_content[field_name.index("DZCTA")] if ozcta == dzcta: ot += 1 else: on += 1 if on % 10000 == 0 and on > 0: print(on) row = f.readline() f.close() t.lap()