def get_measurements_list(start_ts, stop_ts, mt_num):
    """Fetch the list of public IPv4 traceroute measurements from RIPE Atlas.

    Pages through the /measurements/ endpoint (500 entries per page); the
    first page is fetched synchronously to learn the page count, remaining
    pages are fetched in parallel worker threads.

    Args:
        start_ts: lower bound for the measurement start time (API filter).
        stop_ts: upper bound for the measurement stop time (API filter).
        mt_num: thread count handed to multi_thread.run_with_multi_thread.

    Returns:
        A list combining the "results" entries of every fetched page.
    """
    page_size = 500
    measurements_url = utils.url_join(
        [utils.ripeatlas_base_url, "/measurements/"])
    params = {}
    params["format"] = "json"
    params["page_size"] = str(page_size)
    params["is_public"] = "true"
    params["type"] = "traceroute"
    params["af"] = "4"
    params["start_time__gte"] = start_ts
    # BUG FIX: the API filter is "stop_time__lte" (double underscore, same
    # scheme as "start_time__gte" above); the single-underscore key was an
    # unknown parameter and the stop-time bound was silently ignored.
    params["stop_time__lte"] = stop_ts
    #resource list.
    resources = ['']
    result_list = []
    #first page
    url = utils.construct_url(measurements_url, params)
    page = download_ripe_atlas_list_worker(url)
    page_num = int(math.ceil(float(page["count"]) / page_size))
    result_list.extend(page["results"])
    # One slot per page index; each worker stores its page's results into
    # temp_list[i] (slots are pre-filled with "" placeholders).
    temp_list = ["" for i in range(page_num + 1)]
    #build argv_list
    argv = []
    #for i in range(2,page_num+1):
    for i in range(2, 3):  #debug: only page 2; restore the full range above
        params["page"] = str(i)
        url = utils.construct_url(measurements_url, params)
        arg = (url, temp_list, i)
        argv.append(arg)
    #run with multi thread.
    multi_thread.run_with_multi_thread(download_ripe_atlas_list_wrapper,
                                       argv, resources, mt_num)
    # Unfetched pages keep their "" placeholder; extending with "" adds
    # no elements, so this is safe even under the debug page limit.
    for i in range(2, page_num + 1):
        result_list.extend(temp_list[i])
    return result_list
def download_date(date, file_path, seg_size=20 * 1024 * 1024, mt_num=-1):
    """Download the iPlane dump for `date` to `file_path` in parallel.

    The remote file is split into `seg_size`-byte ranges which are fetched
    concurrently into `file_path.<idx>` pieces and then reassembled.

    Args:
        date: date used to build the download URL.
        file_path: destination path for the assembled file.
        seg_size: byte size of each download segment (default 20 MiB).
        mt_num: thread count handed to multi_thread.run_with_multi_thread.
    """
    utils.touch(file_path)
    url = construct_url_fromtime(date)
    opener = utils.get_iplane_opener()
    # Probe the remote size first; -1 signals failure, nothing to fetch.
    file_size = get_iplane_file_size(url, opener)
    if file_size == -1:
        return
    file_num = int(math.ceil(float(file_size) / seg_size))
    if file_num == 0:
        return
    # file_num - 1 full-sized ranges, plus a final range running to
    # file_size. For a single-segment file this is just (0, file_size).
    range_list = [(k * seg_size, (k + 1) * seg_size - 1)
                  for k in range(file_num - 1)]
    range_list.append(((file_num - 1) * seg_size, file_size))
    #resources
    resources = ['']
    # One worker argument tuple per segment: (url, lo, hi, part path, opener).
    argv = []
    for idx, (lo, hi) in enumerate(range_list):
        argv.append((url, lo, hi, file_path + '.' + str(idx), opener))
    #run with multi thread.
    multi_thread.run_with_multi_thread(download_iplane_restricted_wrapper,
                                       argv, resources, mt_num)
    #assemble segements.
    assemble_segements(file_path)
def download_file(url, file_path, resources, mt_num,
                  seg_size=20 * 1024 * 1024):
    """Download a CAIDA ITDK file to `file_path` using ranged requests.

    Splits the remote file into `seg_size`-byte ranges, fetches them
    concurrently into `file_path.<idx>` pieces, then reassembles them.

    Args:
        url: remote file URL.
        file_path: destination path for the assembled file.
        resources: resource list shared with the multi-thread runner.
        mt_num: thread count handed to multi_thread.run_with_multi_thread.
        seg_size: byte size of each download segment (default 20 MiB).
    """
    opener = utils.get_caida_opener(utils.caida_itdk_base_url)
    # Probe the remote size first; -1 signals failure, nothing to fetch.
    file_size = get_caida_file_size(url, opener)
    if file_size == -1:
        return
    utils.log('file_size: ' + str(file_size))
    file_num = int(math.ceil(float(file_size) / seg_size))
    if file_num == 0:
        return
    # file_num - 1 full-sized ranges, plus a final range running to
    # file_size. For a single-segment file this is just (0, file_size).
    range_list = [(k * seg_size, (k + 1) * seg_size - 1)
                  for k in range(file_num - 1)]
    range_list.append(((file_num - 1) * seg_size, file_size))
    # One worker argument tuple per segment: (url, lo, hi, part path, opener).
    argv = []
    for idx, (lo, hi) in enumerate(range_list):
        argv.append((url, lo, hi, file_path + '.' + str(idx), opener))
    #run with multi thread.
    multi_thread.run_with_multi_thread(download_caida_restricted_wrapper,
                                       argv, resources, mt_num)
    #assemble segements.
    assemble_segements(file_path)
# NOTE(review): fragment — the enclosing "def" is outside this view; it
# relies on url_list / temp_list / result_list / file_path / mt_num being
# bound by the missing function header.
#destination file_path.
utils.touch(file_path)
#resources.
resources = ['']
#build argv
argv = []
#for i in range(len(url_list)):
for i in range(40):  #debug: only the first 40 urls; restore the line above
    url = url_list[i]
    arg = (url, temp_list, i)
    argv.append(arg)
#run with multi thread.
multi_thread.run_with_multi_thread(
    download_ripe_atlas_detail_worker_wrapper, argv, resources, mt_num)
#output: attach each measurement's downloaded detail, then stream the
#records as newline-delimited JSON through gzip into file_path.
utils.log("writting to file ... ")
for i in range(len(result_list)):
    result_list[i]["results_json"] = temp_list[i]
fp = open(file_path, 'wb')
h = subprocess.Popen(['gzip', '-c', '-'], stdin=subprocess.PIPE, stdout=fp)
#h.stdin.write(json.dumps(result_list,indent=1))
for i in range(len(result_list)):
    h.stdin.write(json.dumps(result_list[i]) + "\n")
h.stdin.close()
# BUG FIX: wait for gzip to drain its input and exit before closing its
# output file, otherwise the tail of the compressed stream can be lost.
h.wait()
fp.close()
return team+"."+suffix def download_date(date, directory, mt_num=-1): #get url list. is_succeeded = False round_cnt = 1 while(not is_succeeded): try: url_list = get_url_list_from_date(date) is_succeeded = True except Exception, e: utils.log(str(e)) is_succeed = False round_cnt = round_cnt + 1 time.sleep(1*round_cnt) utils.touch(directory+'/') #resource list resources = [''] #build argv_list argv = [] for url in url_list: file_path = utils.url_join([directory, get_caida_filename_from_url(url)]) arg = (url, file_path) argv.append(arg) #run with multi thread. multi_thread.run_with_multi_thread(download_caida_restricted_wrapper, argv, resources, mt_num)