def ConcurrencyPay(context, *args):
    def pay(order_id):
        print("Pay:")
        url = preData["eatojoy_url"] + "/user/payment/pay"
        headers = {"token": preData["eatojoy_user_token"]}
        print("Request headers:")
        print(headers)
        print("Request URL: " + url)
        data = {
            "order_id": order_id,
            "source_id": preData["eatojoy_source_id"]
        }
        print("Request parameters:")
        print(decode_str(data))
        ret = requests.post(url, json=data, headers=headers)
        print("Response:")
        # time.clock() was removed in Python 3.8; perf_counter() is the replacement
        timeX = time.perf_counter()
        print(timeX)
        print(ret.json())

    try:
        time1 = time.perf_counter()
        thread_count = len(preData["eatojoy_order_id"])
        with pool(max_workers=100) as executor:
            future_tasks = [
                executor.submit(pay, order_id)
                for order_id in preData["eatojoy_order_id"]
            ]
        # the with-block exits only after every submitted task has finished,
        # so time2 measures completion, not just submission
        time2 = time.perf_counter()
        times = time2 - time1
        print(times / thread_count)
        return True
    except Exception as e:
        print(e)
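# Per-request latency is easier to read off with as_completed: each future
# reports its own elapsed time as it finishes, instead of averaging the whole
# batch. A minimal sketch, assuming a thread pool like the one above;
# fetch() is a hypothetical stand-in for the real requests.post() call.
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

def fetch(task_id):
    start = time.perf_counter()
    time.sleep(0.1)  # stand-in for the real HTTP request
    return task_id, time.perf_counter() - start

with ThreadPoolExecutor(max_workers=100) as executor:
    futures = [executor.submit(fetch, i) for i in range(10)]
    for future in as_completed(futures):
        task_id, elapsed = future.result()
        print(f"task {task_id} finished in {elapsed:.3f}s")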
import os
from datetime import datetime

# record the start time so we can compute how long the run took
start = datetime.now()
# list the files in the directory (LerSubDir is a helper defined elsewhere)
lst_arq = LerSubDir("./")
# count how many files the directory holds
qtd_arq = len(lst_arq)
# use all available processor cores minus one
n_cpu = os.cpu_count() - 1
print('Files found:', qtd_arq)
print('Processors used:', n_cpu)

# rename each file's extension to .txt; a named function (not a lambda) is
# required if pool is a ProcessPoolExecutor, since lambdas cannot be pickled
def to_txt(txt):
    os.rename(txt, txt[0:-3] + 'txt')

# the with-block shuts the executor down and waits for every task; consuming
# the map iterator with list() also surfaces any worker exceptions
with pool(max_workers=n_cpu) as executor:
    list(executor.map(to_txt, lst_arq))

# free the file list
del lst_arq
# record the finish time
stop = datetime.now()
# elapsed execution time
print('Elapsed time:', stop - start)
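# The txt[0:-3] slice assumes every file name ends in a three-letter
# extension; pathlib's with_suffix handles any extension length. A minimal
# sketch of the safer rename, independent of the executor:
from pathlib import Path

def to_txt_path(name):
    p = Path(name)
    # with_suffix swaps whatever extension is present for .txt
    p.rename(p.with_suffix('.txt'))

# to_txt_path('relatorio.doc')  # would rename relatorio.doc -> relatorio.txt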
def __init__(self, instance_num=5):
    self.instance_num = instance_num
    # one worker per CPU core
    self.glob_pool = pool(cpu_count())
    # round-robin iterator over the instance indices
    self.avalible_instance = cycle(range(instance_num))
    self.start_traci()
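# cycle() gives round-robin selection: next() yields 0, 1, ..., n-1 and then
# wraps around. A minimal sketch of the dispatch pattern (the task loop is
# hypothetical; only the cycle usage mirrors the constructor above):
from itertools import cycle

instances = cycle(range(3))
for task in ["a", "b", "c", "d", "e"]:
    print(f"task {task} -> instance {next(instances)}")  # 0, 1, 2, 0, 1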
def ConcurrencyCartAdd(context, *args):
    def checkout():
        print("Add to cart:")
        url = preData["eatojoy_url"] + "/user/cart/add"
        headers = {"token": preData["eatojoy_user_token"]}
        print("Request headers:")
        print(headers)
        print("Request URL: " + url)
        data = {
            "product_id": "1",
            "vendor_id": "2",
            "product_num": 1,
            "product_msg": "product note"
        }
        print("Request parameters:")
        print(decode_str(data))
        ret = requests.post(url, json=data, headers=headers)
        print("Response:")
        print(ret.json())

        print("Cart list:")
        url = preData["eatojoy_url"] + "/user/cart/list"
        headers = {"token": preData["eatojoy_user_token"]}
        print("Request headers:")
        print(headers)
        print("Request URL: " + url)
        data = {"vendor_id": "2", "lon": "114.082846", "lat": "22.4167417"}
        print("Request parameters:")
        print(decode_str(data))
        # GET requests normally carry query parameters rather than a JSON
        # body; kept as-is because this is what the API under test expects
        ret = requests.get(url, json=data, headers=headers)
        print(ret.json())
        product_id = ret.json()["data"]["product_list"][0]["id"]
        product_num = ret.json()["data"]["product_list"][0]["product_num"]
        order_grand_total = ret.json()["data"]["price_total"]
        print("Response:")
        print(ret.json())

        print("Place order:")
        url = preData["eatojoy_url"] + "/user/order/checkout"
        headers = {"token": preData["eatojoy_user_token"]}
        print("Request headers:")
        print(headers)
        print("Request URL: " + url)
        data = {
            "product_id": product_id,
            "merchants_id": "2",
            "order_product_counts": product_num,
            "order_grand_total": order_grand_total,
            "take_food_time": preData["eatojoy_take_food_time"],
            "package_fee": 10,
            "coupons_fee": 0
        }
        print("Request parameters:")
        print(decode_str(data))
        ret = requests.post(url, json=data, headers=headers)
        print(ret.json())
        # list.append is atomic in CPython, so this is safe across threads
        preData["eatojoy_order_id"].append(get_value("data", ret.json()))

    try:
        # time.clock() was removed in Python 3.8; use perf_counter() instead
        time1 = time.perf_counter()
        with pool(max_workers=100) as executor:
            # note: range(1, 100) submits 99 checkouts, not 100
            future_tasks = [executor.submit(checkout) for i in range(1, 100)]
        time2 = time.perf_counter()
        times = time2 - time1
        print(times)
        print(preData["eatojoy_order_id"])
        return True
    except Exception as e:
        print(e)
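# An alternative to appending into the shared preData list from inside each
# worker: have the task return its order id and collect results on the main
# thread, where result() also re-raises any worker exception instead of
# silently losing it. A minimal sketch; make_order is a hypothetical
# stand-in for the real checkout().
from concurrent.futures import ThreadPoolExecutor

def make_order(i):
    return "order-%d" % i  # stand-in for the HTTP checkout flow

with ThreadPoolExecutor(max_workers=100) as executor:
    futures = [executor.submit(make_order, i) for i in range(1, 100)]
order_ids = [f.result() for f in futures]
print(len(order_ids), order_ids[:3])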
# the package-level imports are the effective bindings (the earlier
# submodule imports of the same names were shadowed, and terminal_size,
# Process, and time were unused)
from ChangeDetectors import MonitorComponentsChanges, MonitorNetlistChanges
from InternalWorkingScripts import createComponents_txt, _createNetlist_txt
from concurrent.futures import ProcessPoolExecutor as pool

if __name__ == '__main__':
    # generate the two input files before starting the watchers
    createComponents_txt()
    _createNetlist_txt()
    # run both change monitors in parallel; the with-block waits for both
    with pool() as p:
        p.submit(MonitorComponentsChanges)
        p.submit(MonitorNetlistChanges)
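# The __main__ guard above is load-bearing: with the spawn start method
# (the default on Windows and macOS), every worker process re-imports the
# main module, and unguarded executor code would spawn workers recursively.
# A minimal guarded sketch; watch() stands in for the monitor functions.
from concurrent.futures import ProcessPoolExecutor

def watch(name):
    print(name, "running")

if __name__ == '__main__':
    with ProcessPoolExecutor() as p:
        p.submit(watch, "components")
        p.submit(watch, "netlist")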
def _weighted_square(x, p):
    # module-level helper so it can be pickled by a process pool; the
    # original lambda could not be pickled and also shadowed the executor
    # name p with its own parameter
    return p * x * x

def variance(X, P):
    mu = weighted_mean(X, P)
    with pool() as p:
        w2 = p.map(_weighted_square, X, P)
    return reduce(add, w2) - mu**2
def weighted_mean(X, P):
    # operator.mul is a named function, so it pickles cleanly for a process pool
    with pool() as p:
        w1 = p.map(mul, X, P)
    return reduce(add, w1)
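# Usage sketch for the two functions above, assuming the imports they rely
# on (from functools import reduce; from operator import add, mul) and a
# pool alias as in the other snippets; run under a __main__ guard if pool
# is a ProcessPoolExecutor.
X = [1.0, 2.0, 3.0]
P = [0.2, 0.3, 0.5]
print(weighted_mean(X, P))  # 0.2*1 + 0.3*2 + 0.5*3 = 2.3
print(variance(X, P))       # E[X^2] - mu^2 = 5.9 - 5.29 = 0.61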
from concurrent.futures import ProcessPoolExecutor as pool
from functools import reduce
from operator import add

from bs4 import BeautifulSoup

def sentence_mapper(text):
    sentences = list()
    textSoup = BeautifulSoup(text, "lxml")
    # paragraphs without a class attribute hold the article body
    paragraphs = textSoup.find_all("p", attrs={"class": None})
    # drop the first and last paragraph, join the rest in lowercase
    prepared = "".join([p.text.strip().lower() for p in paragraphs[1:-1]])
    for t in prepared.split("."):
        # keep only letters and whitespace
        part = "".join([c for c in t if c.isalpha() or c.isspace()])
        sentences.append(part.strip())
    return sentences

# parallel map over the corpus (texts is defined earlier in the notebook)
with pool(4) as p:
    mapped_sentences = p.map(sentence_mapper, texts)

# reduce: concatenate the per-text sentence lists
sentences = reduce(add, mapped_sentences)

# print first and last sentence to check the results
print(sentences[0])
print(sentences[-1])
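# reduce(add, ...) copies the accumulated list at every step, which is
# quadratic in the total number of sentences; itertools.chain flattens in
# linear time. A minimal equivalent sketch:
from itertools import chain

mapped = [["a", "b"], ["c"], ["d", "e"]]
print(list(chain.from_iterable(mapped)))  # ['a', 'b', 'c', 'd', 'e']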
    dataframes[i_.strip("\n")] = element_lists
    # note: the loop below must sit at exactly this indentation level;
    # nesting it any deeper breaks the dictionary update
    for i in condition:
        element_list[title] = 0
        dataframe[i.strip("\n")] = element_list
    # merge the two dicts
    dataframe_ = dict(dataframe, **dataframes)
    # write to Excel ('一' is the sheet name)
    dataframes = pd.DataFrame(dataframe_)
    writer = pd.ExcelWriter(
        r"F:\360下载\提取后的excel文件(非分词结果)\{name}.xls".format(name=title))
    dataframes.to_excel(writer, sheet_name='一', index=False)
    # newer pandas versions replace ExcelWriter.save() with close()
    writer.save()
    print("Write succeeded --------------------------------")


if __name__ == "__main__":
    _path = glob.glob(os.path.join(input_path, "*"))
    # split the path list into chunks of one file each
    pool_path = [_path[i:i + 1] for i in range(0, len(_path), 1)]
    pools = pool(100)
    for i in pool_path:
        pools.submit(no_jieba_get, i)
    # without waiting, the script used to exit after ~21 files because the
    # worker threads had not finished; block until every task completes
    pools.shutdown(wait=True)
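# The executor's context manager gives the same guarantee as the explicit
# shutdown(wait=True) above: leaving the with-block only happens once every
# submitted task has finished. A minimal sketch; work() is a hypothetical
# stand-in for no_jieba_get.
from concurrent.futures import ThreadPoolExecutor

def work(paths):
    print("processing", paths)

chunks = [["a.txt"], ["b.txt"], ["c.txt"]]
with ThreadPoolExecutor(max_workers=100) as pools:
    for chunk in chunks:
        pools.submit(work, chunk)
# reaching this line means all submitted tasks have completed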