def test_mpms_err_num_against_err_prob_with_not_errors_all_corrected(
        sequence, Px, Pe):
    hamming_msg_len = 20
    min_err_num = 1
    max_err_num = hamming_msg_len
    sample_size = 300
    succ_tranx = []
    for n in range(min_err_num, max_err_num + 1):
        count = 0
        for i in range(sample_size):
            mpms = MPMS(Px, Pe)
            s, u, l = mpms.transmit(sequence,
                                    max_channel_use=500,
                                    err_num=n,
                                    msg_len=hamming_msg_len)
            if s == sequence:
                count += 1
        succ_tranx.append(count / sample_size)
    # plot
    x = np.array(range(min_err_num, max_err_num + 1))
    y = succ_tranx
    plt.plot(x, y, label='correct tranx ratio')
    plt.xlabel("The number of error(s)")
    plt.ylabel("Correct tranx ratio")
    plt.title("Pe={}, Px={}, size={}, len={}".format(Pe, Px, sample_size,
                                                     len(sequence)))
    plt.legend(loc='upper right')
    plt.savefig(os.path.join("graph", "mpms_err_num_err_prob.png"))
    plt.show()
def test_mpms_once_with_errors_all_corrected(sequence, Px, Pe):
    mpms = MPMS(Px, Pe)
    hamming_msg_len = 4
    s, u, l = mpms.transmit(sequence,
                            max_channel_use=300,
                            err_num=1,
                            msg_len=hamming_msg_len)
    print("Result:\n- number of channel use: {}, len: {} {}".format(
        u, len(sequence), l))
    print("- binary sequence: {}\n- actual sequence: {}".format(s, sequence))
    if s == sequence:
        print("Correct!")
    else:
        print("Wrong!")
    print("Transmission Rate: {}".format(len(sequence) / u / l))
    print("Channel x linear: {}".format(
        BSC_capacity(Px) * hamming_msg_len / l))
    print("Channel capacity: {}".format(BSC_capacity(Px)))
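# The "Channel capacity" printed above is assumed to be the Shannon capacity of a
# binary symmetric channel with crossover probability Px, i.e. C = 1 - H(Px).
# A minimal sketch under that assumption (the project's BSC_capacity helper is
# assumed to compute the same quantity):
import math


def bsc_capacity_sketch(p):
    """Capacity of a BSC with crossover probability p: C = 1 - H(p)."""
    if p in (0.0, 1.0):
        return 1.0  # noiseless (or deterministically inverted) channel
    h = -p * math.log2(p) - (1 - p) * math.log2(1 - p)  # binary entropy H(p)
    return 1.0 - h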
def spyBoard_dict(boardid_dict, pages_input=None, sleeptime=86400,
                  processes=2, threads=2):
    """Monitor the boards in the given list of board ids."""
    m = MPMS(getBBS, handler, processes=processes, threads_per_process=threads)
    for boardid in boardid_dict:
        if pages_input is not None:
            pages = pages_input
        else:
            pages = getBoardSize(boardid)
        print("[board {}] Try to get {} pages".format(boardid, pages))
        for j in range(pages, 0, -1):
            thispage = getBoardPage(boardid, j)
            if thispage == []:
                break
            for i in thispage:
                m.put([boardid, i[1], "big"])
    sleep(sleeptime)
    return
def spyNew(sleeptime=100, processes=3, threads=3):
    """Monitor hot posts, new posts, and the extra configured boards.

    This is the function invoked when the script is run directly.
    """
    global ignore_counts
    ignore_counts = 0
    starttime = time.time()
    myprint("start")
    m = MPMS(getBBS, handler, processes=processes, threads_per_process=threads)
    t = 0
    workload = set()
    thenew = getHotPost() + getNewPost()
    boardlist = set([int(i[0]) for i in thenew])
    myprint("get new finished, len(thenew)={}, len(boardlist)={}".format(
        len(thenew), len(boardlist)))
    boardlist.update(CONFIG_INTERESTING_BOARDS)
    newclicksdata = []
    for boardid in boardlist:
        newclicksdata += getBoardPage_detailed(boardid, 1)
    for boardid, postid, reply, clicks, lastpost in newclicksdata:
        if filter_pass(boardid, postid, reply, clicks, lastpost):
            if postid not in workload:
                m.put([boardid, postid, ""])
                workload.add(postid)
    myprint("Check {} boards, ignore {} posts, using {} seconds".format(
        len(boardlist), ignore_counts, int(time.time() - starttime)))
    if time.time() - starttime > 10:
        myprint("too slow! add wait time")
        sleeptime += time.time() - starttime - 10
    while len(m) > 0:
        myprint("Remaining queue length: {len}".format(len=len(m)))
        sleep(2)
    myprint("All done! wait 5 seconds to clean up")
    sleep(5)
    myprint("Try to close the queue... If this hangs, you have to kill the python process")
    plus1("tryclose.log")
    m.close()
    myprint("Try to join the queue... If this hangs, you have to kill the python process")
    plus1("tryjoin.log")
    m.join()
    plus1("join_success.log")
    myprint("All child processes exited successfully")
    sleeptime = max(0, starttime + sleeptime - time.time())
    print("Sleep a while ( {sleeptime:.0f}s )...".format(sleeptime=sleeptime))
    sleep(sleeptime)
    myprint("Sleep done! wake up and exit...")
    return
def test_mpms_once_with_errors_not_all_corrected(msg_len, Px, Pe):
    # message and Hamming code
    msg = read_msg(msg_len)[233]
    hmsg_len = 10  # Hamming message length
    hblk_len = HammingCode.calc_redundant_bits(hmsg_len) + hmsg_len
    print('Hamming({},{})'.format(hblk_len, hmsg_len))
    # modified posterior matching
    mpms = MPMS(Px, Pe)
    s, u, l = mpms.transmit(msg,
                            max_channel_use=500,
                            err_num=None,
                            msg_len=hmsg_len)
    print("Result:\n- number of channel use: {}, len: {} {}".format(
        u, len(msg), l))
    print("- binary sequence: {}\n- actual sequence: {}".format(s, msg))
    if s == msg:
        print("Correct!")
        print("Transmission Rate: {}".format(len(msg) / u / hblk_len))
        print("Channel capacity: {}".format(BSC_capacity(Px)))
        print("Channel x linear: {}".format(
            BSC_capacity(Px) * hmsg_len / hblk_len))
    else:
        print("Wrong!")
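# hblk_len above is derived from the message length via
# HammingCode.calc_redundant_bits. Assuming that helper follows the standard
# Hamming-code condition 2**r >= m + r + 1 (r parity bits for m data bits),
# a minimal sketch would be the following; for hmsg_len = 10 it gives r = 4,
# i.e. a Hamming(14, 10) block, consistent with the printout above:
def calc_redundant_bits_sketch(m):
    """Smallest r with 2**r >= m + r + 1 (sketch of the assumed Hamming rule)."""
    r = 0
    while 2 ** r < m + r + 1:
        r += 1
    return r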
def main():
    results = ""
    # we will run the benchmark several times using the following params
    test_params = (
        # (processes, threads_per_process)
        (20, 50),
        (10, 20),
        (5, 10),
        (3, 3),
        (1, 1),
    )
    for processes, threads_per_process in test_params:
        # Init the pool
        m = MPMS(
            worker,
            collector,
            processes=processes,  # optional, how many processes, default is your cpu core count
            threads=threads_per_process,  # optional, how many threads per process, default is 2
            meta={"any": 1, "dict": "you", "want": {"pass": "******"}, "worker": 0.5},
        )
        m.start()  # start and fork subprocesses
        start_time = time()  # record when we started
        # put task parameters into the task queue, 2000 tasks in total
        for i in range(2000):
            m.put(i, t=time())
        # optional, close the task queue; it is closed automatically on join()
        # m.close()
        # close the task queue and wait for all workers and the collector to finish
        m.join()
        # record and print the results
        results += "Processes:" + str(processes) \
            + " Threads_per_process:" + str(threads_per_process) \
            + " Total_threads:" + str(processes * threads_per_process) \
            + " TotalTime: " + str(time() - start_time) + "\n"
        print(results)
        print('sleeping 5s before next')
        sleep(5)
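# main() above assumes worker and collector functions are defined elsewhere.
# A minimal, hypothetical pair matching the call pattern used here (the worker
# receives whatever was passed to m.put; the collector receives the shared meta
# object plus the worker's return value, mirroring the handler(meta, item)
# signature shown later in this section):
def worker(index, t=None):
    """Hypothetical task: echo back the index and the time it was queued."""
    return index, t


def collector(meta, result):
    """Hypothetical collector: runs in the main process once per finished task."""
    index, queued_at = result  # whatever the worker returned
    if index % 500 == 0:
        print("collected task", index, "queued at", queued_at,
              "meta['any'] =", meta["any"])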
def test_mpms_len_against_tranx_rate_with_not_errors_all_corrected(
        Px, Pe, cmt: str):
    """Plot message length against transmission rate and write log files.

    For each message length, record the average transmission rate and the
    average number of channel uses. Existing log files are overwritten.
    """
    # hamming code
    hmsg_len = 4
    hblk_len = HammingCode.calc_redundant_bits(hmsg_len) + hmsg_len
    # modified posterior matching
    min_msg_len = 1
    max_msg_len = 40
    sample_size = 700
    tranx_rate = np.zeros(max_msg_len - min_msg_len + 1)
    channel_use = np.zeros(max_msg_len - min_msg_len + 1)
    for l in range(min_msg_len, max_msg_len + 1):
        rate = np.zeros(sample_size)
        use = np.zeros(sample_size)
        msg = read_msg(l)
        for i in range(sample_size):
            mpms = MPMS(Px, Pe)
            s, u, L = mpms.transmit(msg[i],
                                    max_channel_use=500,
                                    err_num=None,
                                    msg_len=hmsg_len)
            if s == msg[i]:
                rate[i] = l / u / hblk_len
                use[i] = u
            print("Progress: {}%".format(
                np.round(
                    100 * ((l - min_msg_len) * sample_size + i) /
                    (max_msg_len + 1 - min_msg_len) / sample_size, 2)))
        # tranx_rate.append(sum(rate)/len(rate))
        tranx_rate[l - 1] = np.mean(rate[np.nonzero(rate)])
        channel_use[l - 1] = np.mean(use[np.nonzero(use)])
    # log files
    fn_trx = "mpms_len_tranx_rate_({},{})_{}.txt".format(
        hblk_len, hmsg_len, cmt)
    fn_trx = os.path.join('log', fn_trx)
    with open(fn_trx, 'w') as f:
        for rate in tranx_rate:
            f.write(str(rate) + '\n')
    fn_use = "mpms_len_channel_use_({},{})_{}.txt".format(
        hblk_len, hmsg_len, cmt)
    fn_use = os.path.join('log', fn_use)
    with open(fn_use, 'w') as f:
        for use in channel_use:
            f.write(str(use) + '\n')
    # plot
    x = np.array(range(min_msg_len, max_msg_len + 1))
    y1 = tranx_rate
    c1 = BSC_capacity(Px)
    c2 = BSC_Hamming_capacity(hblk_len, hmsg_len,
                              hamming_err_prob(Px, hmsg_len, hblk_len))
    plt.plot(x, y1, label="tranx rate")
    plt.hlines(c1, 0, max_msg_len + 1, colors="coral", label="BSC capacity")
    plt.hlines(c2, 0, max_msg_len + 1, colors="lime", label="Cap with Hamming")
    plt.xlim([0, max_msg_len + 1])
    title = 'Modified Posterior Matching Scheme: Pe={}, Px={}, '.format(Pe, Px)
    plt.title("{}size={}, hamming({},{})".format(title, sample_size, hblk_len,
                                                 hmsg_len))
    plt.xlabel("Message length")
    plt.ylabel("Transmission rate")
    plt.legend(loc='lower right')
    figname = "mpms_len_tranx_rate_({},{})_{}.png".format(
        hblk_len, hmsg_len, cmt)
    plt.savefig(os.path.join("graph", figname))
    plt.show()
    thread_data.__dict__["a"] = a
    html = a.get("https://person.zju.edu.cn/" + item[3], result=False)
    uid = html.split("getQRcode.php?uid=", 2)[1].split("&", 2)[0]
    item.append(uid)
    return item


def handler(meta, item):
    meta["fp"].write("\t".join([str(i) for i in item]) + "\n")


if __name__ == "__main__":
    # print(sign(1579490640000, "/api/front/psons/search", {"size": 12, "page": 0, "lang": "cn"}))
    meta = {"fp": open("personzju.txt", "w", encoding="utf-8")}
    m = MPMS(worker, handler, 2, 2, meta=meta)
    m.start()
    for t in get("/api/front/psons/search", {
            "size": 10000,
            "page": 0,
            "lang": "cn"
    }, cache=True)["data"]["content"]:
        # tprint(t["cn_name"], t["college_name"], t["work_title"], t["mapping_name"], t["access_count"])
        m.put([
            t["cn_name"], t["college_name"], t["work_title"],
            t["mapping_name"], t["access_count"]
        ])
    while len(m) > 10:
        myprint("Remaining " + str(len(m)))