def getIpact(addr, month): data = readipdt.readIpdt(addr) daofmo = calendar.monthrange(2018, month)[1] for key, value in data.items(): print key, value if len(value): #创建活动记录保存目录 savepath = '/home/lzhang/SRTP_DNS/ipact/' + key mkdir.mkdir(savepath) for i in range(daofmo): #通过url抓取Ip活动记录 url = "http://211.65.197.210:8080/IPCIS/activityDatabase?IpSets=" + value[ 0] + ":32&TableName=2018-" + str(month) + "-" + str( i) + "&Mode=2" response = urllib2.urlopen(url) html = response.read() mystr = html.decode("utf8") response.close() if (mystr != "{}"): #将查到的ip活动记录写入到文件夹 file = open( savepath + '/' + str(i) + 'of' + str(month) + '.txt', 'w') file.write(mystr) file.close()
def getStackInfo(que, i, sta_codes, ah_stas, sta_names):
    """Worker: compute charge/discharge strength data for station *i* and
    save it as an Excel workbook under ./strengths/<station name>/.

    que       -- work queue; one item is consumed and task_done() signalled
                 when this worker finishes (success or failure)
    i         -- index into sta_codes / ah_stas / sta_names
    sta_codes -- station code list
    ah_stas   -- per-station Ah-statistics argument for getDataByCLS
    sta_names -- station display-name list

    NOTE(review): reads module-level sta_data_date / end_data_date —
    confirm they are set before this worker runs.
    """
    try:
        print('***********正在计算“' + sta_names[i] + '”强度中***********')
        obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[i])
        # Stack-level accumulated energy and cluster-level accumulated Ah.
        rs_en = obj.getDataByBAS()[0]
        rs_ah = obj.getDataByCLS(ah_sta=ah_stas[i])[0]
        # rs_ah=obj.getDataByCLS(ah_sta=ah_stas[i],isEndAhCount=True)[0]
        # return
        # print(rs_en)
        if rs_ah is not False and rs_en is not False:
            # Make sure the per-station output directory exists.
            mkdir('./strengths/' + sta_names[i])
            write = pd.ExcelWriter(r'./strengths/' + sta_names[i] + '/' +
                                   sta_data_date.split(' ')[0] + '强度数据统计.xlsx')
            rs_ah.to_excel(write, '簇累计充放容量数据', index=False)
            rs_en.to_excel(write, '堆累计充放电量数据', index=False)
            write.save()
            print('√√√√√√√√√√√√ data is saved! √√√√√√√√√√√√')
        else:
            print('data not need to save!')
    except:
        # Report which station failed but keep the worker pool alive.
        print(sta_names[i] + '‘s data get meet error!')
        traceback.print_exc()
    print(que.get())
    que.task_done()
def getStackInfo(que, i, sta_codes, ah_stas, sta_names, sta_data_date,
                 end_data_date):
    """Worker: compute stack-level accumulated energy for station *i* and
    save it under ./201906_product_files/<station name>/.

    Unlike the sibling variant, the query window is passed in explicitly
    (sta_data_date / end_data_date) and only the energy sheet is written —
    the Ah/cluster sheet is currently disabled (commented out below).
    """
    global need_data
    print('***********正在计算“' + sta_names[i] + '”强度中***********')
    obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[i])
    rs_en = obj.getDataByBAS()[0]
    # rs_ah=[]
    # rs_ah=obj.getDataByCLS(ah_sta=ah_stas[i])[0]
    # rs_ah=obj.getDataByCLS(ah_sta=ah_stas[i],isEndAhCount=True)[0]
    # if rs_en is not False:
    #     mkdir('./strengths/'+sta_names[i])
    #     write=pd.ExcelWriter(r'./strengths/'+sta_names[i]+'/'+sta_data_date.split(' ')[0]+'强度数据统计.xlsx')
    #     rs_en.to_excel(write,'堆累计充放电量数据',index=False)
    #     write.save()
    #     print('√√√√√√√√√√√√ data is saved! √√√√√√√√√√√√')
    # print(rs_en)
    if rs_en is not False:
        # if rs_ah is not False and rs_en is not False and rs_en.ix[0,5]>0:
        # Make sure the output directory exists.
        mkdir('./201906_product_files/' + sta_names[i])
        write = pd.ExcelWriter(r'./201906_product_files/' + sta_names[i] +
                               '/' + sta_data_date.split(' ')[0] +
                               '强度数据统计.xlsx')
        # rs_ah.to_excel(write,'簇累计充放容量数据',index=False)
        rs_en.to_excel(write, '堆累计充放电量数据', index=False)
        write.save()
        print('√√√√√√√√√√√√ data is saved! √√√√√√√√√√√√')
    else:
        print('data not need to save!')
    print(que.get())
    que.task_done()
def __init__(self, image_h, image_v, filename, ppi=401, gl=(0, 128, 255)):
    """Record the image geometry and colour settings, then make sure the
    output directory *filename* exists.

    image_h, image_v -- horizontal / vertical image extent
    filename         -- output directory name (created via mkdir)
    ppi              -- resolution in pixels per inch
    gl               -- default colour triple (R, G, B)
    """
    self.filename = filename
    self.ppi = ppi
    self.gl = gl
    self.image_h = image_h
    self.image_v = image_v
    mkdir(filename)
def scandirs(self):
    """Walk the local "files" tree and mirror every directory that is not
    yet known to the remote side.

    Each newly seen directory is recorded in self.dirs, then a fresh
    socket connection is opened and a remote mkdir is requested.  On any
    failure the directory is removed from self.dirs so a later scan
    retries it.  (Python 2 code.)
    """
    for x, y, z in os.walk("files"):
        if x not in self.dirs:
            self.dirs.append(x)
            try:
                sock = socket.socket()
                sock.connect((self.host, self.port))
                mkdir.mkdir(sock, x, self.username, self.password)
                print "{0} Outgoing mkdir {1}".format(datetime.datetime.now(), x)
            except Exception, e:
                self.dirs.remove(x)  # Remove it so that it is tried again later
            # NOTE(review): sleep placement reconstructed from a collapsed
            # source line — confirm it belongs after the try/except.
            time.sleep(0.5)
def getStackInfo(i, sta_codes, ah_stas, sta_names):
    """Compute strength data for station *i* and save both the cluster Ah
    sheet and the stack energy sheet under ./strengths/<station name>/.

    Saves only when both results are valid and the checked energy cell
    (rs_en.ix[0, 2]) is positive.  Reads module-level sta_data_date /
    end_data_date.
    """
    print('***********正在计算“' + sta_names[i] + '”强度中***********')
    obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[i])
    rs_en = obj.getDataByBAS()
    rs_ah = obj.getDataByCLS(ah_sta=ah_stas[i])
    if rs_ah is not False and rs_en is not False and rs_en.ix[0, 2] > 0:
        # Make sure the output directory exists.
        mkdir('./strengths/' + sta_names[i])
        write = pd.ExcelWriter(r'./strengths/' + sta_names[i] + '/' +
                               sta_data_date.split(' ')[0] + '强度数据统计.xlsx')
        rs_ah.to_excel(write, '簇累计充放容量数据')
        rs_en.to_excel(write, '堆累计充放电量数据')
        write.save()
        print('√√√√√√√√√√√√ data is saved! √√√√√√√√√√√√')
def starter():
    """Create ./cogs if missing and (re)load every Python cog in it.

    Each *.py file in ./cogs is loaded as extension 'cogs.<name>'.  If
    loading fails (typically because the extension is already loaded),
    it is unloaded and loaded again.
    """
    mkdir.mkdir("./cogs")
    for filename in os.listdir('./cogs'):
        # Only Python files are cogs.
        if filename.endswith('.py'):
            cog = filename[:-3]
            try:
                print(f'Chargement du cog : \n {cog}')
                client.load_extension(f'cogs.{cog}')
            except Exception:
                # FIX: the bare 'except:' also swallowed KeyboardInterrupt
                # and SystemExit; narrowed to Exception.  Reuse 'cog'
                # instead of re-slicing filename twice.
                print(f'Rechargement du cog : \n {cog}')
                client.unload_extension(f'cogs.{cog}')
                client.load_extension(f'cogs.{cog}')
def scandirs(self):
    """Walk the local "files" tree and mirror every directory not yet
    known to the remote side; print a sync progress indicator instead of
    a per-directory log line.  (Python 2 code.)

    On failure the directory is removed from self.dirs so a later scan
    retries it.
    """
    for x, y, z in os.walk("files"):
        if x not in self.dirs:
            self.dirs.append(x)
            try:
                sock = socket.socket()
                sock.connect((self.host, self.port))
                mkdir.mkdir(sock, x, self.username, self.password)
                #print "{0} Outgoing mkdir {1}".format(datetime.datetime.now(), x)
                self.syncingIndicator()
            except Exception, e:
                self.dirs.remove(x)  # Remove it so that it is tried again later
def write(path, content):
    """Write *content* to *path*, creating the parent directory if needed.

    The path is ~-expanded and made absolute, and *content* is passed
    through _convert() first.  Works on both Python 2 (unicode content is
    encoded to UTF-8 before writing) and Python 3 (file opened with an
    explicit UTF-8 encoding).

    Returns the absolute path that was written.
    """
    fullpath = os.path.abspath(os.path.expanduser(path))
    content = _convert(content)
    mkdir.mkdir(os.path.dirname(fullpath))
    try:
        unicode()  # raises NameError on Python 3 -> py3 branch below
        if isinstance(content, unicode):
            content = content.encode("utf-8")
        # FIX: close the file deterministically with a context manager
        # instead of relying on garbage collection.
        with open(fullpath, "w") as f:
            f.write(content)
    except NameError:
        """NameError: name 'unicode' is not defined"""
        with open(fullpath, "w", encoding="utf-8") as f:
            f.write(content)
    return fullpath
def getStackInfo(i, sta_codes, sta_names, sta_data_date, end_data_date): obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[i]) #6-3-3 3pack sbox=5,ebox=6,sba=2,eba=3,scl=2,ecl=3,sp=2,ep=3 rs = obj.getCellDataByPack() if len(rs) > 0: df_rs = pd.DataFrame(rs) df_rs = pd.concat( [pd.DataFrame([[sta_data_date, end_data_date]]), df_rs]) df_rs.columns = ['描述', '电压'] mkdir('./BMUQS/') df_rs.to_excel('./BMUQS/' + sta_data_date.split(' ')[0] + sta_names[i] + '.xlsx', index=False) print('发现如下问题[描述,12s电压],已存入BMUQS目录下') else: print('所选时间段内没有发现任何问题')
class Client:
    """Python 2 file-sync client: repeatedly scans the local ./files tree
    and mirrors newly created directories to a remote server over raw
    sockets, while a shell thread handles user commands."""

    def __init__(self):
        # Remote endpoint (host left empty here; presumably filled in
        # elsewhere — TODO confirm against the full file).
        self.host = ""
        self.port = 5555
        self.dirs = []      # directories already pushed to the server
        self.files = {}     # tracked files (used by scanfiles, not shown here)
        self.username = "******"
        self.password = "******"
        self.usercmds = {"download": download.download}
        self.syncing = False

    def syncingIndicator(self):
        """Print 'Syncing.' once per sync round, then one dot per item."""
        if self.syncing == False:
            self.syncing = True
            print "Syncing.",
        else:
            print ".",

    def run(self):
        """Start the interactive shell thread, then loop forever scanning
        directories and files every 3 seconds."""
        thread.start_new_thread(self.shell, ())
        while True:
            self.syncing = False
            self.scandirs()
            self.scanfiles()
            if self.syncing == True:
                print "\n Finished updating files! \n"
            time.sleep(3)

    def scandirs(self):
        """Mirror new local directories to the server.

        NOTE(review): the second walk accumulates x = x + "/" + b inside
        the inner loop, so when a parent has several subdirectories the
        later paths pile up (e.g. parent/a/b) — this looks like a bug;
        confirm the intended behaviour.
        """
        for x, y, z in os.walk("files"):
            if x not in self.dirs:
                self.dirs.append(x)
                try:
                    sock = socket.socket()
                    sock.connect((self.host, self.port))
                    mkdir.mkdir(sock, x, self.username, self.password)
                    #print "{0} Outgoing mkdir {1}".format(datetime.datetime.now(), x)
                    self.syncingIndicator()
                except Exception, e:
                    self.dirs.remove(x)  # Remove it so that it is tried again later
                #time.sleep(0.5)
        for x, y, z in os.walk("files"):
            for b in y:
                x = x + "/" + b
                if x not in self.dirs:
                    self.dirs.append(x)
                    try:
                        sock = socket.socket()
                        sock.connect((self.host, self.port))
                        mkdir.mkdir(sock, x, self.username, self.password)
                        #print "{0} Outgoing mkdir {1}".format(datetime.datetime.now(), x)
                        self.syncingIndicator()
                    except Exception, e:
                        self.dirs.remove(x)  # Remove it so that it is tried again later
def gen_problems(srm_num, formatted_file_names, urls, srm_dir): div = 1 level = 1 div_dir = "%s/d%d" %(srm_dir, div) for formatted_file_name in formatted_file_names: mkdir(div_dir) level_dir = "%s/l%d" %(div_dir, level) mkdir(level_dir) try: gen_problem(level_dir, formatted_file_name) gen_url(level_dir, urls[div][level]) except: print "failed to generate {}".format(level_dir) print urls[div][level] traceback.print_exc() level += 1 if 4 == level: div += 1 level = 1 div_dir = "%s/d%d" %(srm_dir, div)
async def envoyer_un_texte_dans_un_fichier(self, ctx, user: discord.Member,
                                           *, message):
    """Discord command (premium only): send *message* to *user* as an
    attached text file.

    Flow: check the caller's premium flag, refuse DMs and the bot's own
    id, write the message into a temp file, send it as message.txt, then
    delete the temp file and its directory.
    """
    UserId = str(ctx.message.author.id)
    premium = reader("/home/rix56/pycharm/BOT/jsondata/premium.json")
    if premium[UserId] == ":white_check_mark:":
        # The bot itself may not be targeted.
        if user.id == 700046053029838878:
            await ctx.send("Vous ne pouvez pas faire ça sur moi !")
            return
        if not ctx.message.guild:
            await ctx.send(
                "Vous ne pouvez pas faire ça dans les messages privés.")
        else:
            await ctx.send(
                f"{ctx.message.author.name}, le message a été envoyé à {user.mention}"
            )
            await user.send(
                f"Voici un message pour vous...Il est en train d'être envoyé..."
            )
            mkdir.mkdir("tmp")
            # Mode "x": fails if tmp.txt already exists (no overwrite).
            with open("/home/rix56/pycharm/BOT/tmp/tmp.txt", "x") as fichier:
                fichier.write(message)
                fichier.close()
            await asyncio.sleep(0.2)
            await user.send(
                file=discord.File("/home/rix56/pycharm/BOT/tmp/tmp.txt",
                                  filename="message.txt"))
            # Clean up the temp file and its (now empty) directory.
            os.remove("/home/rix56/pycharm/BOT/tmp/tmp.txt")
            os.removedirs("/home/rix56/pycharm/BOT/tmp")
    else:
        error1_embed = discord.Embed(title='Erreur', color=0xE74C3C)
        error1_embed.set_footer(
            text=f'En réponse à {ctx.message.author.name} - Créé par Rix56',
            icon_url=ctx.message.author.avatar_url)
        error1_embed.add_field(
            name="Erreur:",
            value=
            "Cette commande est réservé aux gens ayant un compte **__Premium.__**",
            inline=False)
        await ctx.send(embed=error1_embed)
# NOTE(review): the two lines below are the tail of a function defined
# above this chunk (k and v come from its scope); indentation is
# reconstructed from a collapsed source line — confirm against the full file.
        with open('None_recommend_list', 'a') as f3:
            f3.write(k + ':' + v + '\n')


if __name__ == '__main__':
    start_time = time.time()
    # 1. Fetch the recommendation lists (by person name).
    path1 = '/Users/zhangwei/Desktop/sina_job/recommend/oid_name_type/20180114.txt'  # change on every run
    path2 = './res_container/res14'  # change on every run
    # {starname: url}
    full = []
    full.append(star.get_mingxingurl_dict(path1))
    full.append(star.get_yinyueurl_dict(path1))
    # star_url = star.get_mingxingurl_dict(path)  # stars
    # star_url = star.get_yinyueurl_dict(path)  # music
    # star_url = star.getmingxingurl_test()  # test case
    for i in full:
        recomend(i, path2)
    # 2. Fetch the oid recommendation list.
    path3 = mkdir.mkdir('./recommend_container/recommend8/')  # change on every run
    print(type(path3))
    name_oid.name_oid(path1, path2, path3)
    # 3. Add the reverse relations to get the final list.
    add.ad_re_relation(path3)
    # 4. Aggregate the results.
    count.count(path3)
    end_time = time.time()
    print('程序运行了:' + str((end_time - start_time) / 60) + '分钟')
# NOTE(review): the dict tail and method below belong to a class whose
# header lies above this chunk; indentation is reconstructed.
        'Reviews': '0',
        'Voters': {}
    }

    def printa(self):
        # Dump the main fields of the question record for debugging.
        print 'ID', self.data['ID']
        print 'Name', self.data['Name']
        print 'Text', self.data['Text']
        print 'Publish', self.data['Publish']
        print 'Edited', self.data['Edited']
        print 'Reviews', self.data['Reviews']
        print 'done'


# Script entry: scrape one Zhihu question, its answers and their voters.
# (Python 2 code.)
question_id = raw_input('input question id:')
driver = login_hahaha.login()
url = 'https://www.zhihu.com/question/' + question_id
driver.get(url)
'driver=get_question.process_question_page(driver)'
question = Question()
question = get_question.get_question(driver, question_id)
print ' '
question = get_answers.get_answers(driver, question, question_id)
for i in question.data['Answers']:
    # Visit each answer page and collect its voter ids.
    answer_url = url + '/answer/' + question.data['Answers'][i].data['ID']
    driver.get(answer_url)
    i = get_voters.get_voter_id(driver, question.data['Answers'][i])
'''
mkdir.mkdir(question)
'''
"Reviews": "0", "Voters": {}, } def printa(self): print "ID", self.data["ID"] print "Name", self.data["Name"] print "Text", self.data["Text"] print "Publish", self.data["Publish"] print "Edited", self.data["Edited"] print "Reviews", self.data["Reviews"] print "done" question_id = raw_input("input question id:") driver = login_hahaha.login() url = "https://www.zhihu.com/question/" + question_id driver.get(url) "driver=get_question.process_question_page(driver)" question = Question() question = get_question.get_question(driver, question_id) print " " question = get_answers.get_answers(driver, question, question_id) for i in question.data["Answers"]: answer_url = url + "/answer/" + question.data["Answers"][i].data["ID"] driver.get(answer_url) i = get_voters.get_voter_id(driver, question.data["Answers"][i]) """ mkdir.mkdir(question) """
下一步计划,插入多线程函数,可选模式,以簇或堆或箱或电站进行多线程
@author: 逻辑的使命
'''
# (Docstring tail above: "next step — add a multithreading function with a
# selectable mode: per cluster / stack / box / station".)

from getAPI import getStaWork
import pandas as pd
from mkdir import mkdir

# Query window shared by the script body below.
sta_data_date = "2019-04-19 00:00:00"
end_data_date = "2019-04-19 12:00:00"

if __name__ == '__main__':
    # Station list comes from the config workbook.
    sta_config = pd.read_excel('./assets/sta_cl_ah_config.xlsx')
    sta_codes, sta_names = sta_config['code'], sta_config['name']
    sta_i = 1  # 云河 station index
    obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[sta_i])
    mkdir('./curs_datas/' + sta_names[sta_i])
    # Box / stack / cluster slice selectors (start, end).
    sbox, ebox = 1, 2
    sba, eba = 0, 2
    scl, ecl = 0, 3
    mode = ['getDateTime', 'getCurs']
    rs = obj.getDataByCLS(quickly=False,
                          sbox=sbox,
                          ebox=ebox,
                          sba=sba,
                          eba=eba,
                          scl=scl,
                          ecl=ecl,
                          mode=mode)
    # NOTE(review): the body of this 'if' continues past this chunk.
    if len(rs) > 1:
def Main(Nowurl, NowNumber, Sleeptime):
    """Scrape one forum thread page: save its description text, download
    its preview images and torrent file(s), then return the next page URL.

    Nowurl    -- URL of the thread page to scrape
    NowNumber -- running count of pages scraped so far
    Sleeptime -- sleep interval announced at the end of the run

    Returns the next page URL, or -1 when the thread pid cannot be found.
    """
    # Sleep interval (kept as-is).
    Sleeptime = Sleeptime
    # Count of pages fetched so far.
    NowNumber = MainRunNumber(NowNumber)
    print("已抓取页面数:" + str(NowNumber))
    # Pretend to be a desktop Chrome browser.
    header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
    strhtml = requests.get(Nowurl, headers=header)  # Get Html Data
    #print(strhtml.text)
    # Parse the page to extract the description block.
    # temp_stehtml = str(strhtml.text)
    new_strhtml = strhtml.text
    soup = BeautifulSoup(new_strhtml, 'lxml')  # parse the document with lxml
    # Get the page's unique pid.
    try:
        pidnumber = Get_pid(soup)
    except Exception:
        print("获取失败,请检查输入链接是否是论坛帖子链接!")
        return -1
    data = soup.select('#postmessage_' + pidnumber)  # locate the resource info block
    line_data = str(data).replace('<br/>', '\n')
    # Extract the resource name.
    name = soup = BeautifulSoup(new_strhtml, 'lxml')
    name = soup.select('#thread_subject')  # locate the resource title
    # Strip characters Windows does not allow in folder names.
    # (Original note: backslash is not checked — known gap.)
    name = str(name).replace('<span id="thread_subject">', '').replace('</span>', '').replace('/', '').replace('*', '').replace('?', '').replace('<', '').replace('>', '').replace(':', '')
    # print(name)
    # Create a dedicated directory for this resource.
    mkdirPatch = "Download" + "\\" + name + "\\"
    mkdir(mkdirPatch)
    # Save the resource description to a text file.
    # imgid = Get_imgid(soup,"#postmessage_"+pidnumber)
    # get_img = soup.select(imgid)
    f = open(mkdirPatch + name + '.txt', 'w', encoding='utf-8')
    f.write(str(data).replace('<br/>', ''))
    f.close()
    # Download every preview image.
    Img_number = 1
    ImgUrl = Get_Imgurl(soup)
    i = 0
    while (i < len(ImgUrl)):
        try:
            Save_img(ImgUrl[i], Img_number, mkdirPatch)
        except OSError:
            print("图片下载失败!")
        else:
            # NOTE(review): this progress string was split across lines in
            # the collapsed source; rejoined with a single space — confirm.
            print("第" + str(Img_number) + "张图片正在下载... [共" + str(len(ImgUrl)) + "张]")
        i += 1
        Img_number += 1
    # Download the torrent file(s).
    Downloadurl = DownloadTorrent(soup)  # torrent download page link(s)
    #print(len(Downloadurl))
    if (len(Downloadurl) == 1):  # single torrent -> assume 720p
        DownTorrenturl = "http://thz5.net/forum.php?mod=attachment&" + Downloadurl[0]
        r = requests.get(DownTorrenturl)
        with open(mkdirPatch + name + ' 720p' + '.torrent', "wb") as code:
            code.write(r.content)
        print("种子下载完成!")
    elif (len(Downloadurl) > 1):
        # Multiple torrents: name them by ascending resolution.
        TorrentName = ["720p", "1080p", "2k", "4k"]
        i = 0
        while (i < len(Downloadurl)):
            DownTorrenturl = "http://thz5.net/forum.php?mod=attachment&" + Downloadurl[i]
            r = requests.get(DownTorrenturl)
            # print("正在下载种子文件!")
            with open(mkdirPatch + name + TorrentName[i] + ".torrent", "wb") as code:
                code.write(r.content)
            i += 1
        print(str(len(Downloadurl)) + "个种子下载完成!")
    else:
        print("未找到种子文件!")
    # Next page URL; the caller recurses with it.
    url = Get_nextPage(soup, pidnumber)
    time.sleep(3)  # sleep 3 seconds
    print("现在休眠" + str(Sleeptime) + "秒")
    return url  # next page url
#file_def = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt20-50_PU0_pt0_new_eff_4simhits.root" file_def = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt10-50_PU0_pt0_new_eff_4simhits_etalow21.root" #file_def = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt10-50_PU0_pt0_new_eff_4simhits_etahigh21.root" ## input files with various GEM,CSC bending angle cuts file_def = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt0_new_eff.root" file_gem5 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt5_new_eff.root" file_gem10 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt10_new_eff.root" file_gem15 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt15_new_eff.root" file_gem20 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt20_new_eff.root" file_gem30 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt30_new_eff.root" file_gem40 = "hp_dimu_CMSSW_6_2_0_SLHC1_upgrade2019_pu000_w3_gem98_pt2-50_PU0_pt40_new_eff.root" ## output directory definition reuseOutputDirectory = True if not reuseOutputDirectory: output_dir = mkdir("myTest") else: output_dir = "myTest_20140107_044508/" output_dir = "myTest_20140108_233926/" output_dir = "myTest_20140110_230540/" ## default name of the directory dir_name = 'GEMCSCTriggerEfficiency' ## plots that require a single input file eff_pth_tf_3st1a() eff_pth_tf() eff_pt_tf() do_h_pt_after_tfcand_ok_plus_pt10() eff_pt_tf_q()
def deal_LPresult(sr_path): savepath=mkdir.mkdir(sr_path) print savepath wp.walkpath(sr_path,savepath)
#Get the cattle number and the picture taking date aa = pathName.split('/') #print(aa[4]) #print(aa[5]) print(aa) bb = aa[-1].split('_') #The date folder always be the last one print('Check!!!', bb) #print(bb[1], ' ', bb[2], ' ', bb[3]) date = bb[1] + '/' + bb[2] + '/' + bb[3] #cattle_num = aa[4] #print(cattles[llx]) cattle_num = cattles[llx] #Set the destination folder dstpathName = '/Volumes/Transcend/Data' + pathName[8:] + '/result' mkdir.mkdir(dstpathName) #Start a excel book book = xlwt.Workbook(encoding='utf-8') sheet1 = book.add_sheet(u'Sheet1', cell_overwrite_ok=True) sheet1.write(0,0,'ファイル名:読み込んだ時のセッション名をファイル名とする') sheet1.write(1,4,'牛ビタミンA推定説明変数の画像とのCSVデータ関連付け(各画像データから特徴量の抽出) 画像読み取りソフト活用による特徴量のエクセル取込みイメージ') sheet1.write(2,0,'画像取込み確認 (チェック) Confirmation of Input picture numerical ananysis') sheet1.write(2,1,' ') #Waiting for check sheet1.write(2,4,' ') sheet1.write(2,5,'→画像から特徴量特定して挿入する箇所') sheet1.write(2,12,'R,G,Bの各占有率を以下に定義する。特徴量から自動計算する。') sheet1.write(3,0,' 牛番号 Cattle No 首輪画像から識別') sheet1.write(3,1,cattle_num) sheet1.write(3,4,'注)牛の個体識別タグと牛番号の対応が必要') sheet1.write(3,12,'r=R/(R+G+B)')
def generate_xml(file_root, file_info, obj): import mkdir from lxml.etree import Element, SubElement, tostring import pprint from xml.dom.minidom import parseString import os try: node_root = Element('annotation') node_folder = SubElement(node_root, 'folder') node_folder.text = file_info[0] except: print "error" node_filename = SubElement(node_root, 'filename') node_filename.text = file_info[1] node_size = SubElement(node_root, 'size') node_width = SubElement(node_size, 'width') node_width.text = '640' node_height = SubElement(node_size, 'height') node_height.text = '480' node_depth = SubElement(node_size, 'depth') node_depth.text = '3' for obj_i in obj: print obj_i node_object = SubElement(node_root, 'object') node_name = SubElement(node_object, 'name') #node_name.text = 'mouse' node_name.text = 'person' node_name = SubElement(node_object, 'difficult') # node_name.text = 'mouse' node_name.text = '0' node_bndbox = SubElement(node_object, 'bndbox') node_xmin = SubElement(node_bndbox, 'xmin') #node_xmin.text = '99' node_xmin.text = obj_i['xmin'] node_ymin = SubElement(node_bndbox, 'ymin') #node_ymin.text = '358' node_ymin.text = obj_i['ymin'] node_xmax = SubElement(node_bndbox, 'xmax') #node_xmax.text = '135' node_xmax.text = obj_i['xmax'] node_ymax = SubElement(node_bndbox, 'ymax') #node_ymax.text = '375' node_ymax.text = obj_i['ymax'] xml = tostring(node_root, pretty_print=True) #格式化显示,该换行的换行 dom = parseString(xml) #file_root = '/home/user/Downloads/caltech_data_set/data_train/' # file_root = '/home/user/Downloads/caltech_data_set/30stepsize1_train/' file_name = file_root + file_info[0] mkdir.mkdir(file_name) fw = open(file_name + "/" + file_info[1].split('.')[0] + ".xml", 'w') fw.write(xml) print "xml _ ok" fw.close()
def getStackInfo(que, i, sta_codes, sta_names, sta_data_date, end_data_date,
                 mode):
    """Worker: scan cell data at a selectable granularity and save any
    problems found to an Excel file under ./BMUQS/太阳宫/.

    que  -- work queue; one item is consumed and task_done() signalled at
            the end of the run
    i    -- index of the unit to scan at the chosen granularity
    mode -- selects the granularity and carries the fixed parent indices:
            ['sta']                        i is the station index
            [sta, 'box']                   i is the box index
            [sta, box, 'ba']               i is the stack index
            [sta, box, ba, 'cl']           i is the cluster index
            [sta, box, ba, cl, 'pack']     i is the pack index
    """
    # Slice selectors; (0, -1) means "scan everything".
    sbox, ebox = 0, -1
    sba, eba = 0, -1
    scl, ecl = 0, -1
    sp, ep = 0, -1
    if len(mode) == 1 and 'sta' in mode:
        # i is already the station id.
        pass
    #mode=[id,'box']
    elif len(mode) == 2 and 'box' in mode:
        sbox, ebox = i, i + 1
        i = mode[0]
    elif len(mode) == 3 and 'ba' in mode:
        sba, eba = i, i + 1
        i = mode[0]
        sbox, ebox = mode[1], mode[1] + 1
    elif len(mode) == 4 and 'cl' in mode:
        scl, ecl = i, i + 1
        i = mode[0]
        sbox, ebox = mode[1], mode[1] + 1
        sba, eba = mode[2], mode[2] + 1
    elif len(mode) == 5 and 'pack' in mode:
        sp, ep = i, i + 1
        i = mode[0]
        sbox, ebox = mode[1], mode[1] + 1
        sba, eba = mode[2], mode[2] + 1
        scl, ecl = mode[3], mode[3] + 1
    obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[i])
    #6-3-3 3pack sbox=5,ebox=6,sba=2,eba=3,scl=2,ecl=3,sp=2,ep=3
    rs = obj.getCellDataByPack(sbox=sbox,
                               ebox=ebox,
                               sba=sba,
                               eba=eba,
                               scl=scl,
                               ecl=ecl,
                               sp=sp,
                               ep=ep)
    if len(rs) > 0:
        rs = rs[0]
        df_rs = pd.DataFrame(rs)
        # Prepend the query window as the first sheet row.
        df_rs = pd.concat(
            [pd.DataFrame([[sta_data_date, end_data_date]]), df_rs])
        df_rs.columns = ['描述', '电压']
        mkdir('./BMUQS/太阳宫/')
        fileH = './BMUQS/太阳宫/' + sta_data_date.split(' ')[0] + sta_names[i]
        # Build the output file name from the granularity actually scanned.
        if 'sta' in mode:
            fileN = fileH + '.xlsx'
        elif 'box' in mode:
            # fileN=fileH+str(mode[0]+1)+'箱.xlsx'
            fileN = fileH + str(sbox + 1) + '箱.xlsx'
        elif 'ba' in mode:
            # fileN=fileH+str(mode[0]+1)+'箱'+str(mode[1]+1)+'堆.xlsx'
            fileN = fileH + str(mode[1] + 1) + '箱' + str(sba + 1) + '堆.xlsx'
        elif 'cl' in mode:
            # fileN=fileH+str(mode[0]+1)+'箱'+str(mode[1]+1)+'堆'+str(mode[2]+1)+'簇.xlsx'
            fileN = fileH + str(mode[1] + 1) + '箱' + str(
                mode[2] + 1) + '堆' + str(scl + 1) + '簇.xlsx'
        elif 'pack' in mode:
            # fileN=fileH+str(mode[0]+1)+'箱'+str(mode[1]+1)+'堆'+str(mode[2]+1)+'簇'+str(mode[3]+1)+'包.xlsx'
            fileN = fileH + str(mode[1] + 1) + '箱' + str(
                mode[2] + 1) + '堆' + str(mode[3] + 1) + '簇' + str(sp + 1) + '包.xlsx'
        else:
            print('添加更多模式...')
        df_rs.to_excel(fileN, index=False)
        print('发现如下问题[描述,12s电压],已存入BMUQS目录下')
    else:
        print('所选时间段内没有发现任何问题')
    print(que.get())
    que.task_done()
def makeFolder(path):
    """Create the directory *path* by delegating to mkdir.mkdir()."""
    mkdir.mkdir(path)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter

# Load the EPIC-Kitchens action annotations: start/stop frame per segment
# plus the owning video id.
labelfile = pd.read_csv('./EPIC_train_action_labels.csv')
startarray = np.array(labelfile.start_frame)
stoparray = np.array(labelfile.stop_frame)
videoid = labelfile.video_id.tolist()
#k=np.concatenate((startarray,stoparray),axis = 1)
num = 0
# NOTE(review): torch, glob and mkdir are used below but not imported in
# this chunk — presumably imported above it; confirm against the full file.
tensor_x = torch.Tensor()
#%%
# Output directory for the generated dynamic images.
path = './dynamic_image'
mkdir.mkdir(path)
folder = sorted(
    glob.glob(
        '/data/scene_understanding/EPIC_KITCHENS_2018/epic/data/raw/rgb/*'))
for i in folder:
    subfolder = sorted(glob.glob(i + '/*'))
    for j in subfolder:
        frames = sorted(glob.glob(j + '/*.jpg'))
        # One pass per annotated segment belonging to this video id.
        for k in range(videoid.count(j.split("/")[-1])):
            start = startarray[num]
            stop = stoparray[num]
# NOTE(review): the lines below are the tail of a function defined above
# this chunk (f, data, k, v come from its scope); indentation is
# reconstructed from a collapsed source line — confirm against the full file.
            f.write(data + '\n')
    else:
        with open('Null_recommend_list', 'a', encoding='UTF-8') as f3:
            f3.write(k + ':' + v + '\n')


if __name__ == '__main__':
    start_time = time.time()
    # Per-day html dump directories, e.g. D:\spider_html\2018_01_05\star\.
    star_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\")+str("star")+str("\\")
    star_movie_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\") + str("movie") + str("\\")
    star_show_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\") + str("show") + str("\\")
    path1 = mk.mkdir(star_html)
    path2 = mk.mkdir(star_movie_html)
    path3 = mk.mkdir(star_show_html)
    # Build the url list inputs.
    path = '.\\oid_name_type\\20180107.txt'
    path_res = '.\\res_container\\res13'
    path_recommend = mk.mkdir('.\\recommend_container\\recommend8\\')
    # {starname: url}
    full = []
    full.append(star.get_mingxingurl_dict(path))
    full.append(star.get_yinyueurl_dict(path))
    for i in full:
        recomend(i, path1, path2, path3, path_res)
def getStackInfo(que, sta_i, sta_codes, sta_names):
    """Worker: pull temperature statistics for station *sta_i* at several
    granularities, save them to Excel and render heatmaps (max temperature
    and max temperature spread) under ./temp_datas/<station name>/.

    Each element of rs holds rows of [max, min, max-diff, max-mean]
    temperatures for one mode; the nested loops below re-shape the flat
    row list into a per-box matrix for plotting.  Reads module-level
    sta_data_date / end_data_date.
    """
    obj = getStaWork(sta_data_date, end_data_date, sta_code=sta_codes[sta_i])
    mkdir('./temp_datas/' + sta_names[sta_i])
    # Defaults: 0 / -1 means "scan everything" at every level.
    sbox, ebox = 0, -1
    sba, eba = 0, -1
    scl, ecl = 0, -1
    sp, ep = 0, -1
    # Default mode: pack / cluster / stack.
    mode = ['']
    rs = obj.getCellDataByPack(quickly=False,
                               sbox=sbox,
                               ebox=ebox,
                               sba=sba,
                               eba=eba,
                               scl=scl,
                               ecl=ecl,
                               sp=sp,
                               ep=ep,
                               mode=mode)
    # rs=pd.read_excel('D:\\python_works\\ClouPy\works\\API_Class\\temp_datas\\河北宣化电厂储能电站\\2019-05-26.xlsx',sheet_name=0,index_col=0)
    # rs=np.array(rs)
    # print(rs)
    pack_bmu_num = obj.pack_bmu_num
    # Resolve the "-1 = all" sentinels from the station object.
    if ebox == -1:
        ebox = obj.box_num
    if ep == -1:
        ep = obj.pack_num
    # (A large hard-coded rs test fixture — nested [max, min, diff, mean]
    # rows padded with NaN — was kept here commented out for offline
    # debugging; removed for readability.)
    print([eba, ep, pack_bmu_num])
    print(rs)
    # obj.cl_num_list=[4,5]
    print(obj.cl_num_list)
    if len(rs) == len(mode):
        write = pd.ExcelWriter("./temp_datas/" + sta_names[sta_i] + '/' +
                               sta_data_date.split(' ')[0] + ".xlsx")
        for i in range(len(rs)):
            # Re-format the flat result rows for plotting.
            formatRs, ys, xs, fxs = [], [], [], []
            for box in range(sbox + 1, ebox + 1):
                boxformatRs = []  # rows belonging to this box
                Flag_ba = False
                if eba == -1:
                    # Per-box stack count; remember to restore the -1
                    # sentinel after this box.
                    eba = obj.ba_num_list[box - 1 - sbox]
                    print('%s-------:' % eba)
                    Flag_ba = True
                for ba in range(sba + 1, eba + 1):
                    xba = str(ba) + '堆'
                    # A = number of stacks already visited in earlier boxes.
                    A = sum(obj.ba_num_list[0:box - 1 - sbox])  #(box-sbox-1)*(eba-sba)
                    if mode[i] == 'getDTempsByBA':
                        try:
                            boxformatRs.append(rs[i][(ba - sba - 1) + A])
                            fxs.append(str(box) + '箱-' + xba)
                        except:
                            boxformatRs.append(
                                [np.NaN, np.NaN, np.NaN, np.NaN])
                        if box == sbox + 1:
                            xs.append(xba)
                    else:
                        if ecl == -1:
                            # Per-stack cluster count; restored below.
                            # NOTE(review): FLag has no initialisation
                            # before this 'if' — the reset at the end of
                            # the ba loop raises NameError when this
                            # branch is never taken; confirm intent.
                            ecl = obj.cl_num_list[ba - 1 - sba + A]  #(box-1-sbox)*(eba-sba)
                            print(ecl)
                            FLag = True
                        for cl in range(scl + 1, ecl + 1):
                            xcl = xba + '-' + str(cl)
                            # B=(A+(ba-sba-1))*(ecl-scl)
                            # B = number of clusters already visited.
                            B = sum(obj.cl_num_list[0:ba - 1 - sba + A])  #(box-1-sbox)*(eba-sba)
                            if mode[i] == 'getDTempsByCL':
                                try:
                                    boxformatRs.append(rs[i][(cl - scl - 1) + B])
                                    fxs.append(str(box) + '箱-' + xcl)
                                except:
                                    boxformatRs.append(
                                        [np.NaN, np.NaN, np.NaN, np.NaN])
                                if box == sbox + 1:
                                    xs.append(xcl)
                            else:
                                for p in range(sp + 1, ep + 1):
                                    xp = xcl + '-Pack' + str(p)
                                    C = (B + (cl - scl - 1)) * (ep - sp)
                                    if mode[i] == 'getDTempsByPack':
                                        try:
                                            fxs.append(str(box) + '箱-' + xp)
                                            boxformatRs.append(
                                                rs[i][(p - sp - 1) + C])
                                        except:
                                            boxformatRs.append([
                                                np.NaN, np.NaN, np.NaN,
                                                np.NaN
                                            ])
                                        if box == sbox + 1:
                                            xs.append(xp)
                                    else:
                                        for bmu in range(1, pack_bmu_num + 1):
                                            xb = xp + '-BMU' + str(bmu)
                                            D = (C + (p - sp - 1)) * pack_bmu_num
                                            if 'getDTempsByBMU' in mode:
                                                try:
                                                    fxs.append(
                                                        str(box) + '箱-' + xb)
                                                    boxformatRs.append(
                                                        rs[i][(bmu - 1) + D])
                                                except:
                                                    boxformatRs.append([
                                                        np.NaN, np.NaN,
                                                        np.NaN, np.NaN
                                                    ])
                                                if box == sbox + 1:
                                                    xs.append(xb)
                        if FLag:
                            # Restore the per-stack "all clusters" sentinel.
                            ecl = -1
                if Flag_ba:
                    # Restore the per-box "all stacks" sentinel.
                    eba = -1
                formatRs.append(boxformatRs)
                ys.append('第%s箱' % box)
            if len(xs) > 0:
                # Derive axis label / sheet suffix from the label depth.
                xs_split = xs[0].split('-')
                if len(xs_split) == 1:
                    xlabel = '堆'
                    txt = '各堆'
                elif len(xs_split) == 2:
                    xlabel = '堆-簇'
                    txt = '各簇'
                elif len(xs_split) == 3:
                    xlabel = '堆-簇-包'
                    txt = '各包'
                elif len(xs_split) == 4:
                    xlabel = '堆-簇-包-BMU'
                    txt = '各BMU'
                # Save the raw-format data to the workbook.
                df_r = pd.DataFrame(rs[i])
                print(df_r)
                df_r.columns = ['最高温度/℃', '最低温度/℃', '最大温差/℃', '最大平均温度/℃']
                print(fxs)
                df_r.index = fxs
                df_r.to_excel(write, sta_names[sta_i] + txt + "温度数据",
                              index=True)
                # Plot only max temperature and max spread (max-mean and
                # min are not plotted).
                maxTs, max_dif_Ts = [], []
                for it_box in formatRs:
                    maxTs.append([it[0] for it in it_box])
                    max_dif_Ts.append([it[2] for it in it_box])
                df = pd.DataFrame(maxTs)
                # print(df)
                df.index = ys
                df.columns = xs
                f, (ax1, ax2) = plt.subplots(figsize=(10, 8), nrows=2)
                cmap = sns.cubehelix_palette(start=1.5,
                                             rot=3,
                                             gamma=0.8,
                                             as_cmap=True)
                try:
                    sns.heatmap(df,
                                linewidths=0.05,
                                ax=ax1,
                                annot=True,
                                annot_kws={'weight': 'bold'},
                                vmax=max(np.max(df)),
                                vmin=min(np.min(df)),
                                cmap=cmap)
                    ax1.set_title(
                        sta_data_date.split(' ')[0] + sta_names[sta_i] +
                        txt + '最高温度')
                    ax1.set_xlabel('')
                    ax1.set_xticklabels([])  # empty x-axis legend
                    ax1.set_ylabel('箱')
                except:
                    print('图像绘制失败')
                df = pd.DataFrame(max_dif_Ts)
                df.index = ys
                df.columns = xs
                try:
                    # First pass highlights the maximum cell in bold red;
                    # second pass draws the rest of the heatmap.
                    sns.heatmap(df,
                                ax=ax2,
                                cmap=sns.color_palette("Reds", 7),
                                cbar=False,
                                annot=True,
                                mask=df < max(np.max(df)),
                                annot_kws={
                                    "weight": "bold",
                                    'size': 25
                                })
                    sns.heatmap(df,
                                linewidths=0.05,
                                vmax=max(np.max(df)),
                                vmin=min(np.min(df)),
                                cmap=sns.color_palette("Reds", 7),
                                ax=ax2,
                                annot=True,
                                mask=df >= max(np.max(df)))
                    # "rainbow" is a matplotlib colormap name
                    ax2.set_title(
                        sta_data_date.split(' ')[0] + sta_names[sta_i] +
                        txt + '最大温差')
                    ax2.set_xlabel(xlabel)
                    ax2.set_ylabel('箱')
                    plt.savefig("./temp_datas/" + sta_names[sta_i] + '/' +
                                sta_data_date.split(' ')[0] + txt +
                                '最高温度与最大温差.png')
                except:
                    print('图像绘制失败')
        write.save()
    print(que.get())
    que.task_done()
async def book(self, ctx, choice=None, *, data=None):
    """Discord 'book' command with three modes:

    write    -- store *data* as a new book and reply with its generated id
    read     -- show a stored book in an embed (data = book id)
    download -- send a stored book as an attached text file (data = book id)
    """
    booksdb = reader("/home/rix56/pycharm/BOT/jsondata/books.json")
    if choice is None:
        await ctx.send(
            "Séléctionnez un de ces modes : read(lire dans un bouquin à l'aide de son id), write(pour écrire un bouquin, vous obtiendrez son id), download(pour télécharger un bouquin à l'aide de son id, il sera envoyé sur le serveur)."
        )
        return
    if choice == "write":
        if data is None:
            await ctx.send("Vous n'avez pas mis quoi écrire !")
            return
        # Random id from urandom bytes, stripped of characters that would
        # break paths or quoting.
        id = str(os.urandom(random.randint(4, 12)))
        for x in ["b", "'", '"', "/", '\\']:
            id = id.split(x)
            id = ''.join(id)
        member = ctx.message.author
        booksdb[id] = {
            "content": data,
            "author": f"{member}",
            "id": f"{id}"
        }
        with open("/home/rix56/pycharm/BOT/jsondata/books.json", 'w',
                  encoding='utf8') as jsonFile:
            json.dump(booksdb, jsonFile, indent=4)
        await ctx.send(
            f"Vous avez écrit dans un bouquin ! ID du bouquin : {id}")
    if choice == "read":
        if data is None:
            await ctx.send("Vous n'avez pas mis une ID !")
            return
        id = data
        listkeys = booksdb.keys()
        if id not in listkeys:
            await ctx.send("ID non trouvée !")
            return
        embed = discord.Embed(title=f"Lecteur de livre", color=0xF1C40F)
        embed.add_field(name="ID du bouquin", value=booksdb[id]["id"])
        embed.add_field(name="Auteur du bouquin", value=booksdb[id]["author"])
        embed.add_field(name="Contenu du bouquin", value=booksdb[id]["content"])
        embed.set_footer(
            text=f'En réponse à {ctx.message.author.name} - Créé par Rix56',
            icon_url=ctx.message.author.avatar_url)
        await ctx.send(embed=embed)
    if choice == "download":
        if data is None:
            await ctx.send("Vous n'avez pas mis une ID !")
            return
        id = data
        listkeys = booksdb.keys()
        if id not in listkeys:
            await ctx.send("ID non trouvée !")
            return
        mkdir.mkdir("tmp-book")
        # Mode "x": fails if tmp.txt already exists (no overwrite).
        with open("/home/rix56/pycharm/BOT/tmp-book/tmp.txt", "x") as fichier:
            # NOTE(review): this literal was split across lines in the
            # collapsed source; rejoined with a single space — confirm.
            fichier.write(
                f"ID du bouquin : {booksdb[id]['id']}\n\nAuteur du bouquin : {booksdb[id]['author']}\n\nContenu du bouquin : {booksdb[id]['content']}\n\n\nFichier généré par The Fox Bot Books."
            )
            fichier.close()
        await asyncio.sleep(0.2)
        await ctx.send("Le livre est en train d'être envoyé...")
        await ctx.send(
            file=discord.File("/home/rix56/pycharm/BOT/tmp-book/tmp.txt",
                              filename="book.txt"))
        # Clean up the temp file and its (now empty) directory.
        os.remove("/home/rix56/pycharm/BOT/tmp-book/tmp.txt")
        os.removedirs("/home/rix56/pycharm/BOT/tmp-book")
import urllib.request
import requests
import wget as wget
import urllib
from bs4 import BeautifulSoup
import re
import time
from mkdir import mkdir

# Input lists: one ACM journal entry per line in each file.
lines = open('csur00.txt').readlines()
issue_lines = open('csur01.txt').readlines()
# Output directory for the downloads.
mkpath = r".\cusr"
mkdir(mkpath)
# Pretend to be a desktop Chrome browser.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36',
}
# Request parameters mirroring the ACM DL table-of-contents query.
# NOTE(review): this dict continues past this chunk (it is not closed here).
para = {
    'type': 'issue',
    'sellOnline': '0',
    'parent_id': 'J401',
    'parent_type': 'periodical',
    'title': 'Journal of the ACM (JACM) - Distributed Computing, Algorithms and Data Structures, Algorithms, Scientific Computing, Derandomizing Algorithms, Online Algorithms and Algorithmic Information Theory',
    'toctitle': 'Journal of the ACM (JACM)',
    'tocissue_date': 'Volume 52 Issue 6, November 2019',
    'notoc': '0',
    'usebody': 'tabbody',
    'tocnext_id': '',
    'tocnext_str': '',
    'tocprev_id': '3362097',
    'tocprev_str': 'Volume 52 Issue 5, October 2019',
def ENCDECTrainer(df):
    """Train the encoder/decoder LSTM on *df* using the enc_dec
    hyper-parameters from constants."""
    param = constants.enc_dec_param
    model = models.ENDELSTM(
        dict(features=param['n_features'], output_dim=param["output_dim"]))
    generic_train(model, df, param, "log/enddeclstm.txt", "auto_lstm.pth")


def GULSTMTrainer(dataset):
    """Train the GU-LSTM on *dataset* using the gu_lstm hyper-parameters
    from constants."""
    param = constants.gu_lstm_param
    model = models.GULSTM(param["input_dim"], param["hidden_dim"],
                          param["layer_dim"], param["output_dim"])
    # BUGFIX: the original passed the module-level 'df' instead of the
    # 'dataset' argument, silently training on whatever global happened
    # to be loaded (and raising NameError when none was).
    generic_train(model, dataset, param, "log/gu_lstm.txt", "gu_lstm.pth")


if __name__ == "__main__":
    # Make sure the output/log directories exist before training starts.
    mkdir.mkdir()
    print("Using device {} for training".format(device))
    save_plots = True
    debug = False
    parser = argparse.ArgumentParser()
    parser.add_argument("train_mode")
    args = parser.parse_args()
    ### READ DATASET ###
    # In debug mode only the first 100 rows are loaded.
    df = pd.read_csv('data/weatherstats_montreal_daily.csv',
                     index_col=0,
                     nrows=100 if debug else None)
    df = pre_process(df)
    if args.train_mode == "LSTM":
        print(constants.lstm_param)
        LSTMTrainer(df)
# NOTE(review): the lines below are the tail of a function defined above
# this chunk (k and v come from its scope); indentation is reconstructed.
    else:
        with open('Null_recommend_list', 'a', encoding='UTF-8') as f3:
            f3.write(k + ':' + v + '\n')


if __name__ == '__main__':
    start_time = time.time()
    # Worker pool for the per-star scraping jobs.
    p = Pool(70)
    # Per-day html dump directories, e.g. D:\spider_html\2018_01_05\star\.
    star_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\")+str("star")+str("\\")
    star_movie_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\") + str("movie") + str("\\")
    star_show_html = str("D:\\spider_html\\") + time.strftime('%Y_%m_%d', time.localtime(time.time())) \
        + str("\\") + str("show") + str("\\")
    path1 = mk.mkdir(star_html)
    path2 = mk.mkdir(star_movie_html)
    path3 = mk.mkdir(star_show_html)
    # Build the url list inputs from the path helpers.
    path = mk.get_data_file_path('.\\oid_name_type')
    print(path)
    path_res = mk.res_name_path('.\\res_container')
    path_recommend = mk.recommed_file_path('.\\recommend_container')
    # {starname: url}
    full = []
    full.append(star.get_mingxingurl_dict(path))
    full.append(star.get_yinyueurl_dict(path))
    for i in full:
        # NOTE(review): this call continues past this chunk (argument list
        # is not closed here).
        p.apply_async(recomend, args=(
import get_stardict as star
import analysis
import json as js
import name_topicid as name_oid
import add_re_relation as add
import mkdir
import count
import Parallel_processing.recommend_main as recommend

# Input name/oid files.
path1 = '/Users/zhangwei/Desktop/sina_job/recommend/oid_name_type/test.txt'  # change on every run
path1_1 = '/Users/zhangwei/Desktop/sina_job/recommend/oid_name_type/20180114.txt'
path2 = '../res_container/test'  # change on every run
# path3 = mkdir.mkdir('../recommend_container/recommend7/')  # change on every run
# path3_3 = './recommend_container/recommend7/'  # change
path3 = mkdir.mkdir('../recommend_container/test/')  # change on every run
# print(type(path3))
# print(path3)
# {starname: url}
full = []
full.append(star.get_mingxingurl_dict(path1))
full.append(star.get_yinyueurl_dict(path1))
# star_url = star.get_mingxingurl_dict(path)  # stars
# star_url = star.get_yinyueurl_dict(path)  # music
# star_url = star.getmingxingurl_test()  # test case
for i in full:
    recommend.recomend(i, path2)
# path3_3 = './recommend_container/recommend7/'  # change — note: the logic here was wrong
name_oid.name_oid(path1_1, path2, path3)
# 3. Add the reverse relations to get the final list.
add.ad_re_relation(path3)
# 4. Aggregate the results.