def _prepare_batch(batch, outputs_per_step):
    """Shuffle a batch in place and assemble padded numpy arrays for training.

    batch: list of examples; each example is indexed as
      [0]=speaker id, [1]=mask target, [2]=input sequence, [4]=mel target.
      # NOTE(review): index 3 is unused here — confirm against the data loader.
    outputs_per_step: padding granularity forwarded to _prepare_targets.

    Returns a tuple (speaker_id, mask, inputs, input_lengths, mel_targets).
    Raises ValueError when the padded mask and mel targets disagree in shape.
    """
    random.shuffle(batch)  # in-place: mutates the caller's list
    speaker_id = np.asarray([x[0] for x in batch], dtype=np.int32)
    mask = _prepare_targets([x[1] for x in batch], outputs_per_step)
    inputs = _prepare_inputs([x[2] for x in batch])
    input_lengths = np.asarray([len(x[2]) for x in batch], dtype=np.int32)
    mel_targets = _prepare_targets([x[4] for x in batch], outputs_per_step)
    # Bug fixes vs. original: `os.exist(0)` is not a real function (the intent
    # was evidently to abort), and the return statement referenced an
    # undefined name `mel_target`.  Raise a descriptive error instead of
    # printing and killing the process.
    if mel_targets.shape != mask.shape:
        raise ValueError(
            'shape mismatch: mel_targets %s vs mask %s'
            % (mel_targets.shape, mask.shape))
    return (speaker_id, mask, inputs, input_lengths, mel_targets)
def main(workingDir):
    """Create the output directory tree used when downloading CLU shape files.

    workingDir: base working directory; callers pass it with a trailing path
      separator (paths are built by string concatenation, not os.path.join).
    """
    # There should be CLU, county and state level output directories in here.
    outputResPath = workingDir + "outputRes//"
    # Bug fixes vs. original: `os.exist` is not a real function
    # (os.path.exists is), the `if` was missing its colon, and the condition
    # was inverted — the directory must be created when it is MISSING.
    if not os.path.exists(outputResPath):
        os.mkdir(outputResPath)
def __init__(self, Title=None, IconPath=None, BackGroundColor="#d9d9d9"):
    """Build the developer-console Tk window and its widgets.

    Title: optional window title (defaults to "Developer Console").
    IconPath: optional icon file; applied only if the path exists.
    BackGroundColor: window background color.
    """
    # Setup The GUI
    self.window = tk.Tk()
    exitcmd = Command("exit", self.exit)
    self.window.geometry("450x300")
    self.window.minsize(120, 1)
    self.window.maxsize(1604, 881)
    self.window.resizable(1, 1)
    self.window.title("Developer Console")
    if Title:
        self.window.title(Title)
    if IconPath:
        # Bug fix: `os.exist` is not a real function; os.path.exists is the
        # correct existence predicate.
        if os.path.exists(IconPath):
            self.window.iconbitmap(IconPath)
    self.window.configure(background=BackGroundColor)

    # "Execute" button: runs the current entry via self.exec.
    self.SubmitButton = tk.Button(self.window)
    self.SubmitButton.place(relx=0.822, rely=0.833, height=44, width=70)
    self.SubmitButton.configure(activebackground="#ececec")
    self.SubmitButton.configure(activeforeground="#000000")
    self.SubmitButton.configure(background="#d9d9d9")
    self.SubmitButton.configure(disabledforeground="#a3a3a3")
    self.SubmitButton.configure(foreground="#000000")
    self.SubmitButton.configure(highlightbackground="#d9d9d9")
    self.SubmitButton.configure(highlightcolor="black")
    self.SubmitButton.configure(pady="0")
    self.SubmitButton.configure(text='''Execute''')
    self.SubmitButton.configure(command=self.exec)

    # Command input line.
    self.CommandEntry = tk.Entry(self.window)
    self.CommandEntry.place(relx=0.044, rely=0.867, height=30, relwidth=0.764)
    self.CommandEntry.configure(background="white")
    self.CommandEntry.configure(disabledforeground="#a3a3a3")
    self.CommandEntry.configure(font="TkFixedFont")
    self.CommandEntry.configure(foreground="#000000")
    self.CommandEntry.configure(insertbackground="black")

    # Scrollback / command list.
    self.CommandList = tk.Listbox(self.window)
    self.CommandList.place(relx=0.022, rely=0.033, relheight=0.773,
                           relwidth=0.964)
    self.CommandList.configure(background="white")
    self.CommandList.configure(disabledforeground="#a3a3a3")
    self.CommandList.configure(font="TkFixedFont")
    self.CommandList.configure(foreground="#000000")

    self.Commands = [exitcmd]
    # Enter key submits, same as the button.
    self.window.bind('<Return>', self.exec)
def run(self):
    """Worker loop: wait for a free download slot, then fetch one PDF.

    Busy-waits (1 s sleep) until the global pool counter has capacity,
    downloads self.url to self.path, moves the result into ./problemset/,
    releases the slot and exits.
    NOTE(review): `POOL_SIZE_NOW += 1` is not atomic across threads —
    confirm DataCenter guards it, otherwise the limit can be exceeded.
    """
    while True:
        if DataCenter.POOL_SIZE_NOW < DataCenter.POOL_SIZE_LIMIT:
            DataCenter.POOL_SIZE_NOW += 1
            DataCenter.pdfdownload(self.url, self.path)
            # Bug fix: `os.exist` is not a real function; os.path.exists is
            # the correct "directory already present" predicate.
            if not os.path.exists("problemset"):
                os.mkdir("problemset")
            shutil.move(self.path, "problemset/")
            DataCenter.POOL_SIZE_NOW -= 1
            break
        else:
            time.sleep(1)
def _create_new_facility(facility_path):
    """Create a facility folder seeded from the bundled 'dls' template.

    facility_path: folder name, taken relative to this module's directory.

    If the folder does not exist, it is created and the entire
    system_files/dls tree is mirrored into it.  If it already exists, only
    a missing system_parameters.yml is backfilled from the template.
    """
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
    facility_path = path + '/' + facility_path
    dls_path = path + '/system_files/dls'
    if not os.path.exists(facility_path):
        # New facility: replicate the template tree, directories first.
        os.makedirs(facility_path)
        for root, dirs, files in os.walk(dls_path):
            folder = os.path.relpath(root, dls_path)
            to_this_folder = os.path.join(facility_path, folder)
            if not os.path.exists(to_this_folder):
                os.makedirs(to_this_folder)
            for f in files:
                copy_this_file = os.path.join(root, f)
                shutil.copy(copy_this_file, to_this_folder)
    else:
        # Existing facility: only restore a missing system_parameters.yml.
        dls_sys_params = os.path.join(dls_path, 'system_parameters.yml')
        facility_sys_params = \
            os.path.join(facility_path, 'system_parameters.yml')
        # Bug fix: `os.exist` is not a real function — the rest of this
        # function already uses os.path.exists correctly.
        if not os.path.exists(facility_sys_params):
            shutil.copy(dls_sys_params, facility_path)
_size = 0 _vectors = None _stopwords = set() _cache_nearby = dict() ''' lambda fns ''' # combine similarity scores _similarity_smooth = lambda x, y, z, u: (x * y) + z - u _flat_sum_array = lambda x: np.sum(x, axis=0) # 分子 ''' tokenizer settings ''' tokenizer_dict = os.path.join(curdir, 'data', 'vocab.txt') if "SYNONYMS_WORDSEG_DICT" in ENVIRON: if os.exist(ENVIRON["SYNONYMS_WORDSEG_DICT"]): print("info: set wordseg dict with %s" % tokenizer_dict) tokenizer_dict = ENVIRON["SYNONYMS_WORDSEG_DICT"] else: print("warning: can not find dict at [%s]" % tokenizer_dict) print(">> Synonyms load wordseg dict [%s] ... " % tokenizer_dict) _tokenizer.initialize(tokenizer_dict) # stopwords _fin_stopwords_path = os.path.join(curdir, 'data', 'stopwords.txt') def _load_stopwords(file_path): ''' load stop words
alphalink_block = alphalink_html.find(class_='alphalinks') alphalink_list = alphalink_block.find_all('li') for i in alphalink_list: alphalinks.append(i.a.attrs['href']) print('get',len(alphalinks),'alpha links') else: print('failed') break alphalinkFile = open(runPath + '\\' + 'fetch_alphalink.txt','a', encoding='utf-8') json.dump(alphalinks, alphalinkFile, indent =2) alphalinkFile.close() # 获取每个字母头下 分段链接索引 if os.exist(r'alpha_entry_link.json'): os.rename('alpha_entry_link.json','alpha_entry_link_old.json') alpha_entry_link_old = open(r'alpha_entry_link_old.json','r', encoding='utf-8') entries = json.load(alpha_entry_link_old) else: entries = {} entrylink_count = 0 for j in alphalinks: if j not in entries.keys() or not entries[j]: # 每个字母对应的入口链接列表 alphaentries=[] print('fetch',j,end='->') entry_code, entry_content = get_web(j) if not entry_code: print('success') try:
def read_pickle(path):
    """Load and return a pickled object.

    If `path` contains a '/' and exists as given, it is used directly;
    otherwise the file is looked up inside the download folder.
    WARNING: pickle.load on untrusted files can execute arbitrary code.
    """
    # Bug fixes vs. original: `os.exist()` was called with no arguments
    # (the real predicate is os.path.exists(path)), and the resolved path
    # was computed but then ignored — open() used the raw `path`.
    resolved = path if "/" in path and os.path.exists(path) \
        else get_download_folder() + "/" + path
    with open(resolved, 'rb') as f:
        return pickle.load(f)
if counter % 20 == 0: songs = '\n'.join(map(str, downloaded_songs)) + '\n' print(songs) all_songs_f.write(songs) songs_links = [] d = time.time() - t0 print("\n\n\n\n\n\n wrote in: %.3f s. \n\n\n" % d) else: downloaded_songs.append(new_song) break except Exception as e: print(e, new_song) urllib.request.urlopen( f"https://api.telegram.org/bot999605455:AAEZ3wPt6QyAqdoDa1gtUJzcWVuOk4UfsZU/sendMessage?chat_id=386848836&text=error\n{e}" ) print(e, '\n') browser.quit() fpath = f"{this_folder}/all_songs_errors.txt" if not os.exist(fpath): os.mknod(fpath) with open(fpath, "a") as f: f.write("\n".join(errors)) f.close() break except Exception as e: print(e)
except: fetch_error.write(letter_link + 'failed\n') else: print('failed') fetch_error.write(letter_link + 'failed\n') # 填充 digit 部分 all_letter_block_dict[digit_link] = digit_link # 保存为 json all_letter_block = open(runPath + os.sep + 'all_letter_block.json','a', encoding='utf-8') json.dump(all_letter_block_dict, all_letter_block, indent =2) all_letter_block.close() print('get block link number:',len(all_letter_block_dict.values())) # 抓取每个分块链接中 真词头索引链接 抓取很多次 if os.exist(r'all_index_dict.json'): os.rename('all_index_dict.json','all_index_dict_old.json') all_index_dict_old = open(r'all_index_dict_old.json','r', encoding='utf-8') index_dict = json.load(all_index_dict_old) all_index_dict_old.close() os.remove(r'all_index_dict_old.json') else: index_dict = {} for block_link in all_letter_block_dict.values(): print('fetch',block_link,end='->') # 不存在 或 抓取失败 if block_link not in index_dict or not index_dict[block_link]: block_link_code, block_link_content = get_web(block_link) # 单个分块下全部词头索引列表 index_block = []
try: outs = "%20s: %s\n" % (t.user.screen_name, t.text) outfile.write(outs) except UnicodeError: outfile.write( ">>> this tweet or user has a non-ascii character <<<") outs = "".join(i for i in outs if ord(i) < 128) #remove nonAscii outfile.write(outs) outfile.write("Skipped " + str(len(skippedlines)) + " lines") if __name__ == "__main__": try: if os.exist(sys.argv[1]): gettweetsfromfile(sys.argv[1]) else: scrapeTweetsFromPublicStream() except KeyboardInterrupt: pass ######################################### ### METHODS FOR A TWITTER STATUS OBJECT ######################################### ''' self.param_defaults = { 'contributors': None, 'coordinates': None, 'created_at': None, 'current_user_retweet': None,
def filep(f):
    """Return True if path `f` exists (file or directory), else False.

    Bug fix: the original called `os.exist`, which does not exist; the
    standard predicate is os.path.exists().
    """
    return os.path.exists(f)
def clean_plugins(self):
    """Drop configured plugins whose files are no longer on disk.

    Walks the configured submodules and removes every entry whose path
    (self.pluginpath + plugin) does not exist.
    NOTE(review): the string concatenation assumes self.pluginpath ends
    with a path separator — confirm against callers.
    """
    # Bug fix: `os.exist(...) == False` is broken twice over — os.exist is
    # not a real function, and `== False` should be `not ...`.  Also
    # iterate over a snapshot so drop() cannot disturb the iteration.
    for plugin in list(self.config_plugins.list_submodules()):
        if not os.path.exists(self.pluginpath + plugin):
            self.config_plugins.drop(plugin)
#path=raw_input("请输入tomcat日志所在的路径(比如:/usr/local/tomcat6.0/logs ):") #path=os.getcwd() path='/usr/local/tomcat6.0/logs/Rolling' os.chdir(path) #t=raw_input("请输入统计大于多少毫秒的数据(例如:3000就代表统计更新时间超出3秒的数据):") t=3000 ''' 请输入要统计的网站和数据类型: lj_sp -----------利己死盘 lj_early ---------利己早餐 sb_sp ----------沙巴死盘 sb_early -----------沙巴早餐 hg_sp -------------皇冠死盘 hg_early -----------皇冠早餐 lj_gq ---------------利己滚球 sb_gq ------------沙巴滚球 hg_gq -----------皇冠滚球 ''' #target=raw_input("请输入要统计的网站和数据类型: ") target_list=['lj_sp','lj_early','sb_sp','sb_early','hg_sp','hg_early','lj_gq','sb_gq','hg_gq'] for target in target_list: #os.system("grep '%s更新间隔时间' catalina.out.`date -I` > /tmp_dir/%s_target.log" % (target,target)) if os.exist() os.system("grep '%s更新间隔时间' rolling.log > /tmp/%s_target.log" % (target,target)) filename="/tmp/%s_target.log" % (target,) list=match.File(filename,target) Time = len(list) print "%s 更新超过 %s 毫秒的有 %s 次" % (target,t,Time)