def download_data(url, file_name=None, save_path=None, block_size=8192):
    """Download ``url`` into ``save_path`` and return the local file path.

    The body is first written to ``<name>.downloading`` and only moved to its
    final name after the full content was received, so an interrupted download
    never leaves a truncated file under the final name.

    :param url: source URL to fetch.
    :param file_name: target file name; derived from the URL path if None.
    :param save_path: target directory; defaults to ``get_data_home()``.
    :param block_size: bytes requested per ``read()`` call.
    :return: path of the downloaded (or previously cached) file.
    """
    import shutil
    import os
    from six.moves import urllib
    if save_path is None:
        save_path = get_data_home()  # NOTE(review): defined elsewhere in this module
    if not os.path.isdir(save_path):
        os.makedirs(save_path)
    if file_name is None:
        # Use the last path component of the URL as the file name.
        file_name = urllib.parse.urlsplit(url)[2].split("/")[-1]
    data_file = os.path.join(save_path, file_name)
    if os.path.exists(data_file):
        # Already cached; skip the download entirely.
        return data_file
    data_file_tmp = data_file + '.downloading'
    # remove_file() is presumably a context manager (defined elsewhere) that
    # deletes the temp file if the download fails -- TODO confirm.
    with remove_file(data_file_tmp):
        # NOTE(review): ``urlopen`` is not imported in this function, so it
        # must come from a module-level import -- verify.
        with open(data_file_tmp, 'wb') as f, urlopen(url) as response:
            # Assumes the server sends Content-Length; ``int(None)`` would
            # raise TypeError otherwise -- TODO confirm for all callers.
            content_length = int(response.info().get("Content-Length"))
            prev_progress = 0
            print("Start download: {}.".format(url))
            for i in range(0, content_length, block_size):
                read_end = min(i + block_size, content_length)
                read_size = read_end - i
                buf = response.read(read_size)
                if not buf:
                    # Connection delivered fewer bytes than advertised.
                    raise ValueError()
                f.write(buf)
                # Report every additional 10% of progress.
                progress = int(i / (content_length / 10))
                if progress > prev_progress:
                    prev_progress = progress
                    print("{}% downloaded.".format(int(progress * 10)))
        print("[100%] downloaded.")
        # Atomically publish the finished download under its final name.
        shutil.move(data_file_tmp, data_file)
    return data_file
def __doCerts(self, sts, sks, token):
    """Decode the base64 trust store and key store into a temporary
    directory, load the key store to extract the private key and
    certificates for ``token``, then remove the temporary files."""
    home = expanduser("~")
    # if os.path.exists(home + '/magistral') == False:
    #     os.makedirs(home + '/magistral')
    tmp_dir = home + '/magistral/tmp'
    if not os.path.exists(tmp_dir):
        os.makedirs(tmp_dir)
    # Write both decoded stores ('ts' first, then 'ks') to disk.
    for store_name, encoded in (('/ts', sts), ('/ks', sks)):
        handle = open(tmp_dir + store_name, 'wb')
        handle.seek(0)
        handle.write(bytearray(base64.standard_b64decode(encoded)))
        handle.close()
    ks = jks.KeyStore.load(tmp_dir + '/ks', 'magistral')
    self.uid = JksHandler.writePkAndCerts(ks, token)
    # Clean up the decoded stores once the key material is extracted.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def GeneratedFileHandler(self, headerlist):
    """Generate one record file per 5-minute window between start and stop
    time and scp each finished file to the host configured in ``config``.

    Python 2 code (print statement, ``commands`` module).

    :param headerlist: header string written at the top of every file.
    """
    if not os.path.exists('generated_data'):
        os.makedirs('generated_data')
    self.filename = 'generated_data/' + self.filename
    # Records per 5-minute file; integer division under Python 2.
    count = (config.kfps*300*1000)/2500
    starttime = self.getEpoch(self.startTime)
    print starttime
    stoptime = self.getEpoch(self.stopTime)
    # One output file per 300-second (5 minute) window.
    while starttime<stoptime :
        fileName = self.getFileFormat(self.filename, starttime)
        if(self.gzipOn == 'yes'):
            fileHandle = gzip.open(fileName, 'wb', compresslevel = 1)
        elif(self.gzipOn == 'no'):
            fileHandle = open(fileName, 'w+')
        # NOTE(review): if gzipOn is neither 'yes' nor 'no', fileHandle is
        # unbound on the first iteration and the next line raises.
        fileHandle.write(headerlist)
        for i in range(count):
            nextRecord = self.GenerateRecords(starttime)
            fileHandle.write(nextRecord)
        fileHandle.close()
        # Ship the finished file to the remote collection host.
        commands.getstatusoutput('scp %s %s@%s:%s'%(fileName, config.username, config.ip, config.directory))
        starttime+= 300
def download_csv(self, indicator, profile, countries, regions, file_name): downloaded = False if os.path.exists(os.path.join(__data__.path(), os.path.basename(file_name))) : print "File already downloaded, please erase it" else : connection_attempts = 3 url = self._prepare_url(indicator, profile, countries, regions) #Check if folder exists if not os.path.exists(__data__.path()): os.makedirs(__data__.path()) outpath = os.path.join(__data__.path(), os.path.basename(file_name)) while not downloaded and connection_attempts > 0: try: downloaded_file = urllib2.urlopen(url) data = downloaded_file.read() with open(outpath, "wb") as code: code.write(data) downloaded = True except: downloaded = False connection_attempts = connection_attempts-1 return downloaded
def test_typing_not_shown(libspec_manager, workspace, data_regression, workspace_dir):
    """Regression test: keyword completions for a library resolved from a
    workspace-local ``.libspec`` file (LIBSPEC_3 / case3_library).

    All parameters are pytest fixtures supplied by the test harness.
    """
    from robocorp_ls_core import uris
    from os.path import os  # NOTE(review): unusual import path; behaves like ``import os``
    from robotframework_ls_tests.fixtures import LIBSPEC_3
    from robotframework_ls.impl import keyword_completions
    from robotframework_ls.impl.completion_context import CompletionContext
    from robocorp_ls_core.lsp import TextDocumentItem
    # Write the canned libspec into a fresh sub-folder and register that
    # folder with the libspec manager.
    workspace_dir_a = os.path.join(workspace_dir, "workspace_dir_a")
    os.makedirs(workspace_dir_a)
    with open(os.path.join(workspace_dir_a, "my.libspec"), "w") as stream:
        stream.write(LIBSPEC_3)
    libspec_manager.add_workspace_folder(uris.from_fs_path(workspace_dir_a))
    # The library described by the libspec must now be resolvable.
    assert libspec_manager.get_library_info("case3_library", create=False) is not None
    workspace.set_root(workspace_dir, libspec_manager=libspec_manager)
    doc = workspace.ws.put_document(TextDocumentItem("temp_doc.robot", text=""))
    # NOTE(review): the robot-file source below lost its original line breaks
    # in this copy of the file -- restore the real multi-line content upstream.
    doc.source = """*** Settings *** Library case3_library *** Test Cases *** Can use resource keywords Case Verify"""
    completions = keyword_completions.complete(
        CompletionContext(doc, workspace=workspace.ws))
    # Compare against the recorded expected completions.
    data_regression.check(completions)
def write_file(self, path, data, filename):
    """Append ``data`` (plus a trailing newline) to ``filename`` under
    ``path``, creating the directory on first use.

    NOTE: callers must include a trailing separator in ``path`` -- the two
    are concatenated directly (kept for interface compatibility).
    """
    if not os.path.isdir(path):
        os.makedirs(path)
    # with-statement guarantees the handle is closed even if the write
    # raises (the original leaked the handle on exceptions).
    with open(path + filename, "a") as save_dialog:
        save_dialog.write(data + "\n")
    self.log.debug('new knowledge saved')
def dowAndExtractZip(dowLink, albumName):
    """Download a .zip album archive, extract its files (flattened, no
    internal directories) into the album folder, then delete the archive.

    :param dowLink: direct URL of the zip archive (must end with "zip").
    :param albumName: album name used to derive the destination folder.
    """
    if not dowLink.endswith("zip"):
        print("Invalid download link '{}'".format(dowLink))
        return
    albumFolder = getAlbumFolder(albumName)
    if os.path.exists(albumFolder):
        if not os.listdir(albumFolder):
            # Empty leftover folder from a failed run: remove and redo.
            os.rmdir(albumFolder)
        else:
            print("\nAlbum '{}' already exists at location '{}'".format(albumName, albumFolder))
            return
    zipFilePath = downloadZipFromLink(dowLink, albumName)
    os.makedirs(albumFolder)
    # BUG FIX: the original format string had no placeholder, so the folder
    # name was silently dropped from the message.
    print("Extracting archive to folder {}".format(albumFolder))
    # with-statement closes the archive even if an extract fails
    # (the original left it open on error).
    with zipfile.ZipFile(zipFilePath) as archive:
        for zipInfo in archive.filelist:
            # Flatten: keep only the base name so no sub-directories are created.
            songName = os.path.basename(zipInfo.filename)
            zipInfo.filename = songName
            print("Extracting file '{}'".format(songName))
            archive.extract(zipInfo, albumFolder)
    print("Deleting archive file '{}'".format(zipFilePath))
    os.remove(zipFilePath)
    print("\n***Album '{}' is created***".format(albumName))
def main():
    """Build random playlists of a requested total length from all mp3/mp4/flac
    files found recursively under a directory.

    Flags: -d/--directory music folder, -l/--length target minutes.
    """
    parser = argparse.ArgumentParser(description='Generate playlists with the indicated length')
    parser.add_argument('-d','--directory', help='Directory with music files',type=str, required=True)
    parser.add_argument('-l', '--length', help='Length of the playlist, in minutes', type=int, required=True)
    args = parser.parse_args()
    directory = args.directory
    length = args.length * 60  # work in seconds from here on
    path = r'./playlists/'
    if not os.path.exists(path):
        os.makedirs(path)
    playlist_basename = 'playlist_' #basename(argv[0][:-3]) + '_'
    curr_items = []
    too_long_items = []
    all_items = []
    # Breadth-first walk of the directory tree collecting audio files.
    dir_queue = deque([directory])
    while len(dir_queue) != 0:
        cur_directory = dir_queue.popleft()
        for node in os.listdir(cur_directory):
            node = os.path.join(cur_directory, node)
            if os.path.isdir(node):
                dir_queue.append(node)
            elif fnmatch.fnmatch(node, '*.mp[43]') or fnmatch.fnmatch(node, '*.flac'):
                all_items.append(node)
    shuffle(all_items)
    for item in all_items:
        # NOTE(review): curr_length is a module-level global; if it is not
        # initialised at module scope this raises NameError on first use.
        global curr_length
        if curr_length >= length:
            # NOTE(review): curr_items/curr_length are never reset after
            # flushing, and ``item`` is dropped in this branch -- compare the
            # sibling implementation that resets both. Verify against
            # create_playlist() before relying on this.
            create_playlist(path, playlist_basename, curr_items)
        else:
            encoding = item[-4:]
            # Map the file extension to its mutagen reader class.
            encodings = {'.mp3': MP3, '.mp4': MP4, 'flac': FLAC}
            try:
                music_file = encodings[encoding](item)
            except Exception as e:
                handleException(e)
            else:
                file_length = music_file.info.length
                if file_length > length:
                    # A single track longer than the whole playlist target.
                    too_long_items.append(item)
                    print("File %s exceed the given length (%s min)" % (item, file_length/60))
                else:
                    curr_length += file_length
                    curr_items.append(item+'\n')
    if curr_items:
        # Flush the final, partially-filled playlist.
        create_playlist(path, playlist_basename, curr_items)
    if too_long_items:
        print("\nThis files exceeded the given length and were not added to any playlist...\n")
        for i in too_long_items:
            print(basename(i))
def add_category(self, cat, path):
    """Register category ``cat`` via a dialog prompt and persist it as
    ``<path>/<lang>/<cat>.voc``.

    :param cat: category name; also becomes the file's content.
    :param path: base directory; the language code is appended to it.
    :return: True (unconditionally).
    """
    path = path + "/" + self.lang
    # Prompt/registration call; the response itself is not used afterwards,
    # so the former unused ``category`` binding was dropped.
    self.get_response("add.category", data={"cat": cat})
    if not os.path.isdir(path):
        os.makedirs(path)
    # with-statement closes the file even if the write raises
    # (the original leaked the handle on error).
    with open(path + "/" + cat + '.voc', "w") as save_category:
        save_category.write(cat)
    return True
def main():
    """Build random .m3u playlists of a requested total length from the
    mp3/mp4 files directly inside one directory (Python 2 script).

    Flags: -d/--directory music folder, -l/--length target minutes.
    """
    parser = argparse.ArgumentParser(description='Generate playlists with the indicated length')
    parser.add_argument('-d','--directory', help='Directory with music files',type=str, required=True)
    parser.add_argument('-l', '--length', help='Length of the playlist, in minutes', type=int, required=True)
    args = parser.parse_args()
    directory = args.directory
    length = args.length * 60  # work in seconds from here on
    path = r'./playlists/'
    if not os.path.exists(path):
        os.makedirs(path)
    # Playlist file prefix: script name + target minutes.
    playlist_basename = basename(argv[0][:-3]) + str(length/60) + '_'
    playlist_number = 1
    curr_length = 0
    curr_items = []
    too_long_items = []
    all_items = []
    # Non-recursive: only files directly inside ``directory`` are scanned.
    for music_file in os.listdir(directory):
        if fnmatch.fnmatch(music_file, '*.mp[43]'):
            # NOTE(review): plain concatenation -- assumes ``directory`` ends
            # with a path separator; verify callers.
            all_items.append(directory + music_file)
    shuffle(all_items)
    for item in all_items:
        if curr_length >= length:
            # Target length reached: flush the current playlist to disk.
            # NOTE(review): ``item`` itself is skipped in this branch.
            name = path + playlist_basename + str(playlist_number) + '.m3u'
            playlist_file = open(name, 'w')
            playlist_file.writelines(curr_items)
            playlist_file.close()
            print 'Playlist generated, name: ', name , ' length ', curr_length/60 , 'min'
            playlist_number += 1
            curr_length = 0
            curr_items = []
        else:
            encoding = item[-4:]
            # Map the file extension to its mutagen reader class.
            encodings = {'.mp3': MP3, '.mp4': MP4}
            try:
                music_file = encodings[encoding](item)
            except Exception as e:
                handleException(e)
            else:
                file_length = music_file.info.length
                if file_length > length:
                    # A single track longer than the whole playlist target.
                    too_long_items.append(item)
                    print 'File %s exceed the given length (%s min)' % (item, file_length/60)
                else:
                    curr_length += file_length
                    curr_items.append(item+'\n')
    # NOTE(review): the final partially-filled playlist is never written, and
    # this message prints even when too_long_items is empty (the sibling
    # implementation in this file guards both cases).
    print '\nThis files exceeded the given length and were not added to any playlist...\n'
    for i in too_long_items:
        print basename(i)
def handle_interaction(self, message):
    """Guided learning dialog (mycroft skill handler): ask for a category,
    question, keywords and answer, then append them as .dialog/.intent files
    under the chosen privacy root."""
    private = message.data.get("Private", None)
    if private is None:
        privacy = self.public_path
        catego = self.get_response("begin.learning")
    else:
        privacy = self.local_path
        catego = self.get_response("begin.private")
        #privacy = self.public_path
    # Map the spoken category onto one of the known vocabularies.
    if catego in self._humor_words:
        #self.speak("humor")
        Category = "humor"
    elif catego in self._science_words:
        #self.speak("science")
        Category = "science"
    elif catego in self._love_words:
        #self.speak("love")
        Category = "love"
    elif catego in self._cancel_words:
        self.speak_dialog("Cancel")
        return
    else:
        self.speak_dialog("invalid.category")
        # NOTE(review): returns the unrecognized category -- other exits
        # return None; verify whether any caller uses this value.
        return catego
    question = self.get_response("question")
    if not question:
        return  # user cancelled
    keywords = self.get_response("keywords")
    if not keywords:
        return  # user cancelled
    answer = self.get_response("answer")
    if not answer:
        return  # user cancelled
    # Layout: <privacy>/<category>/dialog|vocab/<lang>/
    answer_path = privacy + "/" + Category + "/" + "dialog" + "/" + self.lang
    question_path = privacy + "/" + Category + "/" + "vocab" + "/" + self.lang
    if not os.path.isdir(answer_path):
        os.makedirs(answer_path)
    if not os.path.isdir(question_path):
        os.makedirs(question_path)
    # Read back the captured pair to the user.
    self.speak_dialog("save.learn", data={
        "question": question,
        "answer": answer
    }, expect_response=True)
    # Append the answer to a keyword-named .dialog file.
    save_dialog = open(
        answer_path + "/" + keywords.replace(" ", ".") + ".dialog", "a")
    save_dialog.write(answer + "\n")
    save_dialog.close()
    # Append the question to the matching .intent file.
    save_intent = open(
        question_path + "/" + keywords.replace(" ", ".") + ".intent", "a")
    save_intent.write(question + "\n")
    save_intent.close()
def save_intent(self, question_path, question, keywords, answer_path=None, answer=None):
    """Append a question (and optionally its answer) to keyword-named
    ``.intent``/``.dialog`` files, creating directories on first use.

    :param question_path: directory for the ``.intent`` file.
    :param question: question text to append.
    :param keywords: keyword phrase; spaces become dots in the file stem.
    :param answer_path: optional directory for the ``.dialog`` file.
    :param answer: answer text; only written when answer_path is given.
    """
    # Shared file stem derived from the keyword phrase.
    stem = keywords.replace(" ", ".")
    if answer_path is not None:
        if not os.path.isdir(answer_path):
            os.makedirs(answer_path)
        # with-statement closes the handle even on write failure
        # (the original leaked it).
        with open(answer_path + "/" + stem + ".dialog", "a") as save_dialog:
            save_dialog.write(answer + "\n")
    if not os.path.isdir(question_path):
        os.makedirs(question_path)
    with open(question_path + "/" + stem + ".intent", "a") as save_intent:
        save_intent.write(question + "\n")
    self.log.debug('new knowledge saved')
def make_folders(alias=None):
    """
    Creates the folders required for using the system. The path to the
    folders can be found in the "util" package.

    :param alias: The Alias of the texts.
    """
    for base in paths.ALL:
        if alias:
            # Per-alias sub-folder, skipping excluded base paths.
            target = base + "/" + alias
            if not os.path.exists(target) and base not in paths.EXCLUDE:
                os.makedirs(target)
        else:
            # No alias: just ensure every base folder exists.
            if not os.path.exists(base):
                os.makedirs(base)
def handle_interaction(self, message):
    """Guided learning dialog (mycroft skill handler): ask for a category,
    question, keywords and answer, confirm with the user, then append them
    as .dialog/.intent files under the chosen privacy root."""
    private = message.data.get("Private", None)
    if private is None:
        privacy = self.public_path
        catego = self.get_response("begin.learning")
    else:
        privacy = self.local_path
        catego = self.get_response("begin.private")
        # NOTE(review): this overwrite sends "private" data to the public
        # path as well -- confirm whether this is intentional.
        privacy = self.public_path
    # Match the spoken category against each allowed category's vocabulary.
    Category = ""
    for cat in self.allow_category.split(","):
        if catego in self.init_category(cat):
            Category = cat # set category
    if not Category:
        self.speak_dialog("invalid.category")
        return
    question = self.get_response("question")
    if not question:
        return  # user cancelled
    keywords = self.get_response("keywords")
    if not keywords:
        return  # user cancelled
    answer = self.get_response("answer")
    if not answer:
        return  # user cancelled
    # Layout: <privacy>/<category>/dialog|vocab/<lang>/
    answer_path = privacy + "/" + Category + "/" + "dialog" + "/" + self.lang
    question_path = privacy + "/" + Category + "/" + "vocab" + "/" + self.lang
    if not os.path.isdir(answer_path):
        os.makedirs(answer_path)
    if not os.path.isdir(question_path):
        os.makedirs(question_path)
    # Ask for explicit confirmation before persisting anything.
    confirm_save = self.ask_yesno("save.learn", data={
        "question": question,
        "answer": answer
    })
    if confirm_save != "yes":
        self.log.debug('new knowledge rejected')
        return  # user cancelled
    # Append the answer to a keyword-named .dialog file.
    save_dialog = open(
        answer_path + "/" + keywords.replace(" ", ".") + ".dialog", "a")
    save_dialog.write(answer + "\n")
    save_dialog.close()
    # Append the question to the matching .intent file.
    save_intent = open(
        question_path + "/" + keywords.replace(" ", ".") + ".intent", "a")
    save_intent.write(question + "\n")
    save_intent.close()
    self.log.debug('new knowledge saved')
def _extract(self):
    """Harvest all records from the configured OAI-PMH endpoint
    (``ListRecords`` verb) and store each record as an XML file under
    ``<data_dir>/extracted/<institution>/record/``.

    Follows resumption tokens until the server stops issuing them.
    Python 2 code (urllib2).
    """
    records_dir_path = os.path.join(self._data_dir_path, 'extracted', self.__institution_id, 'record')
    if not os.path.isdir(records_dir_path):
        os.makedirs(records_dir_path)
    base_url = self.__endpoint_url + '?verb=ListRecords'
    record_count = 0
    resumption_token = None
    # One iteration per result page; exits when no resumption token remains.
    while True:
        if resumption_token is not None:
            # Continuation request: only the token may be passed.
            url = base_url + '&resumptionToken=' + resumption_token
        else:
            # First request: pass metadata prefix and optional set filter.
            url = base_url + '&metadataPrefix=' + self.__metadata_prefix
            if self.__set is not None:
                url = url + '&set=' + self.__set
        self._logger.debug("reading URL %s", url)
        url_f = urllib2.urlopen(url)
        try:
            xml_str = url_f.read()
        finally:
            url_f.close()
        self._logger.debug("read XML from URL %s: \n%s", url, xml_str)
        dom = parseString(xml_str)
        ListRecords_elements = dom.documentElement.getElementsByTagName('ListRecords')
        if len(ListRecords_elements) == 0:
            # Malformed or error response: abort the harvest.
            self._logger.error("no ListRecords element in XML: \n%s", xml_str)
            return
        ListRecords_element = ListRecords_elements[0]
        for record_element in ListRecords_element.getElementsByTagName('record'):
            record_identifier = record_element.getElementsByTagName('header')[0].getElementsByTagName('identifier')[0].childNodes[0].data
            # Make the identifier safe for use as a file name.
            safe_record_identifier = record_identifier.replace('/', '_').replace(':', '_')
            record_file_path = os.path.join(records_dir_path, safe_record_identifier + '.xml')
            # Re-fetch existing files only when a clean harvest was requested.
            if self.__clean or not os.path.isfile(record_file_path):
                with open(record_file_path, 'w+b') as f:
                    f.write(record_element.toxml().encode('utf-8', 'ignore'))
                self._logger.debug("wrote record %s to %s", record_identifier, record_file_path)
            else:
                self._logger.debug("skipping record %s", record_identifier)
            record_count = record_count + 1
            if record_count % 50 == 0:
                self._logger.info("read %d records", record_count)
        # Pick up the resumption token (if any) for the next page.
        resumption_token = None
        for resumption_token_element in ListRecords_element.getElementsByTagName('resumptionToken'):
            resumption_token = resumption_token_element.childNodes[0].data
            break
        if resumption_token is None:
            break
def import_data(current_user):
    """Flask view: accept yearly CSO data/questions/answers CSV uploads,
    store them under ``UPLOAD_FOLDER/<year>/`` and trigger the importer.

    Expects form field ``year`` and files ``data-file``, ``answers-file``,
    ``questions-file``. Returns a JSON Response describing the outcome
    (messages are in Albanian, as elsewhere in this app).
    """
    year = request.form['year']
    # Validation: year and at least one file are required.
    if year == '' or not bool(request.files):
        return Response(
            response=json_util.dumps({'success': False, 'msg': 'Fajlli i të dhënave dhe viti duhet te zgjedhen!'}),
            mimetype='application/json')
    # Getting the uploaded files
    data_file = request.files['data-file']
    answers_file = request.files['answers-file']
    questions_file = request.files['questions-file']
    # Get file mimetypes
    data_mime = data_file.mimetype
    question_mime = questions_file.mimetype
    # BUG FIX: the original read questions_file.mimetype here, so the
    # answers file's type was never actually validated.
    answer_mime = answers_file.mimetype
    if (data_mime not in allowedFiles()) or (question_mime not in allowedFiles()) or (answer_mime not in allowedFiles()):
        return Response(
            response=json_util.dumps({'success': False, 'msg': 'Fajlli i të dhënave duhet te jete i tipit csv!'}),
            mimetype='application/json')
    # Year data directory
    data_dir = UPLOAD_FOLDER + '/' + year
    # Create year directory if not exists
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)
    if data_file.filename != '':
        # Build the stored name keeping the uploaded file's extension.
        data_filename = 'cso-data' + '.' + data_file.filename.split('.')[-1]
        # Saving uploaded file
        data_file.save(os.path.join(data_dir, data_filename))
    getFileName(questions_file, "questions", data_dir)
    getFileName(answers_file, "answers", data_dir)
    DataImporter().run(year)
    # Returning success response
    return Response(
        response=json_util.dumps({'success': True, 'msg': 'Fajlli i të dhënave u ngarkua me sukses!'}),
        mimetype='application/json'
    )
def arquivar(f, n_evolucao, experimento, score_ini, melhores_evolucao, tempo):
    """Persist one experiment run to ``dados/<n_evolucao>/<f>.txt``: the
    description, initial scores, runtime and the best chromosome, then log
    the best result via ``log_resultado``.

    :param f: run index used as the file name.
    :param n_evolucao: evolution id (folder name).
    :param experimento: textual description of the experiment.
    :param score_ini: iterable of score tuples; fields [2] and [1] written.
    :param melhores_evolucao: per-generation best chromosomes.
    :param tempo: elapsed execution time.
    """
    if not os.path.exists(join('dados', str(n_evolucao))):  # cria a pasta, se necessario
        os.makedirs(join('dados', str(n_evolucao)))
    # Compute the best chromosome once (the original repeated
    # extremo(deepcopy(...)) three times).
    melhor = extremo(deepcopy(melhores_evolucao), 1)[0]
    # with-statement fixes the original bug where ``txt.close`` was written
    # without parentheses, so the file was never explicitly closed.
    with open(join('dados', str(n_evolucao), str(f) + '.txt'), mode='w') as txt:
        txt.write(experimento+'\n\n')
        for score in score_ini:
            txt.write(str(score[2])+'\t'+str(score[1])+'\n')
        txt.write('\nTempo de execucao:\t' + str(tempo)+'\n')
        txt.write('Melhor Cromossomo:\t' + str(melhor.score[2])+'\t'+str(melhor.score[1])+'\n\n')
    log_resultado(f, 'melhor_resultado', n_evolucao, experimento, melhor)
def precise_calc_check(self, message):
    """Scheduled poll: check whether the background precise-calculation
    subprocess finished; if so, cancel the repeating event, trigger model
    conversion when the .net file exists, and append the subprocess stdout
    to ``<name>.logs/output.txt``."""
    self.log.info("precise: check for end calculation ")
    name = self.settings["Name"]
    if not os.path.isdir(self.file_system.path+"/"+name+".logs"):
        os.makedirs(self.file_system.path+"/"+name+".logs")
    # poll() returns None while the process is still running.
    if not self.precise_calc.poll() is None:
        self.cancel_scheduled_event('PreciseCalc')
        if os.path.isfile(self.file_system.path+"/"+self.settings["Name"]+".net"):
            self.log.info("start convert file: ")
            self.precise_con(name, message)
        #Write logfile calculation to disk
        # NOTE(review): shadows the ``file`` builtin and is not closed if a
        # write raises -- candidate for a with-statement.
        file = open(self.file_system.path+"/"+name+".logs/output.txt", "a")
        if not self.precise_calc.stdout is None:
            for line in iter(self.precise_calc.stdout.readline, b''):
                # Strip the bytes-literal artifacts from the decoded line.
                data = str(line.rstrip()).replace("b'", "").replace("' ", "")
                file.write(data + "\n")
                self.log.info("schreibe log: "+data)
        file.close()
def ensuredir(dpath, mode=0o1777):
    """
    Ensures that directory will exist. creates new dir with sticky bits by
    default

    Args:
        dpath (str): dir to ensure. Can also be a tuple to send to join
        mode (int): octal mode of directory (default 0o1777)

    Returns:
        str: path - the ensured directory
    """
    if isinstance(dpath, (list, tuple)):  # nocover
        dpath = join(*dpath)
    # exist_ok=True removes the check-then-create race of the original
    # ``if not exists(...)`` guard (another process creating the directory
    # between the check and makedirs raised OSError). Note it still raises
    # if the path exists but is not a directory, which is the safer contract.
    os.makedirs(normpath(dpath), mode=mode, exist_ok=True)
    return dpath
def start_recording(self, name, i, path, soundfile): self.settings["duration"] = 3 # default recording duration if self.has_free_disk_space(): # Initiate recording wait_while_speaking() self.start_time = now_local() # recalc after speaking completes if not os.path.isdir(path): os.makedirs(path) self.record_process = self.record(path + soundfile, int(self.settings["duration"]), self.settings["rate"], self.settings["channels"]) self.enclosure.eyes_color(255, 0, 0) # set color red self.last_index = 24 self.schedule_repeating_event(self.recording_feedback, None, 1, name='RecordingFeedback') else: self.speak_dialog("disk.full")
def improve_intent(self, message):
    """Review recorded wake-word candidates: play each .wav in the sell
    folder, ask the user whether it is the wake word, and sort it into the
    wake-word or not-wake-word training folder (or delete it)."""
    name = self.config_core.get('listener', {}).get('wake_word').replace(' ', '-')
    i = 1
    if os.path.isdir(self.settings["sell_path"]):
        # Review at most settings["improve"] files per session.
        onlyfiles = next(os.walk(self.settings["sell_path"]))[2]
        if len(onlyfiles) <= self.settings["improve"]:
            selling = len(onlyfiles)
        else:
            selling = self.settings["improve"]
        self.speak_dialog('improve', data={'name': name, "selling": selling})
        self.log.info("search wake word in: "+self.settings["sell_path"])
        wait_while_speaking()
        for root, dirs, files in os.walk(self.settings["sell_path"]):
            for f in files:
                filename = os.path.join(root, f)
                if filename.endswith('.wav'):
                    if i <= selling:
                        self.log.info("play file")
                        play_wav(filename)
                        wait_while_speaking()
                        time.sleep(3)
                        # Ask the user to classify the sample just played.
                        sell = self.ask_yesno("ask.sell", data={'i': i})
                        wait_while_speaking()
                        i = i+1
                        path = None
                        if sell == "yes":
                            path = self.settings["file_path"]+name+"/wake-word/"+self.lang[:2]+"-short/"
                        elif sell == "no":
                            path = self.settings["file_path"]+name+"/not-wake-word/"+self.lang[:2]+"-short-not/"
                        if not path is None:
                            if not os.path.isdir(path):
                                os.makedirs(path)
                            # NOTE(review): shadows the ``file`` builtin.
                            file = path+name+"-"+self.lang[:2]+"-"+str(uuid.uuid1())+".wav"
                            shutil.move(filename, file)
                            self.log.info("move File: "+file)
                        else:
                            # Neither yes nor no (e.g. cancel): discard sample.
                            os.remove(filename)
                    else:
                        self.speak_dialog('improve.no.file', data={'name': name})
    else:
        self.speak_dialog('improve.no.file', data={'name': name})
def start_agent(self, base_path, agent, agentmap):
    """Write an inmanta agent config under ``base_path/agent``, then launch
    the agent via ``app.py`` as a subprocess with stdout/stderr redirected
    to log files in the same directory.

    :param base_path: parent directory for the agent's state dir.
    :param agent: agent name (becomes the sub-directory name).
    :param agentmap: mapping of agent name -> endpoint/target.
    :return: the started ``subprocess.Popen`` handle.
    """
    base_path = os.path.abspath(os.path.join(base_path, agent))
    os.makedirs(base_path, exist_ok=True)
    # Agent configuration (ini format); agent-map entries are name=value pairs.
    cfg = """
[config]
heartbeat-interval = 60
state-dir=%s
agent-names = %s
environment=%s
agent-map=%s
agent-splay = 10
agent-run-at-start=true
""" % (base_path, ",".join(agentmap.keys()), self.envid, ",".join(
        ["%s=%s" % (k, v) for (k, v) in agentmap.items()]))
    config_path = os.path.join(base_path, "agent.cfg")
    with open(config_path, "w+") as fd:
        fd.write(cfg)
    # Run the in-tree app.py with the current interpreter.
    app = os.path.abspath(
        os.path.join(__file__, "../../src/inmanta/app.py"))
    inmanta_path = [sys.executable, app]
    args = inmanta_path + [
        "-vvvv", "--timed-logs", "--config", config_path, "agent"
    ]
    # Redirect agent output to per-agent log files.
    outfile = os.path.join(base_path, "out.log")
    err = os.path.join(base_path, "err.log")
    with open(outfile, "wb+") as outhandle:
        with open(err, "wb+") as errhandle:
            return subprocess.Popen(args, stdout=outhandle, stderr=errhandle,
                                    cwd=base_path, env=os.environ.copy())
def download_sounds(self):
    """Download the public-domain pdsounds archive, convert its mp3 files to
    16 kHz mono wav "noise" samples with ffmpeg, and symlink the result into
    the wake-word trainer's not-wake-word folder.

    Only runs when settings["soundbackup"] is True; otherwise returns True.
    """
    if self.settings["soundbackup"] is True:
        import py7zr
        name = self.settings["name"]
        if not os.path.isfile(self.file_system.path + "/nonesounds.7z"):
            self.log.info("downloading soundbackup")
            wget.download(
                'http://downloads.tuxfamily.org/pdsounds/pdsounds_march2009.7z',
                self.file_system.path + "/nonesounds.7z")
        # Skip everything when the noises symlink target already exists.
        if not os.path.isdir(self.settings["file_path"] + "/" + name + "/not-wake-word/noises"):
            if not os.path.isdir(self.file_system.path + "/noises"):
                os.makedirs(self.file_system.path + "/noises")
            if not os.path.isdir(self.file_system.path + "/noises/mp3"):
                self.log.info("unzip soundbackup")
                py7zr.unpack_7zarchive(
                    self.file_system.path + "/nonesounds.7z",
                    self.file_system.path + "/noises")
                self.log.info("download sucess, start convert")
            # Convert every extracted mp3 to wav (asynchronously via Popen;
            # NOTE(review): each Popen overwrites self.soundbackup_convert,
            # so only the last conversion can be tracked).
            if not os.path.isdir(self.file_system.path + "/noises/noises"):
                for root, dirs, files in os.walk(self.file_system.path + "/noises/mp3/"):
                    for f in files:
                        filename = os.path.join(root, f)
                        if filename.endswith('.mp3'):
                            self.log.info("Filename: " + filename)
                            if not os.path.isdir(self.file_system.path + "/noises/noises"):
                                os.makedirs(self.file_system.path + "/noises/noises")
                            self.soundbackup_convert = subprocess.Popen(
                                [
                                    "ffmpeg -i " + filename +
                                    " -acodec pcm_s16le -ar 16000 -ac 1 -f wav " +
                                    self.file_system.path + "/noises/noises/noises-" +
                                    str(uuid.uuid1()) + ".wav"
                                ],
                                preexec_fn=os.setsid, shell=True)
                            self.log.info("extratct: " + filename)
            self.log.info("Make Filelink")
            if not os.path.isdir(self.settings["file_path"] + "/" + name + "/not-wake-word"):
                os.makedirs(self.settings["file_path"] + "/" + name + "/not-wake-word")
            # Expose the converted noises to the training data layout.
            os.symlink(
                self.file_system.path + "/noises/noises/",
                self.settings["file_path"] + "/" + name + "/not-wake-word/noises")
    else:
        return True
def download_sounds(self):
    """Variant of the sound-backup downloader: checks free disk space before
    downloading the pdsounds archive, converts mp3 (then flac) files to
    16 kHz mono wav noise samples, and symlinks them into the trainer's
    not-wake-word folder.

    Only runs when settings["soundbackup"] is True; otherwise returns True.
    """
    if self.settings["soundbackup"] is True:
        import py7zr
        name = self.settings["Name"]
        if not os.path.isfile(self.file_system.path+"/nonesounds.7z"):
            # Require ~1.5 GB free before downloading the archive.
            free_mb = psutil.disk_usage('/')[2] / 1024 / 1024
            if free_mb <= 1500:
                self.settings["soundbackup"] = False
                self.log.info("no space: Sound Download not possible")
                return
            else:
                self.log.info("downloading soundbackup")
                wget.download('http://downloads.tuxfamily.org/pdsounds/pdsounds_march2009.7z',
                              self.file_system.path+"/nonesounds.7z")
        #onlyfiles = next(os.walk(self.settings["file_path"]+name+"/not-wake-word/noises"))[2]
        #if len(onlyfiles) <= 30:
        if not os.path.isdir(self.file_system.path+"/noises"):
            os.makedirs(self.file_system.path+"/noises")
        if not os.path.isdir(self.file_system.path+"/noises/mp3"):
            self.log.info("unzip soundbackup")
            py7zr.unpack_7zarchive(self.file_system.path+"/nonesounds.7z", self.file_system.path+"/noises")
            self.log.info("download sucess, start convert")
        # NOTE(review): raises StopIteration/OSError if .../noises/noises does
        # not exist yet -- verify the unpack always creates it.
        onlyfiles = next(os.walk(self.file_system.path+"/noises/noises"))[2]
        if len(onlyfiles) <= 30:
            # Two passes: first the mp3 folder, then the flac folder.
            folder = self.file_system.path+"/noises/mp3/"
            fileformat = '.mp3'
            i = 1
            while i <= 2:
                for root, dirs, files in os.walk(folder):
                    for f in files:
                        filename = os.path.join(root, f)
                        if filename.endswith(fileformat):
                            self.log.info("Filename: "+filename)
                            # Derive the output name from the source file name.
                            soundfile = filename.replace(fileformat, '').replace(folder, '')
                            if not os.path.isdir(self.file_system.path+"/noises/noises"):
                                os.makedirs(self.file_system.path+"/noises/noises")
                            # Blocking conversion (subprocess.call), one file at a time.
                            subprocess.call(["ffmpeg -i "+filename+" -acodec pcm_s16le -ar 16000 -ac 1 -f wav "+
                                             self.file_system.path+"/noises/noises/"+soundfile+".wav"],
                                            preexec_fn=os.setsid, shell=True)
                            self.log.info("extratct: "+filename)
                folder = self.file_system.path+"/noises/otherformats/"
                fileformat = '.flac'
                i = i + 1
        self.speak_dialog("download.success")
        if not os.path.isdir(self.settings["file_path"]+name+"/not-wake-word"):
            os.makedirs(self.settings["file_path"]+"/"+name+"/not-wake-word")
        # Link the converted noises into the training data layout once.
        if not os.path.isdir(self.settings["file_path"]+name+"/not-wake-word/noises"):
            self.log.info("Make Filelink")
            os.symlink(self.file_system.path+"/noises/noises/", self.settings["file_path"]+name+"/not-wake-word/noises")
    else:
        return True
def train_wake_word_intent(self, message):
    """Record 24 wake-word / not-wake-word samples into /tmp, then (via the
    while-else) sort them into test and training folders and start model
    calculation.

    NOTE(review): reconstructed as a while-else -- that is the only reading
    under which ``source``/``yespath``/``nopath`` are defined in the save
    phase; confirm against the original skill.
    """
    if message.data.get("name"):
        name = message.data.get("name")
        name = name.replace(' ', '-')
        # Offer to discard an existing model of the same name.
        if os.path.isdir(self.settings["file_path"]+name):
            if self.ask_yesno("model.available", data={"name": name}) == "yes":
                if os.path.isdir(self.settings["file_path"]+name):
                    rmtree(self.settings["file_path"]+name)
        if os.path.isdir("/tmp/mycroft_wakeword/"):
            rmtree("/tmp/mycroft_wakeword/")
        self.speak_dialog("word.wake", data={"name": name})
        wait_while_speaking()
        # Throw away any previous recording
        time.sleep(4)
        i = 1
        source = "/tmp/mycroft_wakeword/"+name
        nopath = "/not-wake-word/"+ self.lang[:2] + "-short/"
        yespath = "/wake-word/"+ self.lang[:2] + "-short/"
        ### Record test files to tmp
        # Samples 1-11: wake word; from 12 on: not-wake-word samples.
        while i <= 24:
            if i < 12:
                play_wav(self.piep)
                path = source+yespath
                soundfile = name+ "-" + self.lang[:2] +"-"+str(uuid.uuid1())+".wav"
            elif i == 12:
                # NOTE(review): ``stop`` is set to "" and never updated, so
                # the early-exit branch below can never trigger -- verify.
                stop = ""
                self.speak_dialog("none.wake.word", stop, expect_response=True)
                if stop == "stop":
                    rmtree(source)
                    return
                play_wav(self.piep)
                path = source+nopath
                soundfile = "not"+name+"-"+ self.lang[:2] +"-"+str(uuid.uuid1())+".wav"
            else:
                play_wav(self.piep)
                path = source+nopath
                soundfile = "not"+name+"-"+ self.lang[:2] +"-"+str(uuid.uuid1())+".wav"
            time.sleep(2)
            wait_while_speaking()
            i = i+1
            self.start_recording(name,i,path,soundfile)
        else:
            #### Save wakewords in data folder
            # while-else: runs after the loop completes normally.
            if self.ask_yesno("is.all.ok") == "no":
                rmtree(source)
                return
            wait_while_speaking()
            #### wake words with 4 test files
            # Keep the test set topped up to 4 files; the rest goes to training.
            i = 1
            if os.path.isdir(self.settings["file_path"]+name+"/test"+yespath):
                onlyfiles = next(os.walk(self.settings["file_path"]+name+"/test"+yespath))
                i = 4 - len(onlyfiles)
            else:
                i = 1
                os.makedirs(self.settings["file_path"]+name+"/test"+yespath)
            for root, dirs, files in os.walk(source+yespath):
                for f in files:
                    filename = os.path.join(root, f)
                    if filename.endswith('.wav'):
                        if i <= 4:
                            # First files fill the test set.
                            shutil.move(filename, self.settings["file_path"]+name+"/test"+yespath+ name+ "-" + self.lang[:2] +"-"+str(uuid.uuid1())+".wav")
                            self.log.info("move file: "+filename)
                            i = i + 1
                        else:
                            # Remaining files go to the training set.
                            if not os.path.isdir(self.settings["file_path"]+name+yespath):
                                os.makedirs(self.settings["file_path"]+name+yespath)
                            shutil.move(filename, self.settings["file_path"]+name+yespath+ name+ "-" + self.lang[:2] +"-"+str(uuid.uuid1())+".wav")
                            self.log.info("move file: "+filename)
                            i = i + 1
            #### not wakeword with 4 test files
            i = 1
            if os.path.isdir(self.settings["file_path"]+name+"/test"+nopath):
                onlyfiles = next(os.walk(self.settings["file_path"]+name+"/test"+nopath))
                i = 4 - len(onlyfiles)
            else:
                i = 1
                os.makedirs(self.settings["file_path"]+name+"/test"+nopath)
            for root, dirs, files in os.walk(source+nopath):
                for f in files:
                    filename = os.path.join(root, f)
                    if filename.endswith('.wav'):
                        if i <= 4:
                            if not os.path.isdir(self.settings["file_path"]+name+"/test"+nopath):
                                os.makedirs(self.settings["file_path"]+name+"/test/"+nopath)
                            shutil.move(filename, self.settings["file_path"]+name+"/test"+nopath+ "not"+name+"-"+ self.lang[:2] +"-"+str(uuid.uuid1())+".wav")
                            self.log.info("move file: "+filename)
                            i = i + 1
                        else:
                            if not os.path.isdir(self.settings["file_path"]+name+nopath):
                                os.makedirs(self.settings["file_path"]+name+nopath)
                            shutil.move(filename, self.settings["file_path"]+name+nopath+ "not"+name+"-"+ self.lang[:2] +"-"+str(uuid.uuid1())+".wav")
                            self.log.info("move file: "+filename)
                            i = i + 1
            self.speak_dialog("start.calculating")
            self.calculating_intent(name, message)
def main():
    """CLI entry point of a source-code generator: parse name/value pairs
    from the command line into typed Params, render them with a
    language-specific generator (currently Java only), and write the result
    to the output file (or stdout for ``-``) only when the content changed.

    Returns 0 on success / up-to-date, -1 on invalid input.
    """
    supportedLangs = [LANG_JAVA]
    logging.basicConfig(level=logging.DEBUG, format='%(levelname)s/%(name)s: %(message)s')
    logger = logging.getLogger(__name__.split('.')[-1])
    parser = argparse.ArgumentParser()
    parser.add_argument('lang', help='generator language. available languages=%s' % ','.join(supportedLangs))
    parser.add_argument('outputFile', help='output file path; generated source will be stored here. to use stdout set to -')
    parser.add_argument('-java_class', help='if language is set to %r, this will be the name of the java class generated' % LANG_JAVA)
    parser.add_argument('-java_package', help='if lang is set to %r, this will be the package of the java class generated' % LANG_JAVA)
    parser.add_argument('-args', nargs='+', help='a list of <name, value> pairs')
    args = parser.parse_args()
    if args.lang not in supportedLangs:
        logger.error('Invalid language: %r' % args.lang)
        return -1;
    params = []
    if args.args:
        # Args come flat: name1 value1 name2 value2 ...
        if len(args.args) % 2 != 0:
            logger.error('Invalid number of args: %d' % len(args.args))
            return -1
        # Parse arguments
        for i in range(int(len(args.args) / 2)):
            name = args.args[i * 2 + 0]
            arg = args.args[i * 2 + 1]
            value = None
            # NOTE(review): this first guard is always true (value was just
            # set to None) -- kept as-is.
            if value == None:
                # First try casting it to int
                try:
                    value = int(arg)
                    paramType = TYPE_INT32
                except ValueError:
                    pass
            # Try boolean
            if value == None:
                if arg.lower() in ['true', 'false']:
                    value = True if arg.lower() == 'true' else False
                    paramType = TYPE_BOOL
            # Default to string
            if value == None:
                value = arg
                paramType = TYPE_STRING
            param = Param(paramType, name, value)
            params.append(param)
            logger.debug('Adding param: %s' % str(param))
    # NOTE(review): duplicate of the language check at the top -- dead code.
    if args.lang not in supportedLangs:
        logger.error('Invalid language: %r' % args.lang)
        return -1;
    logger.debug('Generating %s ..' % args.lang)
    if args.lang == LANG_JAVA:
        if not args.java_class:
            logger.error('Java class not provided')
            return -1
        if not args.java_package:
            logger.error('Java package not provided')
            return -1
        generator = JavaGenerator(args.java_package, args.java_class, params)
    else:
        raise NotImplementedError("Unsupported lang: %r" % args.lang)
    newContent = generator.generate()
    # Compare with the existing file so unchanged output is not rewritten
    # (keeps timestamps stable for build systems).
    oldContent = None
    if args.outputFile != '-' and os.path.exists(args.outputFile):
        with open(args.outputFile, 'r') as fileObj:
            oldContent = fileObj.read()
    if oldContent == newContent:
        logger.debug('File up-to-date')
        return 0
    if args.outputFile != '-':
        outDir = os.path.dirname(args.outputFile)
        if not os.path.exists(outDir):
            os.makedirs(outDir)
        with open(args.outputFile, 'w') as fileObj:
            fileObj.write(newContent)
    else:
        sys.stdout.write(newContent)
    logger.debug('File updated: %r' % args.outputFile)
    return 0
def get_data_home():
    """Return the local nnabla data cache directory, creating it on first use."""
    import os
    data_home = os.path.expanduser("~/work/.nnabla_data")
    if not os.path.isdir(data_home):
        os.makedirs(data_home)
    return data_home
def __process_results__(self):
    """Aggregate statistics over all stored MCMC experiments and return them
    as a list of human-readable report lines.

    Per experiment this computes: absolute quantile errors against
    ``self.ref_quantiles``, the norm of the post-burn-in sample mean, the
    acceptance rate and the total sampling time; each is summarised below as
    "mean +- standard error".  It also plots the running norm-of-mean versus
    iteration (scaled by mean runtime) and saves the figure plus its raw
    X/Y/error arrays under ``experiment_dir``.
    """
    lines = []
    if len(self.experiments) == 0:
        lines.append("no experiments to process")
        # NOTE(review): returns None here (not `lines`), so callers get no
        # report in the empty case — confirm this is intended.
        return

    # burnin is the same for all chains
    burnin = self.experiments[0].mcmc_chain.mcmc_params.burnin

    # Per-experiment accumulators: one row/slot per experiment.
    quantiles = zeros((len(self.experiments), len(self.ref_quantiles)))
    norm_of_means = zeros(len(self.experiments))
    acceptance_rates = zeros(len(self.experiments))
    # ess_0 = zeros(len(self.experiments))
    # ess_1 = zeros(len(self.experiments))
    # ess_minima = zeros(len(self.experiments))
    # ess_medians = zeros(len(self.experiments))
    # ess_maxima = zeros(len(self.experiments))
    times = zeros(len(self.experiments))

    for i in range(len(self.experiments)):
        # Samples after discarding the burn-in prefix.
        burned_in = self.experiments[i].mcmc_chain.samples[burnin:, :]

        # use precomputed quantiles if they match with the provided ones
        if hasattr(self.experiments[i], "ref_quantiles") and \
           hasattr(self.experiments[i], "quantiles") and \
           allclose(self.ref_quantiles, self.experiments[i].ref_quantiles):
            quantiles[i, :] = self.experiments[i].quantiles
        else:
            try:
                quantiles[i, :] = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(\
                    burned_in, self.ref_quantiles)
            except NotImplementedError:
                print "skipping quantile computations, distribution does", \
                      "not support it."

        # quantiles should be about average error rather than average quantile
        quantiles[i,:]=abs(quantiles[i,:]-self.ref_quantiles)

        # NOTE(review): `dim` is only referenced by the commented-out ESS
        # code below; it is otherwise unused.
        dim = self.experiments[i].mcmc_chain.mcmc_sampler.distribution.dimension
        norm_of_means[i] = norm(mean(burned_in, 0))
        acceptance_rates[i] = mean(self.experiments[i].mcmc_chain.accepteds[burnin:])

        # dump burned in samples to disc
        # sample_filename=self.experiments[0].experiment_dir + self.experiments[0].name + "_burned_in.txt"
        # savetxt(sample_filename, burned_in)

        # store minimum ess for every experiment
        #ess_per_covariate = asarray([RCodaTools.ess_coda(burned_in[:, cov_idx]) for cov_idx in range(dim)])
        # ess_per_covariate = asarray([0 for _ in range(dim)])
        # ess_0=ess_per_covariate[0]
        # ess_1=ess_per_covariate[1]
        # ess_minima[i] = min(ess_per_covariate)
        # ess_medians[i] = median(ess_per_covariate)
        # ess_maxima[i] = max(ess_per_covariate)

        # save chain time needed
        ellapsed = self.experiments[i].mcmc_chain.mcmc_outputs[0].times
        times[i] = int(round(sum(ellapsed)))

    mean_quantiles = mean(quantiles, 0)
    std_quantiles = std(quantiles, 0)
    # Divisor turning a standard deviation into a standard error of the mean.
    sqrt_num_trials=sqrt(len(self.experiments))

    # print median kernel width sigma
    #sigma=GaussianKernel.get_sigma_median_heuristic(burned_in.T)
    #lines.append("median kernel sigma: "+str(sigma))

    lines.append("quantiles:")
    for i in range(len(self.ref_quantiles)):
        lines.append(str(mean_quantiles[i]) + " +- " + str(std_quantiles[i]/sqrt_num_trials))

    lines.append("norm of means:")
    lines.append(str(mean(norm_of_means)) + " +- " + str(std(norm_of_means)/sqrt_num_trials))

    lines.append("acceptance rate:")
    lines.append(str(mean(acceptance_rates)) + " +- " + str(std(acceptance_rates)/sqrt_num_trials))

    # lines.append("ess dimension 0:")
    # lines.append(str(mean(ess_0)) + " +- " + str(std(ess_0)/sqrt_num_trials))
    #
    # lines.append("ess dimension 1:")
    # lines.append(str(mean(ess_1)) + " +- " + str(std(ess_1)/sqrt_num_trials))
    #
    # lines.append("minimum ess:")
    # lines.append(str(mean(ess_minima)) + " +- " + str(std(ess_minima)/sqrt_num_trials))
    #
    # lines.append("median ess:")
    # lines.append(str(mean(ess_medians)) + " +- " + str(std(ess_medians)/sqrt_num_trials))
    #
    # lines.append("maximum ess:")
    # lines.append(str(mean(ess_maxima)) + " +- " + str(std(ess_maxima)/sqrt_num_trials))

    lines.append("times:")
    lines.append(str(mean(times)) + " +- " + str(std(times)/sqrt_num_trials))

    # mean as a function of iterations, normalised by time
    step = round((self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin)/5)
    iterations = arange(self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin, step=step)
    running_means = zeros(len(iterations))
    running_errors = zeros(len(iterations))
    for i in arange(len(iterations)):
        # norm of mean of chain up
        norm_of_means_yet = zeros(len(self.experiments))
        for j in range(len(self.experiments)):
            samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
            norm_of_means_yet[j] = norm(mean(samples_yet, 0))
        running_means[i] = mean(norm_of_means_yet)
        # 1.96 is the 95% normal confidence interval half-width.
        error_level = 1.96
        running_errors[i] = error_level * std(norm_of_means_yet) / sqrt(len(norm_of_means_yet))

    ioff()
    figure()
    plot(iterations, running_means*mean(times))
    fill_between(iterations, (running_means - running_errors)*mean(times), \
                 (running_means + running_errors)*mean(times), hold=True, color="gray")

    # make sure path to save exists
    try:
        os.makedirs(self.experiments[0].experiment_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean.png")
    close()

    # also store plot X and Y
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_X.txt", \
            iterations)
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_Y.txt", \
            running_means*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_errors.txt", \
            running_errors*mean(times))

    # dont produce quantile convergence plots here for now
    """# quantile convergence of a single one
    desired_quantile=0.5
    running_quantiles=zeros(len(iterations))
    running_quantile_errors=zeros(len(iterations))
    for i in arange(len(iterations)):
        quantiles_yet = zeros(len(self.experiments))
        for j in range(len(self.experiments)):
            samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
            # just compute one quantile for now
            quantiles_yet[j]=self.experiments[j].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(samples_yet, \
                array([desired_quantile]))
            quantiles_yet[j]=abs(quantiles_yet[j]-desired_quantile)
        running_quantiles[i] = mean(quantiles_yet)
        error_level = 1.96
        running_quantile_errors[i] = error_level * std(quantiles_yet) / sqrt(len(quantiles_yet))
    ioff()
    figure()
    plot(iterations, running_quantiles*mean(times))
    fill_between(iterations, (running_quantiles - running_quantile_errors)*mean(times), \
                 (running_quantiles + running_quantile_errors)*mean(times), hold=True, color="gray")
    plot([iterations.min(),iterations.max()], [desired_quantile*mean(times) for _ in range(2)])
    title(str(desired_quantile)+"-quantile convergence")
    savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile.png")
    close()
    # also store plot X and Y
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_X.txt", \
            iterations)
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_Y.txt", \
            running_quantiles*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_errors.txt", \
            running_quantile_errors*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_reference.txt", \
            [desired_quantile*mean(times)])
    """

    # add latex table line
    # latex_lines = []
    # latex_lines.append("Sampler & Acceptance & ESS2 & Norm(mean) & ")
    # for i in range(len(self.ref_quantiles)):
    #     latex_lines.append('%.1f' % self.ref_quantiles[i] + "-quantile")
    #     if i < len(self.ref_quantiles) - 1:
    #         latex_lines.append(" & ")
    # latex_lines.append("\\\\")
    # lines.append("".join(latex_lines))
    #
    # latex_lines = []
    # latex_lines.append(self.experiments[0].mcmc_chain.mcmc_sampler.__class__.__name__)
    # latex_lines.append('$%.3f' % mean(acceptance_rates) + " \pm " + '%.3f$' % (std(acceptance_rates)/sqrt_num_trials))
    # latex_lines.append('$%.3f' % mean(norm_of_means) + " \pm " + '%.3f$' % (std(norm_of_means)/sqrt_num_trials))
    # for i in range(len(self.ref_quantiles)):
    #     latex_lines.append('$%.3f' % mean_quantiles[i] + " \pm " + '%.3f$' % (std_quantiles[i]/sqrt_num_trials))
    #
    #
    # lines.append(" & ".join(latex_lines) + "\\\\")

    return lines
def open_parallel_dir(base_dir, create=False):
    """
    Open parallel directory of the current directory in Windows Explorer.

    E.g. jump from "D:\Study\Dev\Lang\Python" to "D:\Software\Dev\Lang\Python".

    Reads the current path from Explorer's location bar via simulated
    Ctrl+L / Ctrl+C keystrokes and the clipboard, maps it under ``base_dir``
    when it starts with one of the known prefixes, and opens the result.

    :param base_dir: the base directory of the destination directory.
    :param create: create if the destination directory is not existing.
    :return: None.
    """
    # Send "Ctrl+l" to move focus to location bar
    SendKeys(r'<^l')()

    # Sleep to keep from being too fast
    time.sleep(0.05)

    # Send "Ctrl+c" to copy the current directory path
    SendKeys(r'<^c')()

    # Sleep to keep from being too fast
    time.sleep(0.05)

    # Get the current directory path from clipboard
    dir_path_in_bar = clipboard_get_text()

    # If current directory is empty
    if not dir_path_in_bar:
        # Exit
        return

    # If is not Python 2
    if not _IS_PY2:
        # NOTE(review): Python 3 `str` has no `.decode`; this branch only
        # works if `clipboard_get_text` returns bytes on Python 3 — confirm,
        # otherwise the condition looks inverted (decode belongs to Python 2).
        # Convert to Unicode
        dir_path_in_bar = dir_path_in_bar.decode('gbk')

    # Remove the "drive" part from the directory path.
    # E.g. "D:\Study" becomes "\Study"
    _, dir_path = splitdrive(dir_path_in_bar)

    # Print message
    print('Origin: {}'.format(dir_path))

    # Strip slashes
    dir_path = dir_path.strip('\\/')

    # Whether the current directory path starts with any of the prefixes below
    start_with_prefix = False

    # Start to find a matching prefix
    for prefix in [
        'Study',
        'Software',
        'SoftwareData',
        'All\\Software2\\SoftwareBig',
        'All\\Software2\\SoftwareSmall',
    ]:
        # If the current directory path starts with the prefix
        # (the trailing '\\' prevents matching "StudyFoo" against "Study")
        if (dir_path + '\\').startswith(prefix + '\\'):
            # Set the boolean
            start_with_prefix = True

            # Remove the prefix
            rel_path = dir_path[len(prefix):]

            # Left-strip slashes
            rel_path = rel_path.lstrip('\\/')

            # Create a new directory path
            dir_path_new = os.path.join(base_dir, rel_path)

            # Replace backslash with forward slash
            dir_path_new = dir_path_new.replace('\\', '/')

            # If to create a destination path if it is not existing
            if create:
                # If the destination path is not existing
                if not os.path.isdir(dir_path_new):
                    # Create destination path
                    os.makedirs(dir_path_new)

                # Message
                print('Open: {}'.format(dir_path_new))

                # Open the destination path
                webbrowser.open(dir_path_new)

                # Exit
                return

            # If not to create a destination path if it is not existing
            else:
                # Start to find the closest upper directory
                while True:
                    # If the destination path is existing
                    if os.path.isdir(dir_path_new):
                        # Message
                        print('Open: {}'.format(dir_path_new))

                        # Open the destination path
                        webbrowser.open(dir_path_new)

                        # Exit
                        return

                    # If the destination path is not existing
                    else:
                        # Get the parent directory
                        dir_path_new = os.path.dirname(dir_path_new)

                        # Remove the "drive" part
                        path_part = os.path.splitdrive(dir_path_new)[1]

                        # If the parent directory is partition root
                        if path_part in ('', '/', '\\'):
                            # Message
                            print('Ignore: {}'.format(dir_path_in_bar))

                            # Exit
                            return
                        else:
                            # Try the parent directory in the next loop
                            continue

    # If the current directory path not starts with the any of the prefixes
    if not start_with_prefix:
        # Message
        print('Open: {}'.format(base_dir))

        # Open the base destination directory
        webbrowser.open(base_dir)

        # Exit
        return
def main(): parser = argparse.ArgumentParser( description='Generate playlists with the indicated length') parser.add_argument('-d', '--directory', help='Directory with music files', type=str, required=True) parser.add_argument('-l', '--length', help='Length of the playlist, in minutes', type=int, required=True) args = parser.parse_args() directory = args.directory length = args.length * 60 path = r'./playlists/' if not os.path.exists(path): os.makedirs(path) playlist_basename = basename(argv[0][:-3]) + str(length / 60) + '_' playlist_number = 1 curr_length = 0 curr_items = [] too_long_items = [] all_items = [] for music_file in os.listdir(directory): if fnmatch.fnmatch(music_file, '*.mp[43]'): all_items.append(directory + music_file) shuffle(all_items) for item in all_items: if curr_length >= length: name = path + playlist_basename + str(playlist_number) + '.m3u' playlist_file = open(name, 'w') playlist_file.writelines(curr_items) playlist_file.close() print 'Playlist generated, name: ', name, ' length ', curr_length / 60, 'min' playlist_number += 1 curr_length = 0 curr_items = [] else: encoding = item[-4:] encodings = {'.mp3': MP3, '.mp4': MP4} try: music_file = encodings[encoding](item) except Exception as e: handleException(e) else: file_length = music_file.info.length if file_length > length: too_long_items.append(item) print 'File %s exceed the given length (%s min)' % ( item, file_length / 60) else: curr_length += file_length curr_items.append(item + '\n') print '\nThis files exceeded the given length and were not added to any playlist...\n' for i in too_long_items: print basename(i)
def __init__(self, data_dir):
    """Remember the data directory path and make sure it exists on disc.

    :param data_dir: directory for this object's data; created (including
        parents) when missing.
    :raises OSError: when creation fails for any reason other than the
        directory already existing.
    """
    self._data_dir = data_dir
    try:
        os.makedirs(data_dir)
    except OSError:
        # Only ignore the failure when the directory is actually there; the
        # original bare `except: pass` hid permission errors and typos too.
        if not os.path.isdir(data_dir):
            raise
def __init__(self, folder_path):
    """Keep a reference to *folder_path* and create it when absent."""
    self.path = folder_path
    folder_missing = not os.path.exists(folder_path)
    if folder_missing:
        os.makedirs(folder_path)
writefagSet = set() processedPastes=[] wfSortList=[] errorID = 0; pastebinRequests=0 pasteCount=0 ''' Ok, next we need to retrieve the current lists, or else you'd have to run the whole script every time you want to make an update. ''' print('Retrieving already scanned pastebin links...') if not os.path.exists('resources'): os.makedirs('resources') try: pasteSetInput=open('resources/pasteSet.txt',mode='r') except Exception: open('resources/pasteSet.txt',mode='w') pasteSetInput=open('resources/pasteSet.txt',mode='r') inputLine = pasteSetInput.readline() while inputLine: inputLine=inputLine[:-1] pasteSet.add(inputLine) if inputLine.startswith('http://pastebin.com/u/'): writefagSet.add(inputLine) else:
def __process_results__(self):
    """Aggregate statistics over all stored MCMC experiments and return them
    as a list of human-readable report lines.

    Per experiment: absolute quantile errors against ``self.ref_quantiles``,
    norm of the post-burn-in sample mean, acceptance rate and total sampling
    time; each is reported as "mean +- standard error".  Additionally plots
    the running norm-of-mean versus iteration (scaled by mean runtime) and
    saves the figure plus its raw X/Y/error arrays under ``experiment_dir``.
    """
    lines = []
    if len(self.experiments) == 0:
        lines.append("no experiments to process")
        # NOTE(review): returns None here (not `lines`), so callers get no
        # report in the empty case — confirm this is intended.
        return

    # burnin is the same for all chains
    burnin = self.experiments[0].mcmc_chain.mcmc_params.burnin

    # Per-experiment accumulators: one row/slot per experiment.
    quantiles = zeros((len(self.experiments), len(self.ref_quantiles)))
    norm_of_means = zeros(len(self.experiments))
    acceptance_rates = zeros(len(self.experiments))
    # ess_0 = zeros(len(self.experiments))
    # ess_1 = zeros(len(self.experiments))
    # ess_minima = zeros(len(self.experiments))
    # ess_medians = zeros(len(self.experiments))
    # ess_maxima = zeros(len(self.experiments))
    times = zeros(len(self.experiments))

    for i in range(len(self.experiments)):
        # Samples after discarding the burn-in prefix.
        burned_in = self.experiments[i].mcmc_chain.samples[burnin:, :]

        # use precomputed quantiles if they match with the provided ones
        if hasattr(self.experiments[i], "ref_quantiles") and \
           hasattr(self.experiments[i], "quantiles") and \
           allclose(self.ref_quantiles, self.experiments[i].ref_quantiles):
            quantiles[i, :] = self.experiments[i].quantiles
        else:
            try:
                quantiles[i, :] = self.experiments[
                    i].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(
                        burned_in, self.ref_quantiles)
            except NotImplementedError:
                print "skipping quantile computations, distribution does", \
                      "not support it."

        # quantiles should be about average error rather than average quantile
        quantiles[i, :] = abs(quantiles[i, :] - self.ref_quantiles)

        # NOTE(review): `dim` is only referenced by the commented-out ESS
        # code below; it is otherwise unused.
        dim = self.experiments[
            i].mcmc_chain.mcmc_sampler.distribution.dimension
        norm_of_means[i] = norm(mean(burned_in, 0))
        acceptance_rates[i] = mean(
            self.experiments[i].mcmc_chain.accepteds[burnin:])

        # dump burned in samples to disc
        # sample_filename=self.experiments[0].experiment_dir + self.experiments[0].name + "_burned_in.txt"
        # savetxt(sample_filename, burned_in)

        # store minimum ess for every experiment
        #ess_per_covariate = asarray([RCodaTools.ess_coda(burned_in[:, cov_idx]) for cov_idx in range(dim)])
        # ess_per_covariate = asarray([0 for _ in range(dim)])
        # ess_0=ess_per_covariate[0]
        # ess_1=ess_per_covariate[1]
        # ess_minima[i] = min(ess_per_covariate)
        # ess_medians[i] = median(ess_per_covariate)
        # ess_maxima[i] = max(ess_per_covariate)

        # save chain time needed
        ellapsed = self.experiments[i].mcmc_chain.mcmc_outputs[0].times
        times[i] = int(round(sum(ellapsed)))

    mean_quantiles = mean(quantiles, 0)
    std_quantiles = std(quantiles, 0)
    # Divisor turning a standard deviation into a standard error of the mean.
    sqrt_num_trials = sqrt(len(self.experiments))

    # print median kernel width sigma
    #sigma=GaussianKernel.get_sigma_median_heuristic(burned_in.T)
    #lines.append("median kernel sigma: "+str(sigma))

    lines.append("quantiles:")
    for i in range(len(self.ref_quantiles)):
        lines.append(
            str(mean_quantiles[i]) + " +- " +
            str(std_quantiles[i] / sqrt_num_trials))

    lines.append("norm of means:")
    lines.append(
        str(mean(norm_of_means)) + " +- " +
        str(std(norm_of_means) / sqrt_num_trials))

    lines.append("acceptance rate:")
    lines.append(
        str(mean(acceptance_rates)) + " +- " +
        str(std(acceptance_rates) / sqrt_num_trials))

    # lines.append("ess dimension 0:")
    # lines.append(str(mean(ess_0)) + " +- " + str(std(ess_0)/sqrt_num_trials))
    #
    # lines.append("ess dimension 1:")
    # lines.append(str(mean(ess_1)) + " +- " + str(std(ess_1)/sqrt_num_trials))
    #
    # lines.append("minimum ess:")
    # lines.append(str(mean(ess_minima)) + " +- " + str(std(ess_minima)/sqrt_num_trials))
    #
    # lines.append("median ess:")
    # lines.append(str(mean(ess_medians)) + " +- " + str(std(ess_medians)/sqrt_num_trials))
    #
    # lines.append("maximum ess:")
    # lines.append(str(mean(ess_maxima)) + " +- " + str(std(ess_maxima)/sqrt_num_trials))

    lines.append("times:")
    lines.append(
        str(mean(times)) + " +- " + str(std(times) / sqrt_num_trials))

    # mean as a function of iterations, normalised by time
    step = round(
        (self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin)
        / 5)
    iterations = arange(
        self.experiments[0].mcmc_chain.mcmc_params.num_iterations - burnin,
        step=step)
    running_means = zeros(len(iterations))
    running_errors = zeros(len(iterations))
    for i in arange(len(iterations)):
        # norm of mean of chain up
        norm_of_means_yet = zeros(len(self.experiments))
        for j in range(len(self.experiments)):
            samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(
                burnin + iterations[i] + 1 + step), :]
            norm_of_means_yet[j] = norm(mean(samples_yet, 0))
        running_means[i] = mean(norm_of_means_yet)
        # 1.96 is the 95% normal confidence interval half-width.
        error_level = 1.96
        running_errors[i] = error_level * std(norm_of_means_yet) / sqrt(
            len(norm_of_means_yet))

    ioff()
    figure()
    plot(iterations, running_means * mean(times))
    fill_between(iterations, (running_means - running_errors)*mean(times), \
                 (running_means + running_errors)*mean(times), hold=True, color="gray")

    # make sure path to save exists
    try:
        os.makedirs(self.experiments[0].experiment_dir)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

    savefig(self.experiments[0].experiment_dir + self.experiments[0].name +
            "_running_mean.png")
    close()

    # also store plot X and Y
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_X.txt", \
            iterations)
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_Y.txt", \
            running_means*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_mean_errors.txt", \
            running_errors*mean(times))

    # dont produce quantile convergence plots here for now
    """# quantile convergence of a single one
    desired_quantile=0.5
    running_quantiles=zeros(len(iterations))
    running_quantile_errors=zeros(len(iterations))
    for i in arange(len(iterations)):
        quantiles_yet = zeros(len(self.experiments))
        for j in range(len(self.experiments)):
            samples_yet = self.experiments[j].mcmc_chain.samples[burnin:(burnin + iterations[i] + 1 + step), :]
            # just compute one quantile for now
            quantiles_yet[j]=self.experiments[j].mcmc_chain.mcmc_sampler.distribution.emp_quantiles(samples_yet, \
                array([desired_quantile]))
            quantiles_yet[j]=abs(quantiles_yet[j]-desired_quantile)
        running_quantiles[i] = mean(quantiles_yet)
        error_level = 1.96
        running_quantile_errors[i] = error_level * std(quantiles_yet) / sqrt(len(quantiles_yet))
    ioff()
    figure()
    plot(iterations, running_quantiles*mean(times))
    fill_between(iterations, (running_quantiles - running_quantile_errors)*mean(times), \
                 (running_quantiles + running_quantile_errors)*mean(times), hold=True, color="gray")
    plot([iterations.min(),iterations.max()], [desired_quantile*mean(times) for _ in range(2)])
    title(str(desired_quantile)+"-quantile convergence")
    savefig(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile.png")
    close()
    # also store plot X and Y
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_X.txt", \
            iterations)
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_Y.txt", \
            running_quantiles*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_errors.txt", \
            running_quantile_errors*mean(times))
    savetxt(self.experiments[0].experiment_dir + self.experiments[0].name + "_running_quantile_reference.txt", \
            [desired_quantile*mean(times)])
    """

    # add latex table line
    # latex_lines = []
    # latex_lines.append("Sampler & Acceptance & ESS2 & Norm(mean) & ")
    # for i in range(len(self.ref_quantiles)):
    #     latex_lines.append('%.1f' % self.ref_quantiles[i] + "-quantile")
    #     if i < len(self.ref_quantiles) - 1:
    #         latex_lines.append(" & ")
    # latex_lines.append("\\\\")
    # lines.append("".join(latex_lines))
    #
    # latex_lines = []
    # latex_lines.append(self.experiments[0].mcmc_chain.mcmc_sampler.__class__.__name__)
    # latex_lines.append('$%.3f' % mean(acceptance_rates) + " \pm " + '%.3f$' % (std(acceptance_rates)/sqrt_num_trials))
    # latex_lines.append('$%.3f' % mean(norm_of_means) + " \pm " + '%.3f$' % (std(norm_of_means)/sqrt_num_trials))
    # for i in range(len(self.ref_quantiles)):
    #     latex_lines.append('$%.3f' % mean_quantiles[i] + " \pm " + '%.3f$' % (std_quantiles[i]/sqrt_num_trials))
    #
    #
    # lines.append(" & ".join(latex_lines) + "\\\\")

    return lines
def test_libspec_manager_caches(libspec_manager, workspace_dir):
    """Verify that the libspec manager picks up, drops and refreshes library
    information as workspace folders are added/removed and .libspec files
    change on disc.

    Uses the pytest fixtures ``libspec_manager`` and ``workspace_dir``.
    File-change detection is poll-based with 1-second resolution, hence the
    explicit sleeps between writes.
    """
    from robocode_ls_core import uris
    # NOTE(review): unusual import — this pulls the `os` module back out of
    # the `os.path` namespace; it works but `import os` would be clearer.
    from os.path import os
    from robotframework_ls_tests.fixtures import LIBSPEC_1
    from robotframework_ls_tests.fixtures import LIBSPEC_2
    from robotframework_ls_tests.fixtures import LIBSPEC_2_A
    import time
    from robocode_ls_core.unittest_tools.fixtures import wait_for_test_condition

    workspace_dir_a = os.path.join(workspace_dir, "workspace_dir_a")
    os.makedirs(workspace_dir_a)
    with open(os.path.join(workspace_dir_a, "my.libspec"), "w") as stream:
        stream.write(LIBSPEC_1)

    # Adding the folder makes its libspec contents discoverable.
    libspec_manager.add_workspace_folder(uris.from_fs_path(workspace_dir_a))
    assert libspec_manager.get_library_info("case1_library",
                                            create=False) is not None

    # Removing the folder must drop the cached library info.
    libspec_manager.remove_workspace_folder(uris.from_fs_path(workspace_dir_a))
    library_info = libspec_manager.get_library_info("case1_library",
                                                    create=False)
    if library_info is not None:
        raise AssertionError(
            "Expected: %s to be None after removing %s" %
            (library_info, uris.from_fs_path(workspace_dir_a)))

    # Re-adding restores it.
    libspec_manager.add_workspace_folder(uris.from_fs_path(workspace_dir_a))
    assert libspec_manager.get_library_info("case1_library",
                                            create=False) is not None

    # Give a timeout so that the next write will have at least 1 second
    # difference (1s is the minimum for poll to work).
    time.sleep(1.1)
    with open(os.path.join(workspace_dir_a, "my2.libspec"), "w") as stream:
        stream.write(LIBSPEC_2)

    def check_spec_found():
        # True once the newly written libspec has been picked up.
        library_info = libspec_manager.get_library_info("case2_library",
                                                        create=False)
        return library_info is not None

    # Updating is done in a thread.
    wait_for_test_condition(check_spec_found, sleep=1 / 5.0)
    library_info = libspec_manager.get_library_info("case2_library",
                                                    create=False)
    assert set(x.name for x in library_info.keywords) == set(
        ["Case 2 Verify Another Model", "Case 2 Verify Model"])

    # Give a timeout so that the next write will have at least 1 second
    # difference (1s is the minimum for poll to work).
    time.sleep(1)
    with open(os.path.join(workspace_dir_a, "my2.libspec"), "w") as stream:
        stream.write(LIBSPEC_2_A)

    def check_spec_2_a():
        # True once the overwritten libspec's new keywords are visible.
        library_info = libspec_manager.get_library_info("case2_library",
                                                        create=False)
        if library_info:
            return set(x.name for x in library_info.keywords) == set(
                ["Case 2 A Verify Another Model", "Case 2 A Verify Model"])

    # Updating is done in a thread.
    wait_for_test_condition(check_spec_2_a, sleep=1 / 5.0)
def record_file_mover(self, yespath, nopath, source):
    """Move recorded .wav files from *source* into the model's directory
    tree, keeping (up to) 4 files in the "test" sub-tree and the rest in the
    training sub-tree.

    :param yespath: relative sub-path for wake-word recordings
        (starts with a slash, e.g. "/wake-word/<lang>-short/").
    :param nopath: relative sub-path for not-wake-word recordings.
    :param source: directory holding the freshly recorded files.

    Files are renamed on the way to "<name>-<lang>-<uuid>.wav" (with a
    "not" prefix for the not-wake-word set).
    """
    #### wake words with 4 test files
    i = 1
    if os.path.isdir(self.settings["file_path"] + self.new_name + "/test" +
                     yespath):
        # NOTE(review): next(os.walk(...)) yields a (root, dirs, files)
        # 3-tuple, so len(onlyfiles) is always 3 and i is always 1 here;
        # len(onlyfiles[2]) (the file list) was probably intended — confirm.
        onlyfiles = next(
            os.walk(self.settings["file_path"] + self.new_name + "/test" +
                    yespath))
        i = 4 - len(onlyfiles)
    else:
        i = 1
        os.makedirs(self.settings["file_path"] + self.new_name + "/test" +
                    yespath)
    for root, dirs, files in os.walk(source + yespath):
        for f in files:
            filename = os.path.join(root, f)
            if filename.endswith('.wav'):
                if i <= 4:
                    # First (up to) 4 files go to the test set.
                    shutil.move(
                        filename, self.settings["file_path"] + self.new_name +
                        "/test" + yespath + self.new_name + "-" +
                        self.lang[:2] + "-" + str(uuid.uuid1()) + ".wav")
                    self.log.info("move file: " + filename)
                    i = i + 1
                else:
                    # Remaining files go to the training set.
                    if not os.path.isdir(self.settings["file_path"] +
                                         self.new_name + yespath):
                        os.makedirs(self.settings["file_path"] +
                                    self.new_name + yespath)
                    shutil.move(
                        filename, self.settings["file_path"] + self.new_name +
                        yespath + self.new_name + "-" + self.lang[:2] + "-" +
                        str(uuid.uuid1()) + ".wav")
                    self.log.info("move file: " + filename)
                    i = i + 1
    #### not wakeword with 4 test files
    i = 1
    if os.path.isdir(self.settings["file_path"] + self.new_name + "/test" +
                     nopath):
        # NOTE(review): same 3-tuple length issue as above.
        onlyfiles = next(
            os.walk(self.settings["file_path"] + self.new_name + "/test" +
                    nopath))
        i = 4 - len(onlyfiles)
    else:
        i = 1
        os.makedirs(self.settings["file_path"] + self.new_name + "/test" +
                    nopath)
    for root, dirs, files in os.walk(source + nopath):
        for f in files:
            filename = os.path.join(root, f)
            if filename.endswith('.wav'):
                if i <= 4:
                    if not os.path.isdir(self.settings["file_path"] +
                                         self.new_name + "/test" + nopath):
                        # NOTE(review): this creates ".../test/" + nopath while
                        # the check and the move below use ".../test" + nopath
                        # (nopath already starts with a slash) — inconsistent;
                        # harmless on POSIX but worth unifying.
                        os.makedirs(self.settings["file_path"] +
                                    self.new_name + "/test/" + nopath)
                    shutil.move(
                        filename, self.settings["file_path"] + self.new_name +
                        "/test" + nopath + "not" + self.new_name + "-" +
                        self.lang[:2] + "-" + str(uuid.uuid1()) + ".wav")
                    self.log.info("move file: " + filename)
                    i = i + 1
                else:
                    if not os.path.isdir(self.settings["file_path"] +
                                         self.new_name + nopath):
                        os.makedirs(self.settings["file_path"] +
                                    self.new_name + nopath)
                    shutil.move(
                        filename, self.settings["file_path"] + self.new_name +
                        nopath + "not" + self.new_name + "-" + self.lang[:2] +
                        "-" + str(uuid.uuid1()) + ".wav")
                    self.log.info("move file: " + filename)
                    i = i + 1
def prepaire_repo(self, name):
    """Package a trained precise wake-word model for upload: tar the model
    files, append a README entry, generate a license file listing all
    contributed recordings, and (optionally) copy the .wav samples over.

    :param name: model name; dashes and spaces are stripped first.
    """
    name = name.replace('-', '').replace(' ', '')
    ##### Model Files
    self.log.info("make repo ready vor upload")
    # Third line of PKG-INFO is "Version: x.y.z"; keep first 5 chars of it.
    presiceversion = linecache.getline(
        self.file_system.path + "/precise/mycroft_precise.egg-info/PKG-INFO",
        3).replace('Version: ', '')[:5]
    modelzip = self.precisefolder + "/" + name + "/models/" + name + "-" + self.lang[:3] + presiceversion + "-" + time.strftime(
        "%Y%m%d") + "-" + self.settings.get('localgit') + ".tar.gz"
    if not os.path.isdir(self.precisefolder + "/" + name + "/models/"):
        os.makedirs(self.precisefolder + "/" + name + "/models/")
    # NOTE(review): the tarfile is never closed; with "w:gz" the archive may
    # be truncated until process exit — consider a `with` block.
    tar = tarfile.open(modelzip, "w:gz")
    for nams in [
            self.file_system.path + "/" + name + ".pb",
            self.file_system.path + "/" + name + ".pbtxt",
            self.file_system.path + "/" + name + ".pb.params"
    ]:
        tar.add(nams)
    #### calculating info
    traininfo = linecache.getline(
        self.file_system.path + "/" + name + ".logs/output.txt", 2)
    #### generate Readme.md
    readmefile = self.precisefolder + "/" + name + "/models/README.md"
    # NOTE(review): open(..., "a") creates the file, so the isfile() check
    # below is always True afterwards and the "# <name>" header is never
    # written — the check should happen before opening.  Also `file` shadows
    # the builtin.
    file = open(readmefile, "a")
    if not os.path.isfile(readmefile):
        file.write("# " + name + "\n")
    file.write("\n### " + name + "-" + self.lang[:3] +
               time.strftime("%Y%m%d") + "\n")
    file.write(presiceversion + " " + traininfo[:1] +
               ". Use Public Domain Sounds Backup:" +
               str(self.settings["soundbackup"]) +
               ", automatically generated by wakeword trainer skill \n")
    file.close()
    ###### licenses
    licensefile = self.precisefolder + "/licenses/license-" + time.strftime(
        "%Y%m%d") + "-" + self.settings.get('localgit') + ".txt"
    fobj_in = open(self.precisefolder + "/licenses/license-template.txt", "r")
    fobj_out = open(licensefile, "w")
    # Fill in the author placeholders of the license template.
    for line in fobj_in:
        line = line.replace(
            "I, [author name]",
            "I, " + self.settings.get('localgit') + ' (https://github.com/' +
            self.settings.get('localgit') + ')')
        line = line.replace(
            "/file/name/1",
            "automatically generated by gras64 wakeword trainer skill"
        ).replace("/file/name/2", "")
        fobj_out.write(str(line))
    # List the model archive and every covered .wav (paths relative to the
    # precise folder) in the license file.
    modelzipfile = modelzip.replace(self.precisefolder + "/", "")
    fobj_out.write(modelzipfile + "\n")
    for root, dirs, files in os.walk(self.precisefolder + "/" + name + "/" +
                                     self.lang[:2]):
        for f in files:
            filename = os.path.join(root, f)
            self.log.info("filename: " + filename)
            if filename.endswith('.wav'):
                filename = filename.replace(self.precisefolder, "")
                fobj_out.write(filename + "\n")
    fobj_in.close()
    fobj_out.close()
    ##### Copy all wav file
    if not self.settings["onlyPrecise"]:
        # Copy both the training and the test recordings into the repo and
        # record each one in the license file.
        source = self.settings[
            "file_path"] + name + "/wake-word/" + self.lang[:2] + "-short/"
        destination = self.precisefolder + "/" + name + "/" + self.lang + "/"
        if not os.path.isdir(destination):
            os.makedirs(destination)
        fobj_out = open(licensefile, "a")
        for filename in os.listdir(source):
            if filename.endswith('.wav'):
                shutil.copy(source + filename, destination)
                fobj_out.write("/" + name + "/" + self.lang[:2] + "/" +
                               filename + "\n")
        source = self.settings[
            "file_path"] + name + "/test/wake-word/" + self.lang[:2] + "-short/"
        for filename in os.listdir(source):
            if filename.endswith('.wav'):
                shutil.copy(source + filename, destination)
                fobj_out.write("/" + name + "/" + self.lang[:2] + "/" +
                               filename + "\n")
        fobj_out.close()
def main():
    """Command-line entry point: parse arguments, build the typed parameter
    list and generate a source file for the requested language.

    Returns 0 on success (or when the output file is already up to date)
    and -1 on invalid command-line input.
    """
    supportedLangs = [LANG_JAVA]
    logging.basicConfig(level=logging.DEBUG,
                        format="%(levelname)s/%(name)s: %(message)s")
    logger = logging.getLogger(__name__.split(".")[-1])

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "lang",
        help="generator language. available languages=%s" %
        ",".join(supportedLangs),
    )
    parser.add_argument(
        "outputFile",
        help=
        "output file path; generated source will be stored here. to use stdout set to -",
    )
    parser.add_argument(
        "-java_class",
        help=
        "if language is set to %r, this will be the name of the java class generated"
        % LANG_JAVA,
    )
    parser.add_argument(
        "-java_package",
        help=
        "if lang is set to %r, this will be the package of the java class generated"
        % LANG_JAVA,
    )
    parser.add_argument("-args", nargs="+", help="a list of <name, value> pairs")
    args = parser.parse_args()

    # Validate the language exactly once (the original repeated this check).
    if args.lang not in supportedLangs:
        logger.error("Invalid language: %r" % args.lang)
        return -1

    params = []
    if args.args:
        if len(args.args) % 2 != 0:
            logger.error("Invalid number of args: %d" % len(args.args))
            return -1
        # Parse <name, value> pairs, inferring each value's type:
        # try int first, then bool, falling back to string.
        for i in range(len(args.args) // 2):
            name = args.args[i * 2]
            arg = args.args[i * 2 + 1]
            try:
                value = int(arg)
                paramType = TYPE_INT32
            except ValueError:
                value = None
            if value is None and arg.lower() in ("true", "false"):
                value = arg.lower() == "true"
                paramType = TYPE_BOOL
            if value is None:
                value = arg
                paramType = TYPE_STRING
            param = Param(paramType, name, value)
            params.append(param)
            logger.debug("Adding param: %s" % str(param))

    logger.debug("Generating %s .." % args.lang)
    if args.lang == LANG_JAVA:
        if not args.java_class:
            logger.error("Java class not provided")
            return -1
        if not args.java_package:
            logger.error("Java package not provided")
            return -1
        generator = JavaGenerator(args.java_package, args.java_class, params)
    else:
        raise NotImplementedError("Unsupported lang: %r" % args.lang)

    newContent = generator.generate()

    # Skip the write when the target already holds exactly this content.
    oldContent = None
    if args.outputFile != "-" and os.path.exists(args.outputFile):
        with open(args.outputFile, "r") as fileObj:
            oldContent = fileObj.read()
    if oldContent == newContent:
        logger.debug("File up-to-date")
        return 0

    if args.outputFile != "-":
        outDir = os.path.dirname(args.outputFile)
        # outDir is "" for a bare filename; os.makedirs("") would raise.
        if outDir and not os.path.exists(outDir):
            os.makedirs(outDir)
        with open(args.outputFile, "w") as fileObj:
            fileObj.write(newContent)
        # Only log "updated" when a file was actually written (not stdout).
        logger.debug("File updated: %r" % args.outputFile)
    else:
        sys.stdout.write(newContent)
    return 0
spamreader = csv.reader(csvfile, delimiter=',') for row in spamreader: if (first_line): features_header = np.asarray(row[0:-1]) first_line = False continue all_features.append(np.asarray(row)) all_features = np.asanyarray(all_features) seeds = np.random.randint(120000, size=3) #seeds = [10657, 12421, 541] #seed = 3467 for seed in iter(seeds): outdir = argsDict["outdir"] + "/" + str(seed) if not os.path.exists(outdir): os.makedirs(outdir) # split data into 80% train 20% heldout test split_data_train_test(seed) # cross validation and getting mean of best parameters mean_param = split_train_cross_val(5) #mean_param = 0.000266 # normalize whole train set scaler = preprocessing.StandardScaler( with_mean=True, with_std=True).fit(train_all_features) #scaler = preprocessing.MinMaxScaler().fit(train_all_features) scaled_train_all = scaler.transform(train_all_features) scaled_test_all = scaler.transform(test_all_features) auroc, auprc = linear_model_simple(scaled_train_all,
def mkdirs(self,name): #获取目录是否存在,然后进行创建 nn = os.path.split(name) if not os.path.exists(nn[0]): os.makedirs(nn[0])
def export(self, to_file): directory = os.path.dirname(to_file) if directory != '' and not os.path.exists(directory): os.makedirs(directory) self.img.saveas(to_file)
def train_wake_word_intent(self, message):
    """Record wake-word / not-wake-word samples and start model training.

    Reads an optional sample count ("number") and model name ("name") from
    the intent *message* (asking the user when the name is missing), records
    the takes under /tmp/mycroft_wakeword/<name>, and finally hands the
    recordings to calculating_intent().
    """
    if message.data.get("number"):
        self.settings["wwnr"] = int(message.data.get("number"))
    if message.data.get("name"):
        name = message.data.get("name")
    else:
        # No name in the utterance: ask the user for one.
        name = self.get_response('witch.wakeword')
        if name is None:
            self.speak_dialog('no')
            return
    name = name.replace(' ', '-')
    # An existing model with the same name is deleted only after the user
    # confirms overwriting it.
    if os.path.isdir(self.settings["file_path"] + name):
        if self.ask_yesno("model.available", data={"name": name}) == "yes":
            if os.path.isdir(self.settings["file_path"] + name):
                rmtree(self.settings["file_path"] + name)
    if os.path.isdir("/tmp/mycroft_wakeword/"):
        rmtree("/tmp/mycroft_wakeword/")
    if self.settings["wwnr"] >= 1:
        self.speak_dialog("word.wake",
                          data={
                              "name": name,
                              "number": self.settings["wwnr"]
                          })
    else:
        self.speak_dialog("none.wake.word")
    # Throw away any previous recording
    i = 1
    self.halt = False
    source = "/tmp/mycroft_wakeword/" + name
    nopath = "/not-wake-word/" + self.lang[:2] + "-short/"
    if not os.path.isdir(source + nopath):
        os.makedirs(source + nopath)
    yespath = "/wake-word/" + self.lang[:2] + "-short/"
    if not os.path.isdir(source + yespath):
        os.makedirs(source + yespath)
    self.new_name = name
    wait_while_speaking()
    ### Record test files to tmp
    while i <= self.settings["wwnr"] + self.settings["nowwnr"]:
        # Wait until any in-flight recording finishes.
        while self.record_process:
            time.sleep(1)
        time.sleep(2)
        if self.halt is True:
            # User aborted: unhook recorder events, optionally train on
            # whatever was captured so far, otherwise discard everything.
            self.remove_event('recognizer_loop:record_end')
            self.remove_event('recognizer_loop:record_begin')
            self.remove_instance_handlers()
            if self.ask_yesno("calculate.anyway") == "yes":
                self.speak_dialog("start.calculating")
                self.calculating_intent(self.new_name)
                return
            else:
                rmtree(source)
                self.speak_dialog("no")
                wait_while_speaking()
                return
        elif self.halt == "break":
            # FIX: original used `self.halt is "break"`, an identity
            # comparison with a string literal that only works through
            # CPython interning; equality is the intended check.
            self.remove_event('recognizer_loop:record_end')
            self.remove_event('recognizer_loop:record_begin')
            self.remove_instance_handlers()
            self.record_file_mover(yespath, nopath, source)
            if self.ask_yesno("calculate.anyway") == "yes":
                self.speak_dialog("start.calculating")
                self.calculating_intent(self.new_name)
            else:
                self.speak_dialog("break")
            wait_while_speaking()
            return
        elif self.halt is None:
            # Last take was rejected by the validator: keep it as a
            # not-wake-word sample and repeat the step.
            shutil.move(
                self.recordpath + self.recordfile,
                source + nopath + "not" + self.new_name + "-" +
                self.lang[:2] + "-" + str(uuid.uuid1()) + ".wav")
            if i <= self.settings["wwnr"] - 1:
                i = i - 1
        self.log.info("step number " + str(i))
        # Choose the destination path/filename for the NEXT take:
        # wake-word samples first, then not-wake-word samples.
        if i < self.settings["wwnr"]:
            #play_wav(self.piep)
            self.recordpath = source + yespath
            self.recordfile = str(self.new_name + "-" + self.lang[:2] + "-" +
                                  str(uuid.uuid1()) + ".wav")
        elif i == self.settings["wwnr"]:
            time.sleep(2)
            self.speak_dialog("none.wake.word")
            wait_while_speaking()
            #play_wav(self.piep)
            self.recordpath = source + nopath
            self.recordfile = str("not" + self.new_name + "-" +
                                  self.lang[:2] + "-" + str(uuid.uuid1()) +
                                  ".wav")
        else:
            #play_wav(self.piep)
            self.recordpath = source + nopath
            self.recordfile = str("not" + self.new_name + "-" +
                                  self.lang[:2] + "-" + str(uuid.uuid1()) +
                                  ".wav")
        #time.sleep(2)
        self.log.info(self.recordfile)
        wait_while_speaking()
        i = i + 1
        #play_wav(self.piep).wait()
        if i <= 2:
            # First pass only (i was 1, now 2): hook the recorder events
            # and the validator fallback before triggering the mic.
            self.add_event('recognizer_loop:record_end', self.rec_stop)
            self.add_event('recognizer_loop:record_begin', self.loop)
            self.register_fallback(self.handle_validator, 1)
            self.bus.emit(Message('mycroft.mic.listen'))
        self.start_recording()
        #self.bus.emit(Message('mycroft.volume.unmute',
        #               {"speak_message": False}))
    else:
        # while/else: all samples captured without an abort.
        self.log.info("end records")
        self.remove_event('recognizer_loop:record_end')
        self.remove_event('recognizer_loop:record_begin')
        self.remove_instance_handlers()
        #### Save wakewords in data folder
        if self.ask_yesno("is.all.ok") == "no":
            rmtree(source)
            return
        wait_while_speaking()
        self.record_file_mover(yespath, nopath, source)
        self.calculating_intent(self.new_name)
        self.speak_dialog("start.calculating")