def act():
    user, user_id, screen = get_user_info(request)
    users.update({"_id": user_id}, {"$set": {"shield": False}})
    dir = bottle.request.json["action"]
    # tool = TOOLS[bottle.request.json["current_tool"]]
    tool = TOOLS[user["current_tool"]]
    print(tool)
    if tool == "pickup":
        if can_pickup(user, dir):
            pickup(user, user_id, dir)
        else:
            print("invalid_pickup")
    elif tool == "bow":
        if can_shoot(user, dir):
            shoot(user, user_id, screen, dir)
        else:
            print("invalid_shoot")
    elif tool == "mine":
        if can_lay_mine(user, dir):
            if not user['primed']:
                lay_mine(user)
            else:
                det_mine(user)
        else:
            print("invalid mine")
    elif tool == "build":
        if can_build(user, dir):
            build(user, dir)
        else:
            print("invalid build")
    else:
        return
def run(index=-1):
    pattern = '../testcases/%s'
    for _, _, filenames in os.walk('../testcases', False):
        sources = [pattern % filename for filename in filenames]
    sources.sort(key=len)

    build()

    cwd = os.getcwd()
    os.chdir('../bin')
    if os.path.exists('../result'):
        shutil.rmtree('../result')
    os.mkdir('../result')

    if index == -1:
        for source in sources:
            res = os.system(
                'java -cp ../lib/java-cup-11b.jar:../lib/callgraph.jar:../lib/jgraph.jar:. Main '
                + source + " 1> /dev/null 2> ../result/" + os.path.basename(source))
            print('{:<10s}: {}'.format(os.path.basename(source), res >> 8))
    else:
        res = os.system(
            'java -cp ../lib/java-cup-11b.jar:../lib/callgraph.jar:../lib/jgraph.jar:. Main '
            + sources[index] + " 1> /dev/null 2> ../result/" + os.path.basename(sources[index]))
        print('{:<10s}: {}'.format(os.path.basename(sources[index]), res >> 8))

    os.chdir(cwd)
def on_any_event(self, event):
    if not isinstance(event, events.DirModifiedEvent):
        time.sleep(0.05)
        print 'Rebuilding documentation...',
        config = load_config(options=self.options)
        build(config, live_server=True)
        print ' done'
def build_and_test(project_root):
    try:
        build(project_root)
        test(project_root)
    except BuildException as e:
        fabric.utils.abort("{}".format(e.value))
    except TestsFailedException as e:
        fabric.utils.abort("{}".format(e.value))
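The exception types caught above are not defined in this excerpt. A minimal sketch of what they might look like, assuming each simply carries a message in a `value` attribute (names and shape are assumptions, not taken from the original source):

# Hypothetical sketch of the exception types referenced by build_and_test above.
class BuildException(Exception):
    def __init__(self, value):
        super(BuildException, self).__init__(value)
        self.value = value  # message reported via fabric.utils.abort

class TestsFailedException(Exception):
    def __init__(self, value):
        super(TestsFailedException, self).__init__(value)
        self.value = value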
def test(name, snapshot):
    config = Config(name)

    if snapshot is None:
        snapshots = sorted(glob(f"snapshots/{name}.*.h5"), reverse=True)
        if len(snapshots) == 0:
            echo(f"[Error] No snapshots found for name={name}", fg='red')
            return
        snapshot = snapshots[0]
        del snapshots

    # running parameters
    run_params = locals()
    del run_params['config']
    run_params.update(dict(config))

    # init
    echo('test', run_params)
    session = tf.Session('')
    K.set_session(session)
    K.set_learning_phase(0)

    # model loading
    echo(f'model loading from {snapshot}...')
    model = build(config)
    model.load_weights(snapshot)

    # testing data
    echo('test dataset loading...')
def run(args):
    # first build the project, then flash it
    build.run(args)

    # time to flash
    if args.file:
        # known project from records
        workspace = Workspace(args.file, os.getcwd())
        if args.project:
            workspace.flash_project(args.project, args.tool)
        else:
            workspace.flash_projects(args.tool)
    else:
        # project not known by pgen
        project_settings = ProjectSettings()
        project_files = [os.path.join(args.directory, args.project)]
        flasher = ToolsSupported().get_value(args.tool, 'flasher')
        build(flasher, args.project, project_files, args.tool, project_settings)
def buildShip(tkRoot, baseName):
    print("buildMenu", baseName)
    base = findBase(tkRoot.game, baseName)
    if base and tkRoot.hCon is not None:
        buildResult = build(tkRoot, base)
        if buildResult is not None and buildResult.ship:
            sendJson = warpWarCmds().buildShip(tkRoot.plid, buildResult.ship, baseName)
            print("  main sending: ", sendJson)
            tkRoot.hCon.sendCmd(sendJson)
def serve(config, options=None):
    """
    Start the devserver, and rebuild the docs whenever any changes take effect.
    """
    # Create a temporary build directory, and set some options to serve it
    tempdir = tempfile.mkdtemp()
    options['site_dir'] = tempdir

    # Perform the initial build
    config = load_config(options=options)
    build(config, live_server=True)

    # Note: We pass any command-line options through so that we
    # can re-apply them if the config file is reloaded.
    event_handler = BuildEventHandler(options)
    config_event_handler = ConfigEventHandler(options)

    observer = observers.Observer()
    observer.schedule(event_handler, config['docs_dir'], recursive=True)
    observer.schedule(event_handler, config['theme_dir'], recursive=True)
    observer.schedule(config_event_handler, '.')
    observer.start()

    class TCPServer(SocketServer.TCPServer):
        allow_reuse_address = True

    class DocsDirectoryHandler(FixedDirectoryHandler):
        base_dir = config['site_dir']

    host, port = config['dev_addr'].split(':', 1)
    server = TCPServer((host, int(port)), DocsDirectoryHandler)

    print 'Running at: http://%s:%s/' % (host, port)
    print 'Live reload enabled.'
    print 'Hold ctrl+c to quit.'
    server.serve_forever()

    # Clean up
    observer.stop()
    observer.join()
    shutil.rmtree(tempdir)
def do(config):
    # read data & preprocess
    print("Read data")
    ds = Datasets(config.data_path)
    data = ds.read_data()

    print("Data preprocessing..")
    preprocessing = Preprocessing(config)
    x_train, y_train = preprocessing.do(data)

    print("Model build..")
    model, callback = build(config, preprocessing.vocab_size)

    history = model.fit(x_train, y_train,
                        epochs=config.epoch,
                        callbacks=callback,
                        batch_size=config.batch_size,
                        validation_split=0.2)

    model.save(os.path.join(config.save_directory, config.ckpt_name))
"pendulum.parsing.exceptions", "pendulum.tz", "pendulum.tz.data", "pendulum.tz.zoneinfo", "pendulum.utils", ] package_data = {"": ["*"]} install_requires = ["python-dateutil>=2.6,<3.0", "pytzdata>=2018.3"] extras_require = {':python_version < "3.5"': ["typing>=3.6,<4.0"]} setup_kwargs = { "name": "pendulum", "version": "2.0.4", "description": "Python datetimes made easy", "author": "Sébastien Eustace", "author_email": "*****@*****.**", "url": "https://pendulum.eustace.io", "packages": packages, "package_data": package_data, "install_requires": install_requires, "extras_require": extras_require, "python_requires": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*", } build(setup_kwargs) setup(**setup_kwargs)
    def __init__(self, language, api_key, src, dst):
        self.language = language
        self.api_key = api_key
        self.service = build('translate', 'v2', developerKey=self.api_key)
        self.src = src
        self.dst = dst

    def __call__(self, sentence):
        try:
            if not sentence:
                return
            result = self.service.translations().list(
                source=self.src,
                target=self.dst,
                q=[sentence]
            ).execute()
            if 'translations' in result and len(result['translations']) and \
                    'translatedText' in result['translations'][0]:
                return result['translations'][0]['translatedText']
            return ""

        except KeyboardInterrupt:
            return


def extract_audio(filename, channels=1, rate=16000):
    temp = tempfile.NamedTemporaryFile(suffix='.wav', delete=False)
    command = ["ffmpeg", "-y", "-i", filename, "-ac", str(channels), "-ar", str(rate),
               "-loglevel", "error", temp.name]
    subprocess.check_output(command)
    return temp.name, rate


def find_speech_regions(filename, frame_width=4096, min_region_size=0.5, max_region_size=6):
    reader = wave.open(filename)
    sample_width = reader.getsampwidth()
    rate = reader.getframerate()
    n_channels = reader.getnchannels()

    total_duration = reader.getnframes() / rate
    chunk_duration = float(frame_width) / rate

    n_chunks = int(total_duration / chunk_duration)
    energies = []

    for i in range(n_chunks):
        chunk = reader.readframes(frame_width)
        energies.append(audioop.rms(chunk, sample_width * n_channels))

    threshold = percentile(energies, 0.2)

    elapsed_time = 0
    regions = []
    region_start = None

    for energy in energies:
        elapsed_time += chunk_duration

        is_silence = energy <= threshold
        max_exceeded = region_start and elapsed_time - region_start >= max_region_size

        if (max_exceeded or is_silence) and region_start:
            if elapsed_time - region_start >= min_region_size:
                regions.append((region_start, elapsed_time))
            region_start = None
        elif (not region_start) and (not is_silence):
            region_start = elapsed_time

    return regions


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('source_path', help="Path to the video or audio file to subtitle", nargs='?')
    parser.add_argument('-C', '--concurrency', help="Number of concurrent API requests to make",
                        type=int, default=10)
    parser.add_argument('-o', '--output',
                        help="Output path for subtitles (by default, subtitles are saved in \
                        the same directory and name as the source path)")
    parser.add_argument('-F', '--format', help="Destination subtitle format", default="srt")
    parser.add_argument('-S', '--src-language', help="Language spoken in source file", default="en")
    parser.add_argument('-D', '--dst-language', help="Desired language for the subtitles", default="en")
    parser.add_argument('-K', '--api-key',
                        help="The Google Translate API key to be used. (Required for subtitle translation)")
    parser.add_argument('--list-formats', help="List all available subtitle formats", action='store_true')
    parser.add_argument('--list-languages', help="List all available source/destination languages",
                        action='store_true')

    args = parser.parse_args()

    if args.list_formats:
        print("List of formats:")
        for subtitle_format in FORMATTERS.keys():
            print("{format}".format(format=subtitle_format))
        return 0

    if args.list_languages:
        print("List of all languages:")
        for code, language in sorted(LANGUAGE_CODES.items()):
            print("{code}\t{language}".format(code=code, language=language))
        return 0

    if args.format not in FORMATTERS.keys():
        print("Subtitle format not supported. Run with --list-formats to see all supported formats.")
        return 1

    if args.src_language not in LANGUAGE_CODES.keys():
        print("Source language not supported. Run with --list-languages to see all supported languages.")
        return 1

    if args.dst_language not in LANGUAGE_CODES.keys():
        print("Destination language not supported. Run with --list-languages to see all supported languages.")
        return 1

    if not args.source_path:
        print("Error: You need to specify a source path.")
        return 1

    audio_filename, audio_rate = extract_audio(args.source_path)

    regions = find_speech_regions(audio_filename)

    pool = multiprocessing.Pool(args.concurrency)
    converter = FLACConverter(source_path=audio_filename)
    recognizer = SpeechRecognizer(language=args.src_language, rate=audio_rate,
                                  api_key=GOOGLE_SPEECH_API_KEY)

    transcripts = []
    if regions:
        try:
            widgets = ["Converting speech regions to FLAC files: ", Percentage(), ' ', Bar(), ' ', ETA()]
            pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
            extracted_regions = []
            for i, extracted_region in enumerate(pool.imap(converter, regions)):
                extracted_regions.append(extracted_region)
                pbar.update(i)
            pbar.finish()

            widgets = ["Performing speech recognition: ", Percentage(), ' ', Bar(), ' ', ETA()]
            pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
            for i, transcript in enumerate(pool.imap(recognizer, extracted_regions)):
                transcripts.append(transcript)
                pbar.update(i)
            pbar.finish()

            if not is_same_language(args.src_language, args.dst_language):
                if args.api_key:
                    google_translate_api_key = args.api_key
                    translator = Translator(args.dst_language, google_translate_api_key,
                                            dst=args.dst_language, src=args.src_language)
                    prompt = "Translating from {0} to {1}: ".format(args.src_language, args.dst_language)
                    widgets = [prompt, Percentage(), ' ', Bar(), ' ', ETA()]
                    pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
                    translated_transcripts = []
                    for i, transcript in enumerate(pool.imap(translator, transcripts)):
                        translated_transcripts.append(transcript)
                        pbar.update(i)
                    pbar.finish()
                    transcripts = translated_transcripts
                else:
                    print("Error: Subtitle translation requires specified Google Translate API key. \
                          See --help for further information.")
                    return 1

        except KeyboardInterrupt:
            pbar.finish()
            pool.terminate()
            pool.join()
            print("Cancelling transcription")
            return 1

    timed_subtitles = [(r, t) for r, t in zip(regions, transcripts) if t]
    formatter = FORMATTERS.get(args.format)
    formatted_subtitles = formatter(timed_subtitles)

    dest = args.output

    if not dest:
        base, ext = os.path.splitext(args.source_path)
        dest = "{base}.{format}".format(base=base, format=args.format)

    with open(dest, 'wb') as f:
        f.write(formatted_subtitles.encode("utf-8"))

    print("Subtitles file created at {}".format(dest))

    os.remove(audio_filename)

    return 0


if __name__ == '__main__':
    sys.exit(main())
    if len(list(glob(f"snapshots/{name}.*.h5"))) > 0:
        echo(f"Error: Some snapshots for name ({name}) already exist. Use another name", fg='red')
        return

    # dataset
    echo('dataset loading...')
    {%- if cookiecutter.fit_generator == "no" %}
    X, y = dataset.load()
    {%- else %}
    seq_train, seq_valid = dataset.batch_generator(config('batch_size'))
    {%- endif %}

    # model building
    echo('model building...')
    model = build(config)
    model.summary()

    if resume:
        echo('Resume Learning from {}'.format(resume))
        model.load_weights(resume, by_name=True)

    # training
    echo('start learning...')
    callbacks = [
        logging.JsonLog(log_path),
        keras.callbacks.ModelCheckpoint(out_path,
                                        monitor='val_loss',
                                        save_weights_only=True,
                                        save_best_only=True,),
    ]
    {%- if cookiecutter.fit_generator == "no" %}
def main():
    build()

    tot_native_data = []
    for i in range(0, N_TRIALS):
        print("NATIVE BENCHMARK #{}:".format(i + 1))
        tot_native_data.append(native_bench())
    print()
    print("------------------")
    print()

    tot_hcomp_data = []
    for i in range(0, N_TRIALS):
        print("HCOMP BENCHMARK #{}:".format(i + 1))
        tot_hcomp_data.append(hcomp_bench())
    print()
    print("------------------")
    print()

    tot_wasm_data = []
    for i in range(0, N_TRIALS):
        print("WASM BENCHMARK #{}:".format(i + 1))
        tot_wasm_data.append(wasm_bench())
    print()
    print("------------------")
    print()

    # start plotting
    if not os.path.isdir("./dat"):
        os.mkdir("./dat/")  # folder to store the dat

    # benchnam = getbenchmarks()
    benchnam = ["gemm.c.bin"]

    for b in benchnam:
        # avg_native = 0
        avg_hcomp = 0
        avg_wasm = 0
        # vals_native = []
        vals_hcomp = []
        vals_wasm = []

        # for m in tot_native_data:
        #     avg_native += m[b]
        #     vals_native.append(m[b])
        for m in tot_hcomp_data:
            avg_hcomp += m[b]
            vals_hcomp.append(m[b])
        for m in tot_wasm_data:
            avg_wasm += m[b]
            vals_wasm.append(m[b])

        outpath = "./dat/{}/".format(b)
        os.system("rm -rf {}".format(outpath))
        os.mkdir(outpath)

        # make excel project
        wb = xl.Workbook()
        ws = wb.active
        ws.title = b
        # ws["A1"] = "Native"
        ws["C1"] = "HComp"
        ws["E1"] = "WASM"
        # for d in range(len(vals_native)):
        #     ws["A{}".format(d + 2)] = vals_native[d]
        for d in range(len(vals_hcomp)):
            ws["C{}".format(d + 2)] = vals_hcomp[d]
        for d in range(len(vals_wasm)):
            ws["E{}".format(d + 2)] = vals_wasm[d]

        ws["G1"] = "Averages:"
        # ws["G2"] = "Native:"
        # ws["H2"] = "=AVERAGE(A2:A{})".format(len(vals_native) + 1)
        ws["G3"] = "HComp:"
        ws["H3"] = "=AVERAGE(C2:C{})".format(len(vals_hcomp) + 1)
        ws["G4"] = "WASM:"
        ws["H4"] = "=AVERAGE(E2:E{})".format(len(vals_wasm) + 1)

        ws["I1"] = "Standard Deviations:"
        # ws["I2"] = "Native:"
        # ws["J2"] = "=STDEV(A2:A{})".format(len(vals_native) + 1)
        ws["I3"] = "HComp:"
        ws["J3"] = "=STDEV(C2:C{})".format(len(vals_hcomp) + 1)
        ws["I4"] = "WASM:"
        ws["J4"] = "=STDEV(E2:E{})".format(len(vals_wasm) + 1)

        dof = N_TRIALS - 1
        t_test = sp.ttest_ind(vals_hcomp, vals_wasm)
        print(t_test.pvalue)
        ws["G5"] = "P-Value:"
        ws["H5"] = t_test.pvalue

        wb.save("{}dat.xlsx".format(outpath))

        avg_hcomp /= len(tot_hcomp_data)
        avg_wasm /= len(tot_wasm_data)

        hcomp_stdev = np.std(np.array(vals_hcomp))
        wasm_stdev = np.std(np.array(vals_wasm))

        plt.figure(0)
        plt.bar(range(0, 2), [avg_hcomp, avg_wasm], color=["blue", "orange"],
                yerr=[hcomp_stdev, wasm_stdev])
        plt.xticks(range(0, 2), ["HComp", "WASM"])
        plt.ylabel("Execution speed in milliseconds")
        plt.title("Benchmarks")
        plt.savefig("{}graph.png".format(outpath))
        plt.close(0)
with open("names.csv") as f: reader = csv.reader(f) next(reader) # skip header data = [r for r in reader] data = list(chain(*data)) #unlist data = [item.lower() for item in data] #make all lower case data = [item.strip() for item in data] sample = random.sample(data, 10) DEVELOPER_KEY = 'AIzaSyBYl6LkkHOIfBCSzRpMcptBiEArCDKwq7g' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, developerKey=DEVELOPER_KEY) out = [] for i in range(len(sample)): search_response = youtube.search().list(q=sample[i], part='id', maxResults=2).execute() search_response search_response["search_key"] = sample[i] out.append(search_response) print out[0].keys() out[0].keys() out[0]['items'][0] out[9]['items'][0]['id']['videoId']
def google_search(search_term, api_key, cse_id, **kwargs):
    service = build("customsearch", "v1", developerKey=api_key)
    res = service.cse().list(q=search_term, cx=cse_id, **kwargs).execute()
    return res['items']
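A brief usage sketch for the google_search helper above. It assumes build is googleapiclient.discovery.build (the usual provider of this function) and that the API key and Custom Search Engine id are placeholders you would replace; neither value comes from the original source.

# Usage sketch: google_search as defined above, with placeholder credentials.
from googleapiclient.discovery import build  # supplies the `build` used inside google_search

API_KEY = "YOUR_API_KEY"  # hypothetical placeholder
CSE_ID = "YOUR_CSE_ID"    # hypothetical placeholder

# Request the top 3 results and print each title and link.
for item in google_search("python csv parsing", API_KEY, CSE_ID, num=3):
    print(item['title'], item['link'])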