def main(): """ Reads arguments from command line (including an input file), reads planning problem and upper bound on length (t_max) from the specified input file, calls solve_planning_problem_using_ASP() from asp_planner_core.py to find a plan for the planning problem (if it exists), and prints the found plan. """ # Take command line arguments parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", required=True, help="input file") parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true") args = parser.parse_args(map(lambda x: x.lower(), sys.argv[1:])) input = args.input print(input) verbose = args.verbose # Read sudoku from input file if verbose: print("Reading planning problem and bound on plan length from " + input + "..") planning_problem, t_max = read_problem_from_file(input) if planning_problem == None: print("Exiting..") return # Print information, in verbose mode if verbose: print("Planning problem:") print(pretty_repr_planning_problem(planning_problem)) print("Upper bound on plan length: {}".format(t_max)) # Solve the planning problem plan = None timer = Timer(name="solving-time", text="Did ASP encoding & solving in {:.2f} seconds") if verbose: print("Solving planning problem using ASP encoding..") timer.start() plan = solve_planning_problem_using_ASP(planning_problem, t_max) # with suppress_stdout_stderr(): # plan = solve_planning_problem_using_ASP(planning_problem,t_max) if verbose: timer.stop() # Print the solved sudoku if plan == None: print("NO PLAN FOUND") else: if verify_plan(planning_problem, plan) == True: if verbose: print("Correct plan found:") print(pretty_repr_plan(plan)) else: print("INCORRECT PLAN FOUND") print(pretty_repr_plan(plan))
def main():
    # Commands sent to this bot
    cmds = argv[1:]
    if len(cmds) < 3:
        raise Exception(
            f"You must pass at least three arguments.{linesep}"
            f"They are: <str:USER_NAME> <str:PASSWORD> <str:SCHOOL_TERM> <int:OPTION(optional)>{linesep}"
            f"Follow this example:{linesep}"
            f"python bot.py Caio20 caio123 2020-1 1"
        )

    t = Timer(
        text=f"Execution of {__file__}; Time spent: {{seconds:.2f}} seconds.",
        logger=logger.debug,
    )
    t.start()

    robot = SigaaBot()
    robot.sigaa_user(cmds[0], cmds[1], cmds[2])
    robot.sigaa_site()

    # The fourth argument (the option) is optional, so guard the index access
    option = cmds[3] if len(cmds) > 3 else None
    if option == "1":
        robot.ver_notas()
    elif option == "2":
        robot.ver_atestado()
    elif option == "3":
        robot.ver_historico()
    elif option == "4":
        robot.emitir_declaracao_vinculo()
    elif option == "5":
        robot.matricula_online()
    else:
        logger.debug(f"Welcome back {getenv('USER')}!")

    t.stop()
async def worker(worker_name, queue, **kwargs):
    timer = Timer(text=f"Task {worker_name} elapsed time: {{:.3f}}")
    # conn = psycopg2.connect(dbname='dev',
    #                         host=f"{kwargs.get('domain')}.redshift.amazonaws.com",
    #                         port=5439,
    #                         user=kwargs.get('username'),
    #                         password=kwargs.get('password'))
    # with conn:
    #     with conn.cursor() as curs:
    #         while not queue.empty():
    #             query = await queue.get()
    #             try:
    #                 timer.start()
    #                 curs.execute(query)
    #                 timer.stop()
    #             except Exception as exception:
    #                 print(exception)
    #                 pprint(f"{worker_name} failed to run query:\n{query}")
    # conn.close()
    while not queue.empty():
        delay = await queue.get()
        print(f"Task {worker_name} running")
        timer.start()
        await asyncio.sleep(delay)
        timer.stop()
def benchmark():
    """Benchmark chunking performance."""
    files = [os.urandom(4194304) for _ in range(64)]
    num_bytes = 4194304
    print(system_info())

    from fastcdc.fastcdc_py import fastcdc_py

    chunk_funks = [fastcdc_py]
    try:
        from fastcdc.fastcdc_cy import fastcdc_cy

        chunk_funks.append(fastcdc_cy)
    except ImportError:
        click.echo("Skip native cython version")

    chunk_sizes = (1024, 2048, 4096, 8192, 16384, 32768, 65536)
    result = []
    for avg_size in chunk_sizes:
        click.echo("Chunksize: {}".format(nsize(avg_size)))
        for func in chunk_funks:
            timer_name = "{}_{}".format(func.__name__, avg_size)
            t = Timer(timer_name, logger=None)
            for file in files:
                t.start()
                result = list(func(file, avg_size=avg_size))
                t.stop()
            data_per_s = num_bytes / Timer.timers.mean(timer_name)
            click.echo("{}: {}/s".format(func.__name__, nsize(data_per_s)))
        avg_size = mean([c.length for c in result])
        click.echo("Real AVG: {}".format(nsize(avg_size)))
        click.echo()
def main(func, num_cols, min_rows, max_rows, num_examples):
    """
    Will generate `num-examples` of DataFrames using numpy.linspace,
    going from `min-rows` rows to `max-rows` rows.
    """
    bmark_name = (
        f"Benchmark run: func={func}, num_cols={num_cols}, "
        f"min_rows={min_rows}, max_rows={max_rows}, num_examples={num_examples}"
    )
    print(f"Starting {bmark_name}")
    timer = Timer(name=bmark_name)
    timer.start()
    docker_db = DockerDB("bcpandas-benchmarks", "MyBigSQLPasswordAlso!!!")
    try:
        # run benchmarks
        creds = setup(docker_db)
        results = []
        for n in np.linspace(min_rows, max_rows, num=num_examples):
            num_rows = int(n)
            df = pd.DataFrame(
                data=np.ndarray(shape=(num_rows, num_cols), dtype=int),
                columns=[f"col-{x}" for x in range(num_cols)],
            )
            if func == "readsql":
                _results = run_benchmark_readsql(df=df, creds=creds)
            elif func == "tosql":
                _results = run_benchmark_tosql(df=df, creds=creds)
            results.append({"num_rows": num_rows, **_results})
    finally:
        teardown(docker_db)
    # Stop the timer so the overall run time is reported
    timer.stop()
    save_and_plot(func=func, results=results, num_cols=num_cols)
def _run_single_func(title, func, **kwargs):
    print(f"starting {title}")
    t = Timer(name=title)
    t.start()
    func(**kwargs)
    elapsed = t.stop()
    print(f"finished {title}")
    return elapsed
async def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not work_queue.empty():
        delay = await work_queue.get()
        print(f"Task {name} running")
        timer.start()
        await asyncio.sleep(delay)
        timer.stop()
def task(name: str, queue: queue.Queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not queue.empty():
        delay = queue.get()
        print(f"Task {name} running")
        timer.start()
        time.sleep(delay)
        timer.stop()
        yield
async def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    async with aiohttp.ClientSession() as session:
        while not work_queue.empty():
            url = await work_queue.get()
            print(f"Task {name} getting URL: {url}")
            timer.start()
            await session.get(url)
            timer.stop()
def task(name, queue):
    timer = Timer(text=f'Task {name} elapsed time: {{:.1f}}')
    while not queue.empty():
        delay = queue.get()
        print(f'Task {name} running')
        timer.start()
        time.sleep(delay)
        timer.stop()
        yield
async def task(name, work_queue):
    timer = Timer()
    while not work_queue.empty():
        delay = await work_queue.get()
        print(f"Task {name} running")
        timer.start()
        # This creates a non-blocking delay that will perform a context
        # switch back to the caller main()
        await asyncio.sleep(delay)
        timer.stop()
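# A minimal, self-contained sketch of the driver that coroutine tasks like the
# one above assume: main() fills an asyncio.Queue and gathers two workers, so
# each `await asyncio.sleep(delay)` hands control back to the event loop. The
# task body mirrors the snippet above; the queue contents and task names here
# are hypothetical.
import asyncio

from codetiming import Timer


async def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not work_queue.empty():
        delay = await work_queue.get()
        print(f"Task {name} running")
        timer.start()
        # Non-blocking delay: the event loop can resume the other task here
        await asyncio.sleep(delay)
        timer.stop()


async def main():
    work_queue = asyncio.Queue()
    for work in [15, 10, 5, 2]:
        await work_queue.put(work)
    # Run two workers concurrently on the same queue
    with Timer(text="Total elapsed time: {:.1f}"):
        await asyncio.gather(task("One", work_queue), task("Two", work_queue))


if __name__ == "__main__":
    asyncio.run(main())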
def task(name, work_queue):
    timer = Timer(text=f'Task {name} elapsed time: {{:.1f}}')
    with requests.Session() as session:
        while not work_queue.empty():
            url = work_queue.get()
            print(f'Task {name} getting URL: {url}')
            timer.start()
            session.get(url)
            timer.stop()
            yield
def task(name, queue): timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}") while not queue.empty(): delay = queue.get() print(f"Task {name} running, delay: {delay}") timer.start() time.sleep(delay) timer.stop( ) #stops the timer instance and outputs the elapsed time since timer.start() was called. yield
def task(name, work_queue): timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}") with requests.Session() as session: while not work_queue.empty(): url = work_queue.get() print(f"Task {name} getting URL: {url}") timer.start() session.get(url) timer.stop() yield # yield turns task() into a generator.
def test_explicit_timer(capsys):
    """Test that timed section prints timing information"""
    t = Timer(text=TIME_MESSAGE)
    t.start()
    waste_time()
    t.stop()
    stdout, stderr = capsys.readouterr()
    assert RE_TIME_MESSAGE.match(stdout)
    assert stdout.count("\n") == 1
    assert stderr == ""
async def task(name, work_queue):
    timer = Timer(text=f'Task {name} elapsed time: {{:.1f}}')
    while not work_queue.empty():
        # The await is needed here: for asyncio queues, get/put are
        # coroutines and must always be awaited
        delay = await work_queue.get()
        print(f'Task {name} running')
        timer.start()
        await asyncio.sleep(delay)
        timer.stop()
def train_loop(
    self,
    *,
    train_dataloader,
    label_mappings,
    val_dataloader,
    train_sampler=None,
):
    """Train on the whole range of epochs.

    Args:
        train_dataloader (torch.utils.data.DataLoader):
        label_mappings (dict): a dict of {label_id: label_name} mapping
        val_dataloader (torch.utils.data.DataLoader):
        train_sampler (torch.utils.data.Sampler):
    """
    model = self.model.to(self.device)
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer, lr_scheduler = FasterRCNN.create_optimizer_lrs(self.config, params)
    accumulation_steps = self.config.train.get(
        "accumulation_steps", DEFAULT_ACCUMULATION_STEPS
    )
    logger.debug("Start training")
    total_timer = Timer(name="total-time", text=const.TIMING_TEXT, logger=logging.info)
    total_timer.start()
    for epoch in range(self.config.train.epochs):
        with Timer(
            name=f"epoch-{epoch}-train-time",
            text=const.TIMING_TEXT,
            logger=logging.info,
        ):
            self.train_one_epoch(
                optimizer=optimizer,
                data_loader=train_dataloader,
                epoch=epoch,
                lr_scheduler=lr_scheduler,
                accumulation_steps=accumulation_steps,
            )
        if self.distributed:
            train_sampler.set_epoch(epoch)
        self.checkpointer.save(self, epoch=epoch)
        with Timer(
            name=f"epoch-{epoch}-evaluate-time",
            text=const.TIMING_TEXT,
            logger=logging.info,
        ):
            self.evaluate_per_epoch(
                data_loader=val_dataloader,
                epoch=epoch,
                label_mappings=label_mappings,
            )
    total_timer.stop()
def timer(request):
    if request.is_ajax():
        # GET parameters arrive as strings and must be converted before
        # doing arithmetic with them
        hour = int(request.GET.get('hour'))
        minute = int(request.GET.get('minute'))
        seconds = int(request.GET.get('seconds'))
        total_seconds = ((hour * 60) + minute) * 60 + seconds
        t = Timer(name=str(request.user))
        t.start()
        time.sleep(1)
        # stop() both ends the timing and returns the elapsed time;
        # calling it a second time would raise a TimerError
        elapsed_time = t.stop()
    return
async def get_goods(self):
    timer = Timer(text='Scraping time: {:.1f}s')
    timer.start()
    await self.get_categories()
    if not self.categories:
        return
    list_to_scraping = self.create_list_to_scraping()
    for item in list_to_scraping:
        await self.get_goods_in_category(item)
    await self.api.close_session()
    print(f'\nScraped {self.progress.counter} goods.')
    timer.stop()
def download_queue_oday(self, directory):
    logger.debug("download start: oday")
    self.ftp.cwd("//MP3")
    self.ftp.cwd("0-DAY")
    self.ftp.cwd(directory)
    if not os.path.exists(self.download_root + directory):
        os.makedirs(self.download_root + directory)
    timer = Timer(text="Track downloaded in {:0.2f} seconds", logger=logger.info)
    for ftpfile in self.queue_oday:
        self.ftp.cwd(ftpfile.directory)
        # The first entry is always a subdirectory
        for filename in (path for path in self.ftp.nlst() if path not in ('.', '..')):
            logger.debug(f"Checking filename {filename}")
            destination_dir = os.path.join(
                self.download_root, directory, ftpfile.group, ftpfile.directory
            )
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            local_filename = os.path.join(
                destination_dir, filename.replace('-www.groovytunes.org', '')
            )
            if filename.startswith('-['):
                os.makedirs(local_filename)
            else:
                if not os.path.exists(local_filename):
                    if ftpfile.size < 52914560:
                        logger.info(
                            f"Downloading {filename} to {local_filename} "
                            f"with size {ftpfile.size}"
                        )
                        timer.start()
                        with open(local_filename, 'wb') as file:
                            self.ftp.retrbinary('RETR ' + filename, file.write)
                        timer.stop()
                    else:
                        logger.warning(f"Skip oversized file {filename}")
                else:
                    logger.warning(f"File already exists {local_filename}.")
        clean_download_directory(destination_dir)
        self.ftp.cwd("..")
def scan(paths, recursive, size, min_size, max_size, hash_function):
    """Scan files in directories and report duplication."""
    if min_size is None:
        min_size = size // 4
    if max_size is None:
        max_size = size * 8

    bytes_total = 0
    bytes_dupe = 0
    fingerprints = set()
    supported = supported_hashes()
    if hash_function not in supported:
        msg = "'{}' is not a supported hash.\nTry one of these:\n{}".format(
            hash_function, ", ".join(supported)
        )
        raise click.BadOptionUsage("hf", msg)

    hf = getattr(hashlib, hash_function)
    files = []
    for path in paths:
        files += list(iter_files(path, recursive))

    t = Timer("scan", logger=None)
    t.start()
    with click.progressbar(files) as pgbar:
        for entry in pgbar:
            try:
                chunker = fastcdc.fastcdc(entry.path, min_size, size, max_size, hf=hf)
            except Exception as e:
                click.echo("\n for {}".format(entry.path))
                click.echo(repr(e))
                continue
            for chunk in chunker:
                bytes_total += chunk.length
                if chunk.hash in fingerprints:
                    bytes_dupe += chunk.length
                fingerprints.add(chunk.hash)
    t.stop()

    if bytes_total:
        data_per_s = bytes_total / Timer.timers.mean("scan")
        dd_ratio = bytes_dupe / bytes_total * 100
        click.echo("Files: {}".format(intcomma(len(files))))
        click.echo(
            "Chunk Sizes: min {} - avg {} - max {}".format(min_size, size, max_size)
        )
        click.echo("Unique Chunks: {}".format(intcomma(len(fingerprints))))
        click.echo("Total Data: {}".format(naturalsize(bytes_total)))
        click.echo("Dupe Data: {}".format(naturalsize(bytes_dupe)))
        click.echo("DeDupe Ratio: {:.2f} %".format(dd_ratio))
        click.echo("Throughput: {}/s".format(naturalsize(data_per_s)))
    else:
        click.echo("No data.")
async def get_data(name, work_queue):
    timer = Timer(text=f'Task {name} elapsed time: {{:.1f}}')
    async with aiohttp.ClientSession() as session:
        data = []
        while not work_queue.empty():
            url = await work_queue.get()
            print(f'Task {name} getting URL: {url}')
            timer.start()
            async with session.get(url) as response:
                contents = await response.text()
                data.append(contents[:100])
            timer.stop()
        return data
def download_queue_bt(self, directory):
    logger.debug("download start: bt")
    self.ftp.cwd("//MP3")
    self.ftp.cwd("BEATPORT__AND__WEBSITE_SECTION")
    self.ftp.cwd(directory)
    if not os.path.exists(self.download_root + directory):
        os.makedirs(self.download_root + directory)
    timer = Timer(text="Track downloaded in {:0.2f} seconds", logger=logger.info)
    for ftpfile in self.queue_bt:
        self.ftp.cwd(ftpfile.directory)
        logger.debug(f"Listing directory {ftpfile.directory}")
        for filename in (path for path in self.ftp.nlst() if path not in ('.', '..')):
            logger.debug(f"Checking filename {filename}")
            destination_dir = os.path.join(
                self.download_root, directory, ftpfile.group, ftpfile.directory
            )
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            local_filename = os.path.join(
                destination_dir,
                filename.replace('-www.groovytunes.org', '').replace('_', ' '),
            )
            if not os.path.exists(local_filename):
                if ftpfile.size < 52914560:
                    logger.info(f"Downloading {filename} to {local_filename}")
                    timer.start()
                    with open(local_filename, 'wb') as file:
                        self.ftp.retrbinary('RETR ' + filename, file.write)
                    timer.stop()
                else:
                    logger.warning(f"Skip oversized file {filename}")
            else:
                logger.info(f"File already exists {local_filename}.")
        clean_download_directory(destination_dir)
        self.ftp.cwd("..")
def RunTests(self, seed_bytes: bytes) -> None:
    cls_name = type(self).__name__
    self.m_test_elapsed_times = []
    for _ in range(0, self.m_test_num):
        # Start timer
        tmr = Timer(name=cls_name, text="{name} - Elapsed time: {milliseconds:.0f}ms")
        tmr.start()
        # Run tests
        self._RunTest(seed_bytes)
        # Stop timer
        self.m_test_elapsed_times.append(int(tmr.stop()))
def sq_analyze(input_path):
    # We keep track of the time it takes to analyse the different projects in
    # a separate "times" file (created here if it does not exist yet)
    f = open("times", 'a+')
    f.close()

    apk_path = os.path.join(input_path, "apks")
    print(apk_path)

    # We find all relevant apk files (escape the dot and anchor the pattern
    # so only real .apk files match)
    files = os.listdir(apk_path)
    files = [file for file in files if re.match(r".*\.apk$", file)]

    for file in files:
        # We find the name of a project without the extension to proceed
        name = ".".join(file.split(".")[0:-1])

        # If we have not decompiled the apk to a jar file, we do this now
        if name + "dex2jar.jar" not in os.listdir(apk_path):
            try:
                apk2jar(apk_path, file)
            except Exception:
                pass
        else:
            print("Skipped %s -> %s" % (file, name + "dex2jar.jar"))

        # If we have not decompiled the jar file to java files, we do this now
        if name + "_2java" not in os.listdir(apk_path):
            try:
                jar2java(apk_path, name + "dex2jar.jar", name)
            except Exception:
                pass
        else:
            print("Skipped %s -> %s" % (name + "dex2jar.jar", name + "_2java"))

        # We add the sonarqube properties file if necessary and analyse the
        # project, unless there exists a .skip file in the source folder of
        # the project
        if (name + "_2java") in os.listdir(apk_path):
            add_sonar_properties(apk_path, name)
            if not exists_in_sonar(apk_path, name) and ".skip" not in os.listdir(
                    os.path.join(apk_path, name + "_2java")):
                t = Timer()
                t.start()
                result = True
                while result:
                    result = sonar_scan(apk_path, name)
                # We write the time it took analysing the project in the times file
                f = open("times", 'a+')
                f.write("%s\t\t%d\n" % (name, t.stop()))
                f.close()
                f = open(os.path.join(apk_path, name + "_2java", ".skip"), "x")
                f.close()
            else:
                print("\n\n\nTHIS WAS ALREADY SCANNED (%s)" % name)
async def spider_task(self, name):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not self.queue.empty():
        try:
            work = await self.queue.get()
            print(work)
            print(f"{name} running task {work}")
            timer.start()
            await self.spyder(work)
            timer.stop()
        except asyncio.CancelledError:
            print('task_func was canceled')
            raise
    return 'the result'
def testKMeans(self):
    data = DataRetriever("../Datasets/metadata.json")
    data.retrieveData("computerHardware")
    kValue = 15

    t = Timer()
    t.start()
    mediods = KMediods(
        data.getDataSet(),
        data.getDataClass(),
        data.getDescreteAttributes(),
        data.getContinuousAttributes(),
        data.getPredictionType(),
        kValue,
        100,
    )
    # stop() returns the elapsed time in seconds; printing the Timer
    # object itself would not show the measurement
    elapsed = t.stop()

    print(f"Time: {elapsed}")
    print(mediods)
    mediods.to_csv('kmedoids.csv', index=False)
def main():
    # Take command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", required=True, help="input file")
    parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
    args = parser.parse_args(map(lambda x: x.lower(), sys.argv[1:]))
    input = args.input
    verbose = args.verbose

    # Read the planning problem and bound on plan length from the input file
    if verbose:
        print("Reading planning problem and bound on plan length from " + input + "..")
    planning_problem, t_max = read_problem_from_file(input)
    if planning_problem is None:
        print("Exiting..")
        return

    # Print information, in verbose mode
    if verbose:
        print("Planning problem:")
        print(pretty_repr_planning_problem(planning_problem))
        print("Upper bound on plan length: {}".format(t_max))

    # Solve the planning problem
    plan = None
    timer = Timer(name="solving-time", text="Did ASP encoding & solving in {:.2f} seconds")
    if verbose:
        print("Solving planning problem using ASP encoding..")
    timer.start()
    with suppress_stdout_stderr():
        plan = solve_planning_problem_using_ASP(planning_problem, t_max)
    if verbose:
        timer.stop()

    # Print the found plan
    if plan is None:
        print("NO PLAN FOUND")
    else:
        if verify_plan(planning_problem, plan):
            if verbose:
                print("Correct plan found:")
            print(pretty_repr_plan(plan))
        else:
            print("INCORRECT PLAN FOUND")
            print(pretty_repr_plan(plan))
def test_accumulated_explicit_timer(capsys):
    """Test that explicit timer can accumulate"""
    t = Timer(name="accumulated_explicit_timer", text=TIME_MESSAGE)
    total = 0
    t.start()
    waste_time()
    total += t.stop()
    t.start()
    waste_time()
    total += t.stop()
    stdout, stderr = capsys.readouterr()
    lines = stdout.strip().split("\n")
    assert len(lines) == 2
    assert RE_TIME_MESSAGE.match(lines[0])
    assert RE_TIME_MESSAGE.match(lines[1])
    assert stderr == ""
    assert total == Timer.timers["accumulated_explicit_timer"]
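# The test above leans on codetiming's class-level Timer.timers registry:
# every stop() of a named timer accumulates into it. A minimal sketch of
# reading the registry back (the "demo" timer name is hypothetical):
import time

from codetiming import Timer

t = Timer(name="demo", logger=None)
for _ in range(3):
    t.start()
    time.sleep(0.1)
    t.stop()

print(Timer.timers["demo"])        # total accumulated seconds across runs
print(Timer.timers.count("demo"))  # number of completed measurements
print(Timer.timers.mean("demo"))   # mean seconds per measurement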
async def rand_id_generate():
    link = "https://nhentai.net/g/"
    hdr = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/85.0.4183.83 Safari/537.36'
    }
    timer = Timer(text="Randomize sauce elapsed time: {:.5f}")
    timer.start()
    while True:
        sauce = random.randint(0, rand_limit)
        link_comp = link + str(sauce)
        # requests.get takes headers as a keyword argument (the second
        # positional parameter is params); note that this blocking call
        # stalls the event loop inside an async function
        req = requests.get(link_comp, headers=hdr)
        try:
            req.raise_for_status()
        except requests.exceptions.RequestException:
            continue
        timer.stop()
        return sauce