def task(name, queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not queue.empty():
        delay = queue.get()
        print(f"Task {name} running")
        timer.start()
        time.sleep(delay)
        timer.stop()
        yield
async def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = asyncio.Queue()

    # Put some work in the queue
    for work in [15, 10, 5, 2]:
        await work_queue.put(work)

    # Run the tasks
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(
            asyncio.create_task(task("One", work_queue)),
            asyncio.create_task(task("Two", work_queue)),
        )
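# A minimal sketch of the async task() coroutine the main() above presumably
# pairs with; this is an assumption modeled on the synchronous generator
# variant, with asyncio.sleep standing in for time.sleep so the event loop
# can switch between tasks while one of them "works".
import asyncio

from codetiming import Timer


async def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not work_queue.empty():
        delay = await work_queue.get()
        print(f"Task {name} running")
        timer.start()
        await asyncio.sleep(delay)  # yields control to the other task
        timer.stop()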
def __init__(self, service, coords, radius, out_dir, use_subdir=True,
             agent='NAVO-servicemon', tap_mode='async',
             save_results=True, verbose=False):
    self._save_results = save_results
    self._timer = Timer('query_total', logger=None)
    self._timer.timers.clear()
    self.__agent = agent
    self._tap_mode = tap_mode
    self._service = service
    self._base_name = self._compute_base_name()
    self._service_type = self._compute_service_type()
    self._use_subdir = use_subdir

    self._orig_coords = coords
    self._orig_radius = radius
    self._coords = self._compute_coords()
    self._adql = self._compute_adql()
    self._access_url = self._compute_access_url()

    # Add cone params to xcone access_url
    if self._service_type == 'xcone':
        ra, dec, radius = self._get_ra_dec_radius()
        self._access_url = self._access_url.format(ra, dec, radius)

    if self._use_subdir:
        self._out_path = pathlib.Path(f'{out_dir}/{self._base_name}')
    else:
        self._out_path = pathlib.Path(out_dir)

    self._verbose = verbose
    self._query_params = self._compute_query_params()
    self._query_name = self._compute_query_name()
    self._filename = self._out_path / (self._query_name + '.xml')

    self._stats = QueryStats(
        self._query_name, self._base_name, self._service_type,
        self._access_url, self._query_params, self._result_meta_attrs())
async def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = asyncio.Queue()

    # Put some work in the queue
    for work in [15, 10, 5, 2]:
        await work_queue.put(work)

    # Run the tasks
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(
            asyncio.create_task(task("One", work_queue)),
            asyncio.create_task(task("Two", work_queue)),
        )
def main():
    work_queue = queue.Queue()
    for work in [15, 10, 5, 2]:
        work_queue.put(work)

    tasks = [task("One", work_queue), task("Two", work_queue)]

    done = False
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
def main():
    """
    Reads arguments from the command line (including an input file), reads the
    planning problem and the upper bound on plan length (t_max) from the
    specified input file, calls solve_planning_problem_using_ASP() from
    asp_planner_core.py to find a plan for the planning problem (if one
    exists), and prints the found plan.
    """
    # Take command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", required=True, help="input file")
    parser.add_argument("-v", "--verbose", help="verbose mode",
                        action="store_true")
    args = parser.parse_args(map(lambda x: x.lower(), sys.argv[1:]))
    input = args.input
    verbose = args.verbose

    # Read the planning problem from the input file
    if verbose:
        print("Reading planning problem and bound on plan length from "
              + input + "..")
    planning_problem, t_max = read_problem_from_file(input)
    if planning_problem is None:
        print("Exiting..")
        return

    # Print information, in verbose mode
    if verbose:
        print("Planning problem:")
        print(pretty_repr_planning_problem(planning_problem))
        print("Upper bound on plan length: {}".format(t_max))

    # Solve the planning problem
    plan = None
    timer = Timer(name="solving-time",
                  text="Did ASP encoding & solving in {:.2f} seconds")
    if verbose:
        print("Solving planning problem using ASP encoding..")
        timer.start()
    # with suppress_stdout_stderr():
    plan = solve_planning_problem_using_ASP(planning_problem, t_max)
    if verbose:
        timer.stop()

    # Print the found plan
    if plan is None:
        print("NO PLAN FOUND")
    else:
        if verify_plan(planning_problem, plan):
            if verbose:
                print("Correct plan found:")
            print(pretty_repr_plan(plan))
        else:
            print("INCORRECT PLAN FOUND")
            print(pretty_repr_plan(plan))
def scan(paths, recursive, size, min_size, max_size, hash_function):
    """Scan files in directories and report duplication."""
    if min_size is None:
        min_size = size // 4
    if max_size is None:
        max_size = size * 8

    bytes_total = 0
    bytes_dupe = 0
    fingerprints = set()
    supported = supported_hashes()
    if hash_function not in supported:
        msg = "'{}' is not a supported hash.\nTry one of these:\n{}".format(
            hash_function, ", ".join(supported))
        raise click.BadOptionUsage("hf", msg)

    hf = getattr(hashlib, hash_function)
    files = []
    for path in paths:
        files += list(iter_files(path, recursive))

    t = Timer("scan", logger=None)
    t.start()
    with click.progressbar(files) as pgbar:
        for entry in pgbar:
            try:
                chunker = fastcdc.fastcdc(entry.path, min_size, size,
                                          max_size, hf=hf)
            except Exception as e:
                click.echo("\n for {}".format(entry.path))
                click.echo(repr(e))
                continue
            for chunk in chunker:
                bytes_total += chunk.length
                if chunk.hash in fingerprints:
                    bytes_dupe += chunk.length
                fingerprints.add(chunk.hash)
    t.stop()

    if bytes_total:
        data_per_s = bytes_total / Timer.timers.mean("scan")
        dd_ratio = bytes_dupe / bytes_total * 100
        click.echo("Files: {}".format(intcomma(len(files))))
        click.echo("Chunk Sizes: min {} - avg {} - max {}".format(
            min_size, size, max_size))
        click.echo("Unique Chunks: {}".format(intcomma(len(fingerprints))))
        click.echo("Total Data: {}".format(naturalsize(bytes_total)))
        click.echo("Dupe Data: {}".format(naturalsize(bytes_dupe)))
        click.echo("DeDupe Ratio: {:.2f} %".format(dd_ratio))
        click.echo("Throughput: {}/s".format(naturalsize(data_per_s)))
    else:
        click.echo("No data.")
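# Side note: a named Timer("scan", logger=None) accumulates every
# start()/stop() interval into the class-level Timer.timers registry, which
# is what Timer.timers.mean("scan") reads above. A minimal, self-contained
# sketch (the timer name "demo" is hypothetical):
from codetiming import Timer

t = Timer("demo", logger=None)
for _ in range(3):
    with t:  # Timer doubles as a context manager
        sum(range(10_000))  # stand-in for real work
print(Timer.timers.count("demo"), Timer.timers.mean("demo"))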
async def get_goods(self):
    timer = Timer(text='Scraping time: {:.1f}s')
    timer.start()
    await self.get_categories()
    if not self.categories:
        return
    list_to_scraping = self.create_list_to_scraping()
    for item in list_to_scraping:
        await self.get_goods_in_category(item)
    await self.api.close_session()
    print(f'\nScraped {self.progress.counter} goods.')
    timer.stop()
def download_queue_oday(self, directory):
    logger.debug("download start: 0-day")
    self.ftp.cwd("//MP3")
    self.ftp.cwd("0-DAY")
    self.ftp.cwd(directory)
    if not os.path.exists(self.download_root + directory):
        os.makedirs(self.download_root + directory)
    timer = Timer(text="Track downloaded in {:0.2f} seconds",
                  logger=logger.info)
    for ftpfile in self.queue_oday:
        self.ftp.cwd(ftpfile.directory)
        # First entry is always a sub directory
        for filename in (path for path in self.ftp.nlst()
                         if path not in ('.', '..')):
            logger.debug(f"Checking filename {filename}")
            destination_dir = os.path.join(self.download_root, directory,
                                           ftpfile.group, ftpfile.directory)
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            local_filename = os.path.join(
                destination_dir,
                filename.replace('-www.groovytunes.org', ''))
            if filename.startswith('-['):
                os.makedirs(local_filename)
            else:
                if not os.path.exists(local_filename):
                    if ftpfile.size < 52914560:
                        logger.info(
                            f"Downloading {filename} to {local_filename} "
                            f"with size {ftpfile.size}")
                        timer.start()
                        with open(local_filename, 'wb') as file:
                            self.ftp.retrbinary('RETR ' + filename,
                                                file.write)
                        timer.stop()
                    else:
                        logger.warning(f"Skip oversized file {filename}")
                else:
                    logger.warning(f"File already exists {local_filename}.")
        clean_download_directory(destination_dir)
        self.ftp.cwd("..")
def main():
    work_queue = Queue()
    for work in [8, 5, 3, 1]:
        work_queue.put(work)

    tasks = [task('One', work_queue), task('Two', work_queue)]

    done = False
    with Timer(text='\nTotal elapsed time: {:.1f}'):
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
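# Usage sketch (assumed scaffolding, not shown above): the synchronous
# examples pair the generator-based task() with imports like these and a
# plain entry point; each next(t) call runs one task until its next yield.
import time
from queue import Queue

from codetiming import Timer

if __name__ == "__main__":
    main()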
async def get_data(name, work_queue):
    timer = Timer(text=f'Task {name} elapsed time: {{:.1f}}')
    async with aiohttp.ClientSession() as session:
        data = []
        while not work_queue.empty():
            url = await work_queue.get()
            print(f'Task {name} getting URL: {url}')
            timer.start()
            async with session.get(url) as response:
                contents = await response.text()
                data.append(contents[:100])
            timer.stop()
        return data
def process(img_path, out, n_points):
    # Loading the image
    with Timer(text="Loading the image: {:.6f} seconds"):
        img = Image.open(img_path)
        img = np.asarray(img)
        random_i = np.random.randint(0, img.shape[1], size=n_points)
        random_j = np.random.randint(0, img.shape[0], size=n_points)
        points = np.vstack((random_i, random_j)).T

    # Creating the kdtree
    with Timer(text="Creating the kdtree: {:.6f} seconds"):
        vor = Voronoi(points)
        kdtree = KDTree(vor.points)

    # Identifying regions pixel by pixel
    with Timer(text="Computing positions: {:.6f} seconds"):
        region_array = -np.ones(img.shape[:2])
        all_pos = product(range(img.shape[1]), range(img.shape[0]))
        positions = np.array(list(all_pos))

    with Timer(text="Identifying regions pixel by pixel: {:.6f} seconds"):
        distances, labels = kdtree.query(positions)

    with Timer(text="Labelizing: {:.6f} seconds"):
        for pos, label in zip(positions, labels):
            region_array[pos[1], pos[0]] = label

    # Storing colors
    with Timer(text="Storing colors: {:.6f} seconds"):
        colors = {}
        for region_id in np.unique(region_array):
            colors[region_id] = compute_mean_color(
                img, np.where(region_array == region_id))
        new_img = np.zeros(img.shape)
        for region_id in np.unique(region_array):
            region = np.where(region_array == region_id)
            new_img[region] = colors[region_id]

    # Saving it
    with Timer(text="Saving: {:.6f} seconds"):
        new_img = new_img.astype(int)
        PIL_image = Image.fromarray(new_img.astype("uint8"), "RGB")
        PIL_image.save(out)
async def main():
    work_queue = asyncio.Queue()
    for url in [
        'https://google.com',
        'https://yahoo.com',
        'https://linkedin.com',
        'https://apple.com',
        'https://microsoft.com',
        'https://facebook.com',
        'https://twitter.com',
    ]:
        await work_queue.put(url)

    with Timer(text='\nTotal elapsed time: {:.1f}'):
        await asyncio.gather(
            asyncio.create_task(task('One', work_queue)),
            asyncio.create_task(task('Two', work_queue)),
        )
async def main():
    work_queue = asyncio.Queue()
    for url in [
        "http://google.com",
        "http://yahoo.com",
        "http://linkedin.com",
        "http://apple.com",
        "http://microsoft.com",
        "http://facebook.com",
        "http://twitter.com",
    ]:
        await work_queue.put(url)

    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(
            asyncio.create_task(task("One", work_queue)),
            asyncio.create_task(task("Two", work_queue)),
        )
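# A minimal sketch of the task() coroutine these URL-driven mains presumably
# pair with (an assumption, modeled on the aiohttp get_data() example above):
# fetch each URL from the queue and time every request with codetiming.
import aiohttp
from codetiming import Timer


async def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    async with aiohttp.ClientSession() as session:
        while not work_queue.empty():
            url = await work_queue.get()
            print(f"Task {name} getting URL: {url}")
            timer.start()
            async with session.get(url) as response:
                await response.text()
            timer.stop()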
async def main():
    work_queue = asyncio.Queue()
    for work in [15, 10, 5, 2]:
        # put() on an asyncio.Queue is a coroutine, so it must be awaited
        await work_queue.put(work)

    # Run the tasks
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        # gather() schedules the tasks created with asyncio.create_task and
        # waits until all of them have completed before proceeding
        await asyncio.gather(
            asyncio.create_task(task("one", work_queue)),
            asyncio.create_task(task("two", work_queue)),
            asyncio.create_task(task("three", work_queue)),
            asyncio.create_task(task("four", work_queue)),
        )
    print("I am all done!")  # runs only after asyncio.gather has finished
def download_queue_bt(self, directory):
    logger.debug("download start: bt")
    self.ftp.cwd("//MP3")
    self.ftp.cwd("BEATPORT__AND__WEBSITE_SECTION")
    self.ftp.cwd(directory)
    if not os.path.exists(self.download_root + directory):
        os.makedirs(self.download_root + directory)
    timer = Timer(text="Track downloaded in {:0.2f} seconds",
                  logger=logger.info)
    for ftpfile in self.queue_bt:
        self.ftp.cwd(ftpfile.directory)
        logger.debug(f"Listing directory {ftpfile.directory}")
        for filename in (path for path in self.ftp.nlst()
                         if path not in ('.', '..')):
            logger.debug(f"Checking filename {filename}")
            destination_dir = os.path.join(self.download_root, directory,
                                           ftpfile.group, ftpfile.directory)
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            local_filename = os.path.join(
                destination_dir,
                filename.replace('-www.groovytunes.org', '').replace('_', ' '))
            if not os.path.exists(local_filename):
                if ftpfile.size < 52914560:
                    logger.info(f"Downloading {filename} to {local_filename}")
                    timer.start()
                    with open(local_filename, 'wb') as file:
                        self.ftp.retrbinary('RETR ' + filename, file.write)
                    timer.stop()
                else:
                    logger.warning(f"Skip oversized file {filename}")
            else:
                logger.info(f"File already exists {local_filename}.")
        clean_download_directory(destination_dir)
        self.ftp.cwd("..")
async def get_data_async():
    work_queue = asyncio.Queue()
    for url in [
        "http://google.com",
        "http://yahoo.com",
        "http://linkedin.com",
        "http://apple.com",
    ]:
        await work_queue.put(url)

    # Run tasks
    with Timer(text='\nTotal elapsed time: {:.1f}'):
        all_data = await asyncio.gather(
            asyncio.create_task(get_data("One", work_queue)),
            asyncio.create_task(get_data("Two", work_queue)),
        )
    print("All done getting all URLs")
    print(all_data)
async def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = asyncio.Queue()

    # Put some work in the queue
    for url in [
        "https://baidu.com",
        "https://www.taobao.com/",
        "https://www.apple.com",
        "https://www.microsoft.com",
        "https://www.qq.com/",
    ]:
        await work_queue.put(url)

    # Run the tasks
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(
            asyncio.create_task(task("One", work_queue)),
            asyncio.create_task(task("Two", work_queue)),
        )
async def spider_task(self, name):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    while not self.queue.empty():
        try:
            work = await self.queue.get()
            print(work)
            print(f"{name} running task {work}")
            timer.start()
            await self.spyder(work)
            timer.stop()
        except asyncio.CancelledError:
            print('task_func was canceled')
            raise
    return 'the result'
def testKMeans(self):
    data = DataRetriever("../Datasets/metadata.json")
    data.retrieveData("computerHardware")
    kValue = 15

    t = Timer()
    t.start()
    mediods = KMediods(data.getDataSet(), data.getDataClass(),
                       data.getDescreteAttributes(),
                       data.getContinuousAttributes(),
                       data.getPredictionType(), kValue, 100)
    t.stop()

    print(f"Time: {t}")
    print(mediods)
    mediods.to_csv('kmedoids.csv', index=False)
def main():
    # Take command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", required=True, help="input file")
    parser.add_argument("-v", "--verbose", help="verbose mode",
                        action="store_true")
    args = parser.parse_args(map(lambda x: x.lower(), sys.argv[1:]))
    input = args.input
    verbose = args.verbose

    # Read the planning problem from the input file
    if verbose:
        print("Reading planning problem and bound on plan length from "
              + input + "..")
    planning_problem, t_max = read_problem_from_file(input)
    if planning_problem is None:
        print("Exiting..")
        return

    # Print information, in verbose mode
    if verbose:
        print("Planning problem:")
        print(pretty_repr_planning_problem(planning_problem))
        print("Upper bound on plan length: {}".format(t_max))

    # Solve the planning problem
    plan = None
    timer = Timer(name="solving-time",
                  text="Did ASP encoding & solving in {:.2f} seconds")
    if verbose:
        print("Solving planning problem using ASP encoding..")
        timer.start()
    with suppress_stdout_stderr():
        plan = solve_planning_problem_using_ASP(planning_problem, t_max)
    if verbose:
        timer.stop()

    # Print the found plan
    if plan is None:
        print("NO PLAN FOUND")
    else:
        if verify_plan(planning_problem, plan):
            if verbose:
                print("Correct plan found:")
            print(pretty_repr_plan(plan))
        else:
            print("INCORRECT PLAN FOUND")
            print(pretty_repr_plan(plan))
def run(pipeline: str, *stages) -> None:
    """Run one pipeline

    Args:
        pipeline: Name of pipeline
    """
    # Run pipeline
    stages = stages or config.{{ cookiecutter.repo_name }}.pipelines[pipeline].stages or funcs(pipeline)
    logger.opt(colors=True).info(
        f"Start pipeline <red>{pipeline!r}</red> with stages: {', '.join(stages)}"
    )
    data, meta = munch.Munch(), munch.Munch()
    for stage in stages:
        logger.opt(colors=True).info(f"Start stage <cyan>{stage!r}</cyan>")
        {% raw %}with Timer(
            f"stage_{stage}",
            f"Finished {stage!r} in {{:.2f}} seconds",
            logger=logger.time,
        ):{% endraw %}
            call(pipeline, func=stage, data=data, meta=meta)
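# For readability, a sketch of what the template above might render to once
# cookiecutter substitutes a hypothetical repo_name of "myproj" (an assumed
# name); the raw/endraw guard exists only so jinja leaves the doubled braces
# of Timer's format string alone:
#
#   stages = stages or config.myproj.pipelines[pipeline].stages or funcs(pipeline)
#   ...
#   with Timer(
#       f"stage_{stage}",
#       f"Finished {stage!r} in {{:.2f}} seconds",
#       logger=logger.time,
#   ):
#       call(pipeline, func=stage, data=data, meta=meta)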
def main():
    """
    Run two instances of a task cooperatively, even though each makes
    blocking calls. This is still a synchronous program.
    """
    work_queue = queue.Queue()
    for work in [15, 10, 5, 2]:
        work_queue.put(work)

    tasks = [task("One", work_queue), task("Two", work_queue)]

    done = False
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
def __call__(self, b, u=None):
    from codetiming import Timer
    if u is None:
        u = b
    else:
        assert u.shape == b.shape

    s0 = tuple(base.slice() for base in self.T)
    if b.dtype.char in 'fdg':
        u[s0] = scp.linalg.spsolve(self.M, b[s0].flatten()).reshape(
            self.T.dims())
    else:
        factor = 1
        if self.M.dtype.char in 'FDG':
            factor = abs(self.M.real).max() / abs(self.M.imag).max()
        if factor > 1e12:
            # If M is basically a real matrix with roundoff numbers in imag,
            # factor the real part once and reuse the LU for both components
            Mc = self.M.real.copy()
            with Timer('LU'):
                lu = splu(Mc)
            with Timer('LU solve'):
                u.real[s0] = lu.solve(b.real[s0].flatten()).reshape(
                    self.T.dims())
                u.imag[s0] = lu.solve(b.imag[s0].flatten()).reshape(
                    self.T.dims())
        elif factor < 1e-12:
            # If M is basically imaginary with roundoff numbers in real
            Mc = self.M.imag.copy()
            with Timer('LU'):
                lu = splu(Mc)
            with Timer('LU solve'):
                u.real[s0] = lu.solve(b.imag[s0].flatten()).reshape(
                    self.T.dims())
                u.imag[s0] = lu.solve(b.real[s0].flatten()).reshape(
                    self.T.dims())
        else:
            with Timer('LU'):
                lu = splu(self.M)
            with Timer('LU solve'):
                u[s0] = lu.solve(b[s0].flatten()).reshape(self.T.dims())
    return u
async def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = asyncio.Queue()

    # Put some work in the queue
    for url in [
        "http://google.com",
        "http://yahoo.com",
        "http://linkedin.com",
        "http://apple.com",
        "http://microsoft.com",
        "http://facebook.com",
        "http://twitter.com",
    ]:
        await work_queue.put(url)

    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(
            asyncio.create_task(task('One', work_queue)),
            asyncio.create_task(task('Two', work_queue)),
        )
def main():
    work_queue = queue.Queue()
    for url in [
        "http://google.com",
        "http://yahoo.com",
        "http://linkedin.com",
        "http://apple.com",
        "http://microsoft.com",
        "http://facebook.com",
        "http://twitter.com",
    ]:
        work_queue.put(url)

    tasks = [task("One", work_queue), task("Two", work_queue)]

    done = False
    with Timer():
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
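# A sketch of the blocking, generator-based task() that the synchronous URL
# main() above presumably drives (an assumption mirroring the async aiohttp
# variant): requests does the blocking I/O, and yield hands control back to
# the scheduler loop between URLs.
import requests
from codetiming import Timer


def task(name, work_queue):
    timer = Timer(text=f"Task {name} elapsed time: {{:.1f}}")
    with requests.Session() as session:
        while not work_queue.empty():
            url = work_queue.get()
            print(f"Task {name} getting URL: {url}")
            timer.start()
            session.get(url)
            timer.stop()
            yield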
def main():
    """
    This is the main entry point for the program
    """
    # Create the work queue
    work_queue = queue.Queue()

    # Put some work in the queue
    for work in [15, 10, 5, 2]:
        work_queue.put(work)

    tasks = [task("One", work_queue), task("Two", work_queue)]

    # Run the tasks
    done = False
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
async def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = asyncio.Queue()

    # Put some work in the queue
    for url in [
        "http://google.com",
        "http://yahoo.com",
        "http://linkedin.com",
        "http://apple.com",
        "http://microsoft.com",
        "http://facebook.com",
        "http://twitter.com",
    ]:
        await work_queue.put(url)

    # Run the tasks; a single gather schedules both coroutines
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        await asyncio.gather(task("One", work_queue), task("Two", work_queue))
def main():
    """
    This is the main entry point for the program
    """
    # Create the queue of work
    work_queue = queue.Queue()

    # Put some work in the queue
    for work in [15, 10, 5, 2]:
        work_queue.put(work)

    tasks = [task("One", work_queue), task("Two", work_queue)]

    # Run the tasks
    done = False
    with Timer(text="\nTotal elapsed time: {:.1f}"):
        while not done:
            # Iterate over a copy so removing an exhausted task is safe
            for t in tasks[:]:
                try:
                    next(t)
                except StopIteration:
                    tasks.remove(t)
                    if len(tasks) == 0:
                        done = True
async def rand_id_generate():
    link = "https://nhentai.net/g/"
    hdr = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/85.0.4183.83 Safari/537.36'
    }
    timer = Timer(text="Randomize sauce elapsed time: {:.5f}")
    timer.start()
    while True:
        sauce = random.randint(0, rand_limit)
        link_comp = link + str(sauce)
        # The headers must be passed by keyword; requests.get's second
        # positional parameter is params, not headers
        req = requests.get(link_comp, headers=hdr)
        try:
            req.raise_for_status()
        except requests.exceptions.RequestException:
            continue
        timer.stop()
        return sauce
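# Usage sketch (assumed): rand_id_generate() is a coroutine, so a caller
# would drive it with the event loop, e.g.
#
#   sauce = asyncio.run(rand_id_generate())
#
# Note that requests.get blocks the event loop; an aiohttp request (as in the
# get_data() example above) would keep this coroutine truly non-blocking.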