def poll_workflow_status(repo_name, check_run_id):
    """Poll a GitHub check run until it completes, showing a console spinner.

    Raises CLIError when the workflow finishes with any conclusion other
    than 'success'.
    """
    import colorama
    import humanfriendly
    import time

    def _spin_until(label, terminal_statuses):
        # Show a spinner until the check run reaches one of the given statuses.
        colorama.init()
        with humanfriendly.Spinner(label=label) as spinner:  # pylint: disable=no-member
            while True:
                spinner.step()
                time.sleep(0.5)
                status, conclusion = get_check_run_status_and_conclusion(
                    repo_name, check_run_id)
                if status in terminal_statuses:
                    break
        colorama.deinit()
        return status, conclusion

    check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
        repo_name, check_run_id)
    if check_run_status == 'queued':
        # A queued run may jump straight to 'completed' for very fast workflows.
        check_run_status, check_run_conclusion = _spin_until(
            "Workflow is in queue", ('in_progress', 'completed'))
    if check_run_status == 'in_progress':
        check_run_status, check_run_conclusion = _spin_until(
            "Workflow is in progress", ('completed',))
    print('GitHub workflow completed.')
    if check_run_conclusion == 'success':
        print('Workflow succeeded')
    else:
        raise CLIError('Workflow status: {}'.format(check_run_conclusion))
def poll_workflow_status(repo_name, check_run_id):
    """Poll a GitHub check run until completion, displaying a spinner.

    :param repo_name: repository the check run belongs to
    :param check_run_id: identifier of the check run to poll
    :raises CLIError: if the workflow conclusion is anything but 'success'
    """
    import colorama
    import humanfriendly
    import time
    check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
        repo_name, check_run_id)
    if check_run_status == 'queued':
        colorama.init()
        with humanfriendly.Spinner(label='Workflow is in queue') as spinner:  # pylint: disable=no-member
            while True:
                spinner.step()
                time.sleep(0.5)
                check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
                    repo_name, check_run_id)
                # Fast runs can transition straight from 'queued' to 'completed'.
                if check_run_status in ('in_progress', 'completed'):
                    break
        colorama.deinit()
    if check_run_status == 'in_progress':
        colorama.init()
        with humanfriendly.Spinner(label="Workflow is in Progress") as spinner:  # pylint: disable=no-member
            while True:
                spinner.step()
                time.sleep(0.5)
                check_run_status, check_run_conclusion = get_check_run_status_and_conclusion(
                    repo_name, check_run_id)
                if check_run_status == 'completed':
                    break
        colorama.deinit()
    print('GitHub Workflow Completed.')
    print('')
    if check_run_conclusion == 'success':
        # BUG FIX: message previously read 'Workflow Succeded.' (typo).
        print('Workflow Succeeded.')
    else:
        raise CLIError('Workflow status: {}'.format(check_run_conclusion))
def __init__(self, cli_ctx, message="Running"):
    """Create a progress indicator bound to the CLI context.

    :param cli_ctx: CLI context providing get_progress_controller()
    :param message: label shown next to the spinner (default "Running")
    """
    self.cli_ctx = cli_ctx
    self.message = message
    # BUG FIX: the spinner label was hard-coded to 'Running', silently
    # ignoring the message parameter; use the caller-supplied message.
    self.hook = self.cli_ctx.get_progress_controller(
        det=False,
        spinner=humanfriendly.Spinner(  # pylint: disable=no-member
            label=message,
            stream=sys.stderr,
            hide_cursor=False))
def _update_artifacttool(uri, release_id):
    """Download the Universal Packages tooling zip from *uri* and install it
    as release *release_id*, replacing any previously installed releases."""
    root = _compute_artifacttool_root()

    # Remove all existing releases. In the future we may maintain some old versions,
    # but right now we always delete them.
    if os.path.isdir(root):
        for entry in os.listdir(root):
            entry_path = os.path.join(root, entry)
            if os.path.isdir(entry_path):
                logger.debug("Trying to remove old release %s", entry)
                shutil.rmtree(entry_path, ignore_errors=True)  # Failing cleanup is not fatal

    with humanfriendly.Spinner(label="Downloading Universal Packages tooling ({})"
                               .format(release_id), total=100, stream=sys.stderr) as spinner:
        spinner.step()
        logger.debug("Downloading ArtifactTool from %s", uri)

        # Make the request, determine the total size
        response = requests.get(uri, stream=True)
        total_bytes = int(response.headers['Content-Length'].strip())

        # Do the download, updating the progress bar
        payload = io.BytesIO()
        downloaded = 0
        for chunk in response.iter_content(chunk_size=1024 * 512):
            if not chunk:
                continue
            payload.write(chunk)
            downloaded += len(chunk)
            spinner.step(100 * float(downloaded) / float(total_bytes))

        # Extract the zip
        release_temp_dir = os.path.join(root, str(uuid.uuid4()))
        logger.debug("Extracting ArtifactTool to %s", release_temp_dir)
        archive = zipfile.ZipFile(payload)
        try:
            _mkdir_if_not_exist(release_temp_dir)
            archive.extractall(path=release_temp_dir)

            # For Linux, ensure the executable bit is set on the binary "ArtifactTool" if it exists.
            # Python has a bug https://bugs.python.org/issue15795 where file permissions are not preserved.
            artifacttool_binary = os.path.join(release_temp_dir, "artifacttool")
            if os.path.exists(artifacttool_binary):
                binary_mode = os.stat(artifacttool_binary).st_mode
                os.chmod(artifacttool_binary,
                         binary_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)

            # Move the release into the real releases location
            release_dir = _compute_release_dir(release_id)
            logger.debug("Moving downloaded ArtifactTool from %s to %s",
                         release_temp_dir, release_dir)
            os.rename(release_temp_dir, release_dir)
            logger.info("Downloaded Universal Packages tooling successfully")
        except BaseException as ex:  # deliberate best-effort: log, then remove the temp dir
            logger.error("An error occurred while extracting the Universal Packages tooling: %s", ex)
            logger.debug("Removing temporary directory %s", release_temp_dir)
            shutil.rmtree(release_temp_dir, ignore_errors=True)
def write(self, args):
    """Write a progress update.

    :param args: dictionary containing key 'message'
    """
    if self.spinner is None:
        # Create the spinner lazily on the first write.
        self.spinner = humanfriendly.Spinner(label='In Progress',
                                             stream=self.out,
                                             hide_cursor=False)
    label = args.get('message', 'In Progress')
    self.spinner.step(label=label)
def write(self, args):
    """Write a progress update.

    :param args: dictionary containing key 'message'
    """
    if self.spinner is None:
        # Create the spinner lazily on the first write.
        self.spinner = humanfriendly.Spinner(  # pylint: disable=no-member
            label='In Progress', stream=self.out, hide_cursor=False)
    label = args.get('message', 'In Progress')
    try:
        self.spinner.step(label=label)
    except OSError:
        # A broken/closed output stream is non-fatal for progress display.
        pass
def run(self, command_args, env, initial_progress_text, stderr_handler):
    """Launch the subprocess and relay decoded stderr lines to the handler
    so it can drive the progress spinner; return the process result."""
    with humanfriendly.Spinner(  # pylint: disable=no-member
            label=initial_progress_text, total=100, stream=sys.stderr) as self._spinner:
        self._spinner.step()

        # Start the process, process stderr for progress reporting, check the process result
        self.start(command_args, env)
        try:
            for raw_line in iter(self._proc.stderr.readline, b''):
                decoded = raw_line.decode('utf-8', 'ignore').strip()
                stderr_handler(decoded, self._update_progress)
            return self.wait()
        except IOError as ex:
            # The pipe may break while we are deliberately terminating the
            # process; only propagate errors we did not cause ourselves.
            if not self._terminating:
                raise ex
def poll_connection_ready(organization, project, connection_id):
    """Block until the given service endpoint (connection) reports ready.

    :param organization: Azure DevOps organization URL/name
    :param project: project containing the service endpoint
    :param connection_id: identifier of the service endpoint to poll
    """
    import colorama
    import humanfriendly
    import time
    colorama.init()
    with humanfriendly.Spinner(label="Checking resource readiness") as spinner:
        se_client = get_service_endpoint_client(organization)
        while True:
            spinner.step()
            time.sleep(0.5)
            service_endpoint = se_client.get_service_endpoint_details(
                project, connection_id)
            if service_endpoint.is_ready:
                break
    # FIX: release colorama's stream wrapping when done, matching the
    # init()/deinit() pairing used by the sibling polling helpers.
    colorama.deinit()
def run(self, command_args, env, initial_progress_text, stderr_handler):
    """Launch the subprocess and feed its stderr lines to *stderr_handler*
    for progress reporting; return the process result."""
    # NOTE(review): lines are handed to the handler as raw bytes here,
    # unlike the sibling implementation which decodes first — confirm the
    # handler expects bytes.
    with humanfriendly.Spinner(label=initial_progress_text, total=100,
                               stream=sys.stderr) as self._spinner:
        self._spinner.step()

        # Start the process, process stderr for progress reporting, check the process result
        self.start(command_args, env)
        try:
            for raw_line in iter(self._proc.stderr.readline, b''):
                stderr_handler(raw_line, self._update_progress)
            return self.wait()
        except IOError as ex:
            # Pipe errors during a deliberate shutdown are expected.
            if not self._terminating:
                raise ex
def test_spinner(self):
    """Test :func:`humanfriendly.Spinner`."""
    stream = StringIO()
    spinner = humanfriendly.Spinner('test spinner', total=4, stream=stream, interactive=True)
    for progress in (1, 2, 3, 4):
        spinner.step(progress=progress)
        time.sleep(0.2)
    spinner.clear()
    # Strip the cursor show/hide control codes so only rendered text remains.
    rendered = stream.getvalue()
    for code in (humanfriendly.show_cursor_code, humanfriendly.hide_cursor_code):
        rendered = rendered.replace(code, '')
    lines = [line for line in rendered.split(humanfriendly.erase_line_code) if line]
    self.assertTrue(len(lines) > 0)
    # Every rendered frame should carry the label and a percentage.
    self.assertTrue(all('test spinner' in line for line in lines))
    self.assertTrue(all('%' in line for line in lines))
    # Progress advances each step, so no two frames should be identical.
    self.assertEqual(sorted(set(lines)), sorted(lines))
def dl_all(start, end):
    """Download and store quote pages from *start* up to (but not including) *end*.

    Stops early on network errors; calls exit(0) when *end* is reached.

    :param start: first page number to download
    :param end: page number at which to stop (exclusive); if never reached,
        the loop runs until a network error occurs
    """
    page = start
    while True:
        print("Downloading page {}...".format(page))
        try:
            raw_quotes = dl_page(page)
        except (requests.ConnectionError, RequestError):
            print("Network error. Stopping.")
            break
        for quote in raw_quotes:
            # CLEANUP: add_quote's return value was bound to an unused local.
            add_quote(parse_quote(quote))
        print("Done, go to the next page.")
        page += 1
        if page == end:
            print("Finished!")
            exit(0)
        # Politeness throttle: pause ~5 seconds (25 * 0.2s) between pages.
        with humanfriendly.Spinner(
                label="Wait before moving to the next page...",
                timer=humanfriendly.Timer()) as s:
            for _ in range(25):
                time.sleep(0.2)
                s.step()
    return
def run(self) -> None:
    """Run the simulation and process all events."""
    # Imported here (not at module top) — presumably to avoid a circular
    # import between the simulation and event modules; TODO confirm.
    from simfantasy.event import ActorReadyEvent, CombatStartEvent, CombatEndEvent, \
        ServerTickEvent

    # Accumulators for per-iteration statistics, concatenated across runs.
    auras_df = pd.DataFrame()
    damage_df = pd.DataFrame()
    resources_df = pd.DataFrame()

    try:
        # Create a friendly progress indicator for the user.
        with humanfriendly.Spinner(label='Simulating', total=self.iterations) as spinner:
            # Store iteration runtimes so we can predict overall runtime.
            iteration_runtimes: List[timedelta] = []

            for iteration in range(self.iterations):
                pd_runtimes = pd.Series(iteration_runtimes)
                iteration_start = datetime.now()
                self.current_iteration = iteration

                # Schedule the bookend events.
                self.schedule(CombatStartEvent(sim=self))
                self.schedule(CombatEndEvent(sim=self), self.combat_length)

                # Schedule the server ticks.
                # NOTE(review): ticks every 3 seconds starting at t=3 up to the
                # combat length (exclusive).
                for delta in range(3, int(self.combat_length.total_seconds()), 3):
                    self.schedule(ServerTickEvent(sim=self), delta=timedelta(seconds=delta))

                # TODO Maybe move this to Actor#arise?
                # Tell the actors to get ready.
                for actor in self.actors:
                    self.schedule(ActorReadyEvent(sim=self, actor=actor))

                # Start the event loop. self.events appears to be a priority
                # queue yielding (priority, tiebreak, event) tuples.
                while not self.events.empty():
                    _, _, event = self.events.get()

                    # Ignore events that are flagged as unscheduled.
                    if event.unscheduled is True:
                        self.events.task_done()
                        continue

                    # Some event desync clearly happened: an event popped with a
                    # timestamp earlier than the clock is logged but still processed.
                    if event.timestamp < self.current_time:
                        LOGGER.critical(
                            '[%s] %s %s timestamp %s before current timestamp',
                            self.current_iteration,
                            self.relative_timestamp,
                            event,
                            (event.timestamp - self.start_time).total_seconds()
                        )

                    # Update the simulation's current time to the latest event.
                    self.current_time = event.timestamp

                    if self.log_pops is True:
                        # Optional regex-style filter on the event class name.
                        if self.log_event_filter is None or self.log_event_filter.match(
                                event.__class__.__name__) is not None:
                            LOGGER.debug(
                                '[%s] <= %s %s',
                                self.current_iteration,
                                format(
                                    abs(event.timestamp - self.start_time).total_seconds(),
                                    '.3f'
                                ),
                                event
                            )

                    # Handle the event.
                    event.execute()

                    # NOTE(review): all_tasks_done is a Condition object and is
                    # always truthy, so this branch looks dead — verify intent.
                    if not self.events.all_tasks_done:  # type: ignore
                        self.events.task_done()

                # Build statistical dataframes for the completed iteration.
                for actor in self.actors:
                    auras_df = auras_df.append(pd.DataFrame(actor.statistics['auras']))
                    damage_df = damage_df.append(pd.DataFrame(actor.statistics['damage']))
                    resources_df = resources_df.append(
                        pd.DataFrame(actor.statistics['resources']))

                # Add the iteration runtime to the collection.
                iteration_runtimes.append(datetime.now() - iteration_start)

                # Categorical dtypes shrink memory for the repeated name columns.
                auras_df = auras_df.astype(dtype={
                    'aura': 'category',
                    'target': 'category',
                })
                damage_df = damage_df.astype(dtype={
                    'action': 'category',
                    'source': 'category',
                    'target': 'category',
                })

                # Update our fancy progress indicator with the runtime estimation.
                # (pd_runtimes was built at the top of this iteration, so the
                # mean lags one iteration behind.)
                spinner.label = 'Simulating ({0})'.format(
                    (pd_runtimes.mean() * (self.iterations - self.current_iteration)))
                spinner.step(iteration)

        LOGGER.info('Finished %s iterations in %s (mean %s).\n',
                    self.iterations, pd_runtimes.sum(), pd_runtimes.mean())
    except KeyboardInterrupt:
        # Handle SIGINT.
        # NOTE(review): pd_runtimes is unbound if the interrupt lands before
        # the first loop iteration — this would raise NameError; confirm.
        LOGGER.critical('Interrupted at %s / %s iterations after %s.\n',
                        self.current_iteration, self.iterations, pd_runtimes.sum())

    # TODO Everything.
    auras_df.set_index('iteration', inplace=True)
    damage_df.set_index('iteration', inplace=True)
    resources_df.set_index('iteration', inplace=True)

    TerminalReporter(self, auras=auras_df, damage=damage_df,
                     resources=resources_df).report()
    # HTMLReporter(self, df).report()

    LOGGER.info('Quitting!')