class ProgressUpdater:
    """Console progress callback for scraped items.

    Maintains a mapping from item type to a rich ``TaskID``.  When invoked
    as an item callback it looks up the task registered for the scraped
    item's type and advances it by one step.
    """

    def __init__(self, infinite=False):
        # An "infinite" run has no known total, so show a spinner plus a raw
        # item counter instead of a percentage bar with time estimates.
        if infinite:
            columns = (
                "[progress.description]{task.description}",
                TimeElapsedColumn(),
                TextColumn("{task.completed} items scraped"),
                SpinnerColumn(),
            )
        else:
            columns = (
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.percentage:>3.0f}%",
                TimeRemainingColumn(),
                "/",
                TimeElapsedColumn(),
            )
        self.progress = Progress(*columns)
        self.item_type_to_task = {}

    def add_task_for(self, item_type, *args, **kwargs) -> TaskID:
        """Create a progress task and associate it with *item_type*."""
        task = self.progress.add_task(*args, **kwargs)
        self.item_type_to_task[item_type] = task
        return task

    def __call__(self, item, spider):
        """Advance the task registered for ``type(item)``, if any."""
        task = self.item_type_to_task.get(type(item))
        if task is not None:
            self.progress.advance(task)
def make_progress() -> Progress:
    """Build a ``Progress`` pre-populated with tasks in assorted states.

    Rendering goes to an in-memory console with a deterministic clock so
    the produced output is fully reproducible.
    """
    _time = 0.0

    def fake_time():
        # Deterministic clock: yields 0.0, 1.0, 2.0, ... on successive calls.
        nonlocal _time
        current = _time
        _time += 1
        return current

    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        color_system="truecolor",
        width=80,
        legacy_windows=False,
    )
    progress = Progress(console=console, get_time=fake_time, auto_refresh=False)

    # A bare task, a partially-advanced task, and an invisible one.
    task1 = progress.add_task("foo")
    task2 = progress.add_task("bar", total=30)
    progress.advance(task2, 16)
    task3 = progress.add_task("baz", visible=False)

    # Add and immediately remove a task, then reuse the variable for a
    # not-yet-started task that is stopped, restarted, updated and stopped.
    task4 = progress.add_task("egg")
    progress.remove_task(task4)
    task4 = progress.add_task("foo2", completed=50, start=False)
    progress.stop_task(task4)
    progress.start_task(task4)
    progress.update(
        task4, total=200, advance=50, completed=200, visible=True, refresh=True
    )
    progress.stop_task(task4)
    return progress
def format_markdown(path):
    """Upload every local image referenced by a markdown file and rewrite links.

    Scans *path* for ``![...](...)`` and ``<img src="...">`` references,
    uploads each local file through ``post_img`` and replaces the local path
    with the returned URL.  Results are reported on ``res_table`` and
    ``qs_default_console``; remote (``http``) references are skipped.
    """
    import re
    from rich.progress import Progress
    from .. import user_root

    # Directory containing the markdown file, with a trailing separator.
    rt_path = dir_char.join(os.path.abspath(path).split(dir_char)[:-1]) + dir_char
    img_dict = {}  # local absolute path -> uploaded URL, or False on failure
    with open(path, 'r') as fp:
        ct = fp.read()
    # BUG FIX: raw strings -- '\[' in a non-raw string is an invalid escape
    # sequence (SyntaxWarning on modern Python).
    aims = re.findall(r'!\[.*?]\((.*?)\)', ct, re.M) + \
        re.findall(r'<img.*?src="(.*?)".*?>', ct, re.M)
    progress = Progress(console=qs_default_console)
    pid = progress.add_task(' Upload' if user_lang != 'zh' else ' 上传',
                            total=len(aims))
    progress.start()
    progress.start_task(pid)
    for aim in aims:
        if aim.startswith('http'):  # already remote -- nothing to upload
            qs_default_console.print(
                qs_warning_string, aim,
                'is not a local file' if user_lang != 'zh' else '非本地文件')
            progress.advance(pid, 1)
            continue
        raw_path = aim
        # Expand '~' and resolve relative references against the file's folder.
        aim = aim.replace('~', user_root)
        aim = aim if aim.startswith(dir_char) else get_path(rt_path, aim)
        if aim not in img_dict:
            qs_default_console.print(
                qs_info_string,
                'Start uploading:' if user_lang != 'zh' else '正在上传:', aim)
            res_dict = post_img(aim)
            if not res_dict:
                res_table.add_row(
                    aim.split(dir_char)[-1],
                    'No File' if user_lang != 'zh' else '无文件', '')
                img_dict[aim] = False
            else:
                try:
                    res_table.add_row(
                        aim.split(dir_char)[-1], str(res_dict['code']),
                        res_dict['msg'] if res_dict['code'] != 200 else (
                            res_dict['data']['url'] if res_dict['data']['url']
                            else plt_type + ' failed'))
                    img_dict[aim] = (res_dict['data']['url']
                                     if res_dict['code'] == 200 else False)
                except Exception:
                    # Unexpected payload shape: log it and record the failure.
                    qs_default_console.print(qs_error_string, res_dict)
                    res_table.add_row(aim.split(dir_char)[-1],
                                      str(res_dict['code']), res_dict['msg'])
                    img_dict[aim] = False
        if img_dict[aim]:
            qs_default_console.print(
                qs_info_string,
                'replacing img:' if user_lang != 'zh' else '替换路径',
                f'"{raw_path}" with "{img_dict[aim]}"')
            ct = ct.replace(raw_path, img_dict[aim])
        progress.advance(pid, 1)
    progress.stop()
    with open(path, 'w') as fp:
        fp.write(ct)
    qs_default_console.print(res_table, justify="center")
def make_progress() -> Progress:
    """Build a ``Progress`` with tasks in assorted states for render tests.

    Output is captured in an in-memory console so nothing is written to the
    real terminal.
    """
    console = Console(file=io.StringIO(), force_terminal=True)
    progress = Progress(console=console)
    task1 = progress.add_task("foo")
    # BUG FIX: ``30`` was previously passed positionally, where it landed in
    # the ``start`` parameter of ``add_task`` (signature is
    # ``add_task(description, start=True, total=100.0, ...)``), leaving the
    # total at its default of 100.  The intended total is 30.
    task2 = progress.add_task("bar", total=30)
    progress.advance(task2, 16)
    task3 = progress.add_task("baz", visible=False)
    task4 = progress.add_task("egg")
    progress.remove_task(task4)
    task4 = progress.add_task("foo2", completed=50, start=False)
    progress.start_task(task4)
    progress.update(
        task4, total=200, advance=50, completed=200, visible=True, refresh=True
    )
    return progress
# Renders two tasks through every built-in column type (bar, timers, file
# sizes, download/transfer, MofN) against a fixed-width in-memory console
# with a mocked clock, then compares the captured ANSI output byte-for-byte
# against a golden string.  NOTE(review): the golden ``expected`` string is
# terminal-escape soup by design; regenerate it via the printed repr rather
# than editing it by hand.
def test_columns() -> None: console = Console( file=io.StringIO(), force_terminal=True, width=80, log_time_format="[TIME]", color_system="truecolor", legacy_windows=False, log_path=False, _environ={}, ) progress = Progress( "test", TextColumn("{task.description}"), BarColumn(bar_width=None), TimeRemainingColumn(), TimeElapsedColumn(), FileSizeColumn(), TotalFileSizeColumn(), DownloadColumn(), TransferSpeedColumn(), MofNCompleteColumn(), MofNCompleteColumn(separator=" of "), transient=True, console=console, auto_refresh=False, get_time=MockClock(), ) task1 = progress.add_task("foo", total=10) task2 = progress.add_task("bar", total=7) with progress: for n in range(4): progress.advance(task1, 3) progress.advance(task2, 4) print("foo") console.log("hello") console.print("world") progress.refresh() from .render import replace_link_ids result = replace_link_ids(console.file.getvalue()) print(repr(result)) expected = "\x1b[?25ltest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kfoo\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2K\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0mhello \ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m 
\x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Kworld\ntest foo \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:07\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \x1b[32m 0/10\x1b[0m \x1b[32m 0 of 10\x1b[0m\ntest bar \x1b[38;5;237m━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[33m0:00:18\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[32m0/7 \x1b[0m \x1b[32m0 of 7 \x1b[0m\r\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m \x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \r\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:34\x1b[0m \x1b[32m12 \x1b[0m \x1b[32m10 \x1b[0m \x1b[32m12/10 \x1b[0m \x1b[31m1 \x1b[0m \x1b[32m12/10\x1b[0m \x1b[32m12 of 10\x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbyte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[33m0:00:29\x1b[0m \x1b[32m16 \x1b[0m \x1b[32m7 bytes\x1b[0m \x1b[32m16/7 \x1b[0m \x1b[31m2 \x1b[0m 
\x1b[32m16/7 \x1b[0m \x1b[32m16 of 7 \x1b[0m\n \x1b[32mbytes \x1b[0m \x1b[32mbytes \x1b[0m \x1b[31mbytes/s\x1b[0m \n\x1b[?25h\r\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K\x1b[1A\x1b[2K" assert result == expected
class RichLogger(Logger):
    """Search logger that renders progress and events with rich."""

    def __init__(self) -> None:
        self.console = autogoal.logging.console()
        self.logger = autogoal.logging.logger()

    def begin(self, generations, pop_size):
        """Create the per-generation and overall progress tasks."""
        self.progress = Progress(console=self.console)
        self.pop_counter = self.progress.add_task("Generation", total=pop_size)
        self.total_counter = self.progress.add_task(
            "Overall", total=pop_size * generations
        )
        self.progress.start()
        self.console.rule("Search starting", style="blue")

    def sample_solution(self, solution):
        """Advance both counters and announce the sampled pipeline."""
        self.progress.advance(self.pop_counter)
        self.progress.advance(self.total_counter)
        self.console.rule("Evaluating pipeline")
        self.console.print(repr(solution))

    def eval_solution(self, solution, fitness):
        self.console.print(Panel(f"📈 Fitness=[blue]{fitness:.3f}"))

    def error(self, e: Exception, solution):
        self.console.print(f"⚠️[red bold]Error:[/] {e}")

    def start_generation(self, generations, best_fn):
        # BUG FIX: this method was defined twice in the class body; the
        # second definition silently shadowed the first, so the generation
        # banner was never printed.  Both behaviors are merged here.
        self.console.rule(
            f"New generation - Remaining={generations} - Best={best_fn or 0:.3f}"
        )
        self.progress.update(self.pop_counter, completed=0)

    def update_best(self, new_best, new_fn, previous_best, previous_fn):
        self.console.print(
            Panel(
                f"🔥 Best improved from [red bold]{previous_fn or 0:.3f}[/] to [green bold]{new_fn:.3f}[/]"
            )
        )

    def end(self, best, best_fn):
        """Report the best pipeline found and shut the progress display down."""
        self.console.rule("Search finished")
        self.console.print(repr(best))
        self.console.print(Panel(f"🌟 Best=[green bold]{best_fn or 0:.3f}"))
        self.progress.stop()
        self.console.rule("Search finished", style="red")
class Run:
    """Sign-in runner that iterates configured accounts with a progress bar."""

    def __init__(self, all_users: bool):
        self.all_users = all_users
        self.progress = Progress(
            '[progress.description]{task.description}',
            BarColumn(),
            '{task.completed}/{task.total}',
        )

    @logger.catch
    def signin_loop(self):
        """Sign every selected account in, reporting progress per user."""
        with self.progress:
            # Either every known account or only the active ones.
            accounts = (
                AccountsManager.get_all_accounts()
                if self.all_users
                else AccountsManager.get_active_accounts()
            )
            if not accounts:
                sys.exit(
                    'No active users found. Please configure your users first.'
                )
            task_id = self.progress.add_task('signin_check', total=len(accounts))
            self.progress.start_task(task_id)
            user: Account
            for user in accounts:
                self.progress.update(task_id, description=f'{user.name}: sign-in...')
                ct8_user = CT8(user.name, user.password)
                result = ct8_user.sign_in_request()
                ct8_user.update_expiration_date()
                self.progress.console.print(result)
                self.progress.advance(task_id)
            self.progress.update(task_id, description=':100: Completed!')

    @logger.catch
    def main(self):
        self.signin_loop()
class ProgressDisplay(BaseDisplay):
    """Transfer display backed by a transient rich progress bar."""

    def start(self, *, total: Optional[float], at: float, description: str) -> None:
        # Imported lazily so rich is only required when a bar is shown.
        from rich.progress import (
            Progress,
            BarColumn,
            DownloadColumn,
            TimeRemainingColumn,
            TransferSpeedColumn,
        )

        assert total is not None
        self.console.print(f'[progress.description]{description}')
        columns = (
            '[',
            BarColumn(),
            ']',
            '[progress.percentage]{task.percentage:>3.0f}%',
            '(',
            DownloadColumn(),
            ')',
            TimeRemainingColumn(),
            TransferSpeedColumn(),
        )
        self.progress_bar = Progress(
            *columns,
            console=self.console,
            transient=True,
        )
        self.progress_bar.start()
        self.transfer_task = self.progress_bar.add_task(
            description, completed=at, total=total
        )

    def update(self, steps: float) -> None:
        self.progress_bar.advance(self.transfer_task, steps)

    def stop(self, time_spent: Optional[float]) -> None:
        self.progress_bar.stop()
        if not time_spent:
            return
        # Exactly one task was ever added, so unpack it directly.
        [task] = self.progress_bar.tasks
        self._print_summary(
            is_finished=task.finished,
            observed_steps=task.completed,
            time_spent=time_spent,
        )
# Renders two tasks through the file-size/download/transfer column set with
# an incrementing fake clock, then compares the captured ANSI stream against
# a golden string.  NOTE(review): the golden string embeds the log call-site
# ``test_progress.py:190``, so moving the ``progress.log`` line changes the
# expected output; regenerate via the printed repr rather than hand-editing.
def test_columns() -> None: time = 1.0 def get_time(): nonlocal time time += 1.0 return time console = Console( file=io.StringIO(), force_terminal=True, width=80, log_time_format="[TIME]", color_system="truecolor", ) progress = Progress( "test", TextColumn("{task.description}"), BarColumn(bar_width=None), TimeRemainingColumn(), FileSizeColumn(), TotalFileSizeColumn(), DownloadColumn(), TransferSpeedColumn(), console=console, auto_refresh=False, get_time=get_time, ) task1 = progress.add_task("foo", total=10) task2 = progress.add_task("bar", total=7) with progress: for n in range(4): progress.advance(task1, 3) progress.advance(task2, 4) progress.log("hello") progress.print("world") progress.refresh() result = console.file.getvalue() print(repr(result)) expected = 'test foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \r\x1b[2Ktest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \x1b[?25l\r\x1b[1A\x1b[2Ktest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \r\x1b[1A\x1b[2K\x1b[2;36m[TIME]\x1b[0m\x1b[2;36m \x1b[0mhello \x1b[2mtest_progress.py:190\x1b[0m\x1b[2m \x1b[0m\ntest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \ntest bar 
\x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \r\x1b[1A\x1b[2Kworld\ntest foo \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m0/10 bytes\x1b[0m \x1b[31m?\x1b[0m \ntest bar \x1b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m-:--:--\x1b[0m \x1b[32m0 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m0/7 bytes \x1b[0m \x1b[31m?\x1b[0m \r\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[32m12 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m12/10 bytes\x1b[0m \x1b[31m1 byte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[32m16 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m16/7 bytes \x1b[0m \x1b[31m2 bytes/s\x1b[0m \r\x1b[1A\x1b[2Ktest foo \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[32m12 bytes\x1b[0m \x1b[32m10 bytes\x1b[0m \x1b[32m12/10 bytes\x1b[0m \x1b[31m1 byte/s \x1b[0m \ntest bar \x1b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━\x1b[0m \x1b[36m0:00:00\x1b[0m \x1b[32m16 bytes\x1b[0m \x1b[32m7 bytes \x1b[0m \x1b[32m16/7 bytes \x1b[0m \x1b[31m2 bytes/s\x1b[0m \n\x1b[?25h' assert result == expected
def test_reset() -> None:
    """``reset()`` should zero progress history while applying overrides."""
    progress = Progress()
    task_id = progress.add_task("foo")
    # Advance in several steps to a total of 10 completed units.
    for step in (1, 1, 1, 7):
        progress.advance(task_id, step)
    task = progress.tasks[task_id]
    assert task.completed == 10

    progress.reset(
        task_id,
        total=200,
        completed=20,
        visible=False,
        description="bar",
        example="egg",
    )
    # Overrides are applied and the internal sample history is cleared.
    assert task.total == 200
    assert task.completed == 20
    assert not task.visible
    assert task.description == "bar"
    assert task.fields == {"example": "egg"}
    assert not task._progress
def run() -> bool:
    """Execute every registered test function with a live progress display.

    Each entry of ``test.functions`` maps a name to a tuple of
    ``(callable, expected_result, args)``.  Accepted/rejected/total bars are
    rendered inside a panel via ``Live``.

    Returns:
        True when every function produced its expected result, else False.
        (BUG FIX: the function was annotated ``-> bool`` but previously
        returned None.)
    """
    total_jobs = len(test.functions)  # hoisted: used for all three task totals
    share_progress = Progress(
        "{task.description}",
        SpinnerColumn(),
        BarColumn(),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"))
    accepted = share_progress.add_task("[green]Accepted", total=total_jobs)
    rejected = share_progress.add_task("[red]Rejected", total=total_jobs)
    total = share_progress.add_task("[blue]Total", total=total_jobs)
    progress_table = Table.grid()
    progress_table.add_row(
        Panel.fit(share_progress, title="[b]Jobs", border_style="red",
                  padding=(1, 2)))
    rejected_shares = []
    with Live(progress_table, refresh_per_second=10):
        for key in test.functions.keys():
            result = test.functions[key][0](*(test.functions[key][2] or []))
            expected = test.functions[key][1]
            if result == expected:
                share_progress.advance(accepted)
            else:
                rejected_shares.append(key)
                share_progress.advance(rejected)
            share_progress.advance(total)
    return not rejected_shares
# Worker that upscales one scene (frames [start_frame, end_frame]) of a video
# with an AI model and writes the result to "<start>_<end>.mp4".
#
# Two operating modes:
#   * normal mode: consecutive frames that ``are_same_imgs`` considers equal
#     reuse the previous upscaled frame (duplicate-frame detection, logged
#     and counted in ``total_duplicated_frames``);
#   * diff mode (``diff_mode``): frames are compared chunk-wise against a
#     reference frame via ``get_diff_frame``; only the diff is upscaled and
#     recombined with ``get_frame`` when that is cheaper than upscaling the
#     whole frame.
# ``deinterpaint`` paints alternating scanlines green before upscaling.
# Progress is reported on both the global ``task_upscaled_id`` task and a
# per-scene task; scene status/average fps are persisted into ``scenes_ini``.
# When ``multi_gpu`` is set, the per-device lock slot is released at the end.
# NOTE(review): frame pixel layout is assumed to match the "(B, G, R)"
# comment below -- confirm against the imageio reader configuration.
def video_thread_func( device: torch.device, num_lock: int, multi_gpu: bool, input: Path, start_frame: int, end_frame: int, num_frames: int, progress: Progress, task_upscaled_id: TaskID, ai_upscaled_path: Path, fps: int, quality: float, ffmpeg_params: str, deinterpaint: DeinterpaintOptions, diff_mode: bool, ssim: bool, min_ssim: float, chunk_size: int, padding_size: int, scale: int, upscale: Upscale, config: configparser.ConfigParser, scenes_ini: Path, ): log = logging.getLogger() video_reader: FfmpegFormat.Reader = imageio.get_reader( str(input.absolute())) start_time = time.process_time() last_frame = None last_frame_ai = None current_frame = None frames_diff: List[Optional[FrameDiff]] = [] video_reader.set_image_index(start_frame - 1) start_frame_str = str(start_frame).zfill(len(str(num_frames))) end_frame_str = str(end_frame).zfill(len(str(num_frames))) task_scene_desc = f'Scene [green]"{start_frame_str}_{end_frame_str}"[/]' if multi_gpu and len(upscale.devices) > 1: if device.type == "cuda": device_name = torch.cuda.get_device_name(device.index) else: device_name = "CPU" task_scene_desc += f" ({device_name})" task_scene_id = progress.add_task( description=task_scene_desc, total=end_frame - start_frame + 1, completed=0, refresh=True, ) video_writer_params = {"quality": quality, "macro_block_size": None} if ffmpeg_params: if "-crf" in ffmpeg_params: del video_writer_params["quality"] video_writer_params["output_params"] = ffmpeg_params.split() video_writer: FfmpegFormat.Writer = imageio.get_writer( str( ai_upscaled_path.joinpath( f"{start_frame_str}_{end_frame_str}.mp4").absolute()), fps=fps, **video_writer_params, ) duplicated_frames = 0 total_duplicated_frames = 0 for current_frame_idx in range(start_frame, end_frame + 1): frame = video_reader.get_next_data() if deinterpaint is not None: for i in range( 0 if deinterpaint == DeinterpaintOptions.even else 1, frame.shape[0], 2): frame[i:i + 1] = (0, 255, 0) # (B, G, R) if not diff_mode: if last_frame is not None 
and are_same_imgs( last_frame, frame, ssim, min_ssim): frame_ai = last_frame_ai if duplicated_frames == 0: start_duplicated_frame = current_frame_idx - 1 duplicated_frames += 1 else: frame_ai = upscale.image(frame, device, multi_gpu_release_device=False) if duplicated_frames != 0: start_duplicated_frame_str = str( start_duplicated_frame).zfill(len(str(num_frames))) current_frame_idx_str = str(current_frame_idx - 1).zfill( len(str(num_frames))) log.info( f"Detected {duplicated_frames} duplicated frame{'' if duplicated_frames==1 else 's'} ({start_duplicated_frame_str}-{current_frame_idx_str})" ) total_duplicated_frames += duplicated_frames duplicated_frames = 0 video_writer.append_data(frame_ai) last_frame = frame last_frame_ai = frame_ai progress.advance(task_upscaled_id) progress.advance(task_scene_id) else: if current_frame is None: current_frame = frame else: frame_diff = get_diff_frame(current_frame, frame, chunk_size, padding_size, ssim, min_ssim) if ( frame_diff is None ): # the frame is equal to current_frame, the best scenario!!! 
frames_diff.append(frame_diff) else: h_diff, w_diff, c_diff = frame_diff.frame.shape h, w, c = current_frame.shape if w * h > w_diff * h_diff: # TODO difference of size > 20% frames_diff.append(frame_diff) else: current_frame_ai = upscale.image( current_frame, device, multi_gpu_release_device=False) video_writer.append_data(current_frame_ai) progress.advance(task_upscaled_id) progress.advance(task_scene_id) current_frame = frame for frame_diff in frames_diff: if frame_diff is None: frame_ai = current_frame_ai else: diff_ai = upscale.image( frame_diff.frame, device, multi_gpu_release_device=False, ) frame_diff_ai = frame_diff frame_diff_ai.frame = diff_ai frame_ai = get_frame( current_frame_ai, frame_diff_ai, scale, chunk_size, padding_size, ) video_writer.append_data(frame_ai) progress.advance(task_upscaled_id) progress.advance(task_scene_id) frames_diff = [] if diff_mode: if len(frames_diff) > 0: current_frame_ai = upscale.image(current_frame, device, multi_gpu_release_device=False) video_writer.append_data(current_frame_ai) progress.advance(task_upscaled_id) progress.advance(task_scene_id) for frame_diff in frames_diff: if frame_diff is None: frame_ai = current_frame else: diff_ai = upscale.image(frame_diff.frame, device, multi_gpu_release_device=False) frame_diff_ai = frame_diff frame_diff_ai.frame = diff_ai frame_ai = get_frame( current_frame_ai, frame_diff_ai, scale, chunk_size, padding_size, ) video_writer.append_data(frame_ai) progress.advance(task_upscaled_id) progress.advance(task_scene_id) current_frame = None frames_diff = [] elif current_frame is not None: current_frame_ai = upscale.image(current_frame, device, multi_gpu_release_device=False) video_writer.append_data(current_frame_ai) progress.advance(task_upscaled_id) progress.advance(task_scene_id) if duplicated_frames != 0: start_duplicated_frame_str = str(start_duplicated_frame).zfill( len(str(num_frames))) current_frame_idx_str = str(current_frame_idx - 1).zfill( len(str(num_frames))) log.info( 
f"Detected {duplicated_frames} duplicated frame{'' if duplicated_frames==1 else 's'} ({start_duplicated_frame_str}-{current_frame_idx_str})" ) total_duplicated_frames += duplicated_frames duplicated_frames = 0 video_writer.close() task_scene = next(task for task in progress.tasks if task.id == task_scene_id) config.set(f"{start_frame_str}_{end_frame_str}", "upscaled", "True") config.set( f"{start_frame_str}_{end_frame_str}", "duplicated_frames", f"{total_duplicated_frames}", ) finished_speed = task_scene.finished_speed or task_scene.speed or 0.01 config.set( f"{start_frame_str}_{end_frame_str}", "average_fps", f"{finished_speed:.2f}", ) with open(scenes_ini, "w") as configfile: config.write(configfile) log.info( f"Frames from {str(start_frame).zfill(len(str(num_frames)))} to {str(end_frame).zfill(len(str(num_frames)))} upscaled in {precisedelta(dt.timedelta(seconds=time.process_time() - start_time))}" ) if total_duplicated_frames > 0: total_frames = end_frame - (start_frame - 1) seconds_saved = (((1 / finished_speed * total_frames) - ( total_duplicated_frames * 0.04) # 0.04 seconds per duplicate frame ) / (total_frames - total_duplicated_frames) * total_duplicated_frames) log.info( f"Total number of duplicated frames from {str(start_frame).zfill(len(str(num_frames)))} to {str(end_frame).zfill(len(str(num_frames)))}: {total_duplicated_frames} (saved ≈ {precisedelta(dt.timedelta(seconds=seconds_saved))})" ) progress.remove_task(task_scene_id) if multi_gpu: upscale.devices[device][num_lock].release()
# Dashboard for per-repository GitHub traffic collection: a three-panel
# Table.grid (info placeholder / profile summary / progress) plus three
# progress tasks -- overall (numStat * numRepos steps), current repository,
# and current stat.  The Update*/Step*/Reset*/Complete* methods are thin
# wrappers around ``Progress.update`` / ``advance`` / ``reset``.
# NOTE(review): method names are PascalCase (non-PEP 8) -- presumably the
# surrounding project's convention; kept as the public interface.
class TrafficProgress: def __init__( self, numRepos: int, follower: int = 0, following: int = 0, numStat: int = 5, ) -> None: self.numStat = numStat self.numRepos = numRepos self._profileText = Text( f"{follower:03d} Follower\n{following:03d} Following\n{numRepos:03d} Public Repositories" ) self.progressTable = Table.grid(expand=True) self.progressTotal = Progress( "{task.description}", SpinnerColumn(), BarColumn(), TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), ) self.progressTable.add_row( Panel( Align.center(Text( """Placeholder""", justify="center", )), title="[b]Info", border_style="red", padding=(1, 1), ), Panel( Align.center(self._profileText), title="[b]Profile Info", border_style="yellow", padding=(1, 1), ), Panel( self.progressTotal, # type:ignore title="[b]Total Progress", border_style="green", padding=(1, 2), ), ) self.taskTotal = self.progressTotal.add_task(description="Progress", total=numStat * numRepos) self.taskRepo = self.progressTotal.add_task( description="Repository [bold yellow]#", total=numRepos) self.taskStat = self.progressTotal.add_task( description="Stat [bold violet]#", total=numStat) def UpdateRepoDescription(self, repo: str): self.progressTotal.update( self.taskRepo, description=f"Repository [bold yellow]#{repo}") def UpdateStatDescription(self, stat: str): self.progressTotal.update(self.taskStat, description=f"Stat [bold violet]#{stat}") def StepTotal(self): self.progressTotal.advance(self.taskTotal) def StepRepo(self): self.progressTotal.advance(self.taskRepo) def StepStat(self): self.progressTotal.advance(self.taskStat) def ResetStatProgress(self): self.progressTotal.reset(self.taskStat) def CompleteStat(self): self.progressTotal.reset( self.taskStat, description="Stat [bold violet]#Completed", completed=self.numStat, )
# FOFA search client: batches targets into ``||``-joined queries (to avoid
# per-target rate limiting), runs the blocking HTTP requests through a
# thread pool driven by an asyncio loop, extracts candidate web services
# from the results, then verifies liveness with aiohttp under a semaphore.
# Progress for both phases is rendered on a shared rich Progress.
# NOTE(review): ``is_life`` reuses ``self.fofa_progress_bar`` for the
# verification task -- confusing naming, but harmless since the search
# phase is complete by then.
class Fofa: def __init__(self, targets, fofa_result): super(Fofa, self).__init__() self.email = fofaApi['email'] self.key = fofaApi['key'] self.fofa_result = fofa_result self.targets = targets self.result_urls = [] # web services returned by the FOFA search self.urls_list = [] # de-duplicated live URLs self.life_urls = [] # web services verified to be alive self.urls = [] # FOFA query URLs, consumed by the async coroutines self.count = 30 # how many targets per single FOFA query self.session = conn_pool() # use a connection pool self.headers = { "Cache-Control": "max-age=0", "User-Agent": random.choice(USER_AGENTS), "Upgrade-Insecure-Requests": "1", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", } self.process = Progress( "[progress.description]{task.description}", BarColumn(), "[progress.percentage]{task.percentage:>3.1f}%", "•", "[bold green]{task.completed}/{task.total}", "•", TransferSpeedColumn(), "•", TimeRemainingColumn(), transient=True, # hide the progress bar once it reaches 100% ) self.fofa_progress_bar = self.process.add_task("[cyan]FOFA search...", total=len(self.targets)) self.web_progress_bar = None def run(self): try: with self.process: self.target_formatting() # build the FOFA query URLs loop = asyncio.get_event_loop() loop.run_until_complete(self.fetch_all(loop)) # run the FOFA searches self.session.close() self.is_life() # verify that the web services found by FOFA are reachable except Exception as e: logger.log("ERROR", e) return self.life_urls # To avoid getting the IP banned by FOFA for querying too fast, targets are split into groups of 30 and joined with the `||` syntax so several are queried at once. def target_formatting(self): for i in range(0, len(self.targets), self.count): keyword = '' targets = self.targets[i:i + self.count] for host in targets: host = host.replace('\n', '').replace('\r', '').strip() keyword += f'"{host}" || ' keyword = keyword[:-4] # strip the trailing ` || ` keywordsBs = base64.b64encode(keyword.encode('utf-8')) keywordsBs = keywordsBs.decode('utf-8') url = "https://fofa.so/api/v1/search/all?email={0}&key={1}&qbase64={2}&full=true&fields=ip,title,port,domain,protocol,host,country,header&size={3}".format( self.email, self.key, keywordsBs, 
fofaSize) self.urls.append(url) # callback: refresh the progress bar def callback(self, future, progress_bar, count): self.process.advance(progress_bar, advance=count) async def fetch_all(self, loop): # loop = asyncio.get_event_loop() # asyncio.set_event_loop(loop) tasks = [] # aiohttp turned out not to support https proxies, so loop.run_in_executor() is used to run the blocking requests library instead # async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False), headers=headers) as session: threads = ThreadPoolExecutor(10) for url in self.urls: # task = asyncio.ensure_future(self.fetch(session, url, sem)) task = loop.run_in_executor(threads, self.fetch, url) task.add_done_callback( partial(self.callback, progress_bar=self.fofa_progress_bar, count=self.count)) tasks.append(task) if platform.system() != "Windows": import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) await asyncio.wait(tasks) def fetch(self, url): try: self.session.headers = self.headers # self.session.proxies = { # "https": "http://127.0.0.1:8080" # } response = self.session.get(url, timeout=10) if response.status_code == 200: datas = json.loads(response.text) # when the query result reports no error if not datas['error']: self.target_info(datas['results']) else: logger.log("ERROR", f'fofa 查询失败,{response.status_code }') except Exception as e: logger.log("ERROR", e) pass def target_info(self, datas): for data in datas: # ip,title,port,domain,protocol,host,country,header # ['127.0.0.1', 'Welcome to CentOS', '443', '', '', '127.0.0.1:443', 'CN', 'HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 4833\r\nAccept-Ranges: bytes\r\nContent-Type: text/html\r\nDate: Sun, 22 Nov 2020 10:40:22 GMT\r\nEtag: "53762af0-12e1"\r\nLast-Modified: Fri, 16 May 2014 15:12:48 GMT\r\nServer: nginx/1.16.1'] # keep only results from the configured country (default CN) if data[6] == fofaCountry: # if data[4] == "http" or data[4] == "https" or "http" in data[5]: if 'HTTP/1.' 
in data[7]: if "http://" in data[5] or "https://" in data[5]: url = data[5] elif not data[4]: url = "http://{1}".format(data[4], data[5]) else: url = "{0}://{1}".format(data[4], data[5]) self.result_urls.append(url) async def crawler(self, url, semaphore): async with semaphore: try: async with aiohttp.ClientSession( connector=aiohttp.TCPConnector(ssl=False), headers=self.headers) as session: async with session.get(url, timeout=6) as resp: if url in self.urls_list or url in fofa_list: # already seen return fofa_list.append(url) text = await resp.text() m = re.search('<title>(.*?)</title>', text) title = m.group(1) if m else '' status = resp.status if status == 200 or status == 404 or status == 403: self.urls_list.append(url) self.life_urls.append((url, title)) self.fofa_result.put((url, title)) except Exception: pass # filter for live web services def is_life(self): if len(self.result_urls) == 0: return self.fofa_progress_bar = self.process.add_task( "[cyan]FOFA Web results verify valid...", total=len(self.result_urls)) loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) sem = asyncio.Semaphore(2000) # limit concurrency for url in self.result_urls: task = loop.create_task(self.crawler(url, sem)) task.add_done_callback( partial(self.callback, progress_bar=self.fofa_progress_bar, count=1)) tasks.append(task) loop.run_until_complete(asyncio.wait(tasks))
# NOTE(review): iterating a TimeLoop drives a rich Progress whose single
# task totals ``duration`` in seconds; each ``__next__`` advances it by the
# wall-clock time since the previous tick (via pendulum.now()), so tick
# spacing depends on how fast the caller's loop body runs.  ``clear()`` is
# idempotent and also invoked from ``__del__``.
class TimeLoop: """ A special iterator that will iterate for a specified duration of time. Uses a progress meter to show the user how much time is left. Each iteration of the time-loop produces a tick. """ advent: Optional[pendulum.DateTime] moment: Optional[pendulum.DateTime] last_moment: Optional[pendulum.DateTime] counter: int progress: Optional[Progress] duration: pendulum.Duration message: str color: str def __init__( self, duration: Union[pendulum.Duration, int], message: str = "Processing", color: str = "green", ): """ Initialize the time-loop. Duration may be either a count of seconds or a ``pendulum.duration``. """ self.moment = None self.last_moment = None self.counter = 0 self.progress = None if isinstance(duration, int): JobbergateCliError.require_condition( duration > 0, "The duration must be a positive integer") self.duration = pendulum.duration(seconds=duration) else: self.duration = duration self.message = message self.color = color def __del__(self): """ Explicitly clear the progress meter if the time-loop is destroyed. """ self.clear() def __iter__(self) -> "TimeLoop": """ Start the iterator. Creates and starts the progress meter """ self.advent = self.last_moment = self.moment = pendulum.now() self.counter = 0 self.progress = Progress() self.progress.add_task( f"[{self.color}]{self.message}...", total=self.duration.total_seconds(), ) self.progress.start() return self def __next__(self) -> Tick: """ Iterates the time loop and returns a tick. If the duration is complete, clear the progress meter and stop iteration. 
""" # Keep mypy happy assert self.progress is not None self.counter += 1 self.last_moment = self.moment self.moment: pendulum.DateTime = pendulum.now() elapsed: pendulum.Duration = self.moment - self.last_moment total_elapsed: pendulum.Duration = self.moment - self.advent for task_id in self.progress.task_ids: self.progress.advance(task_id, elapsed.total_seconds()) if self.progress.finished: self.clear() raise StopIteration return Tick( counter=self.counter, elapsed=elapsed, total_elapsed=total_elapsed, ) def clear(self): """ Clear the time-loop. Stops the progress meter (if it is set) and reset moments, counter, progress meter. """ if self.progress is not None: self.progress.stop() self.counter = 0 self.progress = None self.moment = None self.last_moment = None
def main(self):
    """Entry point: prepare targets for each input file, then scan them with a process pool.

    Side effects: spawns a report-writing thread, a target-preparation
    process and a pool of scan workers; drives a rich progress bar; finally
    sets ``setting.stop_me`` to signal shutdown.
    """
    q_targets = multiprocessing.Manager().Queue()  # targets Queue
    q_targets_list = []
    q_results = multiprocessing.Manager().Queue()  # results Queue
    fofa_result = multiprocessing.Manager().Queue()  # results Queue
    # Scan processes may only begin exiting once target preparation is done
    process_targets_done = multiprocessing.Value('i', 0)
    for input_file in self.input_files:
        # Read the targets: either from the --host option or from a file/dir
        if self.host:
            target_list = self.host.replace(',', ' ').strip().split()
        elif self.file or self.dire:
            with open(input_file, encoding='UTF-8', errors='ignore') as inFile:
                target_list = list(set(inFile.readlines()))
        try:
            import threading
            # Generate the report in real time
            target_count = len(target_list)  # number of targets
            # Produce the report and manage standard output
            threading.Thread(
                target=save_report,
                args=(self, q_results, input_file, target_count)).start()
            clear_queue(q_results)
            clear_queue(q_targets)
            process_targets_done.value = 0
            start_time = time.time()
            p = multiprocessing.Process(
                target=prepare_targets,
                args=(target_list, q_targets, self, fofa_result))
            p.daemon = True
            p.start()
            # join blocks the current thread: after p.start(), wait for the
            # preparation process to finish before continuing
            p.join()
            logger.log(
                'INFOR',
                f'All preparations have been completed and it took %.1f seconds!'
                % (time.time() - start_time))
            # Size the process pool (originally based on the CPU core count)
            # count = multiprocessing.cpu_count()
            count = 30
            # For few targets, create at most 2x as many scan processes
            if len(target_list) * 2 < count:
                count = len(target_list) * 2
            if self.fofa and fofa_result.qsize() > 0:
                # Save the fofa search results
                save_fofa(self, fofa_result, input_file)
            # Drain the prepared-targets queue into a plain list
            while True:
                if not q_targets.empty():
                    q_targets_list.append(q_targets.get())
                else:
                    break
            # q_targets.get() {'scheme': 'https', 'host': '127.0.0.1', 'port': 443, 'path': '', 'ports_open': [80, 443], 'is_neighbor': 0}
            progress = Progress(
                "[progress.description]{task.description}",
                BarColumn(),
                "[progress.percentage]{task.percentage:>3.1f}%",
                "•",
                "[bold green]{task.completed}/{task.total}",
                transient=True,  # hide the bar once it reaches 100%
            )
            with progress:
                targets = []
                for target in q_targets_list:
                    tmp = [target, q_results, self]
                    targets.append(tmp)
                # NOTE(review): the task is added with start=False and never
                # explicitly started; completion still advances but timing is
                # never tracked — confirm this is intended.
                progress_bar = progress.add_task(
                    "[cyan]Leak detection...", total=len(targets), start=False)
                with multiprocessing.Pool(processes=count) as pool:
                    results = pool.imap_unordered(scan_process, targets)
                    # Consuming the iterator drives the workers; advance the
                    # bar once per finished target
                    for result in results:
                        # progress.print(result)
                        progress.advance(progress_bar)
                    pool.close()
                    pool.join()
            cost_time = time.time() - start_time
            cost_min = int(cost_time / 60)
            cost_min = '%s min ' % cost_min if cost_min > 0 else ''
            cost_seconds = '%.1f' % (cost_time % 60)
            logger.log(
                'INFOR',
                f'Scanned {len(q_targets_list)} targets in {cost_min}{cost_seconds} seconds.'
            )
        except Exception as e:
            logger.log('FATAL', f'[__main__.exception] %s' % repr(e))
            import traceback
            logger.log('FATAL', traceback.format_exc())
    setting.stop_me = True
class PortScan(object):
    """Asynchronous TCP connect-scan of every host in *targets* against *port_list*.

    Results accumulate in ``self.open_list`` as {host: [open ports]}; a rich
    progress bar tracks host*port attempts.
    """

    def __init__(self, targets, port_list, rate=2000, timeout=3):
        super(PortScan, self).__init__()
        self.targets = targets
        self.hosts = []
        self.rate = rate  # concurrency limit
        self.timeout = timeout
        self.open_list = {}
        self.port_list = port_list  # ports to scan
        self.process = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "•",
            "[bold green]{task.completed}/{task.total}",
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
            transient=True,  # hide the bar once it reaches 100%
        )
        self.progress_bar = self.process.add_task(
            "[cyan]port scan...",
            total=len(self.targets) * len(self.port_list))

    async def async_port_check(self, semaphore, host_port):
        """Try one TCP connect; return (host, port, 'open'|'close')."""
        async with semaphore:
            host, port = host_port
            try:
                conn = asyncio.open_connection(host, port)
                reader, writer = await asyncio.wait_for(conn, timeout=self.timeout)
                # BUGFIX: close the transport via the StreamWriter. The
                # original called close() on the (already consumed) coroutine
                # object, which is a no-op and leaked the socket.
                writer.close()
                return host, port, 'open'
            except Exception:
                return host, port, 'close'

    def callback(self, future):
        """Done-callback: advance the progress bar and record open ports."""
        host, port, status = future.result()
        self.process.advance(self.progress_bar, advance=1)
        if status == "open":
            # setdefault replaces the original if/else + needless broad except
            self.open_list.setdefault(host, []).append(port)

    def async_tcp_port_scan(self):
        """Scan all (host, port) pairs; return {host: [open ports]}.

        URLs with a scheme (e.g. https://127.0.0.1) are not supported
        directly, so each target is normalized to a bare host first.
        """
        for url in self.targets:
            host, scheme = get_host(url)
            self.hosts.append(host)
        host_port_list = [(host, int(port))
                          for host in self.hosts for port in self.port_list]
        # BUGFIX: removed leftover debug print(host_port_list)
        sem = asyncio.Semaphore(self.rate)  # concurrency limit
        loop = asyncio.get_event_loop()
        # Shuffle so probes hit hosts/ports in random order
        random.shuffle(host_port_list)
        tasks = list()
        with self.process:
            for host_port in host_port_list:
                task = asyncio.ensure_future(
                    self.async_port_check(sem, host_port))
                task.add_done_callback(self.callback)
                tasks.append(task)
            if platform.system() != "Windows":
                import uvloop
                asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
            loop.run_until_complete(asyncio.wait(tasks))
        return self.open_list
) job1 = job_progress.add_task("[green]Cooking") job2 = job_progress.add_task("[magenta]Baking", total=200) job3 = job_progress.add_task("[cyan]Mixing", total=400) total = sum(task.total for task in job_progress.tasks) overall_progress = Progress() overall_task = overall_progress.add_task("All Jobs", total=int(total)) progress_table = Table.grid() progress_table.add_row( Panel.fit(overall_progress, title="Overall Progress", border_style="green", padding=(2, 2)), Panel.fit(job_progress, title="[b]Jobs", border_style="red", padding=(1, 2)), ) with Live(progress_table, refresh_per_second=10): while not overall_progress.finished: sleep(0.1) for job in job_progress.tasks: if not job.finished: job_progress.advance(job.id) completed = sum(task.completed for task in job_progress.tasks) overall_progress.update(overall_task, completed=completed)
class PortScan(object): def __init__(self, targets, rate=2000, timeout=3): super(PortScan, self).__init__() self.targets = targets self.hosts = [] self.rate = rate # 限制并发量 self.timeout = timeout self.result = [] self.process = Progress( "[progress.description]{task.description}", BarColumn(), "[progress.percentage]{task.percentage:>3.1f}%", "•", "[bold green]{task.completed}/{task.total}", "•", TransferSpeedColumn(), "•", TimeRemainingColumn(), transient=True, # 100%后隐藏进度条 ) self.progress_bar = self.process.add_task("[cyan]port scan...", total=len(self.targets)) async def async_port_check(self, semaphore, target): # target ('127.0.0.1', 8080, 'http', '/', 8080) async with semaphore: host, port = target[0], target[1] try: conn = asyncio.open_connection(host, port) reader, writer = await asyncio.wait_for(conn, timeout=self.timeout) conn.close() # '127.0.0.1 80' open 'unknown' '/test.html' 80 return host, port, 'open', target[2], target[3], target[4] except Exception: conn.close() return host, port, 'close', target[2], target[3], target[4] # 回调函数,更新进度条,存储开放的端口 def callback(self, future): # future.result() '127.0.0.1' 80 open 'unknown' '/test.html' 80 result = future.result() self.process.advance(self.progress_bar, advance=1) if result[2] == "open": self.result.append(result) else: pass def async_tcp_port_scan(self): try: sem = asyncio.Semaphore(self.rate) # 限制并发量 loop = asyncio.get_event_loop() # self.targets [('127.0.0.1', 8080, 'http', '/', 8080), ('www.baidu.cn', 80, 'unknown', '/', 80), ('www.baidu.cn', 443, 'unknown', '/', 443)] # 打乱一下,随机排序 random.shuffle(self.targets) tasks = list() with self.process: for target in self.targets: task = asyncio.ensure_future( self.async_port_check(sem, target)) task.add_done_callback(self.callback) tasks.append(task) if platform.system() != "Windows": import uvloop asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) loop.run_until_complete(asyncio.wait(tasks)) except Exception: pass return self.result
def hook_advance(progress: Progress, task: TaskID): return lambda _: progress.advance(task_id=task)
def __folder_thread_func( self, img: np.ndarray, zip_fs: zipfs.WriteZipFS, img_output_path_rel: Path, device: torch.device = None, task_upscaling: TaskID = None, progress: Progress = None, progress_text: str = "", output_zip_path: Path = None, ): # img = self.image(img, device, progress, progress_text) img = self.image(img, device) if device.type == "cuda": device_name = torch.cuda.get_device_name(device.index) else: device_name = "CPU" self.log.info( f'Upscaling "{img_output_path_rel.name}" using "{device_name}"') if self.imagemagick: img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA) img = WandImage.from_array(img) if self.jpg: # https://developers.google.com/speed/docs/insights/OptimizeImages # img.format = "jpg" img.sampling_factors = "4:2:0" img.interlace_scheme = "jpeg" img.colorspace = "srgb" else: img.format = "png" img.strip() if self.resize != 100: img.transform(resize=f"{self.resize}%") else: if self.resize != 100: width = int(img.shape[1] * self.resize / 100) height = int(img.shape[0] * self.resize / 100) img = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA) if self.zip: if self.imagemagick: buffer = io.BytesIO() img.save(file=buffer) buffer.seek(0) else: is_success, buffer = cv2.imencode( ".jpg" if self.jpg else ".png", img) buffer = io.BytesIO(buffer) img_output_path_rel = img_output_path_rel.relative_to( output_zip_path.parent) if not zip_fs.isdir(img_output_path_rel.parent.as_posix()): zip_fs.makedirs(img_output_path_rel.parent.as_posix()) zip_fs.writefile( img_output_path_rel.as_posix(), buffer, ) else: if self.imagemagick: img.save(filename=str(img_output_path_rel.absolute())) else: cv2.imwrite(str(img_output_path_rel.absolute()), img) progress.advance(task_upscaling)
from rich.panel import Panel from rich.progress import Progress JOBS = [100, 150, 25, 70, 110, 90] progress = Progress(auto_refresh=False) master_task = progress.add_task("overall", total=sum(JOBS)) jobs_task = progress.add_task("jobs") progress.console.print( Panel( "[bold blue]A demonstration of progress with a current task and overall progress.", padding=1, ) ) with progress: for job_no, job in enumerate(JOBS): progress.log(f"Starting job #{job_no}") sleep(0.2) progress.reset(jobs_task, total=job, description=f"job [bold yellow]#{job_no}") progress.start_task(jobs_task) for wait in progress.track(range(job), task_id=jobs_task): sleep(0.01) progress.advance(master_task, job) progress.log(f"Job #{job_no} is complete") progress.log( Panel(":sparkle: All done! :sparkle:", border_style="green", padding=1) )