Example #1
def init(args):
    if os.path.exists(args.path):
        rprint('[bold red]Path already exists![/bold red]')
        return

    rprint(f'Creating notes directory in: [bold blue]{args.path}[/bold blue]')
    os.mkdir(args.path)
Example #2
def runSchedulerInspect():
    """
    Starts Scheduler task to check unverified tokens
    """
    cease_operation = threading.Event()

    class tokenCheckThread(threading.Thread):
        """
        Thread to run the checks
        """
        @classmethod
        def run(cls):
            """
            Runs pending scheduled jobs every second until cease_operation is set.
            """
            rprint(
                "[black][Schedule][/black][bold green]Starting operation[/bold green]"
            )
            while not cease_operation.is_set():
                schedule.run_pending()
                sleep(1)

    thread = tokenCheckThread()
    thread.daemon = True
    rprint("[black][Schedule][/black][red]Starting Schedule Thread...[/red]")
    thread.start()
    return cease_operation
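
A minimal usage sketch for the function above (the scheduled job and timings are illustrative, not taken from the original project):

import schedule
from time import sleep

# register a job; runSchedulerInspect() only drives schedule.run_pending()
schedule.every(30).seconds.do(lambda: print("checking unverified tokens"))

stop_event = runSchedulerInspect()  # the threading.Event that controls the loop
sleep(90)                           # let the daemon thread run a few iterations
stop_event.set()                    # the thread exits on its next loop check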
Example #3
    def do_disasm(self, opt):
        num = 10
        front_num = 3
        if opt.count:
            num = opt.count
        if opt.frontcount:
            front_num = opt.frontcount

        pc = self.executor.vm_context.code.pc + 1
        current_insn_index = 0

        for i, k in enumerate(self.executor.disasm_dict):
            if k >= pc:
                current_insn_index = i
                break

        begin_index = max(current_insn_index - front_num, 0)
        show_disasm_list = list(
            self.executor.disasm_dict.values())[begin_index:]
        index = 0

        for insn in show_disasm_list:
            if index >= num + 1:
                break
            rprint(f'[{insn.pc}] {insn.byte} {insn.mnemonic} {insn.imm_op}')
            index += 1
Example #4
def report_on_missings(missings, path):
    """
    Summary report on potentially unacknowledged contributors.

    Args:
        missings: list; Names of users
        path: Path; repository to inspect (git is run with -C when it differs
            from the current working directory)

    Returns:
        None
    """
    no = len(missings)
    if no == 0:
        console.print(
            "\n:clap: Wonderful! I did not find commits of yet unacknowledged"
            "contributors! :dizzy: \n\n",
            style=welcome)
    else:
        console.print(
            "\n:point_down: [underline]Number of potentially unacknowledged "
            "contributors to this repository:",
            style=attention)
        rprint(rainbow("{}\n".format(no)))
        table = Table(show_header=True, header_style="bold magenta")
        table.add_column("Contributor", style='dim', width=12)
        table.add_column("Their contribution")
        for i, name in enumerate(missings):
            author = '--author={}'.format(name)

            cmd = ["git", "shortlog", author]
            if path != Path.cwd():
                # ... we need to modify the git call
                cmd = ["git", "-C", str(path), "shortlog", author]
            commits = run(cmd, stdout=PIPE).stdout.decode().splitlines()[1:]
            table.add_row(name + '\n',
                          '\n'.join(commits).replace('  ', '') + '\n')
        console.print(table)
Example #5
def cache(namespace):
    """
    Outputs the stevedore entrypoints cache from which plugins are loaded.
    """
    from stevedore._cache import _c

    data = _c._get_data_for_path(None)

    tree = Tree("Entrypoints")
    for group, entry_points in data.get("groups").items():
        if namespace and group != namespace:
            continue
        node = tree.add(f"[bold]{group}")

        t = Table()
        t.add_column("Name")
        t.add_column("Value")

        for key, value, _ in entry_points:
            t.add_row(key, value)

        node.add(t)

        if namespace:
            rprint(t)
            return

    rprint(tree)
Example #6
def search(query: str, cache: bool = True, kind="table"):
    """search for videos with query on youtube."""
    try:
        typer.secho(f"searching for videos: {query}...", fg=typer.colors.GREEN)
        if kind == "json":
            content = main(query, cache, kind="json")
            rprint(content)
        elif kind == "table":
            content = main(query, False, kind="list")
            table = Table(title="Search Results")
            table.add_column("Title",
                             justify="right",
                             style="cyan",
                             no_wrap=False)
            table.add_column("Video URL", style="magenta", no_wrap=True)
            table.add_column("Duration", style="green")
            table.add_column("View count", style="green")
            table.add_column("Published Time", style="green")
            for c in content:
                table.add_row(
                    " ".join(c["title"].split(" ")[:4]),
                    c["url"],
                    c["duration"],
                    str(c["view_count"]),
                    c["publish_time"],
                )
            console = Console()
            console.print(table)
    except YtsException:
        typer.echo("internal API raised an exception; failed to get a response from youtube")
        raise typer.Abort()
Example #7
def start_backup(group):
    vcenter_id = config['vcenter']['id']
    base_url = config['api']['base_url']
    backup_api_path = '/backups/vmware/vms'
    url = (base_url + backup_api_path)
    headers = headers_func()
    df = pd.read_csv('vms.csv')
    print(df)
    vm_list = ast.literal_eval(config.get("backup_groups", group))
    rprint(
        Panel('[green]START: Attempting to Start on-demand backup for group ' +
              group + ' via Clumio API',
              title="EXECUTION STATUS"))
    log(type(vm_list))
    log(vm_list)
    for vm in vm_list:
        print(Fore.CYAN + Style.BRIGHT +
              'Starting on-demand backup for: ' + vm + Style.RESET_ALL)
        for index, row in df.iterrows():
            if (row['vm_name'] == vm):
                vmName = (row['vm_name'])
                log('VM Name: ' + vmName)
                ID = (row['id'])
                log('VM ID: ' + ID)
                data = {'vcenter_id': vcenter_id, 'vm_id': ID}
                payload = json.dumps(data)
                response = requests.request('POST',
                                            url,
                                            headers=headers,
                                            data=payload)
                print(response.text)
Example #8
def find(where, exclude, include, output):
    """
    Find plugins by scanning the given path for PluginSpecs.
    It starts from the current directory if --where is not specified.
    This is what a setup.py method would run as a build step, i.e., discovering entry points.
    """
    with console.status(f"Scanning path {where}"):
        plugins = find_plugins(where, exclude, include)

    if output == "tree":
        tree = Tree("Entrypoints")
        for namespace, entry_points in plugins.items():
            node = tree.add(f"[bold]{namespace}")

            t = Table()
            t.add_column("Name")
            t.add_column("Location")

            for ep in entry_points:
                key, value = ep.split("=")
                t.add_row(key, value)

            node.add(t)

        rprint(tree)
    elif output == "dict":
        rprint(dict(plugins))
    else:
        raise click.ClickException("unknown output format %s" % output)
Example #9
def letterboxdRun():
    if args.testing:
        args.reviews = True
        args.html = True
        user = '******'
    else:
        user = args.user

    if args.reviews:
        fname = f'{user}_reviews.json'
        console.print(
            '[cyan]--Making requests to letterboxd.com--\n[red]This may take some time depending on how many reviews there are.\n'
        )

        reviewsText = getReviews(user, args=args)

        outputDict = {'user': user, 'reviews': reviewsText}

        if args.html:
            writeReviews(outputDict, args=args)
        if args.json:
            rprint(f'json={fname}')
            jsonStr = json.dumps(outputDict, indent=3)
            with open(fname, 'w+') as f:
                f.write(jsonStr)
Example #10
def writeReviews(reviewsDict=None, args=None):
    user = reviewsDict['user']
    if not args.search:
        fname = f'{user}_all_reviews.html'
    else:
        fname = f'{user}_searched_reviews.html'
    rprint(f'html={fname}')

    with open(fname, 'w+') as f:
        f.write('<!DOCTYPE html>\n'
                '<html>\n'
                '<head>\n'
                '</head>\n'
                '<body>\n')

        f.write(f'<h1>{user} - letterboxd.com reviews </h1>\n<br>\n')

        for i, (movie, review) in enumerate(reviewsDict['reviews'].items()):
            htmlMovieTitle = movie.replace('-', ' ').title()
            f.write(
                f'<b>{i + 1}: {htmlMovieTitle}</b>\n<br>\n{review}\n<br>\n')

        f.write('</body>\n</html>\n')

    if args.browserOpen:
        open_new_tab(fname)
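
A minimal usage sketch (the review data and the argument namespace are hypothetical; writeReviews only reads args.search and args.browserOpen):

from argparse import Namespace

reviews = {'user': 'someuser',
           'reviews': {'some-film-2020': 'Short review text.'}}
# writes someuser_all_reviews.html and announces it via rprint
writeReviews(reviews, args=Namespace(search=False, browserOpen=False))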
Example #11
def scores():
    """- swap team scores"""
    team1_score = read(team1scoreF)
    team2_score = read(team2scoreF)
    write(team1scoreF, team2_score)
    write(team2scoreF, team1_score)
    rprint(f"{main_colour}Swapped team scores.")
Example #12
def teams():
    """- swap team names"""
    team1_name = read(team1F)
    team2_name = read(team2F)
    write(team1F, team2_name)
    write(team2F, team1_name)
    rprint(f"{main_colour}Swapped team names.")
Example #13
def format_numeric_column(dataframe, column_name, TYPE):
    '''Format column in dataframe to specified TYPE and convert to string.

    Parameters
    ----------
    dataframe : Pandas DataFrame
        Pandas DataFrame with AGS4 data
    column_name : str
        Name of column to be formatted
    TYPE : str
        AGS4 TYPE for specified column

    Returns
    -------
    Pandas DataFrame
        Pandas DataFrame with formatted data.
    '''

    from rich import print as rprint

    df = dataframe.copy()
    col = column_name

    try:
        if 'DP' in TYPE:
            i = int(TYPE.strip('DP'))
            # Apply formatting to DATA rows with real numbers. NaNs are skipped so that they are
            # exported as "" rather than "nan"
            mask = (df.HEADING == "DATA") & df[col].notna()
            df.loc[mask, col] = df.loc[mask, col].apply(lambda x: f"{x:.{i}f}")

        elif 'SCI' in TYPE:
            i = int(TYPE.strip('SCI'))
            # Apply formatting to DATA rows with real numbers. NaNs are skipped so that they are
            # exported as "" rather than "nan"
            mask = (df.HEADING == "DATA") & df[col].notna()
            df.loc[mask, col] = df.loc[mask, col].apply(lambda x: f"{x:.{i}E}")

        elif 'SF' in TYPE:

            # Apply formatting to DATA rows with real numbers. NaNs are skipped so that they are
            # exported as "" rather than "nan"
            mask = (df.HEADING == "DATA") & df[col].notna()
            df.loc[mask, [col]] = df.loc[mask, [col]].applymap(
                lambda x: _format_SF(x, TYPE))

        else:
            pass

    except (ValueError, TypeError):
        rprint(
            f"[yellow]  WARNING: Numeric data in [bold]{col:<9}[/bold] not reformatted as it had one or more non-numeric entries.[/yellow]"
        )

    return df
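
A minimal usage sketch with a small made-up AGS4-style DataFrame (column name and values are hypothetical):

import pandas as pd

df = pd.DataFrame({'HEADING': ['UNIT', 'DATA', 'DATA'],
                   'SPEC_DPTH': [None, 1.5, 2.25]})
out = format_numeric_column(df, 'SPEC_DPTH', '2DP')
# DATA rows become the strings '1.50' and '2.25'; the UNIT row is left untouched
print(out)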
Example #14
def print_output(result, verbose, state, col):
    """
    Format the output according to the terminal size.
    col -> terminal column width
    """
    state_color = {"OPEN": "green", "CLOSED": "red", "MERGED": "yellow"}
    if verbose:
        click.secho(state, fg=state_color[state], bold=True)
        click.secho("Verbose mode", fg="red", bold=True)
        print("\n")
        for i in result["data"]["repository"]["pullRequests"]["nodes"]:
            number = json_extract(i, "number")
            title = truncate(json_extract(i, "title")[0], math.ceil(col / 5))
            # totalCount = json_extract(i, 'totalCount')
            create_date = json_extract(i, "createdAt")
            closed_date = json_extract(i, "closedAt")
            dates = prdate(create_date, closed_date)
            lname = truncate(", ".join(json_extract(i, "name")),
                             math.ceil(col / 5))
            rprint(
                f"[bold {state_color[state]}]#{number[0]}[/bold {state_color[state]}]",
                title.ljust(math.ceil(col / 4)),
                f"( {lname} )".ljust(math.ceil(col / 4)),
                dates,
            )
    else:
        click.secho(state, fg=state_color[state], bold=True)
        print("\n")
        for i in result["data"]["repository"]["pullRequests"]["nodes"]:
            number = json_extract(i, "number")
            title = truncate(json_extract(i, "title")[0], math.ceil(col / 3))
            rprint(
                f"[bold {state_color[state]}]#{number[0]}[/bold {state_color[state]}]",
                title.ljust(math.ceil(col / 4)),
            )
Example #15
def download_sentinel_data(item, bands):
    # get paths w.r.t. id
    paths = file_paths_wrt_id(item._data["id"])
    # get meta info on path, to be used by boto3
    info_response = requests.get(item.assets["info"]["href"])
    info_response_json = json.loads(info_response.text)
    # save bands generically
    for band in bands:
        # pass band id in metadata
        info_response_json["band_id"] = band
        band_filename = paths["b%s" % band]
        if not data_file_exists(band_filename):
            save_to_file(
                item.assets["B0{}".format(band)]["href"],
                band_filename,
                item._data["id"],
                "✗ required data doesn't exist, downloading %s %s"
                % (band_tag_map["b" + str(band)], "band"),
                meta=info_response_json,
            )
        else:
            rprint(
                "[green] ✓ ",
                "required data exists for {} band".format(
                    band_tag_map["b" + str(band)]
                ),
            )
    return item._data["id"]
Example #16
def replay_video(replay_path, speed, first_n_moves=100_000_000):
    with open(replay_path, 'r') as f:
        replay = json.load(f)
    environment_name = replay["name"]
    moves = []
    for move_info in replay["replay"]:
        moves.append(move_info["move"])

    env_module = get_env_module(environment_name)
    environment = env_module.Environment()
    current_state = environment.initial_state()
    os.system("clear")
    for i, move in enumerate(moves):
        if i >= first_n_moves:
            break

        # Show state
        to_display = environment.text_display(current_state)
        os.system("clear")
        rprint(to_display)
        time.sleep(speed)

        if move is None:
            break

        current_state = environment.transition_state(current_state, move)
Example #17
    def info(self) -> None:
        """
        Pretty printing of signature information.
        """
        if not self._loaded:
            raise SignatureException("Signature not loaded yet")

        heading = "underline bold"
        severity_style = "green bold"
        if self.severity.lower() == "medium":
            severity_style = "yellow bold"
        elif self.severity.lower() == "high":
            severity_style = "red bold"
        severity = "[{0}]{1}[/{0}]".format(severity_style, self.severity)

        rprint(
            Panel("[{0}]URL[/{0}]\n{1}\n\n".format(heading, self.url) +
                  "[{0}]Detection Name[/{0}]\n{1}\n\n".format(
                      heading, self.name) +
                  "[{0}]Description[/{0}]\n{1}\n\n".format(
                      heading, self.description) +
                  "[{0}]Severity[/{0}]\n{1}\n{2}\n\n".format(
                      heading, severity, self.severity_description) +
                  "[{0}]Affected System(s)[/{0}]\n{1}\n\n".format(
                      heading, self.affected_systems) +
                  "[{0}]Reference(s)[/{0}]\n{1}\n".format(
                      heading, self.references)))
Example #18
 def _get_packages(self):
     rprint("[yellow]get packages...")
     excluded_dirs = [
         ".git",
         ".hg",
         "dist",
         "front",
         "node_modules",
         "shiny",
         "temp",
         "templates",
         "venv-*",
     ]
     example_folder = _wildcard_folder("example")
     if example_folder:
         excluded_dirs.append(example_folder)
     walk = filtered_walk(".",
                          included_files=["__init__.py"],
                          excluded_dirs=excluded_dirs)
     result = []
     for path, subdirs, files in walk:
         if len(files):
             path = path.replace(os.sep, ".").strip(".")
             if path:
                 result.append("{0}".format(path))
     app_names = ["app"]
     if example_folder:
         app_names.append(example_folder)
     for name in app_names:
         if name in result:
             result.remove(name)
             result.insert(0, name)
     return result
Example #19
    def htcondor_stdout(self, file: str) -> str:
        """
        Read HTCondor stdout files.

        :param file: HTCondor stdout file
        :return: content
        """
        output_string = ""
        try:
            if os.path.getsize(file) == 0:
                return ""

            with open(file, "r") as output_content:
                output_string += "".join(output_content.readlines())
        except NameError as err:
            logging.exception(err)
            rprint(f"[red]The htcondor_stdout method requires a "
                   f"{self.ext_out} file as parameter[/red]")
        except FileNotFoundError:
            relevant = file.split(os.path.sep)[-2:]
            match = re.match(r".*?([0-9]{3,}_[0-9]+)" + self.ext_out,
                             relevant[1])
            rprint(
                f"[yellow]There is no related {self.ext_out} "
                f"file: {relevant[1]} in the directory:\n"
                f"[/yellow][cyan]'{os.path.abspath(relevant[0])}'\n"
                f"with the prefix: {match[1]}[/cyan]"
            )
        except TypeError as err:
            logging.exception(err)
        finally:
            return output_string
Example #20
def inspect_trap_walls():
    blocked_passages = initial_blocked_passages()
    vertical_wall_states = [[0] * 8 for _ in range(8)]
    horizontal_wall_states = [[0] * 8 for _ in range(8)]
    state = State(
        whose_move=0,
        p1_x=4,
        p1_y=0,
        p2_x=4,
        p2_y=8,
        p1_wall_count=10,
        p2_wall_count=10,
        blocked_passages=blocked_passages,
        vertical_wall_states=vertical_wall_states,
        horizontal_wall_states=horizontal_wall_states,
    )

    # Place wall at (3/4, 0) to see if it finds the trap at (3/4, 1)
    update_blocked_passages(state.blocked_passages, 3, 0, True)
    update_wall_states(state.vertical_wall_states, state.horizontal_wall_states, 3, 0, True)

    update_blocked_passages(state.blocked_passages, 4, 0, True)
    update_wall_states(state.vertical_wall_states, state.horizontal_wall_states, 4, 0, True)

    blocked_before = set(state.blocked_passages)
    tw = find_trap_walls(state)

    assert blocked_passages == blocked_before, "finding trap walls modified it"
    print("traps:", tw)

    from rich import print as rprint # noqa
    env = Environment()
    rprint(env.text_display(state))
Example #21
def shutdown():
    clear()
    print('P l e a s e  w a i t . . .\n\n\n')
    sleep(3)
    rprint(
        '[bold yellow]It is now safe to close your Command Line Interface.[/bold yellow]'
    )
    sleep(2)
Example #22
 def tohex(self, cmds, byte):
     addr = None
     if cmds.addr:
         addr = cmds.addr
     if addr:
         rprint(hexdump(byte, start=int(addr)))
         return
     rprint(hexdump(byte))
Example #23
 def tohex(self, cmds, byte):
     addr = None
     if len(cmds) == 3:
         addr = cmds[-1]
     if addr:
         rprint(self.hexdump(byte, start=int(addr)))
         return
     rprint(self.hexdump(byte))
Example #24
 def _get_description(self):
     rprint("[yellow]get description...")
     check_setup_yaml_exists()
     with open(FILENAME_SETUP_YAML) as f:
         data = yaml.safe_load(f)
     if "description" not in data:
         abort("Package 'description' not found in 'setup.yaml'")
     return data["description"]
Example #25
def json_dump_droplets(droplets):
    data = []
    file_name = "droplets.json"
    for droplet in droplets:
        data.append(attr.asdict(droplet))
    with open(file_name, "w") as f:
        json.dump(data, f, indent=4)
    rprint("[yellow]2. 'json_dump_droplets' to '{}'...".format(file_name))
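
A minimal usage sketch assuming the droplets are attrs-decorated objects (the Droplet class here is hypothetical):

import attr

@attr.s(auto_attribs=True)
class Droplet:
    id: int
    name: str

# each droplet is serialised via attr.asdict() and written to droplets.json
json_dump_droplets([Droplet(1, "web-1"), Droplet(2, "db-1")])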
Example #26
def zeige_hilfe_an():
    hilfe_schliessen = False
    text = hilfe_text()
    while not hilfe_schliessen:
        system(loeschte_terminal_inhalt)  # clear the terminal screen
        rprint(Panel(text))
        # Prompt (German): "To close the help, please press 'q': "
        hilfe_schliessen = input(
            "Um die Hilfe zu schliessen drücke bitte 'q': ").lower() == 'q'
Example #27
 def release(self):
     if not self.prefix:
         msg = "Cannot release a project without a 'prefix'"
         rprint("[red]{}".format(msg))
         raise KbError(msg)
     self._check_is_project_or_app()
     url, user, email = self._get_scm_config()
     if not self.TESTING:
         self._check_scm_status()
     description = self._get_description()
     packages = self._get_packages()
     package_data = self._get_package_data(packages)
     is_project = self._has_project_package(packages)
     name = self._get_name()
     self._check_requirements(is_project)
     version = self._get_version()
     self._write_manifest_in(is_project, packages)
     self._write_setup(
         name,
         packages,
         package_data,
         version,
         url,
         user,
         email,
         description,
         self.prefix,
     )
     if not self.TESTING:
         self._commit_and_tag(version)
         # command = "python setup.py clean sdist upload -r {}".format(
         #    self.pypi
         # )
         rprint("[blue]clean sdist upload...")
         command = [
             "python",
             "setup.py",
             "clean",
             "sdist",
             "upload",
             "-r",
             self.pypi,
         ]
         try:
             result = subprocess.run(command,
                                     capture_output=True,
                                     text=True)
             if result.returncode == 0:
                 rprint("[green]'setup.py', 'upload' to {} - success...".
                        format(self.pypi))
             else:
                 rprint("[red]{}".format(command))
                 for x in result.stderr.split("\n"):
                     rprint("[red]{}".format(x))
                 raise KbError("Failed to run the 'sdist upload' process")
         except Exception as e:
             raise KbError("Failed to run the 'sdist upload' process", e)
Example #28
def pages(param):
    try:
        r = session.get(param).json()
    except json.decoder.JSONDecodeError:
        rprint('[red]ERROR: no result was found.[/red]')
        sys.exit(1)
    perpage = int(r['perpage'])
    results = int(r['total_results'])
    return math.ceil(results / perpage)
Example #29
def edit(ctx: ClickContext, as_json: bool, color: str, label: str, tag_id: int):
    ctx.obj.logger.info("Updating Tag..")
    try:
        ans = ctx.obj.edit_tag(tag_id, label, color)
        if as_json:
            rprint(json.dumps(ans, indent=4))
        else:
            _print_tags_table(ctx.obj.logger, [ans])
    except IntelOwlClientException as e:
        ctx.obj.logger.fatal(str(e))
Example #30
def new(ctx: ClickContext, as_json: bool, color: str, label: str):
    ctx.obj.logger.info("Adding new Tag..")
    try:
        ans = ctx.obj.create_tag(label, color)
        if as_json:
            rprint(json.dumps(ans, indent=4))
        else:
            _print_tags_table(ctx.obj.logger, [ans])
    except IntelOwlClientException as e:
        ctx.obj.logger.fatal(str(e))