Example #1
def download_subcommand(
    ctx: typer.Context,
    component: Component,
    name: str = typer.Option(None,
                             "--name",
                             "-n",
                             help="Name to match against title or package."),
    dest_path: Path = typer.Option(
        Path.cwd(),
        "--dest-path",
        "-d",
        show_default=True,
        help="Destination directory for downloaded files.",
    ),
    file_format: FileFormat = typer.Option(
        FileFormat.pdf,
        "--format",
        "-f",
        show_default=True,
        show_choices=True,
    ),
    overwrite: bool = typer.Option(
        False,
        "--over-write",
        "-W",
        is_flag=True,
        show_default=True,
        help="Over write downloaded files.",
    ),
):
    """Download textbooks from Springer

    This command downloads textbooks from Springer to the local host. Files
    are saved by default in PDF format to the current working directory.

    If a download is interrupted by the user, it can later be restarted from
    the point of interruption without re-downloading files already saved.

    Problems encountered while downloading files are logged to:

    `dest-path/DOWNLOAD_REPORT.txt`

    __Examples__

    Download all books in the default catalog in PDF format to the
    current directory:

    `$ springer download books`

    Download all books in EPUB format whose title includes 'python':

    `$ springer download books --name python --format epub`

    Download all books into directories grouped by package:

    `$ springer download packages --dest-path by_pkgs`

    Download all books in a specific package in EPUB format:

    `$ springer download package --name 'Computer Science' --format epub`

    Download all books in packages whose name includes `Science`:

    `$ springer download package --name science --dest-path sciences`

    Download all books in all catalogs [en-all, de-all, de-med] in EPUB format:

    `$ springer download catalogs --format epub`

    The `catalogs` download subcommand will create a set of directories by language
    and topic for each catalog and save downloaded files into the appropriate
    directory, eg:

    \b
    - dest-path/English/All_Disciplines/package_name/title.fmt
    - dest-path/German/All_Disciplines/package_name/title.fmt
    - dest-path/German/Emergency_Nursing/package_name/title.fmt

    The `package` and `packages` subcommands will also save downloaded
    files into directories with package names rooted in the destination
    path:

    \b
    dest-path/package_name/title.fmt
    ...



    See Also: `set-default-catalog`, `get-default-catalog`, `list`
    """

    dest_path = dest_path.resolve()

    dest_path.mkdir(mode=0o755, exist_ok=True, parents=True)

    _configure_logger(dest_path)

    try:
        if component in [Component.Books, Component.Catalog]:
            if not name:
                ctx.obj.download(dest_path, file_format, overwrite)
            else:
                ctx.obj.download_title(name, dest_path, file_format, overwrite)
            return

        if component in [Component.Package, Component.Packages]:

            if component is Component.Package:
                if not name:
                    typer.secho(f"Please supply a `name` for package",
                                fg="red")
                    raise typer.Exit(-1)
                package_names = [name]
            else:
                package_names = ctx.obj.packages.keys()

            for pkgname in package_names:
                path = dest_path / pkgname.replace(" ", "_")
                path.mkdir(mode=0o755, exist_ok=True, parents=True)
                ctx.obj.download_package(pkgname, path, file_format, overwrite)
            return

        if component is Component.Catalogs:

            for catalog in Catalog.all_catalogs():
                path = dest_path / catalog.language.name / catalog.topic.name
                path.mkdir(mode=0o755, exist_ok=True, parents=True)
                for pkgname in catalog.packages:
                    path = dest_path / pkgname.replace(" ", "_")
                    path.mkdir(mode=0o755, exist_ok=True, parents=True)
                    catalog.download_package(pkgname, path, file_format,
                                             overwrite)

    except KeyError as error:
        typer.secho(str(error), fg="red")
        raise typer.Exit(-1) from None

    except PermissionError as error:
        typer.secho("Permission error for: ", nl=False)
        typer.secho(str(error.filename), fg="red")
        raise typer.Exit(-1) from None
Example #2
def infersent(
    path_to_senteval: str,
    path_to_vectors: str,
    output_filepath: str = None,
    cuda_device: int = -1,
    prototyping_config: bool = False,
    verbose: bool = False,
) -> None:
    """Evaluates an InferSent model against the SentEval benchmark
    (see: https://github.com/facebookresearch/InferSent for information on the pre-trained model).
    Adapted from: https://github.com/facebookresearch/SentEval/blob/master/examples/infersent.py.
    """
    from models import InferSent

    def prepare(params, samples):
        samples = _cleanup_batch(samples)
        params.infersent.build_vocab([" ".join(tokens) for tokens in samples], tokenize=False)

    def batcher(params, batch):
        batch = _cleanup_batch(batch)
        sentences = [" ".join(tokens) for tokens in batch]
        embeddings = params.infersent.encode(sentences, bsize=params.batch_size, tokenize=False)
        return embeddings

    # Determine the torch device
    device = _get_device(cuda_device)

    # Load InferSent model
    # TODO (John): Hardcoded these to move things along, but that should be fixed.
    V = 2
    MODEL_PATH = "resources/encoder/infersent%s.pkl" % V
    params_model = {
        "bsize": 64,
        "word_emb_dim": 300,
        "enc_lstm_dim": 2048,
        "pool_type": "max",
        "dpout_model": 0.0,
        "version": V,
    }
    infersent = InferSent(params_model)
    infersent.load_state_dict(torch.load(MODEL_PATH))
    infersent.to(device)
    # Load and initialize the model with word vectors
    infersent.set_w2v_path(path_to_vectors)

    trainable_params = sum(p.numel() for p in infersent.parameters() if p.requires_grad)
    typer.secho(
        (
            f"{SUCCESS} Loaded InferSent model {MODEL_PATH}"
            f" with {trainable_params} trainable parameters."
        ),
        fg=typer.colors.GREEN,
        bold=True,
    )

    # Performs a few setup steps and returns the SentEval params
    params_senteval = _setup_senteval(path_to_senteval, prototyping_config, verbose)
    params_senteval["infersent"] = infersent
    _run_senteval(params_senteval, path_to_senteval, batcher, prepare, output_filepath)

    return
Example #3
def _print_aggregate_scores(aggregate_scores: Dict[str, Dict[str, float]]) -> None:
    """Prints out nicely formatted `aggregate_scores`."""
    for partition in ["dev", "test"]:
        typer.secho(f"{SCORE} Aggregate {partition} scores", fg=typer.colors.WHITE, bold=True)
        for task_set in ["downstream", "probing", "all"]:
            typer.secho(f"* {task_set.title()}: {aggregate_scores[task_set][partition]:.2f}%")
Example #4
def folder(path: Path = typer.Argument(default='.',
                                       exists=True,
                                       file_okay=True,
                                       dir_okay=True,
                                       readable=True,
                                       resolve_path=True),
           ignore_extension: str = typer.Option(default=None),
           ignore_path: Path = typer.Option(default=None,
                                            exists=True,
                                            file_okay=True,
                                            dir_okay=True,
                                            readable=True,
                                            resolve_path=True)):
    """
    Convert all video and audio files in a folder.
    """

    videos = []
    audios = []

    for dir, folders, files in os.walk(path):
        base_dir = Path(dir)
        for item in files:

            file_path = base_dir / item
            guess = filetype.guess(str(file_path))

            if check_ignore(file_path, ignore_extension, ignore_path):
                continue

            if guess and 'video' in guess.mime:

                videos.append(file_path)

            if guess and 'audio' in guess.mime:

                audios.append(file_path)

    manager = enlighten.get_manager()
    errors_files = []
    pbar = manager.counter(total=len(videos) + len(audios),
                           desc='Files',
                           unit='files')

    for video in videos:
        typer.secho(f'Processing: {video}')
        if get_codec(str(video)) != 'hevc':
            new_path = convertion_path(video, False)

            if new_path.exists():
                os.remove(str(new_path))

            try:
                convert_video_progress_bar(str(video), str(new_path), manager)
                os.remove(str(video))
                if video.suffix == new_path.suffix:
                    shutil.move(new_path, str(video))

            except ffmpeg._run.Error:
                typer.secho(f'ffmpeg could not process: {str(video)}', fg=RED)
                errors_files.append(video)

        pbar.update()

    for audio in audios:
        typer.secho(f'Processing: {audio}')
        if get_codec(str(audio)) != 'hevc':

            new_path = convertion_path(audio, True)

            if new_path.exists():
                os.remove(str(new_path))

            try:

                convert_file(str(audio), str(new_path))

                os.remove(str(audio))
                if audio.suffix == new_path.suffix:
                    shutil.move(new_path, str(audio))

            except ffmpeg._run.Error:
                typer.secho(
                    f'ffmpeg could not process this file: {str(audio)}',
                    fg=RED)
                errors_files.append(audio)

        pbar.update()

    if errors_files:
        typer.secho('These files could not be processed:', fg=RED)
        typer.secho(str(errors_files), fg=RED)
Example #5
def generate(username: str):
    primary_key = PGPKey.new(PubKeyAlgorithm.RSAEncryptOrSign, 4096)
    primary_key.add_uid(PGPUID.new(username),
                        usage={KeyFlags.EncryptCommunications},
                        uidhashes=[HashAlgorithm.SHA512],
                        ciphers=[SymmetricKeyAlgorithm.AES256],
                        compression=[CompressionAlgorithm.ZIP])

    secho("Primary key, to be saved in server", fg=colors.BLUE)
    secho(primary_key.fingerprint, fg=colors.RED)
    secho(str(primary_key), fg=colors.YELLOW)

    secho("Public key, to be saved in server", fg=colors.BLUE)
    secho(primary_key.fingerprint, fg=colors.RED)
    secho(str(primary_key.pubkey), fg=colors.GREEN)
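A minimal wiring sketch, assuming generate() is meant to be exposed as a standalone Typer command (the app object below is an assumption, not part of the original snippet):

import typer

# Hedged sketch: register generate() on a Typer app and run it.
app = typer.Typer()
app.command()(generate)

if __name__ == "__main__":
    app()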
Example #6
    def process_toml(self, filename: str) -> None:
        """
        Open and prepare toml files for querying.
        """

        from pathlib import Path

        import toml

        queries_path = Path.home() / ".queries"

        if not queries_path.exists():
            typer.secho(
                "We couldn't find the ~/.queries folder; make sure it exists.",
                fg=typer.colors.RED,
                bold=True,
            )

        if not filename.endswith(".toml"):
            if "." not in filename:
                filename = filename + ".toml"

        file_path = queries_path / filename
        with file_path.open("r") as file:
            query_file = toml.load(file)

        for key, value in query_file["query"].items():

            if key == "dimensions":
                if "all" in value:
                    self.update_body({
                        "dimensions":
                        ["date", "page", "query", "country", "device"]
                    })
                else:
                    self.body.update(
                        {"dimensions": [dimension for dimension in value]})

            if key == "filters":
                self.body.update({
                    "dimensionFilterGroups": [{
                        "filters": [
                            {
                                # Country
                                "dimension": filters.split()[0],
                                # Equals
                                "operator": filters.split()[1],
                                "expression":
                                " ".join(filters.split()[2:]),  # FRA
                            } for filters in value
                        ]
                    }]
                })

            if key == "start-date":
                self.body.update({"startDate": value})

            if key == "end-date":
                self.body.update({"endDate": value})

            if key == "start-date":
                self.body.update({"startDate": value})

            if key == "search-type":
                self.body.update({"searchType": value})

            if key == "row-limit":
                self.body.update({"rowLimit": value})

            if key == "start-row":
                self.body.update({"startRow": value})

            if key == "export-type":
                self.utils.update({"export-type": value})

            if key == "url":
                self.utils.update({"url": value})
Example #7
def print_success(msg: str):
    typer.secho(
        msg,
        fg=typer.colors.GREEN,
    )
Example #8
def authenticate(
    client_config: Union[str, Path] = None,
    credentials: Union[Any, Dict[str, Dict[str, str]]] = None,
    serialize: Union[str, Path] = None,
    flow: str = "web",
) -> SearchAnalytics:

    if not credentials:

        if isinstance(client_config, collections.abc.Mapping):

            auth_flow = InstalledAppFlow.from_client_config(
                client_config=client_config,
                scopes=["https://www.googleapis.com/auth/webmasters.readonly"],
            )

        elif isinstance(client_config, str):
            try:
                auth_flow = InstalledAppFlow.from_client_secrets_file(
                    client_secrets_file=client_config,
                    scopes=[
                        "https://www.googleapis.com/auth/webmasters.readonly"
                    ],
                )
            except FileNotFoundError:
                typer.secho(
                    "\nAuthentication failed ❌\nReason: client_secrets.json not found in your current directory.",
                    fg=typer.colors.RED,
                    bold=True,
                )
                sys.exit()

        else:
            raise BrokenFileError(
                "Client secrets must be a mapping or path to file")

        if flow == "web":
            auth_flow.run_local_server()
        elif flow == "console":
            auth_flow.run_console()
        else:
            raise ValueError(
                "Authentication flow '{}' not supported".format(flow))

        credentials = auth_flow.credentials

    else:

        if isinstance(credentials, str):
            try:
                with open(credentials, "r") as f:
                    credentials = json.load(f)
            except FileNotFoundError:
                typer.secho(
                    "\nAuthentication failed ❌\nReason: credentials.json not found in your current directory.",
                    fg=typer.colors.RED,
                    bold=True,
                )
                sys.exit()

        credentials = Credentials(
            token=credentials["token"],
            refresh_token=credentials["refresh_token"],
            id_token=credentials["id_token"],
            token_uri=credentials["token_uri"],
            client_id=credentials["client_id"],
            client_secret=credentials["client_secret"],
            scopes=credentials["scopes"],
        )

    service = discovery.build(
        serviceName="searchconsole",
        version="v1",
        credentials=credentials,
        cache_discovery=False,
    )

    if serialize:

        if isinstance(serialize, str):

            serialized = {
                "token": credentials.token,
                "refresh_token": credentials.refresh_token,
                "id_token": credentials.id_token,
                "token_uri": credentials.token_uri,
                "client_id": credentials.client_id,
                "client_secret": credentials.client_secret,
                "scopes": credentials.scopes,
            }

            with open(serialize, "w") as f:
                json.dump(serialized, f, indent=4)

        else:
            raise TypeError("`serialize` must be a path.")

    return SearchAnalytics(service, credentials)
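A hedged usage sketch; both file names are assumptions rather than part of the API, and the console flow is chosen for headless environments:

# Run the OAuth flow from a local client secrets file and cache the
# resulting credentials for later runs (names are illustrative).
webmasters = authenticate(
    client_config="client_secrets.json",
    serialize="credentials.json",
    flow="console",
)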
Example #9
def ingest(
    refresh: bool = False,
    leagues: typing.List[str] = typer.Option(
        _DEFAULT_INGEST_LEAGUES,
        help='Leagues to import',
        callback=lambda xs: [understatdb.understat.League(x) for x in xs]),
    seasons: typing.List[int] = typer.Option(
        _DEFAULT_INGEST_SEASONS, help='Seasons to import (by start year)'),
):
    """ Ingest match and shot data from Understat.com """

    initialize_db()
    client = understatdb.understat.Understat()

    for league, season in itertools.product(
        [understatdb.understat.League(l) for l in leagues], seasons):
        # Add league & season to DB
        with understatdb.db.DB.atomic():
            db_league, _ = understatdb.db.League.get_or_create(
                name=league.value)
            db_season, _ = understatdb.db.Season.get_or_create(name=season)

        # Check if a record for this league and season already exists. If so, skip it.
        existing_record = understatdb.db.Matches.get_or_none(
            league_id=db_league.id, season_id=db_season.id)
        if not refresh and existing_record:
            typer.secho(
                f'Data for {league.value}, {season} already exists. Skipping. '
                'To update data for this league and season, use the `--refresh` flag',
                fg=typer.colors.BRIGHT_BLACK)
            continue

        # Add match and shot data to DB
        typer.secho(f'Ingesting data for {league.value}, {season}',
                    fg=typer.colors.BLUE)
        with understatdb.db.DB.atomic():

            # Fetch match data from understat
            matches = client.matches(league, season)

            # Delete any old match data
            if refresh:
                understatdb.db.Matches.delete().where(
                    (understatdb.db.Matches.league_id == db_league.id)
                    & (understatdb.db.Matches.season_id == db_season.id)
                ).execute()

            db_matches = understatdb.db.Matches.create(
                league_id=db_league.id,
                season_id=db_season.id,
                json=matches,
                version=understatdb.__version__)

            with typer.progressbar(matches, label="Shots") as progress:
                for match in progress:
                    if not match['isResult']:
                        continue

                    # Add an artificial crawl delay to avoid bombarding
                    # understat with requests
                    # There's no robots.txt or ToS available on the site,
                    # So we just use a relatively conservative delay of
                    # 5 seconds per (shots) request
                    time.sleep(5)

                    match_id = int(match['id'])
                    shots = client.shots(match_id)

                    # Delete any old shots data
                    if refresh:
                        understatdb.db.Shots.delete().where(
                            understatdb.db.Shots.match_id ==
                            match_id).execute()

                    db_shots = understatdb.db.Shots.create(
                        match_id=match_id,
                        json=shots,
                        version=understatdb.__version__)

    # Rebuild tables in dbt
    build_tables(args=[])
Example #10
def templates(
    # args: List[str] = typer.Argument(None, metavar=iden_meta.dev, hidden=False),
    name: str = typer.Argument(None, hidden=False, help=f"Template: [name] or Device: {iden_meta.dev}"),
    group: List[str] = typer.Argument(None, help="Get Templates for Group"),
    _group: str = typer.Option(None, "--group", help="Get Templates for Group"),
    # _name: str = typer.Option(None, "--template", help="Get details for template by name"),
    device_type: str = typer.Option(None, "--dev-type", metavar="[IAP|ArubaSwitch|MobilityController|CX]",
                                    help="[Templates] Filter by Device Type"),
    version: str = typer.Option(None, metavar="<version>", help="[Templates] Filter by dev version Template is assigned to"),
    model: str = typer.Option(None, metavar="<model>", help="[Templates] Filter by model"),
    #  variablised: str = typer.Option(False, "--with-vars",
    #                                  help="[Templates] Show Template with variable place-holders and vars."),
    do_json: bool = typer.Option(False, "--json", is_flag=True, help="Output in JSON"),
    do_yaml: bool = typer.Option(False, "--yaml", is_flag=True, help="Output in YAML"),
    do_csv: bool = typer.Option(False, "--csv", is_flag=True, help="Output in CSV"),
    do_table: bool = typer.Option(False, "--table", help="Output in table format",),
    outfile: Path = typer.Option(None, "--out", help="Output to file (and terminal)", writable=True),
    # sort_by: SortOptions = typer.Option(None, "--sort"),show
    no_pager: bool = typer.Option(False, "--no-pager", help="Disable Paged Output"),
    update_cache: bool = typer.Option(False, "-U", hidden=True),  # Force Update of cli.cache for testing
    default: bool = typer.Option(False, "-d", is_flag=True, help="Use default central account",
                                 callback=cli.default_callback),
    debug: bool = typer.Option(False, "--debug", envvar="ARUBACLI_DEBUG", help="Enable Additional Debug Logging",
                               callback=cli.debug_callback),
    account: str = typer.Option("central_info",
                                envvar="ARUBACLI_ACCOUNT",
                                help="The Aruba Central Account to use (must be defined in the config)",
                                callback=cli.account_name_callback),
) -> None:
    if _group:
        group = _group
    elif group:
        group = group[-1]

    if group:
        group = cli.cache.get_group_identifier(group)
        group = group.name

    central = cli.central
    cli.cache(refresh=update_cache)

    params = {
        # "name": name,
        "device_type": device_type,  # valid = IAP, ArubaSwitch, MobilityController, CX
        "version": version,
        "model": model
    }

    params = {k: v for k, v in params.items() if v is not None}

    # TODO simplify
    if name:
        log_name = name
        name = cli.cache.get_identifier(name, ("dev", "template"), device_type=device_type, group=group)
        if not name:
            typer.secho(f"Unabled to find a match for {log_name}.  Listing all templates.", fg="red")

    if not name:
        if not group:
            if not params:  # show templates - Just update and show data from cache
                if central.get_all_templates not in cli.cache.updated:
                    asyncio.run(cli.cache.update_template_db())
                    resp = Response(output=cli.cache.templates)
                else:
                    # Can't use cache due to filtering options
                    resp = central.request(central.get_all_templates, **params)
        else:  # show templates --group <group name>
            resp = central.request(central.get_all_templates_in_group, group, **params)
    elif group:  # show template <name> --group <group_name> or show template <name> <group name>
        if name.is_template:
            resp = central.request(central.get_template, group, name.name)
        elif name.is_dev:  # They provided a dev identifier
            resp = central.request(central.get_variablised_template, name.serial)
        else:
            typer.secho(f"Something went wrong {name}", fg="red")
    else:  # provided args but no group get group from device iden
        if name.is_dev:
            resp = central.request(central.get_variablised_template, name.serial)
        elif name.is_template:
            resp = central.request(central.get_template, name.group, name.name)

    tablefmt = cli.get_format(do_json=do_json, do_yaml=do_yaml, do_csv=do_csv, do_table=do_table)

    cli.display_results(resp, tablefmt=tablefmt, pager=not no_pager, outfile=outfile)
Example #11
def logs(
    args: List[str] = typer.Argument(None, metavar='[LOG_ID]', help="Show details for a specific log_id"),
    user: str = typer.Option(None, help="Filter logs by user"),
    start: str = typer.Option(None, help="Start time of range to collect logs, provide value in epoch", hidden=True,),
    end: str = typer.Option(None, help="End time of range to collect logs, provide value in epoch", hidden=True,),
    device: str = typer.Option(None, metavar=iden_meta.dev, help="Collect logs for a specific device",),
    do_json: bool = typer.Option(False, "--json", is_flag=True, help="Output in JSON"),
    do_yaml: bool = typer.Option(False, "--yaml", is_flag=True, help="Output in YAML"),
    do_csv: bool = typer.Option(False, "--csv", is_flag=True, help="Output in CSV"),
    do_table: bool = typer.Option(False, "--table", help="Output in table format"),
    outfile: Path = typer.Option(None, "--out", help="Output to file (and terminal)", writable=True),
    update_cache: bool = typer.Option(False, "-U", hidden=True),  # Force Update of cli.cache for testing
    sort_by: SortOptions = typer.Option(None, "--sort", hidden=True,),  # TODO Unhide after implemented
    reverse: SortOptions = typer.Option(None, "-r", hidden=True,),  # TODO Unhide after implemented
    verbose: bool = typer.Option(False, "-v", hidden=True,),  # TODO Unhide after implemented
    no_pager: bool = typer.Option(False, "--no-pager", help="Disable Paged Output"),
    default: bool = typer.Option(
        False, "-d",
        is_flag=True,
        help="Use default central account",
        callback=cli.default_callback,
    ),
    debug: bool = typer.Option(
        False,
        "--debug",
        envvar="ARUBACLI_DEBUG",
        help="Enable Additional Debug Logging",
        callback=cli.debug_callback,
    ),
    account: str = typer.Option(
        "central_info",
        envvar="ARUBACLI_ACCOUNT",
        help="The Aruba Central Account to use (must be defined in the config)",
        callback=cli.account_name_callback,
    ),
) -> None:
    cli.cache(refresh=update_cache)
    if device:
        device = cli.cache.get_dev_identifier(device)
    kwargs = {
        "log_id": None if not args else args[-1],
        "username": user,
        "start_time": start or int(time.time() - 172800),
        "end_time": end,
        # "description": description,
        "target": None if not device else device.serial,
        # "classification": classification,  # TODO  add support for filters
        # "customer_name": customer_name,
        # "ip_address": ip_address,
        # "app_id": app_id,
        # "offset": offset,
        # "limit": limit,
    }
    # TODO start_time typer.Option pendumlum.... 3H 5h 20m etc. add other filter options
    central = cli.central
    resp = central.request(central.get_audit_logs, **kwargs)

    # TODO add -v flag or something to trigger auto index of log_ids and provide a menu where they can select the log
    # they want to see details on.
    if kwargs.get("log_id"):
        typer.secho(str(resp), fg="green" if resp else "red")
    else:
        tablefmt = cli.get_format(do_json=do_json, do_yaml=do_yaml, do_csv=do_csv, do_table=do_table)

        cli.display_results(
            resp,
            tablefmt=tablefmt,
            pager=not no_pager,
            outfile=outfile,
            # sort_by=sort_by,
            # reverse=reverse,
            cleaner=cleaner.get_audit_logs,
        )
Example #12
def train(epochs: int):
    # loading dataset
    df = pd.read_csv('data/trans_per_month.csv', index_col='customer_id')

    # calculating product frequency per month
    X = []
    y = []
    for i in range(len(df.columns) - 24):
        start = datetime.date(2017, 1, 1) + relativedelta(months=i)
        end = start + relativedelta(months=24)
        new_x, new_y = product_frequency_between(df, start, end)
        X.append(new_x)
        y.append(new_y)

    X = np.concatenate(X)
    y = np.concatenate(y)

    # normalizing data
    x_scaler = MinMaxScaler()
    y_scaler = MinMaxScaler()
    X = x_scaler.fit_transform(X)
    y = y_scaler.fit_transform(y.reshape(-1, 1))[:, 0]

    # saving scalers
    joblib.dump(x_scaler, 'models/serialized/x_scaler.mod')
    joblib.dump(y_scaler, 'models/serialized/y_scaler.mod')

    # splitting data
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.2,
                                                        random_state=41)

    # reshaping for lstm
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)

    # create model
    model = Sequential()
    model.add(
        LSTM(16,
             input_shape=(X_train.shape[1], X_train.shape[2]),
             return_sequences=True))
    model.add(LSTM(8, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Dense(1, activation='relu'))

    model.compile(loss='mean_squared_error', optimizer='adam')
    model.summary()

    # training model
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        epochs=epochs,
                        verbose=1)

    # saving model
    model.save('models/serialized/lstm_model')

    # predicting data
    trainPredict = model.predict(X_train)
    model.reset_states()
    testPredict = model.predict(X_test)

    # invert predictions
    trainPredict = y_scaler.inverse_transform(trainPredict)
    trainY = y_scaler.inverse_transform([y_train])
    testPredict = y_scaler.inverse_transform(testPredict)
    testY = y_scaler.inverse_transform([y_test])

    # calculate root mean squared error
    trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:, 0]))
    typer.secho(f'🍻 Train Score: {trainScore:.2f} RMSE',
                fg=typer.colors.BRIGHT_GREEN)
    testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:, 0]))
    typer.secho(f'🍻 Test Score: {testScore:.2f} RMSE',
                fg=typer.colors.BRIGHT_GREEN)

    # plotting
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='validation')
    plt.title(f'Model loss with {epochs} epoch')
    plt.legend()
    plt.show()
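A hedged inference sketch reusing the artifacts saved above; the input window must have the same length as the training features, and the Keras import path is an assumption about the local setup:

import joblib
import numpy as np
from tensorflow.keras.models import load_model  # assumed TF/Keras backend


def predict_frequency(window: np.ndarray) -> float:
    """Predict the next value from one window of past monthly frequencies."""
    x_scaler = joblib.load('models/serialized/x_scaler.mod')
    y_scaler = joblib.load('models/serialized/y_scaler.mod')
    model = load_model('models/serialized/lstm_model')

    x = x_scaler.transform(window.reshape(1, -1))
    x = x.reshape(x.shape[0], x.shape[1], 1)  # (samples, timesteps, features)
    y_scaled = model.predict(x)
    return float(y_scaler.inverse_transform(y_scaled)[0, 0])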
Example #13
def err(msg: str, fg: str = typer.colors.RED) -> None:
    typer.secho(msg, fg=fg, err=True)
Example #14
def info(msg: str, fg: str = typer.colors.GREEN) -> None:
    typer.secho(msg, fg=fg)
Example #15
def new_post(
    names: List[str] = typer.Option(
        None, "-n", "--name", help="Select one or more newspaper"
    ),
    headless: Optional[bool] = typer.Option(
        False, "-h", "--headless", help="Start driver without GUI."
    ),
):
    """Download then upload to Instagram images for one or more given newspapers.

    Args:
        names (List[str]): One or more newspaper to process.
    """

    global _REFERENCE_DICT
    global _CONFIG

    selected_refs = []

    # Exit if no argument given by user
    if len(names) == 0:
        typer.secho(
            "You have to enter at least one newspaper name.",
            fg=typer.colors.RED,
            bold=True,
        )
        raise typer.Exit()

    # Convert names argument from tuple to list to remove items smoother
    names = list(names)

    # Search if an argument exactly matches a ref in the database
    # (iterate over a copy because matched names are removed).
    for name in names.copy():
        for ref in _REFERENCE_DICT:
            if name.lower() == ref.lower():
                selected_refs.append(ref)
                names.remove(name)

    # If no match was found for one or more arguments, search if the string
    # is included in a ref name.
    if len(names) != 0:
        for name in names.copy():
            for ref in _REFERENCE_DICT:
                if name.lower() in ref.lower():
                    selected_refs.append(ref)
                    names.remove(name)

        # If there is still unfound arguments, exit with an error
        if len(names) != 0:
            typer.secho(
                "Unable to find reference(s): {refs}".format(refs=", ".join(names)),
                fg=typer.colors.RED,
                bold=True,
            )
            raise typer.Exit()

    today_date = datetime.today().strftime("%m%d")

    for ref in selected_refs:
        chosen_date = _REFERENCE_DICT[ref]["year"] + today_date

        typer.secho(
            "Posting {ref} at date {date}".format(ref=ref, date=chosen_date),
            fg=typer.colors.GREEN,
            bold=True,
        )

        save_image(
            name=ref,
            simple_name=_REFERENCE_DICT[ref]["simple_name"],
            url=_REFERENCE_DICT[ref]["url"],
            chosen_date=chosen_date,
            save_path=_CONFIG["image_local_save_path"],
        )

        app = _INSTAGRAM_APP.InstagramApp(config=_CONFIG)
        app.upload_image(
            name=ref,
            simple_name=_REFERENCE_DICT[ref]["simple_name"],
            chosen_date=chosen_date,
        )

        app.stop_driver()

        typer.secho(
            "Upload successful",
            fg=typer.colors.GREEN,
            bold=True,
        )
Example #16
def log(msg) -> None:
    typer.secho(msg, fg=typer.colors.WHITE, nl=False, err=True)
Example #17
File: docs.py  Project: xaviml/fastapi
def build_lang(lang: str = typer.Argument(
    ..., callback=lang_callback, autocompletion=complete_existing_lang)):
    """
    Build the docs for a language, filling missing pages with translation notifications.
    """
    lang_path: Path = Path("docs") / lang
    if not lang_path.is_dir():
        typer.echo(
            f"The language translation doesn't seem to exist yet: {lang}")
        raise typer.Abort()
    typer.echo(f"Building docs for: {lang}")
    build_dir_path = Path("docs_build")
    build_dir_path.mkdir(exist_ok=True)
    build_lang_path = build_dir_path / lang
    en_lang_path = Path("docs/en")
    site_path = Path("site").absolute()
    if lang == "en":
        dist_path = site_path
    else:
        dist_path: Path = site_path / lang
    shutil.rmtree(build_lang_path, ignore_errors=True)
    shutil.copytree(lang_path, build_lang_path)
    shutil.copytree(en_docs_path / "data", build_lang_path / "data")
    overrides_src = en_docs_path / "overrides"
    overrides_dest = build_lang_path / "overrides"
    for path in overrides_src.iterdir():
        dest_path = overrides_dest / path.name
        if not dest_path.exists():
            shutil.copy(path, dest_path)
    en_config_path: Path = en_lang_path / mkdocs_name
    en_config: dict = mkdocs.utils.yaml_load(
        en_config_path.read_text(encoding="utf-8"))
    nav = en_config["nav"]
    lang_config_path: Path = lang_path / mkdocs_name
    lang_config: dict = mkdocs.utils.yaml_load(
        lang_config_path.read_text(encoding="utf-8"))
    lang_nav = lang_config["nav"]
    # Exclude first 2 entries FastAPI and Languages, for custom handling
    use_nav = nav[2:]
    lang_use_nav = lang_nav[2:]
    file_to_nav = get_file_to_nav_map(use_nav)
    sections = get_sections(use_nav)
    lang_file_to_nav = get_file_to_nav_map(lang_use_nav)
    use_lang_file_to_nav = get_file_to_nav_map(lang_use_nav)
    for file in file_to_nav:
        file_path = Path(file)
        lang_file_path: Path = build_lang_path / "docs" / file_path
        en_file_path: Path = en_lang_path / "docs" / file_path
        lang_file_path.parent.mkdir(parents=True, exist_ok=True)
        if not lang_file_path.is_file():
            en_text = en_file_path.read_text(encoding="utf-8")
            lang_text = get_text_with_translate_missing(en_text)
            lang_file_path.write_text(lang_text, encoding="utf-8")
            file_key = file_to_nav[file]
            use_lang_file_to_nav[file] = file_key
            if file_key:
                composite_key = ()
                new_key = ()
                for key_part in file_key:
                    composite_key += (key_part, )
                    key_first_file = sections[composite_key]
                    if key_first_file in lang_file_to_nav:
                        new_key = lang_file_to_nav[key_first_file]
                    else:
                        new_key += (key_part, )
                use_lang_file_to_nav[file] = new_key
    key_to_section = {(): []}
    for file, orig_file_key in file_to_nav.items():
        if file in use_lang_file_to_nav:
            file_key = use_lang_file_to_nav[file]
        else:
            file_key = orig_file_key
        section = get_key_section(key_to_section=key_to_section, key=file_key)
        section.append(file)
    new_nav = key_to_section[()]
    export_lang_nav = [lang_nav[0], nav[1]] + new_nav
    lang_config["nav"] = export_lang_nav
    build_lang_config_path: Path = build_lang_path / mkdocs_name
    build_lang_config_path.write_text(
        yaml.dump(lang_config, sort_keys=False, width=200, allow_unicode=True),
        encoding="utf-8",
    )
    current_dir = os.getcwd()
    os.chdir(build_lang_path)
    mkdocs.commands.build.build(
        mkdocs.config.load_config(site_dir=str(dist_path)))
    os.chdir(current_dir)
    typer.secho(f"Successfully built docs for: {lang}",
                color=typer.colors.GREEN)
Example #18
def err(msg) -> None:
    typer.secho(msg, fg=typer.colors.RED, nl=False, err=True)
Example #19
def create_date_range(
    days: Optional[int] = None,
    start: Optional[str] = None,
    end: Optional[str] = None,
    granularity: Optional[str] = None,
) -> List[str]:
    """
    Creates a list of dates from the given range and granularity
    Example: [2020-03-15, 2020-03-16,...]
    """

    if start is not None and end is not None:
        # Default to a daily interval so `day_interval` is always defined
        # when no granularity is supplied.
        day_interval = 1
        if granularity is not None:
            if get_weekday_by_name(granularity) != 10:
                granularity = get_weekday_by_name(granularity)  # type: ignore
                return granularity_days(granularity=granularity, start=start, end=end)
            else:
                day_interval = get_day_granularity(granularity)

        new_end, new_start = (
            datetime.strptime(
                end, "%Y-%m-%d").date() + timedelta(days=1),
            datetime.strptime(
                start, "%Y-%m-%d").date(),
        )

        day_diff = (new_end - new_start).days

        if day_diff < 0:
            typer.secho(
                f"There must be a problem with your start({new_start}) and end date({new_end}).",
                fg=typer.colors.GREEN,
                bold=True,
            )
            typer.secho(
                "Exiting..", fg=typer.colors.RED,
            )
            sys.exit()
        if day_interval > day_diff:
            typer.secho(
                f"Your date range is {day_diff}, it can not be smaller than your frequency which is {day_interval}.",
                fg=typer.colors.GREEN,
                bold=True,
            )
            typer.secho(
                "Exiting..", fg=typer.colors.RED,
            )
            sys.exit()
        dates = sorted(
            [
                (new_start + timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(0, abs(day_diff), day_interval)
            ]
        )
        if dates[0] != start:
            dates.insert(0, start)
        if dates[-1] != end:
            dates.append(end)
        return dates

    elif granularity is not None:
        start_date, end_date = create_date(year=2020), get_today()
        granularity = (
            get_weekday_by_name(granularity)  # type: ignore
            if get_weekday_by_name(granularity) != 10
            else get_day_granularity(granularity)
        )
        return granularity_days(granularity=granularity, start=start_date, end=end_date)

    elif days is not None:
        return sorted(
            [
                (datetime.today().date() - timedelta(days=x)).strftime("%Y-%m-%d")
                for x in range(days)
            ]
        )
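A quick sanity check of the start/end branch, assuming the daily default for day_interval added above:

# Hedged example: an inclusive daily range.
create_date_range(start="2020-03-15", end="2020-03-18")
# -> ['2020-03-15', '2020-03-16', '2020-03-17', '2020-03-18']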
Example #20
def wlan(
    group: str = typer.Argument(...,
                                metavar="[GROUP NAME|SWARM ID]",
                                autocompletion=lambda incomplete: []),
    name: str = typer.Argument(..., ),
    kw1: Tuple[AddWlanArgs, str] = typer.Argument(
        ("psk", None),
        metavar="psk [WPA PASSPHRASE]",
    ),
    kw2: Tuple[AddWlanArgs, str] = typer.Argument(
        ("type", "employee"),
        metavar="type ['employee'|'guest']",
    ),
    kw3: Tuple[AddWlanArgs, str] = typer.Argument(
        ("vlan", ""),
        metavar="vlan [VLAN]",
    ),
    kw4: Tuple[AddWlanArgs, str] = typer.Argument(
        ("zone", ""),
        metavar="zone [ZONE]",
    ),
    kw5: Tuple[AddWlanArgs, str] = typer.Argument(
        ("ssid", None),
        metavar="ssid [SSID]",
    ),
    kw6: Tuple[AddWlanArgs, str] = typer.Argument(
        ("bw_limit_up", ""),
        metavar="bw-limit-up [LIMIT]",
    ),
    kw7: Tuple[AddWlanArgs, str] = typer.Argument(
        ("bw_limit_down", ""),
        metavar="bw-limit-down [LIMIT]",
    ),
    kw8: Tuple[AddWlanArgs, str] = typer.Argument(
        ("bw_limit_user_up", ""),
        metavar="bw-limit-user-up [LIMIT]",
    ),
    kw9: Tuple[AddWlanArgs, str] = typer.Argument(
        ("bw_limit_user_down", ""),
        metavar="bw-limit-user-down [LIMIT]",
    ),
    kw10: Tuple[AddWlanArgs, str] = typer.Argument(
        ("portal_profile", ""),
        metavar="portal-profile [PORTAL PROFILE]",
    ),
    hidden: bool = typer.Option(False, "--hidden", help="Make WLAN hidden"),
    yes: bool = typer.Option(False,
                             "-Y",
                             help="Bypass confirmation prompts - Assume Yes"),
    yes_: bool = typer.Option(False, "-y", hidden=True),
    debug: bool = typer.Option(
        False,
        "--debug",
        envvar="ARUBACLI_DEBUG",
        help="Enable Additional Debug Logging",
    ),
    default: bool = typer.Option(
        False,
        "-d",
        is_flag=True,
        help="Use default central account",
    ),
    account: str = typer.Option(
        "central_info",
        envvar="ARUBACLI_ACCOUNT",
        help="The Aruba Central Account to use (must be defined in the config)",
    ),
) -> None:
    yes = yes_ if yes_ else yes
    group = cli.cache.get_group_identifier(group)
    kwarg_list = [kw1, kw2, kw3, kw4, kw5, kw6, kw7, kw8, kw9, kw10]
    _to_name = {
        "psk": "wpa_passphrase",
        "ssid": "essid",
        "bw_limit_up": "bandwidth_limit_up",
        "bw_limit_down": "bandwidth_limit_down",
        "bw_limit_user_up": "bandwidth_limit_peruser_up",
        "bw_limit_user_down": "bandwidth_limit_peruser_down",
        "portal_profile": "captive_profile_name",
    }
    kwargs = {_to_name.get(kw[0], kw[0]): kw[1] for kw in kwarg_list}
    if hidden:
        kwargs["hide_ssid"] = True

    if not kwargs["wpa_passphrase"]:
        typer.secho("psk/passphrase is currently required for this command")
        raise typer.Exit(1)

    if yes or typer.confirm(
            typer.style(f"Please Confirm Add wlan {name} to {group.name}",
                        fg="cyan")):
        resp = cli.central.request(cli.central.create_wlan, group.name, name,
                                   **kwargs)
        typer.secho(str(resp), fg="green" if resp else "red")
    else:
        raise typer.Abort()
Example #21
def print_error(msg: str):
    typer.secho(
        msg,
        fg=typer.colors.RED,
        err=True,
    )
Example #22
def program_extract(
        source: List[str] = Arg(
            None, help="Data source to describe [default: stdin]"),
        source_type: str = Opt(None,
                               help='Specify source type e.g. "package"'),
        # File
        scheme: str = Opt(None, help="Specify scheme  [default: inferred]"),
        format: str = Opt(None, help="Specify format  [default: inferred]"),
        hashing: str = Opt(
            None, help="Specify hashing algorithm  [default: inferred]"),
        encoding: str = Opt(None,
                            help="Specify encoding  [default: inferred]"),
        compression: str = Opt(
            None, help="Specify compression  [default: inferred]"),
        compression_path: str = Opt(
            None, help="Specify in-archive path  [default: first]"),
        # Control/Dialect/Query/Header
        header_rows: str = Opt(
            None, help="Comma-separated row numbers  [default: 1]"),
        header_join: str = Opt(None,
                               help="A separator to join a multiline header"),
        pick_fields: str = Opt(
            None, help='Comma-separated fields to pick e.g. "1,name1"'),
        skip_fields: str = Opt(
            None, help='Comma-separated fields to skip e.g. "2,name2"'),
        limit_fields: int = Opt(None, help="Limit fields by this integer"),
        offset_fields: int = Opt(None, help="Offset fields by this integer"),
        pick_rows: str = Opt(
            None, help='Comma-separated rows to pick e.g. "1,<blank>"'),
        skip_rows: str = Opt(
            None, help='Comma-separated rows to skip e.g. "2,3,4,5"'),
        limit_rows: int = Opt(None, help="Limit rows by this integer"),
        offset_rows: int = Opt(None, help="Offset rows by this integer"),
        # Schema
        schema: str = Opt(None, help="Specify a path to a schema"),
        sync_schema: bool = Opt(None, help="Sync the schema based on headers"),
        # Infer
        infer_type: str = Opt(None,
                              help="Force all the fields to have this type"),
        infer_names: str = Opt(None,
                               help="Comma-separated list of field names"),
        infer_volume: int = Opt(None,
                                help="Limit data sample size by this integer"),
        infer_confidence: float = Opt(None, help="A float from 0 to 1"),
        infer_missing_values: str = Opt(
            None, help="Comma-separated list of missing values"),
        # Package/Resource
        basepath: str = Opt(None, help="Basepath of the resource/package"),
        # Output
        yaml: bool = Opt(False, help="Return in pure YAML format"),
        json: bool = Opt(False, help="Return in JSON format"),
        csv: bool = Opt(False, help="Return in CSV format"),
):
    """
    Extract a data source.

    Based on the inferred data source type it will return resource or package data.
    Default output format is tabulated with a front matter.
    """

    # Support stdin
    is_stdin = False
    if not source:
        is_stdin = True
        source = [helpers.create_byte_stream(sys.stdin.buffer.read())]

    # Normalize parameters
    source = list(source) if len(source) > 1 else source[0]
    header_rows = helpers.parse_csv_string(header_rows, convert=int)
    pick_fields = helpers.parse_csv_string(pick_fields,
                                           convert=int,
                                           fallback=True)
    skip_fields = helpers.parse_csv_string(skip_fields,
                                           convert=int,
                                           fallback=True)
    pick_rows = helpers.parse_csv_string(pick_rows, convert=int, fallback=True)
    skip_rows = helpers.parse_csv_string(skip_rows, convert=int, fallback=True)
    infer_names = helpers.parse_csv_string(infer_names)
    infer_missing_values = helpers.parse_csv_string(infer_missing_values)

    # Prepare dialect
    dialect = (Dialect(
        header_rows=header_rows,
        header_join=header_join,
    ) or None)

    # Prepare query
    query = (Query(
        pick_fields=pick_fields,
        skip_fields=skip_fields,
        limit_fields=limit_fields,
        offset_fields=offset_fields,
        pick_rows=pick_rows,
        skip_rows=skip_rows,
        limit_rows=limit_rows,
        offset_rows=offset_rows,
    ) or None)

    # Prepare options
    options = helpers.remove_non_values(
        dict(
            source_type=source_type,
            # File
            scheme=scheme,
            format=format,
            hashing=hashing,
            encoding=encoding,
            compression=compression,
            compression_path=compression_path,
            # Control/Dialect/Query/Header
            dialect=dialect,
            query=query,
            # Schema
            schema=schema,
            sync_schema=sync_schema,
            # Infer
            infer_type=infer_type,
            infer_names=infer_names,
            infer_volume=infer_volume,
            infer_confidence=infer_confidence,
            infer_missing_values=infer_missing_values,
            # Package/Resource
            basepath=basepath,
        ))

    # Extract data
    try:
        process = (
            lambda row: row.to_dict(json=True)) if json or yaml else None
        data = extract(source, process=process, **options)
    except Exception as exception:
        typer.secho(str(exception), err=True, fg=typer.colors.RED, bold=True)
        raise typer.Exit(1)

    # Normalize data
    normdata = data
    if isinstance(data, list):
        normdata = {source: data}

    # Return JSON
    if json:
        content = pyjson.dumps(data, indent=2, ensure_ascii=False)
        typer.secho(content)
        raise typer.Exit()

    # Return YAML
    if yaml:
        content = pyyaml.safe_dump(data).strip()
        typer.secho(content)
        raise typer.Exit()

    # Return CSV
    if csv:
        for number, rows in enumerate(normdata.values(), start=1):
            for row in rows:
                if row.row_number == 1:
                    typer.secho(
                        helpers.stringify_csv_string(row.schema.field_names))
                typer.secho(row.to_str())
            if number < len(normdata):
                typer.secho("")
        raise typer.Exit()

    # Return default
    for number, (name, rows) in enumerate(normdata.items(), start=1):
        if is_stdin:
            name = "stdin"
        typer.secho("---")
        typer.secho(f"data: {name}", bold=True)
        typer.secho("---")
        typer.secho("")
        subdata = helpers.rows_to_data(rows)
        typer.secho(
            str(petl.util.vis.lookall(subdata, vrepr=str, style="simple")))
        if number < len(normdata):
            typer.secho("")
Example #23
def main(config_path: str = 'config.yaml'):

    typer.secho("----- starting convert to tfrecors -----", fg=typer.colors.GREEN)

    config = load_yaml(config_path)

    typer.secho(f"loaded config : {config_path}", fg=typer.colors.GREEN)

    typer.secho(f"target train dataset : {config['train_row_images']}", fg=typer.colors.GREEN)
    typer.secho(f"target test dataset : {config['test_row_images']}", fg=typer.colors.GREEN)

    p_train_images = pathlib.Path(config['train_row_images'])
    p_test_images = pathlib.Path(config['test_row_images'])

    labels = {}

    for index, i in enumerate(list(p_train_images.iterdir())):
        labels[i.stem] = index

    typer.secho(f"----- convert train dataset -----", fg=typer.colors.GREEN)
    train_images = make_image_labels(p_train_images, labels, config)
    train_record_file = f'{config["saved_dir"]}/{config["tfrecod_train_name"]}'
    write_tfrecords(train_record_file, train_images)

    typer.secho(f"----- convert test dataset -----", fg=typer.colors.GREEN)
    test_images = make_image_labels(p_test_images, labels, config)
    test_record_file = f'{config["saved_dir"]}/{config["tfrecod_test_name"]}'
    write_tfrecords(test_record_file, test_images)

    typer.secho(f"----- finishing convert to tfrecors -----", fg=typer.colors.GREEN)
Example #24
def info():
    """information about the tool."""
    typer.secho("a tool to peek into a github repo.")
    typer.secho("version: {}".format(__version__))
Example #25
def main(skip_existing: bool = False, force: bool = False):
    """
    Pull company metadata from LinkedIn and write to tags in README.md.

    Add tags <!--linkedin:company_name--><!--endlinkedin--> to README.md, where
    `company_name` corresponds to the last piece of the company's LinkedIn URL.
    """

    # Read LinkedIn account details from .env or terminal.
    load_dotenv()
    email = os.getenv("LINKEDIN_EMAIL")
    password = os.getenv("LINKEDIN_PASSWORD")
    if email is None or password is None:
        typer.echo(
            "Enter LinkedIn account to query the API (or use .env file)")
        typer.echo(
            "WARNING: Accounts with excessive API calls are sometimes blocked "
            "by LinkedIn.")
        email = input("LinkedIn email: ")
        password = getpass.getpass()
    else:
        typer.echo("LinkedIn account details read from .env")

    # Set up LinkedIn API.
    api = Linkedin(email, password, refresh_cookies=True)

    def create_company_description(name):
        """Create a markup description of the company from its LinkedIn `name`."""
        company = api.get_company(name)

        # Number of staff members.
        staff = company["staffCount"]
        staff_url = f"https://www.linkedin.com/company/{name}/people/"
        md = f" &nbsp;[👷 {staff}]({staff_url})"

        # Number of job openings.
        # Search for all jobs by the (full) company name first.
        # For generic company names, this will return a lot of false positives.
        full_name = company["name"]
        jobs_list = api.search_jobs(full_name, location_name="Berlin, Germany")
        # Then, filter by the company URN (unique identifier from LinkedIn).
        urn = company["entityUrn"]
        filtered_jobs_list = [
            job for job in jobs_list
            if job["companyDetails"].get("company", "") == urn
        ]
        jobs = len(filtered_jobs_list)
        if jobs > 0:
            jobs_url = f"https://www.linkedin.com/company/{name}/jobs/"
            md += f" &nbsp;[🔎 {jobs}]({jobs_url})"

        # Funding round.
        if "fundingData" in company:
            funding_type = company["fundingData"]["lastFundingRound"][
                "fundingType"]
            # Only show "Seed" or "Series X", otherwise show "X rounds" (there are some
            # other weird funding type names).
            if funding_type in [
                    "SEED", "SERIES_A", "SERIES_B", "SERIES_C", "SERIES_D"
            ]:
                funding = funding_type.replace("_", " ").title()
            else:
                funding_rounds = company["fundingData"]["numFundingRounds"]
                funding = f"{funding_rounds} round"
                if funding_rounds > 1:
                    funding += "s"
            funding_url = company["fundingData"][
                "fundingRoundListCrunchbaseUrl"]
            md += f" &nbsp;[💰 {funding}]({funding_url})"

        return md

    # Read README.md.
    with open("README.md", "r") as f:
        text = f.read()

    # Replace old descriptions with new ones.
    typer.echo("-" * 80)
    for name, old_desc in re.findall(
            "<!--linkedin:(.*?)-->(.*?)<!--endlinkedin-->", text):
        if skip_existing and old_desc:
            typer.echo(name + ": skipped")
        else:
            typer.echo(name + ":")
            new_desc = create_company_description(name)
            typer.echo(new_desc)
            text = text.replace(
                f"<!--linkedin:{name}-->{old_desc}<!--endlinkedin-->",
                f"<!--linkedin:{name}-->{new_desc}<!--endlinkedin-->",
            )
            typer.echo()

    # typer.echo updated file content.
    typer.echo("-" * 80)
    typer.echo()
    typer.echo(text)
    typer.echo()
    typer.echo("-" * 80)

    # Write to file.
    if force:
        write = "y"
    else:
        write = input("Review modified text above. Write to README.md? (Y/n) ")
    if write.lower() in ["", "y", "yes"]:
        os.rename("README.md", "old-README.md")
        with open("README.md", "w") as f:
            f.write(text)
        typer.secho("✓ Updated README.md (old file stored in old-README.md",
                    fg="green")
    else:
        typer.secho("✗ Did NOT update README.md", fg="red")
Example #26
0
def method_test(
    method: str = typer.Argument(...),
    kwargs: List[str] = typer.Argument(None),
    do_json: bool = typer.Option(True,
                                 "--json",
                                 is_flag=True,
                                 help="Output in JSON"),
    do_yaml: bool = typer.Option(False,
                                 "--yaml",
                                 is_flag=True,
                                 help="Output in YAML"),
    do_csv: bool = typer.Option(False,
                                "--csv",
                                is_flag=True,
                                help="Output in CSV"),
    do_table: bool = typer.Option(False,
                                  "--table",
                                  is_flag=True,
                                  help="Output in Table"),
    outfile: Path = typer.Option(None,
                                 help="Output to file (and terminal)",
                                 writable=True),
    no_pager: bool = typer.Option(True, "--pager", help="Enable Paged Output"),
    update_cache: bool = typer.Option(
        False, "-U", hidden=True),  # Force Update of cache for testing
    default: bool = typer.Option(False,
                                 "-d",
                                 is_flag=True,
                                 help="Use default central account",
                                 callback=cli.default_callback),
    debug: bool = typer.Option(False,
                               "--debug",
                               envvar="ARUBACLI_DEBUG",
                               help="Enable Additional Debug Logging",
                               callback=cli.debug_callback),
    account: str = typer.Option(
        "central_info",
        envvar="ARUBACLI_ACCOUNT",
        help="The Aruba Central Account to use (must be defined in the config)",
        callback=cli.account_name_callback),
) -> None:
    """dev testing commands to run CentralApi methods from command line

    Args:
        method (str, optional): CentralAPI method to test.
        kwargs (List[str], optional): list of args kwargs to pass to function.

    format: arg1 arg2 keyword=value keyword2=value
        or  arg1, arg2, keyword = value, keyword2=value

    Displays all attributes of Response object
    """
    cli.cache(refresh=update_cache)
    central = CentralApi(account)
    if not hasattr(central, method):
        typer.secho(f"{method} does not exist", fg="red")
        raise typer.Exit(1)
    args = [k for k in kwargs if "=" not in k]
    kwargs = [
        k.replace(" =", "=").replace("= ",
                                     "=").replace(",", " ").replace("  ", " ")
        for k in kwargs
    ]
    kwargs = [k.split("=") for k in kwargs if "=" in k]
    kwargs = {k[0]: k[1] for k in kwargs}

    typer.secho(
        f"session.{method}({', '.join(a for a in args)}, "
        f"{', '.join([f'{k}={kwargs[k]}' for k in kwargs]) if kwargs else ''})",
        fg="cyan")
    resp = central.request(getattr(central, method), *args, **kwargs)

    for k, v in resp.__dict__.items():
        if k != "output":
            if debug or not k.startswith("_"):
                typer.echo(f"  {typer.style(k, fg='cyan')}: {v}")

    tablefmt = cli.get_format(do_json, do_yaml, do_csv, do_table)

    typer.echo(f"\n{typer.style('CentralCLI Response Output', fg='cyan')}:")
    cli.display_results(resp,
                        tablefmt=tablefmt,
                        pager=not no_pager,
                        outfile=outfile)
    typer.echo(f"\n{typer.style('Raw Response Output', fg='cyan')}:")
    cli.display_results(data=resp.raw,
                        tablefmt=tablefmt,
                        pager=not no_pager,
                        outfile=outfile)
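A simplified, standalone sketch of how the `arg1 arg2 keyword=value` format described in the docstring is split into positional and keyword arguments (the sample values are made up, and the comma handling of the original is omitted):

raw = ["ABC123", "group = Branch1", "reboot=true"]

args = [k for k in raw if "=" not in k]
pairs = [k.replace(" =", "=").replace("= ", "=") for k in raw if "=" in k]
kwargs = dict(p.split("=", 1) for p in pairs)

print(args)    # ['ABC123']
print(kwargs)  # {'group': 'Branch1', 'reboot': 'true'}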
Example #27
0
def transformers(
    path_to_senteval: str,
    pretrained_model_name_or_path: str,
    output_filepath: str = None,
    mean_pool: bool = False,
    cuda_device: int = -1,
    prototyping_config: bool = False,
    verbose: bool = False,
) -> None:
    """Evaluates a pre-trained model from the Transformers library against the SentEval benchmark."""

    from transformers import AutoModel, AutoTokenizer

    # SentEval prepare and batcher
    def prepare(params, samples):
        return

    @torch.no_grad()
    def batcher(params, batch):
        batch = _cleanup_batch(batch)
        # Re-tokenize the input text using the pre-trained tokenizer
        batch = [" ".join(tokens) for tokens in batch]
        # HACK (John): This will save us in the case of tokenizers with no default max_length
        # Why does this happen? Open an issue on Transformers.
        max_length = params.tokenizer.max_length if hasattr(params.tokenizer, "max_length") else 512
        inputs = params.tokenizer.batch_encode_plus(
            batch, pad_to_max_length=True, max_length=max_length, return_tensors="pt"
        )
        # Place all input tensors on same device as the model
        inputs = {name: tensor.to(params.device) for name, tensor in inputs.items()}

        sequence_output, pooled_output = model(**inputs)

        # If mean_pool, we take the average of the token-level embeddings, accounting for pads.
        # Otherwise, we take the pooled output for this specific model, which is typically the
        # embedding of a special tokens embedding, like [CLS] or <s>, which is prepended to the
        # input during tokenization.
        if mean_pool:
            embeddings = torch.sum(
                sequence_output * inputs["attention_mask"].unsqueeze(-1), dim=1
            ) / torch.clamp(torch.sum(inputs["attention_mask"], dim=1, keepdim=True), min=1e-9)
        else:
            embeddings = pooled_output
        embeddings = embeddings.cpu().numpy()

        return embeddings

    # Determine the torch device
    device = _get_device(cuda_device)

    # Load the Transformers tokenizer
    tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path)
    typer.secho(
        (
            f"{SUCCESS} Tokenizer '{pretrained_model_name_or_path}' from Transformers loaded"
            " successfully."
        ),
        fg=typer.colors.GREEN,
        bold=True,
    )

    # Load the Transformers model
    model = AutoModel.from_pretrained(pretrained_model_name_or_path)
    model.to(device)
    model.eval()
    typer.secho(
        f'{SUCCESS} Model "{pretrained_model_name_or_path}" from Transformers loaded successfully.',
        fg=typer.colors.GREEN,
        bold=True,
    )

    # Performs a few setup steps and returns the SentEval params
    params_senteval = _setup_senteval(path_to_senteval, prototyping_config, verbose)
    params_senteval["tokenizer"] = tokenizer
    params_senteval["model"] = model
    params_senteval["device"] = device
    _run_senteval(params_senteval, path_to_senteval, batcher, prepare, output_filepath)

    return
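The `mean_pool` branch above averages token-level embeddings while masking out padding. The same computation as a self-contained sketch with tiny made-up tensors:

import torch

sequence_output = torch.ones(2, 4, 3)                  # (batch, seq_len, hidden)
attention_mask = torch.tensor([[1., 1., 0., 0.],
                               [1., 1., 1., 1.]])      # 1 = real token, 0 = padding

summed = torch.sum(sequence_output * attention_mask.unsqueeze(-1), dim=1)
counts = torch.clamp(torch.sum(attention_mask, dim=1, keepdim=True), min=1e-9)
embeddings = summed / counts                           # pads do not dilute the average

print(embeddings.shape)  # torch.Size([2, 3])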
Example #28
0
def program_describe(
        source: List[str] = Arg(
            None, help="Data source to describe [default: stdin]"),
        type: str = Opt(None, help='Specify source type e.g. "package"'),
        # File
        scheme: str = Opt(None, help="Specify scheme  [default: inferred]"),
        format: str = Opt(None, help="Specify format  [default: inferred]"),
        hashing: str = Opt(
            None, help="Specify hashing algorithm  [default: inferred]"),
        encoding: str = Opt(None,
                            help="Specify encoding  [default: inferred]"),
        innerpath: str = Opt(None,
                             help="Specify in-archive path  [default: first]"),
        compression: str = Opt(
            None, help="Specify compression  [default: inferred]"),
        # Layout
        header_rows: str = Opt(
            None, help="Comma-separated row numbers  [default: 1]"),
        header_join: str = Opt(None,
                               help="A separator to join a multiline header"),
        pick_fields: str = Opt(
            None, help='Comma-separated fields to pick e.g. "1,name1"'),
        skip_fields: str = Opt(
            None, help='Comma-separated fields to skip e.g. "2,name2"'),
        limit_fields: int = Opt(None, help="Limit fields by this integer"),
        offset_fields: int = Opt(None, help="Offset fields by this integer"),
        pick_rows: str = Opt(
            None, help='Comma-separated rows to pick e.g. "1,<blank>"'),
        skip_rows: str = Opt(
            None, help='Comma-separated rows to skip e.g. "2,3,4,5"'),
        limit_rows: int = Opt(None, help="Limit rows by this integer"),
        offset_rows: int = Opt(None, help="Offset rows by this integer"),
        # Infer
        infer_type: str = Opt(None,
                              help="Force all the fields to have this type"),
        infer_names: str = Opt(None,
                               help="Comma-separated list of field names"),
        infer_volume: int = Opt(None,
                                help="Limit data sample size by this integer"),
        infer_confidence: float = Opt(None, help="A float from 0 to 1"),
        infer_missing_values: str = Opt(
            None, help="Comma-separated list of missing values"),
        # Package/Resource
        basepath: str = Opt(None, help="Basepath of the resource/package"),
        # Description
        expand: bool = Opt(None, help="Expand default values"),
        nostats: bool = Opt(None, help="Do not infer stats"),
        # Output
        yaml: bool = Opt(False, help="Return in pure YAML format"),
        json: bool = Opt(False, help="Return in JSON format"),
):
    """
    Describe a data source.

    Based on the inferred data source type it will return resource or package descriptor.
    Default output format is YAML with a front matter.
    """

    # Support stdin
    is_stdin = False
    if not source:
        is_stdin = True
        source = [helpers.create_byte_stream(sys.stdin.buffer.read())]

    # Normalize parameters
    source = list(source) if len(source) > 1 else source[0]
    header_rows = helpers.parse_csv_string(header_rows, convert=int)
    pick_fields = helpers.parse_csv_string(pick_fields,
                                           convert=int,
                                           fallback=True)
    skip_fields = helpers.parse_csv_string(skip_fields,
                                           convert=int,
                                           fallback=True)
    pick_rows = helpers.parse_csv_string(pick_rows, convert=int, fallback=True)
    skip_rows = helpers.parse_csv_string(skip_rows, convert=int, fallback=True)
    infer_names = helpers.parse_csv_string(infer_names)
    infer_missing_values = helpers.parse_csv_string(infer_missing_values)

    # Prepare layout
    layout = (Layout(
        header_rows=header_rows,
        header_join=header_join,
        pick_fields=pick_fields,
        skip_fields=skip_fields,
        limit_fields=limit_fields,
        offset_fields=offset_fields,
        pick_rows=pick_rows,
        skip_rows=skip_rows,
        limit_rows=limit_rows,
        offset_rows=offset_rows,
    ) or None)

    # Prepare options
    options = helpers.remove_non_values(
        dict(
            type=type,
            # File
            scheme=scheme,
            format=format,
            hashing=hashing,
            encoding=encoding,
            innerpath=innerpath,
            compression=compression,
            # Layout
            layout=layout,
            # Infer
            infer_type=infer_type,
            infer_names=infer_names,
            infer_volume=infer_volume,
            infer_confidence=infer_confidence,
            infer_missing_values=infer_missing_values,
            # Description
            expand=expand,
            nostats=nostats,
        ))

    # Describe source
    try:
        metadata = describe(source, **options)
    except Exception as exception:
        typer.secho(str(exception), err=True, fg=typer.colors.RED, bold=True)
        raise typer.Exit(1)

    # Return JSON
    if json:
        descriptor = metadata.to_json()
        typer.secho(descriptor)
        raise typer.Exit()

    # Return YAML
    if yaml:
        descriptor = metadata.to_yaml().strip()
        typer.secho(descriptor)
        raise typer.Exit()

    # Return default
    if is_stdin:
        source = "stdin"
    elif isinstance(source, list):
        source = " ".join(source)
    typer.secho("---")
    typer.secho(f"metadata: {source}", bold=True)
    typer.secho("---")
    typer.secho("")
    typer.secho(metadata.to_yaml().strip())
    typer.secho("")
Example #29
0
def install(
    registry_app: str = typer.Argument(None),
    name: str = typer.Option(None, "--name", "-n"),
    source: str = typer.Option(None, "--from", "-f"),
    destination: Path = typer.Option(None, "--to", "-t"),
    compose_file: str = typer.Option("docker-compose.yml", "--compose-file",
                                     "-c"),
):
    """
    Install an app into backplane.
    """

    if conf.verbose > 0:
        typer.secho(
            f"Installing {name} from {destination}",
            err=False,
            fg=typer.colors.BRIGHT_BLACK,
        )
    app_name = name if name else os.path.basename(os.getcwd())
    app_source = source if source else os.getcwd()
    app_destination = destination if destination else os.getcwd()

    # Check if an app has been named to be taken from the app registry
    if registry_app:
        app_name = name if name else registry_app
        app_destination = os.path.join(conf.app_dir, app_name)

        try:
            backplane_app = App(
                compose_file=compose_file,
                config=conf,
                destination=app_destination,
                name=app_name,
                registry_app=registry_app,
                source="registry",
            )
            backplane_app.install()
        except Exception as e:
            typer.secho(
                f"Failed to install {name} from {path}: {e}",
                err=True,
                fg=typer.colors.RED,
            )
    elif source and not destination:
        app_name = utils.get_repo_name_from_url(source)
        app_destination = os.path.join(conf.app_dir, app_name)

        # Specified a source but no destination
        # set destination to app_dir/app_name
        try:
            backplane_app = App(
                compose_file=compose_file,
                config=conf,
                destination=app_destination,
                name=app_name,
                registry_app=registry_app,
                source=app_source,
            )
            backplane_app.install()
        except Exception as e:
            typer.secho(
                f"Failed to install {name} from {destination}: {e}",
                err=True,
                fg=typer.colors.RED,
            )
    else:
        try:
            # Set name if given or default to app name
            backplane_app = App(
                compose_file=compose_file,
                config=conf,
                destination=app_destination,
                name=app_name,
                registry_app=registry_app,
                source=app_source,
            )
            backplane_app.install()
        except Exception as e:
            typer.secho(
                f"Failed to install {name} from {destination}: {e}",
                err=True,
                fg=typer.colors.RED,
            )
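As an assumption for illustration only, `utils.get_repo_name_from_url` above is taken to derive the app name from the last path segment of a git URL; a sketch of that behaviour:

def get_repo_name_from_url(url: str) -> str:
    # Take the last path segment and strip a trailing ".git" if present.
    name = url.rstrip("/").rsplit("/", 1)[-1]
    return name[:-4] if name.endswith(".git") else name

print(get_repo_name_from_url("https://github.com/example/whoami.git"))  # whoami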
Example #30
0
def main(
    ctx: typer.Context,
    language: Language = typer.Option(
        None,
        "--language",
        "-L",
        show_choices=True,
        show_default=True,
        help="Choose catalog language",
    ),
    topic: Topic = typer.Option(
        None,
        "--topic",
        "-T",
        show_default=True,
        show_choices=True,
        help="Choose a catalog topic.",
    ),
):
    """![Downloading](https://github.com/JnyJny/springer_downloader/raw/master/demo/animations/download-catalog.gif)
    __Springer Textbook Bulk Download Tool__
    
    ## NOTICE

    The author of this software is not affiliated with Springer and this
    tool is not authorized or supported by Springer. Thank you to
    Springer for making these high quality textbooks available at no
    cost.

    \b
    >"With the Coronavirus outbreak having an unprecedented impact on
    >education, Springer Nature is launching a global program to support
    >learning and teaching at higher education institutions
    >worldwide. Remote access to educational resources has become
    >essential. We want to support lecturers, teachers and students
    >during this challenging period and hope that this initiative will go
    >some way to help.
    >
    >Institutions will be able to access more than 500 key textbooks
    >across Springer Nature’s eBook subject collections for free. In
    >addition, we are making a number of German-language Springer medical
    >training books on emergency nursing freely accessible.  These books
    >will be available via SpringerLink until at least the end of July."

    [Source](https://www.springernature.com/gp/librarians/news-events/all-news-articles/industry-news-initiatives/free-access-to-textbooks-for-institutions-affected-by-coronaviru/17855960)

    ## Overview

    This tool automates the process of downloading the Springer-provided
    Excel catalogs, locating download URLs, and downloading the files in
    PDF or EPUB format.

    Catalogs are lists of books in a specific _language_, spanning a
    _topic_. Catalogs are further subdivided into _packages_ which are
    books grouped by sub-topics.
    
    Textbooks can be downloaded by title, package name, or the entire
    catalog. Title and package names can be incompletely specified and
    are case-insensitive.

    The available languages are: English & German.

    The available topics are: _All Disciplines_ and _Emergency Nursing_.

    **Note: The _Emergency Nursing_ topic is not available in English.**

    ## Source and License

    Full source is available on
    [GitHub](https://github.com/JnyJny/springer_downloader) and it is
    licensed under the
    [Apache-2.0](https://www.apache.org/licenses/LICENSE-2.0)
    license.

    ## Installation

    This utility can be installed using `pip`:

    `$ python3 -m pip install springer`

    Or from the latest source on GitHub:

    `$ python3 -m pip install git+https://github.com/JnyJny/springer_downloader`
    """

    # EJO The callback function is called before any of the command functions
    #     are invoked. Since all the subcommands work with an instantiation of
    #     springer.catalog.Catalog, we create one in the callback and attach it
    #     to the typer.Context object using the attribute 'obj'. I don't
    #     particularly care for accessing the catalog as 'ctx.obj' in the
    #     subcommands, but I haven't found a better solution to this "problem"
    #     yet.

    try:
        ctx.obj = Catalog(language, topic)

    except KeyError as error:
        typer.secho(
            f"Failed to locate a catalog for: '{error.args[0].value!s}'",
            fg="red")
        raise typer.Exit(-1)
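The comment above describes the usual Typer pattern of building one shared object in the app callback and handing it to every subcommand through `ctx.obj`. A minimal, self-contained sketch of that pattern (the `State` class and `greet` command are illustrative and not part of the springer tool):

import typer

app = typer.Typer()


class State:
    def __init__(self, language: str):
        self.language = language


@app.callback()
def main(ctx: typer.Context, language: str = "en"):
    """Build the shared state once, before any subcommand runs."""
    ctx.obj = State(language)


@app.command()
def greet(ctx: typer.Context):
    """Read the shared state back from the context."""
    typer.echo(f"catalog language is {ctx.obj.language}")


if __name__ == "__main__":
    app()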