Example #1
def read_pyproject_toml(ctx: click.Context, param: click.Parameter,
                        value: Optional[str]) -> Optional[str]:
    """Inject Black configuration from "pyproject.toml" into defaults in `ctx`.

    Returns the path to a successfully found and read configuration file, None
    otherwise.
    """
    if not value:
        value = find_pyproject_toml(ctx.params.get("src", ()))
        if value is None:
            return None

    try:
        config = parse_pyproject_toml(value)
    except (OSError, ValueError) as e:
        raise click.FileError(
            filename=value,
            hint=f"Error reading configuration file: {e}") from None

    if not config:
        return None
    else:
        # Sanitize the values to be Click friendly. For more information please see:
        # https://github.com/psf/black/issues/1458
        # https://github.com/pallets/click/issues/1567
        config = {
            k: str(v) if not isinstance(v, (list, dict)) else v
            for k, v in config.items()
        }

    target_version = config.get("target_version")
    if target_version is not None and not isinstance(target_version, list):
        raise click.BadOptionUsage("target-version",
                                   "Config key target-version must be a list")

    default_map: Dict[str, Any] = {}
    if ctx.default_map:
        default_map.update(ctx.default_map)
    default_map.update(config)

    ctx.default_map = default_map
    return value
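
The callback above only takes effect when it is attached to an eager option; below is a minimal sketch of that wiring, assuming read_pyproject_toml and its find_pyproject_toml helper are importable. The option name, help text, and command body are illustrative, not part of the example.

import click

@click.command()
@click.option(
    "--config",
    type=click.Path(dir_okay=False),
    is_eager=True,                  # evaluate before the other parameters
    callback=read_pyproject_toml,   # fills ctx.default_map from pyproject.toml
    help="Read configuration from this TOML file.",
)
@click.argument("src", nargs=-1)
def main(config, src):
    # Defaults for other options now come from the config file, if one was found.
    click.echo(f"using config: {config}")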
Example #2
def logs(ctx, pod_name):
    '''
    Show logs for POD_NAME
    '''
    kube_config = ctx.obj['kube_config']
    if not os.path.exists(kube_config):
        raise click.FileError(
            kube_config,
            'File doesn\'t exist. "k3s_config.yaml" either needs to be in your '
            'current working directory or you can specify a kube config file path with '
            '--kube-config')

    pod_names = [p['name'] for p in get_pod_list(config_path=kube_config)]

    if pod_name not in pod_names:
        click.echo('error: pod name not found')
        return

    print(get_pod_logs(pod_name))
Example #3
 def get_config(self, appliance_profile):
     """
     Creates an ApplianceConfig object out of the config file
     :param appliance_profile: The appliance profile name
     :return: The ApplianceConfig object
     """
     self.validate_config_present(appliance_profile)
     appliance_config_file = self.get_config_file(appliance_profile)
     try:
         config = configparser.ConfigParser()
         config.read(appliance_config_file)
         appliance_url = config.get(DEFAULT_PROFILE, APPLIANCE_ENDPOINT_KEY)
         access_token = config.get(DEFAULT_PROFILE,
                                   APPLIANCE_ACCESS_TOKEN_KEY)
         serial_id = config.get(DEFAULT_PROFILE, APPLIANCE_SERIAL_ID_KEY)
         return ApplianceConfig(appliance_url, access_token, serial_id)
     except IOError as e:
         # Surface the failure as a Click error: FileError takes the filename plus an optional hint.
         raise click.FileError(
             appliance_config_file,
             hint='Unable to parse the appliance config file: %s' % e)
Example #4
def read_config_toml(project_root: Path, config_file: str) -> Config:
    path = project_root / config_file
    if not path.is_file():
        return {}

    try:
        pyproject_toml = toml.load(str(path))
        config = pyproject_toml.get("tool", {}).get("ward", {})
    except (toml.TomlDecodeError, OSError) as e:
        raise click.FileError(filename=config_file,
                              hint=f"Error reading {config_file}:\n{e}")

    if not config:
        return {}

    config = {
        k.replace("--", "").replace("-", "_"): v
        for k, v in config.items()
    }
    return config
Example #5
def find_plate_file(plate_folder, plate_file):
	"""Finds a plate file, either in the current working dir or in the package data resources.

	Parameters
	----------
	plate_folder : str
		the folder to look in first; also used as the package name when falling back to package data.
	plate_file : str
		the filename of the plate file, may include absolute or relative path

	Returns
	-------
	str
		the full path of the plate file.
	"""
	plate_path = os.path.join(plate_folder, plate_file)
	if not os.path.exists(plate_path):
		# if plate path doesn't exist try to get it from package data
		plate_path = pkg_resources.resource_filename(plate_folder, plate_file)
	if not os.path.exists(plate_path):
		raise click.FileError(plate_path, hint="can't find file.")
	return plate_path
Example #6
def get_stack_refs(refs: list):
    '''
    >>> get_stack_refs(['foobar-stack'])
    [StackReference(name='foobar-stack', version=None)]

    >>> get_stack_refs(['foobar-stack', '1'])
    [StackReference(name='foobar-stack', version='1')]

    >>> get_stack_refs(['foobar-stack', '1', 'other-stack'])
    [StackReference(name='foobar-stack', version='1'), StackReference(name='other-stack', version=None)]
    >>> get_stack_refs(['foobar-stack', 'v1', 'v2', 'v99', 'other-stack'])
    [StackReference(name='foobar-stack', version='v1'), StackReference(name='foobar-stack', version='v2'), \
StackReference(name='foobar-stack', version='v99'), StackReference(name='other-stack', version=None)]
    '''
    refs = list(refs)
    refs.reverse()
    stack_refs = []
    last_stack = None
    while refs:
        ref = refs.pop()
        if last_stack is not None and re.compile(
                r'v[0-9][a-zA-Z0-9-]*$').match(ref):
            stack_refs.append(StackReference(last_stack, ref))
        else:
            try:
                with open(ref) as fd:
                    data = yaml.safe_load(fd)
                ref = data['SenzaInfo']['StackName']
            except Exception as e:
                if not STACK_NAME_PATTERN.match(ref):
                    # we can be sure that ref is a file path,
                    # as stack names cannot contain dots or slashes
                    raise click.FileError(ref, str(e))

            if refs:
                version = refs.pop()
            else:
                version = None
            stack_refs.append(StackReference(ref, version))
            last_stack = ref
    return stack_refs
Example #7
    def _locate_nupkg_at_path(self, path, output_dir):
        """
        Finds .nupkg file.
        If no explicit path to the .nupkg file is provided, .csproj file will be searched for in path.
        .csproj will be used to determine the name and version of the package, while the package itself will be
        searched for in the output_dir.
        """
        if path.endswith(".nupkg"):
            if not os.path.isfile(path):
                raise click.FileError(path)
            return path

        csproj_path = CsProj.get_csproj_at_path(path)

        if csproj_path:
            csproj = CsProj(csproj_path)

            assembly_name = csproj.get_assembly_name()
            version = csproj.get_assembly_version()
            normalized_version = NuGetRunner.get_normalized_nuget_pack_version(version)

            nupkg_filename = "{0}.{1}.nupkg".format(assembly_name, normalized_version)
            nupkg_path = os.path.normpath(os.path.join(output_dir, nupkg_filename))

            if not os.path.isfile(nupkg_path):
                raise click.UsageError("Failed to find Nuget Package (.nupkg) at path " + nupkg_path)

        else:
            nuspec_file_path = NuSpec.get_nuspec_at_path(path)
            if nuspec_file_path is not None:
                nuspec = NuSpec(path, self.debug)
                package_id = nuspec.get_package_id()
                version = nuspec.get_package_version()
                normalized_version = NuGetRunner.get_normalized_nuget_pack_version(version)

                nupkg_filename = "{0}.{1}.nupkg".format(package_id, normalized_version)
                nupkg_path = os.path.normpath(os.path.join(output_dir, nupkg_filename))
            else:
                raise click.UsageError("Path must be a valid path to .nuspec, .csproj, or directory containing either")

        return nupkg_path
Example #8
def g_model(padmode, padvalue, interpolation, resource_names,
            datasource, binning, output):
    """Derives a model from DATASOURCE with given BINNING.
    The model is written to OUTPUT.

    BINNING can be a path to a previously created binning, or custom bin edges
    in all dimensions: dimensions are separated by colons, edge values in
    each dimension are separated by commas.
    """
    # datasource checks
    try:
        source = DataSourceIO.read(datasource)
    except Exception:
        raise click.FileError(datasource, "does not exist or is not readable.")

    # validate dimensionality match between binning and source
    if binning.dimensions != len(source.domain):
        raise click.UsageError(
            "Dimensions of binning (%d) and datasource (%d) mismatch."
            % (binning.dimensions, len(source.domain)))

    # resources checks: split list and verify dim match with source
    if resource_names is not None:
        resource_names = resource_names.split(",")
        if len(resource_names) != len(source.column_names):
            raise click.BadOptionUsage(
                "resource-names",
                "Dimensions of resource names (%d) and datasource (%d) mismatch."
                % (len(resource_names), len(source.column_names)))

    # convert model params to enums and create ModelParams object
    model_params = ModelParams(
        Pad_Modes[padmode.upper()],
        Pad_Values[padvalue.upper()],
        Interpolation_Modes[interpolation.upper()]
    )

    # histogram the data with given binning
    histogram = source.get_histogram(binning)

    model = Model.from_histogram(model_params, histogram, resource_names)
    model.to_file(output)
Example #9
def _read_pyproject_toml(ctx, param, value):
    value = value or "pyproject.toml"  # Only accept local pyproject.toml if not specified

    try:
        pyproject_data = toml.load(value)
    except FileNotFoundError:
        return None
    except (toml.TomlDecodeError, OSError) as e:
        raise click.FileError(filename=value, hint=f"Error reading configuration file: {e}")

    ctx.ensure_object(dict)
    ctx.obj["PYPROJECT"] = pyproject_data
    config = pyproject_data.get("tool", {}).get("ni-python-styleguide", {})

    config.pop("quiet", None)
    config.pop("verbose", None)

    if ctx.default_map is None:
        ctx.default_map = {}
    ctx.default_map.update(config)
    return value
Example #10
def get_filepaths(paths: Union[List, str], paths_ignore: Iterable[str],
                  recursive: bool) -> Set[str]:
    """
    Retrieve the filepaths from the command.

    :param paths: List of file/dir paths from the command
    :param recursive: Recursive option
    :raise: click.FileError if directory is given without --recursive option
    """
    targets = set()
    for path in paths:
        if os.path.isfile(path):
            targets.add(path)
        elif os.path.isdir(path):
            if not recursive:
                raise click.FileError(click.format_filename(path),
                                      "Use --recursive to scan directories.")
            top_dir = Path(path)
            _targets = {str(target) for target in top_dir.rglob(r"*")}
            _targets.difference_update(path_filter_set(top_dir, paths_ignore))
            targets.update(_targets)
    return targets
Example #11
def pkginfo(lang, dependency, for_completion, _from):
    fmt = "plain"
    if _from is not None:
        langs_info[lang]["file"] = f"{_from}/{langs_info[lang]['file']}"
    if not os.path.isfile(langs_info[lang]["file"]):
        raise click.FileError(langs_info[lang]["file"],
                              hint="file doesn't exist")
    info = get_info(langs_info[lang])
    if for_completion:
        if dependency == "all":
            click.echo("\n".join(info["prod"].keys()))
            click.echo("\n".join(info["dev"].keys()))
        else:
            click.echo("\n".join(info[dependency].keys()))
    else:
        if dependency == "all":
            click.echo("PROD")
            click.echo(tabulate(as_table(info["prod"]), tablefmt=fmt))
            click.echo("DEV")
            click.echo(tabulate(as_table(info["dev"]), tablefmt=fmt))
        else:
            click.echo(tabulate(as_table(info[dependency]), tablefmt=fmt))
Example #12
def pods(ctx, show_nodes=False):
    '''
    Show table of pods running on kubernetes
    '''
    kube_config = ctx.obj['kube_config']

    if not os.path.exists(kube_config):
        raise click.FileError(
            kube_config,
            'File doesn\'t exist. "k3s_config.yaml" either needs to be in your '
            'current working directory or you can specify a kube config file path with '
            '--kube-config')

    pod_list = get_pod_list(config_path=kube_config)

    table = []
    header = ['Name', 'Status', 'Restarts', 'Age']
    for pod in pod_list:
        row = []
        row.append(pod['name'])

        if pod['deletion_timestamp'] is not None:
            row.append('Terminating')
        else:
            row.append(pod['status'])

        row.append(pod['restart_count'])

        row.append(str(pod['age']).split('.')[0])

        if show_nodes:
            row.append(pod['node_name'])
            header.append('Node Name')

        table.append(row)

    print(tabulate(table, headers=header))
Example #13
def read_pyproject_toml(ctx: click.Context, _param: click.Parameter,
                        value: str) -> Optional[str]:
    """Read config values from a file and load them as defaults.

    :param ctx: A context from a currently executing Click command
    :param _param: The command parameter that triggered this callback
    :param value: The value passed to the command parameter
    """
    if not value:
        root_path = ctx.params.get("repo_path", None)
        if not root_path:
            root_path = "."
        root_path = pathlib.Path(root_path).resolve()
        config_path = root_path / "tartufo.toml"
        if config_path.is_file():
            value = str(config_path)
        else:
            config_path = root_path / "pyproject.toml"
            if config_path.is_file():
                value = str(config_path)
            else:
                return None
    try:
        toml_file = toml.load(value)
        config = toml_file.get("tool", {}).get("tartufo", {})
    except (toml.TomlDecodeError, OSError) as exc:
        raise click.FileError(
            filename=str(value),
            hint="Error reading configuration file: {}".format(exc),
        )
    if not config:
        return None
    if ctx.default_map is None:
        ctx.default_map = {}
    ctx.default_map.update(  # type: ignore
        {k.replace("--", "").replace("-", "_"): v
         for k, v in config.items()})
    return str(value)
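
The key rewriting in the ctx.default_map.update(...) call above mirrors how Click names parameters: an option spelled --max-depth on the command line becomes the Python parameter max_depth, which is the key the default map expects. A small illustration (the option names and values are invented):

config = {"--max-depth": 3, "include-submodules": True}
defaults = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
assert defaults == {"max_depth": 3, "include_submodules": True}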
Example #14
def json_cmd(ctx: Context, force: bool, analysis_dir: str,
             callables: List[str]) -> None:
    """Show the original json for the given callables"""

    diagnostics = JSONDiagnostics(AnalysisOutput.from_directory(analysis_dir),
                                  ctx.parser_class)

    try:
        diagnostics.load(force)
    except JSONDiagnosticsException as e:
        raise click.FileError(e.file, e.description)

    output = []
    for callable in callables:
        entries = diagnostics.entries(callable, pretty_print=True)
        if len(entries) == 0:
            click.echo(f"Missing json for {callable}", err=True)
            continue

        output.extend(entries)

    if len(output) > 0:
        click.echo_via_pager("".join(output))
Example #15
    def _iter_folder(self) -> Generator[Tuple[str, str], None, None]:
        folder_path = pathlib.Path(self.target)
        files = folder_path.rglob(
            "**/*") if self.recurse else folder_path.glob("*")
        for file_path in files:
            relative_path = file_path.relative_to(folder_path)
            if file_path.is_file() and self.should_scan(str(relative_path)):
                try:
                    with file_path.open("rb") as fhd:
                        data = fhd.read()
                except OSError as exc:
                    raise click.FileError(filename=str(file_path),
                                          hint=str(exc))

                try:
                    blob = data.decode("utf-8")
                    if self.global_options.scan_filenames:
                        blob = str(relative_path) + "\n" + blob
                except UnicodeDecodeError:
                    # binary file, continue
                    continue

                yield blob, str(relative_path)
Example #16
def nodes(ctx):
    '''
    List all nodes and pymada images on each node
    '''
    kube_config = ctx.obj['kube_config']
    if not os.path.exists(kube_config):
        raise click.FileError(
            kube_config,
            'File doesn\'t exist. "k3s_config.yaml" either needs to be in your '
            'current working directory or you can specify a kube config file path with '
            '--kube-config')

    node_list = get_node_list()
    table = []

    header = ['node name', 'images']

    for node in node_list:
        row = [node['name'], '\n'.join(node['images'])]
        table.append(row)

    print(tabulate(table, headers=header))
Example #17
def read_pyproject_toml(
    ctx: click.Context,
    param: click.Parameter,
    value: Union[str, int, bool, None],
) -> Optional[str]:
    """Inject configuration from "pyproject.toml" into defaults in `ctx`.
    Returns the path to a successfully found and read configuration file, None
    otherwise.
    """
    assert not isinstance(value, (int, bool)), "Invalid parameter type passed"
    print_config = ctx.params.get("print_config", False)

    if not value:
        value = find_pyproject_toml(ctx.params.get("src", ()))
        if value is None:
            if print_config:
                dump_toml_config(ctx)
            return None

    try:
        config = parse_pyproject_toml(value)
    except (toml.TomlDecodeError, OSError) as e:
        raise click.FileError(filename=value,
                              hint=f"Error reading configuration file: {e}")

    if not config:
        if print_config:
            dump_toml_config(ctx)
        return None

    if print_config:
        dump_toml_config(ctx, config)

    if ctx.default_map is None:
        ctx.default_map = {}
    ctx.default_map.update(config)  # type: ignore  # bad types in .pyi
    return value
Example #18
def main(ctx, config, verbose):
    """ lnt is a command line tool designed to be a better lncli for sysadmins
    and consumers
    """
    # TODO: Allow for custom lnt dir
    config_path = config

    if not config_path:

        # Default lnt dir is in constants.py
        if not check_lnt_folder_exists():
            create_lnt_folder()

        # Checks if the default config is available
        if not check_config_exists():
            create_config()
            raise click.FileError(filename="config",
                                  hint="Error: please configure config at " +
                                  const.DEFAULT_CONF_PATH)

        config_path = const.DEFAULT_CONF_PATH

    config = ConfigParser()

    # Config validation
    try:
        config.read(config_path)
        config, passed = validate_config(config)

        if not passed:
            raise Exception

    except Exception:
        raise Exception("Invalid config file provided")

    ctx.config = config
    ctx.verbose = verbose
Example #19
def runtest(config: Config, workdir: Path, results_dir: Path):
    print(f"========== Grading {workdir.name}")

    (workdir / config["output_dir"]).mkdir(exist_ok=True, parents=True)
    secret_files = []
    with (results_dir / f"{workdir.name}.txt").open("w", encoding="utf-8") as logfile:
        if "build" in config:
            logfile.write(utils.box_text("Build Step") + "\n")
            # copy files from project root to build location
            if "required_files" in config["build"]:
                for file in config["build"]["required_files"]:
                    (workdir / file["dest"]).mkdir(exist_ok=True, parents=True)
                    try:
                        copy(
                            Path(".config") / file["file"], Path(workdir / file["dest"])
                        )
                    except FileNotFoundError as ex:
                        raise click.FileError(ex.filename, "are you sure it exists?")
                    if file.get("secret"):
                        secret_files.append(Path(workdir / file["dest"] / file["file"]))

            if "commands" in config["build"]:
                for command in config["build"]["commands"]:
                    br = TestResult(test_type="build", cmd=command)
                    command = shlex.split(command)
                    br.retval, br.stdout, br.stderr = utils.run_command(
                        command, cwd=workdir
                    )
                    logfile.write(br.log(config["output"]["build"]))

        # loop through and run all tests
        test_runner = TestRunner(logfile, workdir, config)
        test_runner.run_all()
        test_runner.log()

        for file in secret_files:
            file.unlink()
Example #20
def tts_cli(text, file, output, speed, tld, lang, nocheck):
    """ Read <text> to mp3 format using NAVER Papago's Text-to-Speech API
    (set <text> or --file <file> to - for standard input)
    """

    # stdin for <text>
    if text == '-':
        text = click.get_text_stream('stdin').read()

    # stdout (when no <output>)
    if not output:
        output = click.get_binary_stream('stdout')

    # <file> input (stdin on '-' is handled by click.File)
    if file:
        try:
            text = file.read()
        except UnicodeDecodeError as e:  # pragma: no cover
            log.debug(str(e), exc_info=True)
            raise click.FileError(
                file.name,
                "<file> must be encoded using '%s'." %
                sys_encoding())

    # TTS
    try:
        tts = NaverTTS(
            text=text,
            lang=lang,
            speed=speed,
            tld=tld,
            lang_check=not nocheck)
        tts.write_to_fp(output)
    except (ValueError, AssertionError) as e:
        raise click.UsageError(str(e))
    except NaverTTSError as e:
        raise click.ClickException(str(e))
Example #21
def load_plate(plate_path):
	"""Loads a plate template from a CSV file.

	Parameters
	----------
	plate_path : str
		full or relative path to the plate template file.

	Returns
	-------
	pandas.DataFrame
		the plate template in tidy data format (see :py:mod:`.ioutils`).

	See also
	--------
	find_plate_file
	"""	
	try:
		plate = pd.read_csv(plate_path)
	except IOError as e:
		ioerror_to_click_exception(e)
	except pd.parser.CParserError as e:
		raise click.FileError(plate_path, hint="parser error, probably not a CSV file, {0}".format(e.args[0]))
	return plate
Example #22
def get_filepaths(
    paths: Union[List, str],
    exclusion_regexes: Set[re.Pattern],
    recursive: bool,
    ignore_git: bool,
) -> Set[str]:
    """
    Retrieve the filepaths from the command.

    :param paths: List of file/dir paths from the command
    :param recursive: Recursive option
    :param ignore_git: Ignore that the folder is a git repository
    :raise: click.FileError if directory is given without --recursive option
    """
    targets = set()
    for path in paths:
        if os.path.isfile(path):
            targets.add(path)
        elif os.path.isdir(path):
            if not recursive:
                raise click.FileError(click.format_filename(path),
                                      "Use --recursive to scan directories.")
            top_dir = Path(path)

            if not ignore_git and is_git_dir(path):
                _targets = {
                    os.path.join(path, target)
                    for target in git_ls(path)
                }
            else:
                _targets = {str(target) for target in top_dir.rglob(r"*")}

            for file_path in _targets:
                if not is_filepath_excluded(file_path, exclusion_regexes):
                    targets.add(file_path)
    return targets
Example #23
def main(filename):
	try:
		click.echo(myzcat.gzip_reader.read_gzip_file(filename))
	except OSError as e:
		raise click.FileError(filename, str(e))
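
For context, Click converts a raised FileError into a short error message and exit code 1, as it does for any other ClickException. A self-contained sketch using Click's test runner (the command and filename here are illustrative, not taken from the example above):

import click
from click.testing import CliRunner

@click.command()
@click.argument("filename")
def zcat(filename):
    try:
        with open(filename, "rb") as fh:
            click.echo(fh.read())
    except OSError as e:
        raise click.FileError(filename, hint=str(e))

runner = CliRunner()
result = runner.invoke(zcat, ["missing.gz"])
print(result.exit_code)  # 1: Click caught the FileError and rendered its message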
Example #24
def g_bundles(use_recommended, print_ebv, datasource,
              model, amount, binning, output):
    """Generates AMOUNT bundles based on MODEL and BINNING.
    The bundles are written to OUTPUT.yaml and OUTPUT.csv.

    MODEL has to be a previously generated model file.

    AMOUNT is an integer that is the amount of bundles that should be
    generated. When --use-recommended is provided, AMOUNT functions as an upper
    limit if set to a value >0.

    BINNING can be a path to a previously created binning, or custom bin edges
    in all dimensions: dimensions are separated by colons, edge values in
    each dimension are separated by commas.
    """
    # load model
    try:
        model = Model.from_file(model)
    except Exception:
        # raised if the file exists but does not contain a model
        raise click.FileError(model, 'malformed model file.')

    # check dimensions match for binning and model
    if binning.dimensions != len(model.column_names):
        raise click.UsageError("Dimensions of binning (%d) and model (%d) mismatch."
                               % (binning.dimensions, len(model.domain)))

    # if ebv and recommended amount are requested, real datasource is required
    # load datasource and histogram it using the desired binning
    if use_recommended or print_ebv:
        if datasource is None:
            raise click.UsageError("Datasource required for --use-recommended and --print-ebv options.")
        try:
            source = DataSourceIO.read(datasource)
        except Exception:
            raise click.FileError(datasource, "does not exist or is not readable.")
        real_histogram = source.get_histogram(binning)

    # option to generate uniform prob instead of using model

    # create DatasetGenerator
    bg = BundleGenerator(model, binning)

    # create dict for additional information
    addi = {}

    # use recommended amount
    if use_recommended:
        recommended = bg.recommended_amount(real_histogram)
        if amount <= 0:
            amount = recommended
        else:
            amount = min(amount, recommended)
        addi["recommended"] = recommended
        click.echo("Using recommended amount: %d" % amount)

    ebv = bg.expected_best_quality(amount, real_histogram)
    addi["ebv"] = np.float(ebv)
    addi["amount"] = amount

    # print ebv
    if print_ebv:
        click.echo("Expected best quality: %f" % ebv)

    # generate bundles and save to OUTPUT
    bundles = bg.generate(amount)
    DataSourceIO.write(bundles,
                       output,
                       additional_info={"metadata_from_bundle_generation": addi})
Example #25
def launch(data, verbose, debug, open_browser, port, host, embedding,
           obs_names, var_names, max_category_items, diffexp_lfc_cutoff, title,
           scripts, about, experimental_label_file, backed, disable_diffexp):
    """Launch the cellxgene data viewer.
    This web app lets you explore single-cell expression data.
    Data must be in a format that cellxgene expects, read the
    "getting started" guide.

    Examples:

    > cellxgene launch example_dataset/pbmc3k.h5ad --title pbmc3k

    > cellxgene launch <your data file> --title <your title>

    > cellxgene launch <url>"""

    e_args = parse_engine_args(embedding, obs_names, var_names,
                               max_category_items, diffexp_lfc_cutoff,
                               experimental_label_file, backed,
                               disable_diffexp)
    try:
        data_locator = DataLocator(data)
    except RuntimeError as re:
        raise click.ClickException(
            f"Unable to access data at {data}.  {str(re)}")

    # Startup message
    click.echo("[cellxgene] Starting the CLI...")

    # Argument checking
    if data_locator.islocal():
        # if data locator is local, apply file system conventions and other "cheap"
        # validation checks.  If a URI, defer until we actually fetch the data and
        # try to read it.  Many of these tests don't make sense for URIs (eg, extension-
        # based typing).
        if not data_locator.exists():
            raise click.FileError(data, hint="file does not exist")
        if not data_locator.isfile():
            raise click.FileError(data, hint="data is not a file")
        name, extension = splitext(data)
        if extension != ".h5ad":
            raise click.FileError(basename(data),
                                  hint="file type must be .h5ad")

    if debug:
        verbose = True
        open_browser = False
    else:
        warnings.formatwarning = custom_format_warning

    if not verbose:
        sys.tracebacklimit = 0

    if scripts:
        click.echo(r"""
    / / /\ \ \__ _ _ __ _ __ (_)_ __   __ _
    \ \/  \/ / _` | '__| '_ \| | '_ \ / _` |
     \  /\  / (_| | |  | | | | | | | | (_| |
      \/  \/ \__,_|_|  |_| |_|_|_| |_|\__, |
                                      |___/
    The --scripts flag is intended for developers to include google analytics etc. You could be opening yourself to a
    security risk by including the --scripts flag. Make sure you trust the scripts that you are including.
            """)
        scripts_pretty = ", ".join(scripts)
        click.confirm(
            f"Are you sure you want to inject these scripts: {scripts_pretty}?",
            abort=True)

    if not title:
        file_parts = splitext(basename(data))
        title = file_parts[0]

    if port:
        if debug:
            raise click.ClickException(
                "--port and --debug may not be used together (try --verbose for error logging)."
            )
        if not is_port_available(host, int(port)):
            raise click.ClickException(
                f"The port selected {port} is in use, please specify an open port using the --port flag."
            )
    else:
        port = find_available_port(host)

    if experimental_label_file:
        lf_name, lf_ext = splitext(experimental_label_file)
        if lf_ext and lf_ext != ".csv":
            raise click.FileError(basename(experimental_label_file),
                                  hint="label file type must be .csv")

    if about:

        def url_check(url):
            try:
                result = urlparse(url)
                if all([result.scheme, result.netloc]):
                    return True
                else:
                    return False
            except ValueError:
                return False

        if not url_check(about):
            raise click.ClickException(
                "Must provide an absolute URL for --about. (Example format: http://example.com)"
            )

    # Setup app
    cellxgene_url = f"http://{host}:{port}"

    # Import Flask app
    server = Server()

    server.create_app()
    server.app.config.update(SCRIPTS=scripts)

    if not verbose:
        log = logging.getLogger("werkzeug")
        log.setLevel(logging.ERROR)

    file_size = data_locator.size() if data_locator.islocal() else 0

    # if a big file, let the user know it may take a while to load.
    if file_size > BIG_FILE_SIZE_THRESHOLD:
        click.echo(
            f"[cellxgene] Loading data from {basename(data)}, this may take a while..."
        )
    else:
        click.echo(f"[cellxgene] Loading data from {basename(data)}.")

    from server.app.scanpy_engine.scanpy_engine import ScanpyEngine

    try:
        server.attach_data(ScanpyEngine(data_locator, e_args),
                           title=title,
                           about=about)
    except ScanpyFileError as e:
        raise click.ClickException(f"{e}")

    if not disable_diffexp and server.app.data.config['diffexp_may_be_slow']:
        click.echo(f"[cellxgene] CAUTION: due to the size of your dataset, "
                   f"running differential expression may take longer or fail.")

    if open_browser:
        click.echo(
            f"[cellxgene] Launching! Opening your browser to {cellxgene_url} now."
        )
        webbrowser.open(cellxgene_url)
    else:
        click.echo(
            f"[cellxgene] Launching! Please go to {cellxgene_url} in your browser."
        )

    click.echo("[cellxgene] Type CTRL-C at any time to exit.")

    if not verbose:
        f = open(devnull, "w")
        sys.stdout = f

    try:
        server.app.run(host=host,
                       debug=debug,
                       port=port,
                       threaded=False if debug else True,
                       use_debugger=False)
    except OSError as e:
        if e.errno == errno.EADDRINUSE:
            raise click.ClickException(
                "Port is in use, please specify an open port using the --port flag."
            ) from e
        raise
Example #26
def list(ctx, tt, web, start_, end_, week, month, day, num, tags, output):
    """
    Display applications being tracked.

    Add web to show web sites being tracked.

    Example:

    \b
    $ tm list

        2016 Sep 05 - 2016 Sep 06\n
        ─────────────────────────────\n
        Google Chrome     32m 09s  ████████████████████ 66.6%\n
        终端               08m 28s  █████ 17.5%


    $ tm list web

        2016 Sep 05 - 2016 Sep 06\n
        ─────────────────────────────\n
        https://github.com/       06m 12s  ██████ 19.2%\n
        https://www.v2ex.com/     06m 01s  ██████ 18.6%

    $ tm list web -w -n 100 -O trackdata.json

    Successfully written to /position/to/trackdata.json

    """
    if not trackmac.utils.has_set_up():
        click.echo(
            trackmac.utils.style(
                'error',
                'Could not find db or plist file. Run `tm setup` first.\n'))
        ctx.abort()

    if tt.is_not_running:
        click.echo(
            trackmac.utils.style(
                'error',
                'Warning: Trackmac daemon not running. Run `tm start` first.\n'))

    start_ = datetime.strptime(start_, "%Y-%m-%d").date()
    for start_date in (_ for _ in [day, week, month] if _ is not None):
        start_ = start_date
    end = datetime.strptime(end_, "%Y-%m-%d").date() + timedelta(days=1)
    if start_ > end:
        raise click.ClickException("'from' must be anterior to 'to'")
    if tags:
        records = tt.report(start_, end, 'tag_name')
        name = 'tag_name'
        others = sum(
            records.pop(i)['duration'] for (i, r) in enumerate(records)
            if not r[name])
        if others:
            records.append({'tag_name': 'Others', 'duration': others})
    elif web and web.lower() == 'web':
        records = tt.web_report(start_, end)
        name = 'domain'
    elif web is None:
        records = tt.report(start_, end, 'app_name')
        name = 'app_name'
    else:
        raise click.UsageError(
            'Use `web` to display web browsing statistics', )
    if not records:
        click.echo(
            trackmac.utils.style(
                'time', 'No data being collected. Please wait for a moment.'))
        return
    records = sorted(records, key=lambda x: x['duration'], reverse=True)[:num]
    max_len = max(
        len(rec[name].encode("utf8")) for rec in records if rec[name])
    # output to file
    if output:
        try:
            with open(output, 'w') as f:
                json.dump(records, f)
        except IOError:
            raise click.FileError(output, hint='IOError')
        else:
            click.echo(
                trackmac.utils.style(
                    'time', 'Successfully written to {}'.format(output)))
    else:
        click.echo(
            trackmac.utils.style(
                'date', "\t" + trackmac.utils.fill_text_to_print_width(
                    start_.strftime("%Y %b %d") + " - " +
                    end.strftime("%Y %b %d"), max_len + 22)))
        click.echo(
            trackmac.utils.style(
                'date', "\t" + trackmac.utils.fill_text_to_print_width(
                    u"─" * 29, max_len + 24)))
        total_time = sum(r['duration'] for r in records)
        for rec in records:
            click.echo(u"\t{project} {time} {percentage}".format(
                time=trackmac.utils.style(
                    'time', '{:>11}'.format(
                        trackmac.utils.format_timedelta(rec['duration']))),
                project=trackmac.utils.style(
                    'project',
                    trackmac.utils.fill_text_to_print_width(
                        (rec[name] or 'Others'), max_len)),
                percentage=trackmac.utils.style(
                    'tag',
                    trackmac.utils.get_progress(rec['duration'], total_time))))
Example #27
def _validate_load_file(ctx, param, value):
    load_file = pathlib.Path(value).expanduser()
    if not load_file.is_file():
        raise click.FileError(str(load_file),
                              'Please run "cibopath update" first.')
    return load_file
Example #28
def launch(data, verbose, debug, open_browser, port, host, layout, obs_names,
           var_names, max_category_items, diffexp_lfc_cutoff, title, scripts):
    """Launch the cellxgene data viewer.
    This web app lets you explore single-cell expression data.
    Data must be in a format that cellxgene expects, read the
    "getting started" guide.

    Examples:

    > cellxgene launch example_dataset/pbmc3k.h5ad --title pbmc3k

    > cellxgene launch <your data file> --title <your title>"""

    e_args = parse_engine_args(layout, obs_names, var_names,
                               max_category_items, diffexp_lfc_cutoff)
    # Startup message
    click.echo("[cellxgene] Starting the CLI...")

    # Argument checking
    name, extension = splitext(data)
    if extension != ".h5ad":
        raise click.FileError(basename(data), hint="file type must be .h5ad")

    if debug:
        verbose = True
        open_browser = False
    else:
        warnings.formatwarning = custom_format_warning

    if not verbose:
        sys.tracebacklimit = 0

    if scripts:
        click.echo(r"""
    / / /\ \ \__ _ _ __ _ __ (_)_ __   __ _
    \ \/  \/ / _` | '__| '_ \| | '_ \ / _` |
     \  /\  / (_| | |  | | | | | | | | (_| |
      \/  \/ \__,_|_|  |_| |_|_|_| |_|\__, |
                                      |___/
    The --scripts flag is intended for developers to include google analytics etc. You could be opening yourself to a
    security risk by including the --scripts flag. Make sure you trust the scripts that you are including.
            """)
        scripts_pretty = ", ".join(scripts)
        click.confirm(
            f"Are you sure you want to inject these scripts: {scripts_pretty}?",
            abort=True)

    if not title:
        file_parts = splitext(basename(data))
        title = file_parts[0]

    if port:
        if debug:
            raise click.ClickException(
                "--port and --debug may not be used together (try --verbose for error logging)."
            )
        if not is_port_available(host, int(port)):
            raise click.ClickException(
                f"The port selected {port} is in use, please specify an open port using the --port flag."
            )
    else:
        port = find_available_port(host)

    # Setup app
    cellxgene_url = f"http://{host}:{port}"

    # Import Flask app
    server = Server()

    server.create_app()
    server.app.config.update(SCRIPTS=scripts)

    if not verbose:
        log = logging.getLogger("werkzeug")
        log.setLevel(logging.ERROR)

    file_size = getsize(data)

    # if a big file, let the user know it may take a while to load.
    if file_size > BIG_FILE_SIZE_THRESHOLD:
        click.echo(
            f"[cellxgene] Loading data from {basename(data)}, this may take awhile..."
        )
    else:
        click.echo(f"[cellxgene] Loading data from {basename(data)}.")

    # Fix for anaconda python. matplotlib typically expects python to be installed as a framework; TkAgg is
    # usually available and fixes this issue. See https://matplotlib.org/faq/virtualenv_faq.html
    import matplotlib as mpl

    mpl.use("TkAgg")
    from server.app.scanpy_engine.scanpy_engine import ScanpyEngine

    try:
        server.attach_data(ScanpyEngine(data, e_args), title=title)
    except ScanpyFileError as e:
        raise click.ClickException(f"{e}")

    if open_browser:
        click.echo(
            f"[cellxgene] Launching! Opening your browser to {cellxgene_url} now."
        )
        webbrowser.open(cellxgene_url)
    else:
        click.echo(
            f"[cellxgene] Launching! Please go to {cellxgene_url} in your browser."
        )

    click.echo("[cellxgene] Type CTRL-C at any time to exit.")

    if not verbose:
        f = open(devnull, "w")
        sys.stdout = f

    try:
        server.app.run(host=host,
                       debug=debug,
                       port=port,
                       threaded=True,
                       use_debugger=False)
    except OSError as e:
        if e.errno == errno.EADDRINUSE:
            raise click.ClickException(
                "Port is in use, please specify an open port using the --port flag."
            ) from e
        raise
Example #29
def cli(ctx, inventory, limit, user, key_filename, verbose):

    if inventory.rstrip().endswith(","):
        hostlist = inventory.split(",")[:-1]
        if limit is not None:
            hostlist = list(set(hostlist) & set(limit))
        hostnames = {hostname: hostname for hostname in hostlist}

    else:
        host_path = Path(inventory)
        if not host_path.exists():
            raise click.FileError(inventory)

        with open(host_path, "r") as stream:
            try:
                inventory_data = yaml.safe_load(stream)
            except yaml.YAMLError:
                raise click.UsageError(
                    f"Couldn't read inventory file {host_path}")

        hostlist = list()
        hostnames = dict()
        for hostname, hostvars in inventory_data["sheep"]["hosts"].items():
            if limit is not None:
                if hostname not in limit.split(","):
                    continue

            if "ansible_host" in hostvars.keys():
                hostlist.append(hostvars["ansible_host"])
                hostnames[hostvars["ansible_host"]] = hostname
            else:
                hostlist.append(hostname)
                hostnames[hostname] = hostname

        if user is None:
            try:
                user = inventory_data["sheep"]["vars"]["ansible_user"]
            except KeyError:
                pass

    if user is None:
        raise click.UsageError(
            "Provide user by command line or in inventory file")

    if verbose == 0:
        logger.setLevel(logging.ERROR)
    elif verbose == 1:
        logger.setLevel(logging.WARNING)
    elif verbose == 2:
        logger.setLevel(logging.INFO)
    elif verbose > 2:
        logger.setLevel(logging.DEBUG)

    ctx.obj["verbose"] = verbose

    connect_kwargs = dict()
    if key_filename is not None:
        connect_kwargs["key_filename"] = key_filename

    ctx.obj["fab group"] = Group(*hostlist,
                                 user=user,
                                 connect_kwargs=connect_kwargs)
    ctx.obj["hostnames"] = hostnames
Example #30
def launch(
    data,
    layout,
    diffexp,
    title,
    verbose,
    debug,
    obs_names,
    var_names,
    open_browser,
    port,
    host,
    max_category_items,
    diffexp_lfc_cutoff,
):
    """Launch the cellxgene data viewer.
    This web app lets you explore single-cell expression data.
    Data must be in a format that cellxgene expects, read the
    "getting started" guide.

    Examples:

    > cellxgene launch example_dataset/pbmc3k.h5ad --title pbmc3k

    > cellxgene launch <your data file> --title <your title>"""

    # Startup message
    click.echo("[cellxgene] Starting the CLI...")

    # Argument checking
    name, extension = splitext(data)
    if extension != ".h5ad":
        raise click.FileError(basename(data), hint="file type must be .h5ad")

    if debug:
        verbose = True
        open_browser = False
    else:
        warnings.formatwarning = custom_format_warning

    if not verbose:
        sys.tracebacklimit = 0

    if not title:
        file_parts = splitext(basename(data))
        title = file_parts[0]

    # Setup app
    cellxgene_url = f"http://{host}:{port}"

    # Import Flask app
    from server.app.app import app

    app.config.update(DATASET_TITLE=title)

    if not verbose:
        log = logging.getLogger("werkzeug")
        log.setLevel(logging.ERROR)

    click.echo(
        f"[cellxgene] Loading data from {basename(data)}, this may take awhile..."
    )

    # Fix for anaconda python. matplotlib typically expects python to be installed as a framework; TkAgg is
    # usually available and fixes this issue. See https://matplotlib.org/faq/virtualenv_faq.html
    import matplotlib as mpl

    mpl.use("TkAgg")
    from server.app.scanpy_engine.scanpy_engine import ScanpyEngine

    args = {
        "layout": layout,
        "diffexp": diffexp,
        "max_category_items": max_category_items,
        "diffexp_lfc_cutoff": diffexp_lfc_cutoff,
        "obs_names": obs_names,
        "var_names": var_names,
    }

    try:
        app.data = ScanpyEngine(data, args)
    except ScanpyFileError as e:
        raise click.ClickException(f"{e}")

    if open_browser:
        click.echo(
            f"[cellxgene] Launching! Opening your browser to {cellxgene_url} now."
        )
        webbrowser.open(cellxgene_url)
    else:
        click.echo(
            f"[cellxgene] Launching! Please go to {cellxgene_url} in your browser."
        )

    click.echo("[cellxgene] Type CTRL-C at any time to exit.")

    if not verbose:
        f = open(devnull, "w")
        sys.stdout = f

    app.run(host=host, debug=debug, port=port, threaded=True)