Example #1
def test():
    i = click.get_binary_stream('stdin')
    o = click.get_binary_stream('stdout')
    while True:
        chunk = i.read(4096)
        if not chunk:
            break
        o.write(chunk)
        o.flush()
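
As excerpted, the snippet assumes click is imported and the function is wired up as a command; a minimal runnable sketch of the same stdin-to-stdout pump (the cat name and the @click.command() registration are assumptions, not part of the original):

import click

@click.command()
def cat():
    """Copy binary stdin to binary stdout in 4 KiB chunks."""
    stdin = click.get_binary_stream('stdin')
    stdout = click.get_binary_stream('stdout')
    while True:
        chunk = stdin.read(4096)
        if not chunk:
            break
        stdout.write(chunk)
        stdout.flush()

if __name__ == '__main__':
    cat()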
Example #2
def build(recipe, python):
    click.echo('Building {} recipe...'.format(recipe))

    cmd = conda['build', recipe, '--channel', 'conda-forge', '--python',
                python]

    cmd(
        stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'),
    )
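
The conda object here follows plumbum's command pattern (examples #11, #24, and #37 below import plumbum explicitly): indexing binds arguments, and calling runs the process. A minimal sketch of the same redirection with a generic command; plumbum and an echo binary on PATH are assumptions here:

import click
from plumbum.cmd import echo  # plumbum resolves any binary on PATH this way

# Indexing binds arguments without running anything.
cmd = echo['hello', 'world']

# Calling runs the process, with output redirected to Click's binary streams.
cmd(
    stdout=click.get_binary_stream('stdout'),
    stderr=click.get_binary_stream('stderr'),
)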
Example #3
def build(recipe):
    click.echo('\nBuilding {} recipe...'.format(recipe))

    python_version = '.'.join(map(str, sys.version_info[:3]))

    cmd = conda['build', recipe, '--channel', 'conda-forge', '--python',
                python_version]

    cmd(stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'))
Example #4
def clone(repo_uri, destination):
    if Path(destination).exists():
        return

    cmd = git['clone', repo_uri, destination]

    cmd(
        stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'),
    )
Example #5
def build(recipe):
    click.echo('Building {} recipe...'.format(recipe))

    python_version = '{}.{}'.format(sys.version_info.major,
                                    sys.version_info.minor)

    cmd = conda['build', recipe, '--channel', 'conda-forge', '--python',
                python_version]

    cmd(stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'))
Example #6
def build(recipe, python):
    click.echo('Building {} recipe...'.format(recipe))

    cmd = conda[
        'build', recipe, '--channel', 'conda-forge', '--python', python
    ]

    cmd(
        stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'),
    )
Example #7
def deploy(package_location, artifact_directory, architectures):
    artifact_dir = Path(artifact_directory)
    artifact_dir.mkdir(parents=True, exist_ok=True)
    package_loc = Path(package_location)
    assert package_loc.exists(), 'Path {} does not exist'.format(package_loc)

    for architecture in architectures:
        arch_artifact_directory = str(artifact_dir / architecture)
        arch_package_directory = str(package_loc / architecture)
        shutil.copytree(arch_package_directory, arch_artifact_directory)
    cmd = conda['index', artifact_directory]
    cmd(stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'))
Example #8
def clone(repo_uri, destination, branch):
    if not Path(destination).exists():
        cmd = git['clone', repo_uri, destination]
        cmd(
            stdout=click.get_binary_stream('stdout'),
            stderr=click.get_binary_stream('stderr'),
        )

    cmd = git['-C', destination, 'checkout', branch]
    cmd(
        stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'),
    )
Example #9
def trace_simulation(ctx):
    sim_id = ctx.obj["simulation"]

    click.echo('Tracing device {}, CTRL-C to exit trace'.format(sim_id))

    trace_script = "{script_dir}/{rel_path}/trace.sh".format(
        script_dir=os.path.dirname(os.path.realpath(__file__)),
        rel_path=DEFAULT_DEVICE_PATH)

    subprocess.run([trace_script, str(sim_id)],
                   stdout=click.get_binary_stream('stdout'),
                   stdin=click.get_binary_stream('stdin'),
                   stderr=click.get_binary_stream('stderr'))
Example #10
def deploy(package_location, artifact_directory, architecture):
    artifact_dir = Path(artifact_directory)
    artifact_dir.mkdir(parents=True, exist_ok=True)
    package_loc = Path(package_location)
    assert package_loc.exists(), 'Path {} does not exist'.format(package_loc)

    for architecture in (architecture, 'noarch'):
        arch_artifact_directory = str(artifact_dir / architecture)
        arch_package_directory = str(package_loc / architecture)
        shutil.copytree(arch_package_directory, arch_artifact_directory)
    cmd = conda['index', artifact_directory]
    cmd(
        stdout=click.get_binary_stream('stdout'),
        stderr=click.get_binary_stream('stderr'),
    )
Example #11
def uninstall(name: str) -> None:
    try:
        from plumbum.cmd import pip

        ((pip["uninstall", name, "-y"]) > click.get_binary_stream('stdout'))()
    except ImportError:
        raise PluginException('You do not have "pip" installed.')
Example #12
def tts_cli(text, file, output, slow, tld, lang, nocheck):
    """ Read <text> to mp3 format using Google Translate's Text-to-Speech API
    (set <text> or --file <file> to - for standard input)
    """

    # stdin for <text>
    if text == '-':
        text = click.get_text_stream('stdin').read()

    # stdout (when no <output>)
    if not output:
        output = click.get_binary_stream('stdout')

    # <file> input (stdin on '-' is handled by click.File)
    if file:
        try:
            text = file.read()
        except UnicodeDecodeError as e:  # pragma: no cover
            log.debug(str(e), exc_info=True)
            raise click.FileError(
                file.name,
                "<file> must be encoded using '%s'." % sys_encoding())

    # TTS
    try:
        tts = gTTS(text=text,
                   lang=lang,
                   slow=slow,
                   tld=tld,
                   lang_check=not nocheck)
        tts.write_to_fp(output)
    except (ValueError, AssertionError) as e:
        raise click.UsageError(str(e))
    except gTTSError as e:
        raise click.ClickException(str(e))
Example #13
def main(click_context, hashmes, s, v, c):
    """
    If there is a file at hashme, read and omnihash that file.
    Elif hashme is a string, omnihash that.
    """

    # Print version and quit
    if v:
        import pkg_resources
        version = pkg_resources.require("omnihash")[0].version
        click.echo(version)
        return

    _init_plugins()

    if not hashmes:
        # If stdin is piped (not a TTY), hash it; otherwise print help and quit.
        if not sys.stdin.isatty():
            digesters = make_digesters(c)
            stdin = click.get_binary_stream('stdin')
            bytechunks = iter(lambda: stdin.read(io.DEFAULT_BUFFER_SIZE), b'')
            click.echo("Hashing standard input..")
            produce_hashes(bytechunks, digesters)
        else:
            print(click_context.get_help())
            return
    else:
        for hashme in hashmes:
            digesters = make_digesters(c)
            bytechunks = iterate_bytechunks(hashme, s)
            if bytechunks:
                produce_hashes(bytechunks, digesters)
Example #14
def main(delivery_stream, region, print_record_id):
    """Redirects stdin to Amazon Kinesis Firehose.

    Records will be written to DELIVERY_STREAM. Data should be
    separated by a newline character. Each line will be sent as a
    separate record, so keep in mind that Kinesis Firehose will round
    up each record to the next 5 KB in size.

    The application submits each record one at a time, which limits the
    throughput. A future version might batch records together for
    performance, but until then beware of using high throughput streams
    (15 records per second or more). Running from within EC2 will allow
    higher throughput.
    """
    client = _get_firehose_client(region_name=region)
    for line in click.get_binary_stream('stdin'):
        response = client.put_record(
            DeliveryStreamName=delivery_stream,
            Record={'Data': line},
        )
        if print_record_id:
            click.echo(message=response['RecordId'])
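
The docstring notes that a future version might batch records. A hedged sketch of that idea using Firehose's PutRecordBatch API, which accepts up to 500 records per call; _get_firehose_client is the helper used above, and _batched is a hypothetical helper:

import itertools

import click

def _batched(iterable, size):
    """Yield lists of at most `size` items from `iterable`."""
    it = iter(iterable)
    while True:
        batch = list(itertools.islice(it, size))
        if not batch:
            return
        yield batch

def main_batched(delivery_stream, region):
    client = _get_firehose_client(region_name=region)
    stdin = click.get_binary_stream('stdin')
    for records in _batched(stdin, 500):  # PutRecordBatch limit is 500 records
        client.put_record_batch(
            DeliveryStreamName=delivery_stream,
            Records=[{'Data': line} for line in records],
        )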
Example #15
def run_cli(host, port, user, password, database, settings, query, format,
            format_stdin, multiline, stacktrace, version, files):
    """
    A third-party client for the ClickHouse DBMS.
    """
    if version:
        return show_version()

    if password:
        password = click.prompt("Password",
                                hide_input=True,
                                show_default=False,
                                type=str)

    data_input = ()

    # Read from STDIN if non-interactive mode
    stdin = click.get_binary_stream('stdin')
    if not stdin.isatty():
        data_input += (stdin, )

    # Read the given file
    if files:
        data_input += files

    # TODO: Rename the CLI's instance into something more feasible
    cli = CLI(host, port, user, password, database, settings, format,
              format_stdin, multiline, stacktrace)
    cli.run(query, data_input)
Example #16
def do_read(
    installer: typing.BinaryIO,
    path: str,
    *,
    output_file: typing.Union[str, bytes, os.PathLike],
    unzip: bool,
    name_encoding: str,
    offset: typing.Optional[int],
) -> None:
    """Read the contents of a single file stored in an installer."""

    parsed = parse_sfx(installer, offset=offset)

    encoded_path = path.encode(name_encoding)
    for file in parsed.files:
        if decrypt_path(file.path_encrypted) == encoded_path:
            if os.fspath(output_file) in {"-", b"-"}:
                timestamp = do_read_internal(file,
                                             path,
                                             click.get_binary_stream("stdout"),
                                             None,
                                             unzip=unzip)
            else:
                with open(output_file, "wb") as fout:
                    timestamp = do_read_internal(file, path, fout, unzip=unzip)
                if timestamp is not None:
                    os.utime(output_file, (timestamp, timestamp))

            if timestamp is None:
                sys.exit(1)

            break
    else:
        print(f"Could not find a file with path {path!r}", file=sys.stderr)
        sys.exit(1)
Example #17
def path_writer(path: PathLike) -> Generator[BinaryIO, None, None]:
    """Open a file for writing binary content, or use stdout."""
    if str(path) == "-":
        yield click.get_binary_stream("stdout")
        return
    with open(path, "wb") as fh:
        yield fh
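
path_writer has exactly the shape contextlib.contextmanager expects (a generator that yields a single stream), so the decorator was presumably stripped from this excerpt. A usage sketch under that assumption:

import contextlib

import click

@contextlib.contextmanager
def path_writer(path):
    """Open a file for writing binary content, or use stdout on '-'."""
    if str(path) == "-":
        yield click.get_binary_stream("stdout")
        return
    with open(path, "wb") as fh:
        yield fh

# Writes to out.bin; path_writer("-") would write to binary stdout instead.
with path_writer("out.bin") as fh:
    fh.write(b"payload")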
Example #18
def export_identified_glycans_from_glycopeptides(database_connection,
                                                 analysis_identifier,
                                                 output_path):
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    glycans = session.query(GlycanComposition).join(
        GlycanCombinationGlycanComposition).join(GlycanCombination).join(
            Glycopeptide,
            Glycopeptide.glycan_combination_id == GlycanCombination.id).join(
                IdentifiedGlycopeptide,
                IdentifiedGlycopeptide.structure_id == Glycopeptide.id).filter(
                    IdentifiedGlycopeptide.analysis_id == analysis.id).all()
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        job = ImportableGlycanHypothesisCSVSerializer(output_stream, glycans)
        job.run()
Example #19
def main(delivery_stream, region, print_record_id):
    """Redirects stdin to Amazon Kinesis Firehose.

    Records will be written to DELIVERY_STREAM. Data should be
    separated by a newline character. Each line will be sent as a
    separate record, so keep in mind that Kinesis Firehose will round
    up each record to the next 5 KB in size.

    The application submits each record one at a time, which limits the
    throughput. A future version might batch records together for
    performance, but until then beware of using high throughput streams
    (15 records per second or more). Running from within EC2 will allow
    higher throughput.
    """
    client = _get_firehose_client(region_name=region)
    for line in click.get_binary_stream('stdin'):
        response = client.put_record(
            DeliveryStreamName=delivery_stream,
            Record={'Data': line},
        )
        if print_record_id:
            click.echo(message=response['RecordId'])
Example #20
def mkexcel(output, input, sort_, row, type_):
    if output == sys.stdout and type_ == "xls":
        output = click.get_binary_stream('stdout')

    dumpexcel(input, output, type_, read_row=row, sort_type=sort_)

    input.close()
    output.close()
Example #21
def do_read(
    archive: typing.BinaryIO,
    path: str,
    *,
    name_encoding: str,
    output_file: typing.Union[str, bytes, os.PathLike],
) -> None:
    """Read the data of an archive member."""

    # If the path doesn't contain any backslashes,
    # an empty string is returned for dir_path,
    # which happens to be the right value we need in that case.
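    # For example: "dir\\sub\\file.txt".rpartition("\\") yields
    # ("dir\\sub", "\\", "file.txt"), while "file.txt".rpartition("\\")
    # yields ("", "", "file.txt").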
    dir_path, _, file_name = path.rpartition("\\")
    encoded_dir_path = dir_path.encode(name_encoding)
    encoded_file_name = file_name.encode(name_encoding)

    parsed = InstallShield3Z.from_io(archive)

    for dir_index, dir in enumerate(parsed.toc_directories):
        if dir.path == encoded_dir_path:
            break
    else:
        if dir_path:
            print(f"Error: Directory {dir_path!r} not found in archive.",
                  file=sys.stderr)
        else:
            print("Error: No top-level directory found in archive.",
                  file=sys.stderr)
            print(
                "Note: To read a file from a subdirectory, use a full path including a directory name.",
                file=sys.stderr)
        if "/" in path:
            print("Note: Use \\ instead of / as the directory separator.",
                  file=sys.stderr)
        sys.exit(1)

    for file in parsed.toc_files:
        if file.directory_index == dir_index and file.name == encoded_file_name:
            if os.fspath(output_file) in {"-", b"-"}:
                extract_file_data(file, click.get_binary_stream("stdout"))
            else:
                with open(output_file, "wb") as fout:
                    extract_file_data(file, fout)
                restore_file_metadata(file, output_file)

            break
    else:
        if dir_path:
            print(
                f"Error: File {file_name!r} not found in directory {dir_path!r}.",
                file=sys.stderr)
        else:
            print(
                f"Error: File {file_name!r} not found in top-level directory.",
                file=sys.stderr)
            print(
                "Note: To read a file from a subdirectory, use a full path including a directory name.",
                file=sys.stderr)
Example #22
def clean():
    if os.path.exists('.artifactdir'):
        with open('.artifactdir', mode='rt') as f:
            artifact_directory = f.read().strip()

        build_artifacts = os.path.join(artifact_directory, 'build_artifacts')

        if glob.glob(os.path.join(build_artifacts, '*')):
            try:
                sh.docker.run(
                    '-a',
                    'stdin',
                    '-a',
                    'stdout',
                    '-a',
                    'stderr',
                    '-v',
                    '{}:/build_artifacts'.format(build_artifacts),
                    '-i',
                    '--rm',
                    '-e',
                    'HOST_USER_ID={:d}'.format(os.getuid()),
                    'condaforge/linux-anvil',
                    'bash',
                    _in='rm -rf /build_artifacts/*',
                )
            except sh.ErrorReturnCode as e:
                click.get_binary_stream('stderr').write(e.stderr)

    files = os.listdir('.')

    if files:
        with thread_pool(files) as executor:
            futures = [
                executor.submit(
                    functools.partial(shutil.rmtree, ignore_errors=True)
                    if os.path.isdir(path) else os.remove, path)
                for path in files
            ]

            for future in concurrent.futures.as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    raise click.ClickException(str(e))
Example #23
def public(ctx):
    """Generate an (RSA) GPG key and export the public key in ASCII.

    This command relies on pem2openpgp and gpg.
    """
    gpg_key = _get_gpg_key(_get_pem(ctx().source), ctx().user, ctx().verbose)
    _run_gpg_with_key(
        gpg_key, ['--armor', '--export', ctx().user], None,
        click.get_binary_stream('stdin'), ctx().verbose)
Example #24
def install(url: str) -> None:
    try:
        from plumbum.cmd import pip

        ((pip["install", url]) > click.get_binary_stream('stdout'))()
    except ImportError:
        raise PluginException('You do not have "pip" installed.')
    except ProcessExecutionError as e:
        raise PluginException(f'Unable to install plugin: "{e.stderr}".')
Example #25
def download(base_url, directory, name):
    directory = Path(directory)
    if not directory.exists():
        directory.mkdir()

    data_url = '{}/{}'.format(base_url, name)
    path = directory / name

    if not path.exists():
        download = curl[data_url, '-o', path, '-L']
        download(stdout=click.get_binary_stream('stdout'),
                 stderr=click.get_binary_stream('stderr'))
    else:
        logger.info('Skipping download: %s already exists', name)

    logger.info('Extracting archive to %s', directory)
    if path.suffix in ('.tar', '.gz', '.bz2', '.xz'):
        # 'r|*' lets tarfile detect the compression instead of assuming gzip
        with tarfile.open(str(path), mode='r|*') as f:
            f.extractall(path=str(directory))
Example #26
def stop(ctx):
    """Will stop the dockerized device

    """
    if (ctx.obj["port"]):
        raise click.ClickException(
            "Only possible for dockerized simulated device (--simulation)")
    sim_id = ctx.obj["simulation"]

    click.echo('Try to stop dockerized device with id "{}"'.format(sim_id))

    stop_script = "{script_dir}/{rel_path}/stop.sh".format(
        script_dir=os.path.dirname(os.path.realpath(__file__)),
        rel_path=DEFAULT_DEVICE_PATH)

    subprocess.run([stop_script, str(sim_id)],
                   stdout=click.get_binary_stream('stdout'),
                   stdin=click.get_binary_stream('stdin'),
                   stderr=click.get_binary_stream('stderr'))
Example #27
def cli(ctx):
    if not sys.stdin.isatty():
        stdin = click.get_binary_stream('stdin')
        bytechunks = iter(lambda: stdin.read(io.DEFAULT_BUFFER_SIZE), b'')

        ctx.subjects.extend(bytechunks)
Example #28
def extract_file_blob(database_connection, blob_identifier, output_path=None):
    if output_path is None:
        output_path = click.get_binary_stream('stdout')
    session = database_connection.session
    blob = get_by_name_or_id(session, FileBlob, blob_identifier)
    with blob.open() as fh:
        chunk_size = 2**16
        chunk = fh.read(chunk_size)
        while chunk:
            output_path.write(chunk)
            chunk = fh.read(chunk_size)
Example #29
def start(ctx, dockerimage=DEFAULT_IMAGE):
    """
    Will start a dockerized device.
    The --simulation arg represents the Openthread ID / communication ID in simulated network
    """
    if (ctx.obj["port"]):
        raise click.ClickException(
            "Only possible for dockerized simulated device (--simulation)")
    sim_id = ctx.obj["simulation"]

    click.echo('Starting device with id "{}" and docker image "{}"'.format(
        sim_id, dockerimage))

    start_script = "{script_dir}/{rel_path}/start.sh".format(
        script_dir=os.path.dirname(os.path.realpath(__file__)),
        rel_path=DEFAULT_DEVICE_PATH)

    subprocess.run([start_script, str(sim_id), dockerimage],
                   stdout=click.get_binary_stream('stdout'),
                   stdin=click.get_binary_stream('stdin'))
Example #30
def download(base_url, data, directory):
    if not data:
        data = ('ibis-testing-data.tar.gz',)

    if not os.path.exists(directory):
        os.mkdir(directory)

    for piece in data:
        data_url = '{}/{}'.format(base_url, piece)
        path = os.path.join(directory, piece)

        curl(
            data_url, o=path, L=True,
            _out=click.get_binary_stream('stdout'),
            _err=click.get_binary_stream('stderr'),
        )

        if piece.endswith(('.tar', '.gz', '.bz2', '.xz')):
            # 'r|*' lets tarfile detect the compression instead of assuming gzip
            with tarfile.open(path, mode='r|*') as f:
                f.extractall(path=directory)
Example #31
def extract_file_blob(database_connection, blob_identifier, output_path=None):
    if output_path is None:
        output_path = click.get_binary_stream('stdout')
    session = database_connection.session
    blob = get_by_name_or_id(session, FileBlob, blob_identifier)
    with blob.open() as fh:
        chunk_size = 2 ** 16
        chunk = fh.read(chunk_size)
        while chunk:
            output_path.write(chunk)
            chunk = fh.read(chunk_size)
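
The read/write loop above (also in examples #28, #38, and #39) is the standard chunked-copy pattern; the standard library expresses it directly, e.g.:

import shutil

import click

def write_blob_to_stdout(fh):
    """Chunked copy of an open binary file to stdout (sketch)."""
    shutil.copyfileobj(fh, click.get_binary_stream('stdout'), length=2 ** 16)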
Example #32
    def spinner(self):
        def noop(y=0):
            return noop

        self._progress_line_length = 0
        size = get_terminal_size((20, 20)).columns - 2
        stream = click.get_binary_stream("stdout")
        isatty = stream.isatty() or os.environ.get("_TEST_HORST_", False)
        yield noop(0) if not isatty else self.signal_progress(0, size)
        # print("max len", size)
        if isatty:
            self._clear_current_line(size)
Example #33
def download(base_url, directory, name):
    if not os.path.exists(directory):
        os.mkdir(directory)

    data_url = '{}/{}'.format(base_url, name)
    path = os.path.join(directory, name)

    if not os.path.exists(path):
        curl(data_url,
             o=path,
             L=True,
             _out=click.get_binary_stream('stdout'),
             _err=click.get_binary_stream('stderr'))
    else:
        click.echo('Skipping download: {} already exists.'.format(name))

    click.echo('Extracting archive to {} ...'.format(directory))
    if path.endswith(('.tar', '.gz', '.bz2', '.xz')):
        # 'r|*' lets tarfile detect the compression instead of assuming gzip
        with tarfile.open(path, mode='r|*') as f:
            f.extractall(path=directory)
Example #34
def mkexcel(output, input, sort_, row, type_):
    if output == sys.stdout:
        output = click.get_binary_stream('stdout')

    klass = dumptool.DumpCSV
    if type_ == "xls":
        klass = dumptool.DumpXLS

    dump_excel(input, output, klass, read_row=row, sort_type=sort_)

    input.close()
    output.close()
Example #35
def main(fileobj, fname, ftype):

    pretty = pprint.PrettyPrinter()

    if fileobj:
        fileobj = open(fileobj, 'rb')
        filesize = os.path.getsize(os.path.abspath(fileobj.name))
        if not fname:
            fname = fileobj.name
        stdin = False

    else:
        fileobj = click.get_binary_stream('stdin')
        if not fname:
            fname = ''
        stdin = True

    if not ftype:
        ftype, enc = guess_type(fname)
        if not ftype:
            ftype = 'application/octet-stream'

    offset = 0
    trans_id = ''
    while True:
        read_size = 1 * 1024 * 1024
        raw_data = fileobj.read(read_size)
        raw_data_size = len(raw_data)

        payload = base64.b64encode(raw_data)

        if stdin:
            if raw_data_size < read_size:
                filesize = offset + raw_data_size
            else:
                filesize = offset + raw_data_size + 1

        headers = {
            'content-range': ('bytes %d-%d/%d' % (offset, offset+raw_data_size-1, filesize)),
            'content-type': ftype,
            'content-filename': fname,
            }
        headers['Content-Length'] = str(len(payload))  # must match the body sent
        if trans_id != '':
            headers['Transaction-ID'] = trans_id

        response = requests.post('http://localhost:5000/api/v1/items',
                                 data=payload, headers=headers,
                                 auth=('user', 'foo'))
        offset = offset + raw_data_size
        if 'Transaction-ID' in response.headers:
            trans_id = response.headers['Transaction-ID']

        if raw_data_size < read_size:
            break
Example #36
def _process_outgoing(ctx):
    account_manager = get_account_manager(ctx)
    Parser = getattr(email.parser, "BytesParser", email.parser.Parser)
    msg = Parser().parse(click.get_binary_stream("stdin"))
    addr = mime.parse_email_addr(msg["From"])
    account = account_manager.get_account_from_emailadr(addr)
    if account is None:
        raise click.ClickException(
            "No Account associated for 'From: {}'".format(addr))
    else:
        r = account.process_outgoing(msg)
        dump_info_outgoing_result(r)
        return r.msg
Example #37
def download(repo_url, directory):
    from shutil import rmtree

    from plumbum.cmd import curl

    directory = Path(directory)
    # download the master branch
    url = repo_url + '/archive/master.zip'
    # download the zip next to the target directory with the same name
    path = directory.with_suffix('.zip')

    if not path.exists():
        logger.info(f'Downloading {url} to {path}...')
        path.parent.mkdir(parents=True, exist_ok=True)
        download = curl[url, '-o', path, '-L']
        download(
            stdout=click.get_binary_stream('stdout'),
            stderr=click.get_binary_stream('stderr'),
        )
    else:
        logger.info(f'Skipping download: {path} already exists')

    logger.info(f'Extracting archive to {directory}')

    # extract all files
    extract_to = directory.with_name(directory.name + '_extracted')
    with zipfile.ZipFile(str(path), 'r') as f:
        f.extractall(str(extract_to))

    # remove existent folder
    if directory.exists():
        rmtree(str(directory))

    # rename to the target directory
    (extract_to / 'testing-data-master').rename(directory)

    # remove temporary extraction folder
    extract_to.rmdir()
Example #38
def get(id):
    "Fetch a file's contents by id."
    from sentry.models import File

    try:
        file = File.objects.get(id=id)
    except File.DoesNotExist:
        raise click.ClickException('File %d does not exist.' % id)

    stdout = click.get_binary_stream('stdout')

    with file.getfile() as fp:
        for chunk in fp.chunks():
            stdout.write(chunk)
Example #39
def get(id):
    """Fetch a file's contents by id."""
    from sentry.models import File

    try:
        file = File.objects.get(id=id)
    except File.DoesNotExist:
        raise click.ClickException("File %d does not exist." % id)

    stdout = click.get_binary_stream("stdout")

    with file.getfile() as fp:
        for chunk in fp.chunks():
            stdout.write(chunk)
Example #40
def decode():
    """Given a Geobuf byte string on stdin, write a GeoJSON feature
    collection to stdout."""
    logger = logging.getLogger('geobuf')
    stdin = click.get_binary_stream('stdin')
    sink = click.get_text_stream('stdout')
    try:
        pbf = stdin.read()
        data = geobuf.decode(pbf)
        json.dump(data, sink)
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #41
def download(repo_url, directory):
    from plumbum.cmd import curl
    from shutil import rmtree

    directory = Path(directory)
    # download the master branch
    url = repo_url + '/archive/master.zip'
    # download the zip next to the target directory with the same name
    path = directory.with_suffix('.zip')

    if not path.exists():
        logger.info('Downloading {} to {}...'.format(url, path))
        path.parent.mkdir(parents=True, exist_ok=True)
        download = curl[url, '-o', path, '-L']
        download(
            stdout=click.get_binary_stream('stdout'),
            stderr=click.get_binary_stream('stderr'),
        )
    else:
        logger.info('Skipping download: {} already exists'.format(path))

    logger.info('Extracting archive to {}'.format(directory))

    # extract all files
    extract_to = directory.with_name(directory.name + '_extracted')
    with zipfile.ZipFile(str(path), 'r') as f:
        f.extractall(str(extract_to))

    # remove existent folder
    if directory.exists():
        rmtree(str(directory))

    # rename to the target directory
    (extract_to / 'testing-data-master').rename(directory)

    # remove temporary extraction folder
    extract_to.rmdir()
Example #42
def img2txt21(server, file_name):
    """ Call the img to text api hosted on the micropayments server"""

    # If a file isn't specified, read the image from stdin
    if file_name:
        upload = requests.post('http://' + server + '/upload',
                               files={'file': open(file_name, 'rb')})
    else:
        file = click.get_binary_stream('stdin')
        file_name = 'test.jpg'
        upload = requests.post('http://' + server + '/upload',
                               files={'file': (file_name, file)})

    # convert image to text
    ocr_url = 'http://' + server + '/ocr?image={0}&payout_address={1}'
    answer = requests.post(url=ocr_url.format(file_name, wallet.get_payout_address()))
    print(answer.text)
Example #43
def encode(precision, with_z):
    """Given GeoJSON on stdin, writes a geobuf file to stdout."""
    logger = logging.getLogger('geobuf')
    stdin = click.get_text_stream('stdin')
    sink = click.get_binary_stream('stdout')
    try:
        data = json.load(stdin)
        pbf = geobuf.encode(
            data,
            precision if precision >= 0 else 6,
            3 if with_z else 2)
        sink.write(pbf)
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
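
Together with the decode command in example #40, these two commands are inverses. A minimal round-trip sketch using the same geobuf calls:

import geobuf

collection = {"type": "FeatureCollection", "features": []}

pbf = geobuf.encode(collection, 6, 2)  # precision 6, 2 dimensions, as above
print(geobuf.decode(pbf))              # recovers the feature collection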
Example #44
def translate(ctx, varformat, pipeline, strings, path):
    """
    Translate a single string or .po file of strings.

    If you want to pull the string from stdin, use "-".

    Note: Translating files is done in-place replacing the original
    file.

    """
    if not (path and path[0] == '-'):
        # We don't want to print this if they're piping to stdin
        out('dennis version {version}'.format(version=__version__))

    if not path:
        raise click.UsageError('nothing to work on. Use --help for help.')

    try:
        translator = Translator(varformat.split(','), pipeline.split(','))
    except InvalidPipeline as ipe:
        raise click.UsageError(ipe.args[0])

    if strings:
        # Args are strings to be translated
        for arg in path:
            data = translator.translate_string(arg)
            out(data)

    elif path[0] == '-':
        # Read everything from stdin, then translate it
        data = click.get_binary_stream('stdin').read()
        data = translator.translate_string(data)
        out(data)

    else:
        # Check all the paths first
        for arg in path:
            if not os.path.exists(arg):
                raise click.UsageError('File {fn} does not exist.'.format(
                    fn=click.format_filename(arg)))

        for arg in path:
            click.echo(translator.translate_file(arg))

    ctx.exit(0)
Example #45
def upload(token, filename, fname, url, ftype, insecure, lifetime):
    """
    determine mime-type and upload to bepasty
    """
    read_size = 1 * 1024 * 1024
    if filename:
        fileobj = open(filename, 'rb')
        filesize = os.path.getsize(filename)
        if not fname:
            fname = os.path.basename(filename)
        stdin = False
    else:
        fileobj = click.get_binary_stream('stdin')
        if not fname:
            fname = ''
        stdin = True

    # we use the first chunk to determine the filetype if not set
    first_chunk = fileobj.read(read_size)
    if not ftype:
        mime = magic.Magic(mime=True)
        ftype = mime.from_buffer(first_chunk).decode()

        if not ftype:
            ftype = 'text/plain'
            print('falling back to {}'.format(ftype))
        else:
            print('guessed filetype: {}'.format(ftype))
    else:
        print('using given filetype {}'.format(ftype))

    offset = 0
    trans_id = ''
    while True:
        if not offset:
            raw_data = first_chunk
        else:
            raw_data = fileobj.read(read_size)
        raw_data_size = len(raw_data)

        if not raw_data:
            break  # EOF
        if stdin:
            if raw_data_size < read_size:
                filesize = offset + raw_data_size
            else:
                filesize = offset + raw_data_size + 1

        payload = base64.b64encode(raw_data)

        headers = {
            'Content-Range': 'bytes %d-%d/%d' % (offset, offset + raw_data_size - 1, filesize),
            'Content-Type': ftype,
            'Content-Filename': fname,
            'Content-Length': str(len(payload)),  # rfc 2616 14.16
            'Maxlife-Unit': lifetime[1],
            'Maxlife-Value': str(lifetime[0]),
        }
        if trans_id != '':
            headers['Transaction-ID'] = trans_id
        response = _make_request(
            'post',
            '{}/apis/rest/items'.format(url),
            data=payload,
            headers=headers,
            auth=('user', token),
            verify=not insecure
        )
        offset += raw_data_size
        if response.status_code in (200, 201):
            sys.stdout.write(
                '\r%dB (%d%%) uploaded of %dB total.' %
                (offset, offset * 100 / filesize, filesize))
        if response.status_code == 200:
            pass
        elif response.status_code == 201:
            loc = response.headers['Content-Location']
            print('\nFile was successfully uploaded and can be found here:')
            print('{}{}'.format(url, loc))
            print('{}/{}'.format(url, loc.split('/')[-1]))
        else:
            print('An error occurred: %d %s' %
                  (response.status_code, response.text))
            return

        if 'Transaction-ID' in response.headers:
            trans_id = response.headers['Transaction-ID']
Example #46
def sieve_cli(ctx,
              output_format,
              directory,
              input_file,
              compress_command,
              table,
              exclude_table,
              defer_indexes,
              defer_foreign_keys,
              write_binlog,
              table_schema,
              table_data,
              routines,
              events,
              triggers,
              master_data,
              to_stdout):
    """Filter and transform mysqldump output.

    sieve can extract single tables from a mysqldump file and perform useful
    transformations, such as adding indexes after the table data is loaded
    for InnoDB tables, where such indexes can be created much more efficiently
    than the default incremental rebuild that mysqldump performs.

    Example:

        $ dbsake sieve --no-table-data < sakila.sql.gz > sakila_schema.sql
        $ dbsake sieve --format=directory -i sakila.sql.gz -C extracted_sql/
    """
    from dbsake.core.mysql import sieve

    if hasattr(input_file, 'detach'):
        input_file = input_file.detach()

    if output_format == 'stream' and sys.stdout.isatty() and not to_stdout:
        ctx.fail("stdout appears to be a terminal and --format=stream. "
                 "Use -O/--to-stdout to force output or redirect to a file. "
                 "Aborting.")

    if defer_indexes and not table_data:
        click.echo("Disabling index deferment since --no-data requested",
                   file=sys.stderr)
        defer_indexes = False
        defer_foreign_keys = False

    options = sieve.Options(output_format=output_format,
                            table_schema=table_schema,
                            table_data=table_data,
                            routines=routines,
                            events=events,
                            triggers=triggers,
                            master_data=master_data,
                            defer_indexes=defer_indexes,
                            defer_foreign_keys=defer_foreign_keys,
                            table=table,
                            exclude_table=exclude_table,
                            write_binlog=write_binlog,
                            directory=directory,
                            compress_command=compress_command,
                            input_stream=input_file,
                            output_stream=click.get_binary_stream('stdout'))

    try:
        stats = sieve.sieve(options)
    except IOError as exc:
        if exc.errno != errno.EPIPE:
            raise  # generate a traceback, in case this is a bug
        else:
            # note broken pipe in debug mode
            if ctx.obj['debug']:
                click.echo("Broken pipe (errno: %d)" % exc.errno,
                           file=sys.stderr)
            # exit with SIGPIPE to indicate only partial output
            sys.exit(128 + signal.SIGPIPE)
    except sieve.Error as exc:
        click.echo(exc, file=sys.stderr)
        sys.exit(1)
    else:
        click.echo(("Processed %s. "
                    "Output: %d database(s) %d table(s) and %d view(s)") %
                   (options.input_stream.name,
                    stats['createdatabase'] or 1,
                    stats['tablestructure'],
                    stats['view']), file=sys.stderr)
        sys.exit(0)
Example #47
    return template.format(
        maindb=identifier(maindb),
        datastoredb=identifier(datastoredb),
        mainuser=identifier(mainuser),
        writeuser=identifier(writeuser),
        readuser=identifier(readuser))


@datastore_group.command(
    u'dump',
    help=u'Dump a datastore resource in one of the supported formats.')
@click.argument(u'resource-id', nargs=1)
@click.argument(
    u'output-file',
    type=click.File(u'wb'),
    default=click.get_binary_stream(u'stdout'))
@click.help_option(u'-h', u'--help')
@click_config_option
@click.option(u'--format', default=u'csv', type=click.Choice(DUMP_FORMATS))
@click.option(u'--offset', type=click.IntRange(0, None), default=0)
@click.option(u'--limit', type=click.IntRange(0))
@click.option(u'--bom', is_flag=True)  # FIXME: options based on format
@click.pass_context
def dump(ctx, resource_id, output_file, config, format, offset, limit, bom):
    load_config(config or ctx.obj['config'])

    dump_to(
        resource_id,
        output_file,
        fmt=format,
        offset=offset,
Example #48
def main():
    # fetch path
    parser = ArgumentParser()
    parser.add_argument('src')
    args = parser.parse_args()

    # extract xml in zip archive
    with gzip.open(args.src, 'rb') as fp:
        xml = ElementTree(file=fp)

    # render PDF
    dest = BytesIO()
    c = canvas.Canvas(dest, bottomup=0)
    warnings = []
    pdf_background_filename = None
    pdf_background_pages = {}
    for pageno, page in enumerate(xml.getroot().iter('page')):
        # set page size
        c.setPageSize((float(page.attrib['width']), float(
            page.attrib['height'])))

        # fill with background color
        background = page.find('background')
        if background.attrib['type'] == 'solid':
            background_color = background.attrib['color']
            if background.attrib['style'] == 'plain':
                c.setFillColor(background_color)
                c.rect(
                    0,
                    0,
                    float(page.attrib['width']),
                    float(page.attrib['height']),
                    stroke=0,
                    fill=1)
            else:
                warnings.append(
                    "Do not know how to handle background style '%s'" %
                    background.attrib['style'])
        elif background.attrib['type'] == 'pdf':
            if 'domain' in background.attrib:
                # determine filename according to Xournal rules
                domain = background.attrib['domain']
                if domain == 'absolute':
                    pdf_background_filename = background.attrib['filename']
                elif domain == 'attach':
                    pdf_background_filename = "%s.%s" % (
                        args.src, background.attrib['filename'])
                else:
                    warnings.append(
                        "Do not know how to handle PDF background domain '%s'"
                        % domain)

            # add page number mapping
            pdf_background_pages[pageno] = int(background.attrib['pageno']) - 1
        else:
            warnings.append("Do not know how to handle background type '%s'" %
                            background.attrib['type'])

        # render layers
        for layer in page.iter('layer'):
            for item in layer:
                # render stroke?
                if item.tag == 'stroke':
                    # configure pen
                    if item.attrib["tool"] not in ["pen", "highlighter"]:
                        warnings.append(
                            "Do not know how to handle stroke tool '%s'" %
                            item.attrib['tool'])
                    color = toColor(item.attrib["color"])
                    if item.attrib["tool"] == "highlighter":
                        color.alpha = 0.5
                    c.setStrokeColor(color)
                    c.setLineWidth(float(item.attrib["width"]))
                    c.setLineCap(1)  # round cap

                    # draw path
                    coords = item.text.split()
                    p = c.beginPath()
                    for i in range(0, len(coords), 2):
                        fn = p.moveTo if i == 0 else p.lineTo
                        fn(float(coords[i]), float(coords[i + 1]))
                    c.drawPath(p)

                # render text?
                elif item.tag == 'text':
                    font = item.attrib["font"]
                    if font not in standardFonts:
                        warnings.append(
                            "Unknown font '%s', falling back to Helvetica." %
                            font)
                        font = "Helvetica"
                    font_size = float(item.attrib["size"])
                    c.setFont(font, font_size)
                    c.setFillColor(item.attrib["color"])
                    dy = 0
                    for line in item.text.split("\n"):
                        c.drawString(item.attrib["x"],
                                     dy + float(item.attrib["y"]) + font_size,
                                     line)
                        dy += float(item.attrib["size"])

                # render image?
                elif item.tag == 'image':
                    # png image base 64 encoded
                    png_data = b64decode(item.text)
                    png = ImageReader(BytesIO(png_data))
                    x = float(item.attrib["left"])
                    y = float(item.attrib["top"])
                    width = float(item.attrib["right"]) - float(
                        item.attrib["left"])
                    height = float(item.attrib["bottom"]) - float(
                        item.attrib["top"])
                    c.saveState()
                    c.translate(x, y + height / 2)
                    c.scale(1, -1)
                    c.drawImage(
                        png, 0, -height / 2, width, height, anchor='nw')
                    c.restoreState()

                # !?
                else:
                    warnings.append("Unknown item '%s'" % item.tag)

        c.showPage()

    # save PDF in the BytesIO object (`dest`)
    c.save()

    # PDF file not found? Attempt to guess better if Xournal filename is of the form 'filename.pdf.xoj'.
    if pdf_background_filename and not os.path.exists(pdf_background_filename):
        if args.src.endswith('.pdf.xoj'):
            warnings.append(
                "File not found '%s', attempting to use '%s' instead." %
                (pdf_background_filename, args.src[:-4]))
            pdf_background_filename = args.src[:-4]

    pdf_writer = None
    if pdf_background_filename:
        if not os.path.exists(pdf_background_filename):
            warnings.append("File not found '%s'." % pdf_background_filename)
        else:
            # open PDF background
            dest.seek(0)
            pdf_journal = PdfFileReader(dest)
            pdf_background = PdfFileReader(open(pdf_background_filename, 'rb'))

            # merge journal and background
            pdf_writer = PdfFileWriter()
            for pageno, _ in enumerate(xml.getroot().iter('page')):
                # page has PDF background?
                if pageno in pdf_background_pages:
                    pdf_pageno = pdf_background_pages[pageno]

                    page = pdf_background.getPage(pdf_pageno)
                    page.mergePage(pdf_journal.getPage(pageno))
                else:
                    page = pdf_journal.getPage(pageno)
                pdf_writer.addPage(page)

    # print warnings
    if warnings:
        sys.stderr.write("WARNINGS:\n")
        for line in warnings:
            sys.stderr.write(" -" + line + "\n")

    # print PDF
    stdout = click.get_binary_stream('stdout')
    if pdf_writer:
        pdf_writer.write(stdout)
    else:
        stdout.write(dest.getvalue())
Example #49
    def create_stdin_reader(self):
        # type: () -> PipeReader
        stream = click.get_binary_stream('stdin')
        reader = PipeReader(stream)
        return reader
Example #50
def main(token, filename, fname, url, ftype):
    """
    determine mime-type and upload to bepasty
    """
    if filename:
        fileobj = open(filename, 'rb')
        filesize = os.path.getsize(filename)
        if not fname:
            fname = filename
    else:
        data = click.get_binary_stream('stdin').read()  # XXX evil for big stuff
        fileobj = BytesIO(data)
        filesize = len(data)
        if not fname:
            fname = ''

    if not ftype:
        mime = magic.Magic(mime=True)
        ftype = mime.from_buffer(fileobj.read(1024)).decode()
        fileobj.seek(0)
        if not ftype:
            print('falling back to {}'.format(ftype))
            ftype = 'text/plain'
        else:
            print('guessed filetype: {}'.format(ftype))
    else:
        print('using given filetype {}'.format(ftype))

    offset = 0
    trans_id = ''
    while True:
        read_size = 1 * 1024 * 1024
        raw_data = fileobj.read(read_size)
        if not raw_data:
            break  # EOF
        raw_data_size = len(raw_data)

        payload = base64.b64encode(raw_data)

        headers = {
            'Content-Range': ('bytes %d-%d/%d' %
                              (offset, offset + raw_data_size - 1, filesize)),
            'Content-Type': ftype,
            'Content-Filename': fname,
            'Content-Length': str(len(payload)),  # rfc 2616 14.16
        }
        if trans_id != '':
            headers['Transaction-ID'] = trans_id
        response = requests.post(
            '{}/apis/rest/items'.format(url),
            data=payload,
            headers=headers,
            auth=('user', token))
        offset += raw_data_size
        if response.status_code in (200, 201):
            sys.stdout.write(
                '\r%dB (%d%%) uploaded of %dB total.' %
                (offset, offset * 100 / filesize, filesize))
        if response.status_code == 200:
            pass
        elif response.status_code == 201:
            loc = response.headers['Content-Location']
            print('\nFile was successfully uploaded and can be found here:')
            print('{}{}'.format(url, loc))
            print('{}/{}'.format(url, loc.split('/')[-1]))
        else:
            print('An error occurred: %d %s' %
                  (response.status_code, response.text))
            return

        if 'Transaction-ID' in response.headers:
            trans_id = response.headers['Transaction-ID']