Example #1
File: bounds.py Project: aashish24/Fiona
def bounds(ctx, precision, explode, with_id, with_obj, use_rs):
    """Print the bounding boxes of GeoJSON objects read from stdin.
    
    Optionally explode collections and print the bounds of their
    features.

    To print identifiers for input objects along with their bounds
    as a {id: identifier, bbox: bounds} JSON object, use --with-id.

    To print the input objects themselves along with their bounds
    as a GeoJSON object, use --with-obj. This has the effect of updating
    input objects with {id: identifier, bbox: bounds}.
    """
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    stdout = click.get_text_stream('stdout')
    try:
        source = obj_gen(stdin)
        for i, obj in enumerate(source):
            obj_id = obj.get('id', 'collection:' + str(i))
            xs = []
            ys = []
            features = obj.get('features') or [obj]
            for j, feat in enumerate(features):
                feat_id = feat.get('id', 'feature:' + str(j))
                w, s, e, n = fiona.bounds(feat)
                if precision > 0:
                    w, s, e, n = (round(v, precision) 
                                  for v in (w, s, e, n))
                if explode:
                    if with_id:
                        rec = {'parent': obj_id, 'id': feat_id, 'bbox': (w, s, e, n)}
                    elif with_obj:
                        feat.update(parent=obj_id, bbox=(w, s, e, n))
                        rec = feat
                    else:
                        rec = (w, s, e, n)
                    if use_rs:
                        click.echo(u'\u001e', nl=False)
                    click.echo(json.dumps(rec))
                else:
                    xs.extend([w, e])
                    ys.extend([s, n])
            if not explode:
                w, s, e, n = (min(xs), min(ys), max(xs), max(ys))
                if with_id:
                    rec = {'id': obj_id, 'bbox': (w, s, e, n)}
                elif with_obj:
                    obj.update(id=obj_id, bbox=(w, s, e, n))
                    rec = obj
                else:
                    rec = (w, s, e, n)
                if use_rs:
                    click.echo(u'\u001e', nl=False)
                click.echo(json.dumps(rec))

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
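Several of these commands prefix each JSON record with u'\u001e' when --use-rs is given. That byte is the ASCII record separator, and the framing is the RFC 7464 "JSON text sequences" format, which lets multi-line (indented) records be streamed unambiguously. A minimal consumer sketch, not taken from any of the projects here, just one way such output might be read back:

import json
import sys

def iter_json_seq(stream):
    # Yield objects from an RFC 7464 JSON text sequence (RS-prefixed records).
    buffer = ''
    for line in stream:
        if line.startswith('\x1e'):
            if buffer.strip():
                yield json.loads(buffer)
            buffer = line.lstrip('\x1e')
        else:
            buffer += line
    if buffer.strip():
        yield json.loads(buffer)

if __name__ == '__main__':
    # e.g.  fio bounds --use-rs < features.json | python consume_rs.py
    for obj in iter_json_seq(sys.stdin):
        print(obj)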
Example #2
def organizations(metrics, since, until):
    """
    Fetch metrics for organizations.
    """
    from django.utils import timezone
    from sentry.app import tsdb
    from sentry.models import Organization

    stdout = click.get_text_stream('stdout')
    stderr = click.get_text_stream('stderr')

    def aggregate(series):
        return sum(value for timestamp, value in series)

    metrics = OrderedDict((name, getattr(tsdb.models, name)) for name in metrics)
    if not metrics:
        return

    if until is None:
        until = timezone.now()

    if since is None:
        since = until - timedelta(minutes=60)

    if until < since:
        raise click.ClickException('invalid time range provided: {} to {}'.format(since, until))

    stderr.write(
        'Dumping {} from {} to {}...\n'.format(
            ', '.join(metrics.keys()),
            since,
            until,
        ),
    )

    objects = Organization.objects.all()

    for chunk in chunked(objects, 100):
        instances = OrderedDict((instance.pk, instance) for instance in chunk)

        results = {}
        for metric in metrics.values():
            results[metric] = tsdb.get_range(metric, instances.keys(), since, until)

        for key, instance in six.iteritems(instances):
            values = []
            for metric in metrics.values():
                values.append(aggregate(results[metric][key]))

            stdout.write(
                '{} {} {}\n'.format(
                    instance.id,
                    instance.slug,
                    ' '.join(map(six.binary_type, values)),
                ),
            )
Example #3
def unpack(input_: typing.io.TextIO, output: typing.io.TextIO):
    """
    Unpack serialized command log.
    """
    tokens_iterator = iter_tokens(input_ or click.get_text_stream("stdin", encoding="utf-8"))
    header = read_header(tokens_iterator)
    commands = list(iter_commands(tokens_iterator))

    json.dump(
        {"header": header, "commands": commands},
        output or click.get_text_stream("stdout", encoding="utf-8"),
        indent=2,
    )
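The `output or click.get_text_stream("stdout")` expression is a common fallback idiom: callers may pass an open file, and None falls through to the process's own stream. A small hedged sketch of the same idiom wired to a click.File option (the command and option names are illustrative, not part of the project above):

import json

import click

@click.command()
@click.option("--output", type=click.File("w"), default=None,
              help="Write here instead of stdout.")
def dump(output):
    # Same fallback idiom as unpack() above.
    out = output or click.get_text_stream("stdout")
    json.dump({"ok": True}, out, indent=2)
    out.write("\n")

if __name__ == "__main__":
    dump()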
Example #4
def distrib(ctx, use_rs):

    """Distribute features from a collection.

    Print the features of GeoJSON objects read from stdin.
    """

    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')
    try:
        source = helpers.obj_gen(stdin)
        for i, obj in enumerate(source):
            obj_id = obj.get('id', 'collection:' + str(i))
            features = obj.get('features') or [obj]
            for j, feat in enumerate(features):
                if obj.get('type') == 'FeatureCollection':
                    feat['parent'] = obj_id
                feat_id = feat.get('id', 'feature:' + str(j))
                feat['id'] = feat_id
                if use_rs:
                    click.echo(u'\u001e', nl=False)
                click.echo(json.dumps(feat))
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
Example #5
def parent(ctx, input, depth):
    """Takes a [x, y, z] tile as input and writes its parent to stdout
    in the same form.

    $ echo "[486, 332, 10]" | mercantile parent

    Output:

    [243, 166, 9]
    """
    verbosity = ctx.obj['verbosity']
    logger = logging.getLogger('mercantile')
    try:
        src = click.open_file(input).readlines()
    except IOError:
        src = [input]
    stdout = click.get_text_stream('stdout')

    try:
        for line in src:
            line = line.strip()
            tile = json.loads(line)[:3]
            if tile[2] - depth < 0:
                raise ValueError("Maximum depth exceeded.")
            for i in range(depth):
                tile = mercantile.parent(tile)
            output = json.dumps(tile)
            stdout.write(output)
            stdout.write('\n')
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #6
def children(ctx, input, depth):
    """Takes a [x, y, z] tile as input and writes its children to stdout
    in the same form.

    $ echo "[486, 332, 10]" | mercantile parent

    Output:

    [972, 664, 11]
    [973, 664, 11]
    [973, 665, 11]
    [972, 665, 11]
    """
    verbosity = ctx.obj['verbosity']
    logger = logging.getLogger('mercantile')
    try:
        src = click.open_file(input).readlines()
    except IOError:
        src = [input]
    stdout = click.get_text_stream('stdout')

    try:
        for line in src:
            line = line.strip()
            tiles = [json.loads(line)[:3]]
            for i in range(depth):
                tiles = sum([mercantile.children(t) for t in tiles], [])
            for t in tiles:
                output = json.dumps(t)
                stdout.write(output)
                stdout.write('\n')
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #7
File: tool.py Project: geomet/geomet
def cli(input, verbose, quiet, output_format, precision, indent):
    """Convert text read from the first positional argument, stdin, or
    a file to GeoJSON and write to stdout."""

    verbosity = verbose - quiet
    configure_logging(verbosity)
    logger = logging.getLogger('geomet')

    # Handle the case of file, stream, or string input.
    try:
        src = click.open_file(input).readlines()
    except IOError:
        src = [input]

    stdout = click.get_text_stream('stdout')

    # Read-write loop.
    try:
        for line in src:
            text = line.strip()
            logger.debug("Input: %r", text)
            output = translate(
                text,
                output_format=output_format,
                indent=indent,
                precision=precision
            )
            logger.debug("Output: %r", output)
            stdout.write(output)
            stdout.write('\n')
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #8
File: cat.py Project: citterio/Fiona
def cat(ctx, input, precision, indent, compact, ignore_errors, dst_crs,
        x_json_seq_rs):
    """Concatenate and print the features of input datasets as a
    sequence of GeoJSON features."""
    verbosity = ctx.obj['verbosity']
    logger = logging.getLogger('fio')
    sink = click.get_text_stream('stdout')

    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '

    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            for path in input:
                with fiona.open(path) as src:
                    for feat in src:
                        if dst_crs or precision > 0:
                            g = transform_geom(
                                    src.crs, dst_crs, feat['geometry'],
                                    antimeridian_cutting=True,
                                    precision=precision)
                            feat['geometry'] = g
                        if x_json_seq_rs:
                            sink.write(u'\u001e')
                        json.dump(feat, sink, **dump_kwds)
                        sink.write("\n")
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #9
def start(ctx, redis_url, log, verbose, backend):
    if redis_url is None:
        redis_url = 'redis://localhost:6379?db=0'

    if log is not None:
        handler = logging.FileHandler(log)
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

    if verbose:
        handler = logging.StreamHandler(click.get_text_stream('stdout'))
        handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)

    worker = workers.get(backend, None)

    if worker is None:
        ctx.fail('Unknown backend: %s' % backend)

    click.echo('Starting worker using backend: %s' % backend)

    queue = DistributedQueue(redis_url)
    run(worker(), queue)
Example #10
def echo_json_response(response, pretty, limit=None, ndjson=False):
    '''Wrapper to echo JSON with optional 'pretty' printing. If pretty is not
    provided explicitly and stdout is a terminal (and not redirected or piped),
    the default will be to indent and sort keys.'''
    indent = None
    sort_keys = False
    nl = False
    if not ndjson and (pretty or (pretty is None and sys.stdout.isatty())):
        indent = 2
        sort_keys = True
        nl = True
    try:
        if ndjson and hasattr(response, 'items_iter'):
            items = response.items_iter(limit)
            for item in items:
                click.echo(json.dumps(item))
        elif not ndjson and hasattr(response, 'json_encode'):
            response.json_encode(click.get_text_stream('stdout'), limit=limit,
                                 indent=indent, sort_keys=sort_keys)
        else:
            res = response.get_raw()
            res = json.dumps(json.loads(res), indent=indent,
                             sort_keys=sort_keys)
            click.echo(res)
        if nl:
            click.echo()
    except IOError as ioe:
        # hide scary looking broken pipe stack traces
        raise click.ClickException(str(ioe))
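The sys.stdout.isatty() test is what makes indentation the default only for interactive terminals, so piped output stays compact. The same decision logic in isolation (names here are illustrative, not from the project above):

import json
import sys

def dump_json(obj, pretty=None):
    # When pretty is not forced either way, indent only for a terminal.
    if pretty or (pretty is None and sys.stdout.isatty()):
        print(json.dumps(obj, indent=2, sort_keys=True))
    else:
        print(json.dumps(obj))

dump_json({"b": 1, "a": 2})          # pretty iff stdout is a TTY
dump_json({"b": 1, "a": 2}, True)    # always pretty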
Example #11
def msms_intervals(paths, processes=4, time_radius=5, mz_lower=2., mz_higher=3., output=None):
    '''Construct an interval tree spanning time and m/z domains where MSn spectra were acquired
    in the LC-MS map. The interval tree is serialized to JSON.
    '''
    interval_extraction = _MSMSIntervalTask(time_radius, mz_lower, mz_higher)
    interval_set = []
    total_work_items = len(paths) * processes * 4

    def _run():
        for path in paths:
            reader = MSFileLoader(path)
            chunk_out_of_order = quick_index.run_task_in_chunks(
                reader, processes, processes * 4, task=interval_extraction)
            for chunk in chunk_out_of_order:
                interval_set.extend(chunk)
                yield 0
    work_iterator = _run()
    with click.progressbar(work_iterator, length=total_work_items, label='Extracting Intervals') as g:
        for _ in g:
            pass
    tree = scan_interval_tree.ScanIntervalTree(scan_interval_tree.make_rt_tree(interval_set))
    if output is not None:
        with open(output, 'wt') as fh:
            tree.serialize(fh)
    else:
        stream = click.get_text_stream('stdout')
        tree.serialize(stream)
        stream.flush()
Example #12
def info(id, format):
    "Show a file's metadata by id."
    from sentry.models import File

    try:
        file = File.objects.get(id=id)
    except File.DoesNotExist:
        raise click.ClickException('File %d does not exist.' % id)

    obj = {
        'id': file.id,
        'name': file.name,
        'headers': file.headers,
        'size': file.size,
        'sha1': file.checksum,
        'dateCreated': file.timestamp,
    }

    stdout = click.get_text_stream('stdout')

    if format == 'yaml':
        from sentry.utils import yaml
        yaml.safe_dump(obj, stdout)
    elif format == 'json':
        from sentry.utils import json
        json.dump(obj, stdout)
        stdout.write('\n')
Example #13
File: fio.py Project: barrycug/Fiona
def info(ctx, input, indent, meta_member):
    verbosity = ctx.obj['verbosity']
    logger = logging.getLogger('rio')

    stdout = click.get_text_stream('stdout')
    try:
        with fiona.drivers(CPL_DEBUG=verbosity>2):
            with fiona.open(input) as src:
                info = src.meta
                info.update(bounds=src.bounds, count=len(src))
                proj4 = fiona.crs.to_string(src.crs)
                if proj4.startswith('+init=epsg'):
                    proj4 = proj4.split('=')[1].upper()
                info['crs'] = proj4
                if meta_member:
                    if isinstance(info[meta_member], (list, tuple)):
                        print(" ".join(map(str, info[meta_member])))
                    else:
                        print(info[meta_member])
                else:
                    stdout.write(json.dumps(info, indent=indent))
                    stdout.write("\n")
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #14
def _fetch(url, filename_template):
    _prepare_archive_or_fail()
    urls = url or click.get_text_stream('stdin')
    for url in urls:
        url = url.rstrip('\n')
        f = archive.fetch_to_filename(url, filename_template=filename_template)
        click.echo(f)
Example #15
File: dns.py Project: Gandi/gandi.cli
def update(gandi, fqdn, name, type, value, ttl, file):
    """Update record entry for a domain.

    --file option will ignore other parameters and overwrite current zone
    content with provided file content.
    """
    domains = gandi.dns.list()
    domains = [domain['fqdn'] for domain in domains]
    if fqdn not in domains:
        gandi.echo('Sorry domain %s does not exist' % fqdn)
        gandi.echo('Please use one of the following: %s' % ', '.join(domains))
        return

    content = ''
    if file:
        content = file.read()
    elif not sys.stdin.isatty():
        content = click.get_text_stream('stdin').read()

    content = content.strip()
    if not content and not name and not type and not value:
        click.echo('Cannot find parameters for zone content to update.')
        return

    if name and type and not value:
        click.echo('You must provide one or more value parameter.')
        return

    result = gandi.dns.update_record(fqdn, name, type, value, ttl, content)
    gandi.echo(result['message'])
Example #16
File: fio.py Project: jlivni/Fiona
def load(ctx, output, driver, x_json_seq):
    """Load features from JSON to a file in another format.

    The input is a GeoJSON feature collection or optionally a sequence of
    GeoJSON feature objects."""
    verbosity = ctx.obj['verbosity']
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')

    first_line = next(stdin)

    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u'\x1e'):
        def feature_gen():
            buffer = first_line.strip(u'\x1e')
            for line in stdin:
                if line.startswith(u'\x1e'):
                    if buffer:
                        yield json.loads(buffer)
                    buffer = line.strip(u'\x1e')
                else:
                    buffer += line
            else:
                yield json.loads(buffer)
    elif x_json_seq:
        def feature_gen():
            yield json.loads(first_line)
            for line in stdin:
                yield json.loads(line)
    else:
        def feature_gen():
            # stdin's first line was already consumed above, so stitch it back on.
            text = first_line + stdin.read()
            for feat in json.loads(text)['features']:
                yield feat

    try:
        source = feature_gen()

        # Use schema of first feature as a template.
        # TODO: schema specified on command line?
        first = next(source)
        schema = {'geometry': first['geometry']['type']}
        schema['properties'] = dict([
            (k, FIELD_TYPES_MAP_REV[type(v)])
            for k, v in first['properties'].items()])

        with fiona.drivers(CPL_DEBUG=verbosity>2):
            with fiona.open(
                    output, 'w',
                    driver=driver,
                    crs={'init': 'epsg:4326'},
                    schema=schema) as dst:
                dst.write(first)
                dst.writerecords(source)
        sys.exit(0)
    except IOError:
        logger.info("IOError caught")
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
Example #17
def command():
    with codecs.open('files.json', encoding='utf-8') as data:
        list_of_lists = json.load(data)

    flattened = list(itertools.chain(*list_of_lists))

    stdout_text = click.get_text_stream('stdout')
    stdout_text.write(json.dumps({'out_files': flattened}))
Example #18
def lowercase(in_file, out_dir):
    create_dirs(out_dir)

    text = in_file.read()
    text = text.lower()

    stdout_text = click.get_text_stream('stdout')
    stdout_text.write(text)
Example #19
def snorse_cli(text=None, file=None):
    if file is not None:
        text = file.read()
    elif text is None:
        text = click.get_text_stream('stdin').read()
    elif os.path.exists(os.path.expanduser(text)):
        text = open(os.path.expanduser(text)).read()
    click.echo(snorse(text))
Example #20
def ls_chunk(in_dir, chunks, name):
    div = json.load(chunks)
    files = div.get(name, [])
    files_out = [cwl_file(os.path.abspath(os.path.join(in_dir, f)))
                 for f in files]

    stdout_text = click.get_text_stream('stdout')
    stdout_text.write(json.dumps({'out_files': files_out}))
Example #21
def members(ctx, config, cube_name, cuts, dim_name, output_format):
    """Aggregate a cube"""
    config = read_config(config)
    workspace = Workspace(config)
    browser = workspace.browser(cube_name)
    cube = browser.cube

    cell_cuts = []
    for cut_str in cuts:
        cell_cuts += cuts_from_string(browser.cube, cut_str)

    cell = Cell(browser.cube, cell_cuts)

    (dim_name, hier_name, level_name) = string_to_dimension_level(dim_name)
    dimension = cube.dimension(dim_name)
    hierarchy = dimension.hierarchy(hier_name)

    if level_name:
        depth = hierarchy.level_index(level_name) + 1
    else:
        depth = len(hierarchy)

    # TODO: pagination
    values = browser.members(cell,
                             dimension,
                             depth=depth,
                             hierarchy=hierarchy,
                             page=None,
                             page_size=None)

    attributes = []
    for level in hierarchy.levels_for_depth(depth):
        attributes += level.attributes

    fields = [attr.ref for attr in attributes]
    labels = [attr.label or attr.name for attr in attributes]

    if output_format == "json":
        encoder = SlicerJSONEncoder(indent=4)
        result = encoder.iterencode(values)
    elif output_format == "json_lines":
        result = JSONLinesGenerator(values)
    elif output_format == "csv":
        result = csv_generator(values,
                               fields,
                               include_header=True,
                               header=labels)
    elif output_format == 'xlsx':
        result = xlsx_generator(
            values,
            fields,
            include_header=True,
            header=labels
        )
    else:
        raise click.ClickException('unknown output format: %s' % output_format)

    out = click.get_text_stream('stdout')
    for row in result:
        out.write(row)
Example #22
def _cat(url):
    _prepare_archive_or_fail()
    urls = url or click.get_text_stream('stdin')
    out = click.open_file('-', 'w')
    for url in urls:
        url = url.rstrip('\n')
        f = archive.fetch(url)
        out.write(f.read())
    out.close()
Example #23
File: ptwit.py Project: ptpt/ptwit
def read_text(words):
    if len(words) == 1 and words[0] == '-':
        text = click.get_text_stream('stdin').read()
    elif words:
        text = ' '.join(words)
        click.confirm('Post "{0}"?'.format(text), abort=True)
    else:
        text = click.edit()

    return text
Example #24
File: login.py Project: venth/aws-adfs
def _stdin_user_credentials():
    stdin = click.get_text_stream('stdin').read()
    stdin_lines = stdin.strip().splitlines()
    try:
        username, password = stdin_lines[:2]
    except ValueError:
        print('Failed to read newline separated username and password from stdin.')
        username = None
        password = None

    return username, password
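A command built on this helper can be exercised by piping two newline-separated lines into stdin. A hedged test sketch using click's own test runner; the login command below is a stand-in, not the aws-adfs CLI:

import click
from click.testing import CliRunner

@click.command()
def login():
    # Same pattern as _stdin_user_credentials above.
    lines = click.get_text_stream('stdin').read().strip().splitlines()
    username, password = lines[:2]
    click.echo('user=%s' % username)

runner = CliRunner()
result = runner.invoke(login, input='alice\ns3cret\n')
print(result.output)  # -> user=alice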
Example #25
def cli(server, inp_text):

    if not inp_text:
        inp_text = click.get_text_stream('stdin').read()
    print(inp_text)
    # Send request to server with user input text and user's wallet address for payment
    sel_url = 'http://' + server + '/sentiment?text={0}&payout_address={1}'
    response = requests.get(url=sel_url.format(inp_text, wallet.get_payout_address()))

    # Print the translated text out to the terminal
    click.echo(response.text)
Example #26
File: __init__.py Project: JNRowe/hubugs
def comment(globs: AttrDict, message: str, stdin: bool, bugs: List[int]):
    """Commenting on bugs."""
    if stdin:
        message = click.get_text_stream('stdin').read()
    elif not message:
        message = template.edit_text()
    for bug in bugs:
        globs.req_post('{}/comments'.format(bug), body={'body': message},
                       model='Comment')
Example #27
def import_stock_values(code):
    """Import stock price information."""
    app = create_app(__name__)
    with app.app_context():
        # NOTE: We assume all Asset records are already in the database, but
        # this is a temporary workaround. We should implement some mechanism to
        # automatically insert an Asset record when it is not found.

        stdin = click.get_text_stream('stdin')
        for _ in import_stock_values_(stdin, code):
            pass
Example #28
def done(self, status=OK):
    """
    @type   status: str
    """
    padding = ' ' * ((self.max_term_width - 6) - len(self.message))
    suffix = click.style(']', fg=self.color, bold=self.bold)
    message = '{msg}{pad}[{status}{suf}'.format(msg=self.message, pad=padding, status=status, suf=suffix)
    stdout = click.get_text_stream('stdout')
    stdout.write('\r\033[K')
    stdout.flush()
    click.secho(message, fg=self.color, bold=self.bold)
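The '\r\033[K' write is a carriage return followed by the ANSI erase-to-end-of-line code, so the final status overwrites whatever progress text was on the line. A standalone sketch of just that terminal trick (assumes an ANSI-capable terminal; not code from the project above):

import time

import click

stdout = click.get_text_stream('stdout')
stdout.write('working...')
stdout.flush()
time.sleep(1)                 # stand-in for real work
stdout.write('\r\033[K')      # return to column 0 and erase the line
stdout.flush()
click.secho('working    [OK]', fg='green')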
Example #29
def env(ctx, key):
    """Print information about the Rasterio environment: available
    formats, etc.
    """
    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    stdout = click.get_text_stream('stdout')
    with Env(CPL_DEBUG=(verbosity > 2)) as env:
        if key == 'formats':
            for k, v in sorted(env.drivers().items()):
                stdout.write("%s: %s\n" % (k, v))
            stdout.write('\n')
Example #30
def env(ctx, key):
    """Print information about the Rasterio environment: available
    formats, etc.
    """
    verbosity = (ctx.obj and ctx.obj.get("verbosity")) or 1
    logger = logging.getLogger("rio")
    stdout = click.get_text_stream("stdout")
    with rasterio.drivers(CPL_DEBUG=(verbosity > 2)) as env:
        if key == "formats":
            for k, v in sorted(env.drivers().items()):
                stdout.write("%s: %s\n" % (k, v))
            stdout.write("\n")
Example #31
def blocks(ctx, input, output, precision, indent, compact, projection,
           sequence, use_rs, bidx):
    """Write dataset blocks as GeoJSON features.

    This command writes features describing a raster's internal blocks, which
    are used directly for raster I/O.  These features can be used to visualize
    how a windowed operation would operate using those blocks.

    Output features have two JSON encoded properties: block and window.  Block
    is a two element array like '[0, 0]' describing the window's position
    in the input band's window layout.  Window is a two element array
    containing two more two element arrays like '[[0, 256], [0, 256]]' and
    describes the range of pixels the window covers in the input band.  Values
    are JSON encoded for better interoperability.

    Block windows are extracted from the dataset (all bands must have matching
    block windows) by default, or from the band specified using the '--bidx'
    option:
    \b

        $ rio blocks --bidx 3 tests/data/RGB.byte.tif

    By default a GeoJSON 'FeatureCollection' is written, but the '--sequence'
    option produces a GeoJSON feature stream instead.
    \b

        $ rio blocks tests/data/RGB.byte.tif --sequence

    Output features are reprojected to 'WGS84' unless the '--projected' flag is
    provided, which causes the output to be kept in the input datasource's
    coordinate reference system.

    For more information on exactly what blocks and windows represent, see
    'dataset.block_windows()'.
    """

    dump_kwds = {'sort_keys': True}

    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')

    stdout = click.open_file(
        output, 'w') if output else click.get_text_stream('stdout')

    with ctx.obj['env'], rasterio.open(input) as src:

        if bidx and bidx not in src.indexes:
            raise click.BadParameter("Not a valid band index")

        collection = _Collection(dataset=src,
                                 bidx=bidx,
                                 precision=precision,
                                 geographic=projection != 'projected')

        write_features(stdout,
                       collection,
                       sequence=sequence,
                       geojson_type='feature' if sequence else 'collection',
                       use_rs=use_rs,
                       **dump_kwds)
Example #32
def cli(
    database,
    dbname,
    version,
    prompt,
    logfile,
    auto_vertical_output,
    table,
    csv,
    warn,
    execute,
    liteclirc,
):
    """A SQLite terminal client with auto-completion and syntax highlighting.

    \b
    Examples:
      - litecli lite_database

    """

    if version:
        print("Version:", __version__)
        sys.exit(0)

    litecli = LiteCli(
        prompt=prompt,
        logfile=logfile,
        auto_vertical_output=auto_vertical_output,
        warn=warn,
        liteclirc=liteclirc,
    )

    # Choose which ever one has a valid value.
    database = database or dbname

    litecli.connect(database)

    litecli.logger.debug("Launch Params: \n" "\tdatabase: %r", database)

    #  --execute argument
    if execute:
        try:
            if csv:
                litecli.formatter.format_name = "csv"
            elif not table:
                litecli.formatter.format_name = "tsv"

            litecli.run_query(execute)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg="red")
            exit(1)

    if sys.stdin.isatty():
        litecli.run_cli()
    else:
        stdin = click.get_text_stream("stdin")
        stdin_text = stdin.read()

        try:
            sys.stdin = open("/dev/tty")
        except (FileNotFoundError, OSError):
            litecli.logger.warning("Unable to open TTY as stdin.")

        if (litecli.destructive_warning
                and confirm_destructive_query(stdin_text) is False):
            exit(0)
        try:
            new_line = True

            if csv:
                litecli.formatter.format_name = "csv"
            elif not table:
                litecli.formatter.format_name = "tsv"

            litecli.run_query(stdin_text, new_line=new_line)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg="red")
            exit(1)
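Reopening /dev/tty after draining the piped stdin is what lets litecli still prompt interactively (for example, to confirm a destructive query) even though its query arrived on a pipe. A minimal POSIX-only sketch of the idea, not litecli's actual code:

import sys

import click

piped = click.get_text_stream('stdin').read()   # expects a piped payload
try:
    sys.stdin = open('/dev/tty')                # reattach the terminal
except OSError:
    confirmed = True                            # no TTY (e.g. CI): skip the prompt
else:
    confirmed = click.confirm('Run %r?' % piped.strip())
if confirmed:
    print('running:', piped.strip())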
Example #33
def load(ctx, output, driver, src_crs, dst_crs, sequence, layer, encoding):
    """Load features from JSON to a file in another format.

    The input is a GeoJSON feature collection or optionally a sequence of
    GeoJSON feature objects."""
    verbosity = (ctx.obj and ctx.obj['verbosity']) or 2
    logger = logging.getLogger('fio')
    stdin = click.get_text_stream('stdin')

    dst_crs = dst_crs or src_crs

    if src_crs and dst_crs and src_crs != dst_crs:
        transformer = partial(transform_geom,
                              src_crs,
                              dst_crs,
                              antimeridian_cutting=True,
                              precision=-1)
    else:
        transformer = lambda x: x

    first_line = next(stdin)

    # If input is RS-delimited JSON sequence.
    if first_line.startswith(u'\x1e'):

        def feature_gen():
            buffer = first_line.strip(u'\x1e')
            for line in stdin:
                if line.startswith(u'\x1e'):
                    if buffer:
                        feat = json.loads(buffer)
                        feat['geometry'] = transformer(feat['geometry'])
                        yield feat
                    buffer = line.strip(u'\x1e')
                else:
                    buffer += line
            else:
                feat = json.loads(buffer)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    elif sequence:

        def feature_gen():
            yield json.loads(first_line)
            for line in stdin:
                feat = json.loads(line)
                feat['geometry'] = transformer(feat['geometry'])
                yield feat
    else:

        def feature_gen():
            text = "".join(itertools.chain([first_line], stdin))
            for feat in json.loads(text)['features']:
                feat['geometry'] = transformer(feat['geometry'])
                yield feat

    try:
        source = feature_gen()

        # Use schema of first feature as a template.
        # TODO: schema specified on command line?
        first = next(source)
        schema = {'geometry': first['geometry']['type']}
        schema['properties'] = dict([(k, FIELD_TYPES_MAP_REV.get(type(v))
                                      or 'str')
                                     for k, v in first['properties'].items()])

        with fiona.drivers(CPL_DEBUG=verbosity > 2):
            with fiona.open(output,
                            'w',
                            driver=driver,
                            crs=dst_crs,
                            schema=schema,
                            layer=layer,
                            encoding=encoding) as dst:
                dst.write(first)
                dst.writerecords(source)

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
Example #34
def collect(ctx, precision, indent, compact, record_buffered, ignore_errors,
            src_crs, with_ld_context, add_ld_context_item, parse):
    """Make a GeoJSON feature collection from a sequence of GeoJSON
    features and print it."""
    logger = logging.getLogger(__name__)
    stdin = click.get_text_stream('stdin')
    sink = click.get_text_stream('stdout')

    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = compact and ',' or ', '

    if src_crs:
        if not parse:
            raise click.UsageError("Can't specify --src-crs with --no-parse")
        transformer = partial(transform_geom, src_crs, 'EPSG:4326',
                              antimeridian_cutting=True, precision=precision)
    else:
        def transformer(x):
            return x

    first_line = next(stdin)

    # If parsing geojson
    if parse:
        # If input is RS-delimited JSON sequence.
        if first_line.startswith('\x1e'):
            def feature_text_gen():
                buffer = first_line.strip('\x1e')
                for line in stdin:
                    if line.startswith('\x1e'):
                        if buffer:
                            feat = json.loads(buffer)
                            feat['geometry'] = transformer(feat['geometry'])
                            yield json.dumps(feat, **dump_kwds)
                        buffer = line.strip('\x1e')
                    else:
                        buffer += line
                else:
                    feat = json.loads(buffer)
                    feat['geometry'] = transformer(feat['geometry'])
                    yield json.dumps(feat, **dump_kwds)
        else:
            def feature_text_gen():
                feat = json.loads(first_line)
                feat['geometry'] = transformer(feat['geometry'])
                yield json.dumps(feat, **dump_kwds)

                for line in stdin:
                    feat = json.loads(line)
                    feat['geometry'] = transformer(feat['geometry'])
                    yield json.dumps(feat, **dump_kwds)

    # If *not* parsing geojson
    else:
        # If input is RS-delimited JSON sequence.
        if first_line.startswith('\x1e'):
            def feature_text_gen():
                buffer = first_line.strip('\x1e')
                for line in stdin:
                    if line.startswith('\x1e'):
                        if buffer:
                            yield buffer
                        buffer = line.strip('\x1e')
                    else:
                        buffer += line
                else:
                    yield buffer
        else:
            def feature_text_gen():
                yield first_line
                for line in stdin:
                    yield line

    try:
        source = feature_text_gen()

        if record_buffered:
            # Buffer GeoJSON data at the feature level for smaller
            # memory footprint.
            indented = bool(indent)
            rec_indent = "\n" + " " * (2 * (indent or 0))

            collection = {
                'type': 'FeatureCollection',
                'features': []}
            if with_ld_context:
                collection['@context'] = helpers.make_ld_context(
                    add_ld_context_item)

            head, tail = json.dumps(collection, **dump_kwds).split('[]')

            sink.write(head)
            sink.write("[")

            # Try the first record.
            try:
                i, first = 0, next(source)
                if with_ld_context:
                    first = helpers.id_record(first)
                if indented:
                    sink.write(rec_indent)
                sink.write(first.replace("\n", rec_indent))
            except StopIteration:
                pass
            except Exception as exc:
                # Ignoring errors is *not* the default.
                if ignore_errors:
                    logger.error(
                        "failed to serialize file record %d (%s), "
                        "continuing",
                        i, exc)
                else:
                    # Log error and close up the GeoJSON, leaving it
                    # more or less valid no matter what happens above.
                    logger.critical(
                        "failed to serialize file record %d (%s), "
                        "quiting",
                        i, exc)
                    sink.write("]")
                    sink.write(tail)
                    if indented:
                        sink.write("\n")
                    raise

            # Because trailing commas aren't valid in JSON arrays
            # we'll write the item separator before each of the
            # remaining features.
            for i, rec in enumerate(source, 1):
                try:
                    if with_ld_context:
                        rec = helpers.id_record(rec)
                    if indented:
                        sink.write(rec_indent)
                    sink.write(item_sep)
                    sink.write(rec.replace("\n", rec_indent))
                except Exception as exc:
                    if ignore_errors:
                        logger.error(
                            "failed to serialize file record %d (%s), "
                            "continuing",
                            i, exc)
                    else:
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quiting",
                            i, exc)
                        sink.write("]")
                        sink.write(tail)
                        if indented:
                            sink.write("\n")
                        raise

            # Close up the GeoJSON after writing all features.
            sink.write("]")
            sink.write(tail)
            if indented:
                sink.write("\n")

        else:
            # Buffer GeoJSON data at the collection level. The default.
            collection = {
                'type': 'FeatureCollection',
                'features': []}
            if with_ld_context:
                collection['@context'] = helpers.make_ld_context(
                    add_ld_context_item)

            head, tail = json.dumps(collection, **dump_kwds).split('[]')
            sink.write(head)
            sink.write("[")
            sink.write(",".join(source))
            sink.write("]")
            sink.write(tail)
            sink.write("\n")

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
Example #35
def run_shp_to_csv(src, reader_projection, reader_projection_preserve_units,
                   writer_delimiter, writer_fieldname, writer_quotechar,
                   writer_exceed_field_size_limit):
    # Configure the projection from the source to WGS-84.
    p1 = pyproj.Proj(init=reader_projection,
                     preserve_units=reader_projection_preserve_units)
    p2 = pyproj.Proj(init=DEFAULT_PROJECTION_READER_)  # WGS-84
    transform = functools.partial(pyproj.transform, p1, p2)

    # Initialize the ESRI Shapefile reader.
    shapereader = shapefile.Reader(src)

    # Initialize the list of fields in the ESRI Shapefile.
    shapereader_fieldnames = list(
        map(lambda field: field[0], list(shapereader.fields)))

    # Remove the first element of the list.
    del shapereader_fieldnames[0]  # DeletionFlag

    # Validate the fields for the CSV writer.
    if writer_fieldname in shapereader_fieldnames:
        raise click.ClickException(
            'Invalid value for "writer-fieldname": Duplicate field: {0}'.
            format(writer_fieldname))

    # Validation successful. Initialize the list of fields for the CSV writer.
    csvwriter_fieldnames = shapereader_fieldnames + [writer_fieldname]

    # Initialize the CSV writer for the standard-output stream.
    csvwriter = csv.DictWriter(click.get_text_stream('stdout'),
                               delimiter=writer_delimiter,
                               fieldnames=csvwriter_fieldnames,
                               quotechar=writer_quotechar)

    csvwriter.writeheader()

    if shapereader.shapeType == shapefile.POLYGON:
        for shapeRecord in shapereader.shapeRecords():
            assert shapeRecord.shape.shapeType == shapefile.POLYGON

            # Initialize new CSV row.
            csvrow = dict(zip(shapereader_fieldnames,
                              list(shapeRecord.record)))

            # Look-up the value of the field.
            #
            # NOTE The length of the string may exceed the CSV field size limit.
            writer_fieldname_value = str(
                shapely.geometry.Polygon(
                    map(lambda coords: transform(*coords),
                        list(shapeRecord.shape.points))))

            # Test if the length of the string exceeds the CSV field size limit.
            if (len(writer_fieldname_value) > csv.field_size_limit()
                ) and not writer_exceed_field_size_limit:
                # If the length of the string exceeds the CSV field size limit
                # and the flag is set to `False`, then do not write the CSV row.
                #
                # TODO Warning: String length exceeds CSV field size limit.
                continue

            csvrow[writer_fieldname] = writer_fieldname_value

            # Write the row (to the standard output stream).
            csvwriter.writerow(csvrow)
    else:
        # TODO Warning: Invalid ESRI Shapefile (not POLYGON).
        pass

    # Done!
    return
Example #36
def bounds(ctx, input, precision, indent, compact, projection, dst_crs,
           sequence, use_rs, geojson_type):
    """Write bounding boxes to stdout as GeoJSON for use with, e.g.,
    geojsonio

      $ rio bounds *.tif | geojsonio
    
    If a destination crs is passed via dst_crs, it takes precedence over
    the projection parameter.
    """
    import rasterio.warp
    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    stdout = click.get_text_stream('stdout')

    # This is the generator for (feature, bbox) pairs.
    class Collection(object):
        def __init__(self):
            self._xs = []
            self._ys = []

        @property
        def bbox(self):
            return min(self._xs), min(self._ys), max(self._xs), max(self._ys)

        def __call__(self):
            for i, path in enumerate(input):
                with rasterio.open(path) as src:
                    bounds = src.bounds
                    if dst_crs:
                        bbox = transform_bounds(src.crs, dst_crs, *bounds)
                    elif projection == 'mercator':
                        bbox = transform_bounds(src.crs, {'init': 'epsg:3857'},
                                                *bounds)
                    elif projection == 'geographic':
                        bbox = transform_bounds(src.crs, {'init': 'epsg:4326'},
                                                *bounds)
                    else:
                        bbox = bounds

                if precision >= 0:
                    bbox = [round(b, precision) for b in bbox]

                yield {
                    'type': 'Feature',
                    'bbox': bbox,
                    'geometry': {
                        'type':
                        'Polygon',
                        'coordinates':
                        [[[bbox[0], bbox[1]], [bbox[2], bbox[1]],
                          [bbox[2], bbox[3]], [bbox[0], bbox[3]],
                          [bbox[0], bbox[1]]]]
                    },
                    'properties': {
                        'id': str(i),
                        'title': path,
                        'filename': os.path.basename(path)
                    }
                }

                self._xs.extend(bbox[::2])
                self._ys.extend(bbox[1::2])

    col = Collection()
    # Use the generator defined above as input to the generic output
    # writing function.
    try:
        with rasterio.drivers(CPL_DEBUG=verbosity > 2):
            write_features(stdout,
                           col,
                           sequence=sequence,
                           geojson_type=geojson_type,
                           use_rs=use_rs,
                           **dump_kwds)

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
Example #37
def cli(text):
    if not text:
        text = click.get_text_stream('stdin').read().strip()
    emoji_text = emojify(text)
    click.echo(emoji_text)
Example #38
def main(filename, html, css, name, process, args, raw, csv, pdb):
    tmp = None
    try:
        if filename == "-":
            module = interpreter.interpret(
                source=click.get_text_stream('stdin').read())
        else:
            module = interpreter.interpret(path=Path(filename))
        if name:
            if name not in module.exports:
                raise RuntimeError(f"{name} not in {filename} exports")
            value = module.exports[name]
        else:
            if module.default_export is interpreter.missing and module.value is interpreter.missing:
                raise RuntimeError(f"{filename} has no default export")
            if module.default_export is not interpreter.missing:
                value = module.default_export
            else:
                value = module.value

        if isinstance(value, Callable):
            arg_names = ', '.join(f'"{n}"' for n in value.arg_names)
            if len(args) != len(value.arg_names):
                raise click.UsageError(
                    f"Expected input argument{'s' if len(arg_names) > 1 else ''}: "
                    f"{arg_names}, see --help")

        if args:
            json_args = [json.load(arg) for arg in args]
            value = value(*json_args)

        if len([n for n in [html, css, process] if n]) > 1:
            raise RuntimeError('can only do 1 post-process at a time')
        if html:
            return print(dnjs_html.to_html(value))
        if css:
            return print(dnjs_css.to_css(value))
        if process:
            f = interpreter.interpret(source=process).value
            assert isinstance(f, Callable)
            value = builtins.undefineds_to_none(f(value))
        if csv:
            assert isinstance(value, list)
            for row in value:
                assert isinstance(row, list)
                if raw:
                    print(",".join(rawify(n) for n in row))
                else:
                    print(",".join(json.dumps(n) for n in row))
            return
        if raw:
            return print(rawify(value))

        print(json.dumps(value))
    except:
        if pdb:
            import pdb
            pdb.post_mortem()
        raise

    if tmp is not None:
        tmp.close()
Example #39
def main(
    in_vcf: str,
    out_vcf: str,
    overwrite: bool,
    verbose: bool,
    min_qual: float,
    min_depth: int,
    min_fed: float,
    max_depth: int,
    min_strand_bias: int,
    min_bqb: float,
    min_mqb: float,
    min_rpb: float,
    min_rpbz: Optional[float],
    max_rpbz: Optional[float],
    max_scbz: Optional[float],
    max_sgb: float,
    min_vdb: float,
    hist: bool,
    min_frs: float,
    min_mq: int,
):
    """Apply the following filters to a VCF:\n
    - Minimum proportion of the expected (median) depth\n
    - Maximum proportion of the expected (median) depth\n
    - Minimum QUAL threshold\n
    - Minimum Strand bias percentage
    """
    log_level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        format="%(asctime)s [%(levelname)s]: %(message)s", level=log_level
    )

    vcf_reader = VCF(in_vcf)
    if not vcf_reader.contains(Tags.Depth.value):
        raise DepthTagError(f"Depth tag {Tags.Depth} not found in header")

    if (not vcf_reader.contains(str(Tags.StrandDepth))) and min_strand_bias:
        logging.warning(
            f"Strand depth tag {Tags.StrandDepth} not found in header. "
            f"Turning off strand bias filter..."
        )
        min_strand_bias = 0

    logging.info("Calculating expected (median) depth...")
    depths = []
    quals = []
    for v in vcf_reader:
        depths.append(get_depth(v))
        quals.append(v.QUAL or 0)

    median_depth = np.median(depths)
    logging.info(f"Expected depth: {median_depth}")

    if hist:
        import histoprint

        tick_format = "% .1f"
        logging.info("Depth histogram:")
        histoprint.print_hist(
            np.histogram(depths, bins=HIST_BINS),
            title="Depth histogram",
            summary=True,
            tick_format=tick_format,
            file=click.get_text_stream("stderr"),
        )

        logging.info("QUAL histogram")
        histoprint.print_hist(
            np.histogram(quals, bins=HIST_BINS),
            title="QUAL histogram",
            summary=True,
            tick_format=tick_format,
            file=click.get_text_stream("stderr"),
        )

    assessor = Filter(
        expected_depth=int(median_depth),
        min_qual=min_qual,
        min_depth=min_depth,
        min_fed=min_fed,
        max_depth=max_depth,
        min_strand_bias=min_strand_bias,
        min_bqb=min_bqb,
        min_mqb=min_mqb,
        min_rpb=min_rpb,
        max_sgb=max_sgb,
        min_vdb=min_vdb,
        min_frs=min_frs,
        min_mq=min_mq,
        min_rpbz=min_rpbz,
        max_rpbz=max_rpbz,
        max_scbz=max_scbz,
    )

    vcf_reader = VCF(in_vcf)
    assessor.add_filters_to_header(vcf_reader)

    if not Path(out_vcf).parent.exists():
        Path(out_vcf).parent.mkdir(exist_ok=True, parents=True)

    vcf_writer = Writer(out_vcf, tmpl=vcf_reader)

    stats = Counter()
    logging.info("Filtering variants...")
    for variant in vcf_reader:
        filter_status = assessor.filter_status(variant)

        if (
            (not overwrite)
            and variant.FILTER is not None
            and filter_status != str(Tags.Pass)
        ):
            current_filter = variant.FILTER.rstrip(";")
            variant.FILTER = f"{current_filter};{filter_status}"
        else:
            variant.FILTER = filter_status

        vcf_writer.write_record(variant)

        stats.update(filter_status.split(";"))

    vcf_reader.close()
    vcf_writer.close()

    logging.info("FILTER STATISTICS")
    logging.info("=================")
    for filt, count in stats.items():
        logging.info(f"Filter: {filt}\tCount: {count}")

    logging.info("Done!")
Example #40
        for name in _get_variants(name):
            if _filter_name(name):
                tokens = tokenizer.tokenize(name)
                if _filter_tokens(tokens):
                    tokenized_names.append(tokens)
    #print(tokenized_names)
    return tokenized_names


@dict_cli.command()
@click.argument('jochem', type=click.File('r', encoding='utf8'))
@click.option('--output',
              '-o',
              type=click.File('w', encoding='utf8'),
              help='Dictionary file.',
              default=click.get_text_stream('stdout'))
@click.option('--csoutput',
              '-c',
              type=click.File('w', encoding='utf8'),
              help='Case-sensitive dictionary file.',
              default=click.get_text_stream('stdout'))
@click.pass_obj
def prepare_jochem(ctx, jochem, output, csoutput):
    """Process and filter jochem file to produce list of names for dictionary."""
    click.echo('chemdataextractor.dict.prepare_jochem')
    for i, line in enumerate(jochem):
        print('JC%s' % i)
        if line.startswith('TM '):
            if line.endswith('	@match=ci\n'):
                for tokens in _make_tokens(line[3:-11]):
                    output.write(' '.join(tokens))
Example #41
File: cli.py Project: elin1231/htmap
def _read_tags_from_stdin(ctx, param, value):
    if not value and not click.get_text_stream("stdin").isatty():
        return click.get_text_stream("stdin").read().split()
    else:
        return value
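A callback like this is typically attached to an argument so the command accepts values either on the command line or piped through stdin. A hedged sketch of the wiring (the command name is illustrative; htmap's actual CLI may differ):

import click

def _read_tags_from_stdin(ctx, param, value):
    # Same callback as above: fall back to stdin when nothing was passed
    # on the command line and stdin is a pipe rather than a terminal.
    if not value and not click.get_text_stream('stdin').isatty():
        return click.get_text_stream('stdin').read().split()
    return value

@click.command()
@click.argument('tags', nargs=-1, callback=_read_tags_from_stdin)
def remove(tags):
    """Remove the given tags (from argv, or piped via stdin)."""
    for tag in tags:
        click.echo('removing %s' % tag)

if __name__ == '__main__':
    remove()   # e.g.  echo "a b" | python remove_tags.py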
Example #42
def get_name(ctx, param, value):
    if not value and not click.get_text_stream("stdin").isatty():
        return click.get_text_stream("stdin").read().split()
    else:
        return value
Example #43
def shapes(ctx, input, output, precision, indent, compact, projection,
           sequence, use_rs, geojson_type, band, bandidx, sampling,
           with_nodata, as_mask):
    """Extracts shapes from one band or mask of a dataset and writes
    them out as GeoJSON. Unless otherwise specified, the shapes will be
    transformed to WGS 84 coordinates.

    The default action of this command is to extract shapes from the
    first band of the input dataset. The shapes are polygons bounding
    contiguous regions (or features) of the same raster value. This
    command performs poorly for int16 or float type datasets.

    Bands other than the first can be specified using the `--bidx`
    option:

      $ rio shapes --bidx 3 tests/data/RGB.byte.tif

    The valid data footprint of a dataset's i-th band can be extracted
    by using the `--mask` and `--bidx` options:

      $ rio shapes --mask --bidx 1 tests/data/RGB.byte.tif

    Omitting the `--bidx` option results in a footprint extracted from
    the conjunction of all band masks. This is generally smaller than
    any individual band's footprint.

    A dataset band may be analyzed as though it were a binary mask with
    the `--as-mask` option:

      $ rio shapes --as-mask --bidx 1 tests/data/RGB.byte.tif
    """
    # These import numpy, which we don't want to do unless it's needed.
    import numpy
    import rasterio.features
    import rasterio.warp

    verbosity = ctx.obj['verbosity'] if ctx.obj else 1
    logger = logging.getLogger('rio')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')

    stdout = click.open_file(
        output, 'w') if output else click.get_text_stream('stdout')

    bidx = 1 if bandidx is None and band else bandidx

    # This is the generator for (feature, bbox) pairs.
    class Collection(object):
        def __init__(self):
            self._xs = []
            self._ys = []

        @property
        def bbox(self):
            return min(self._xs), min(self._ys), max(self._xs), max(self._ys)

        def __call__(self):
            with rasterio.open(input) as src:
                if bidx is not None and bidx > src.count:
                    raise ValueError('bidx is out of range for raster')

                img = None
                msk = None

                # Adjust transforms.
                transform = src.affine
                if sampling > 1:
                    # Decimation of the raster produces a georeferencing
                    # shift that we correct with a translation.
                    transform *= Affine.translation(src.width % sampling,
                                                    src.height % sampling)
                    # And follow by scaling.
                    transform *= Affine.scale(float(sampling))

                # Most of the time, we'll use the valid data mask.
                # We skip reading it if we're extracting every possible
                # feature (even invalid data features) from a band.
                if not band or (band and not as_mask and not with_nodata):
                    if sampling == 1:
                        msk = src.read_masks(bidx)
                    else:
                        msk_shape = (src.height // sampling,
                                     src.width // sampling)
                        if bidx is None:
                            msk = numpy.zeros((src.count, ) + msk_shape,
                                              'uint8')
                        else:
                            msk = numpy.zeros(msk_shape, 'uint8')
                        msk = src.read_masks(bidx, msk)

                    if bidx is None:
                        msk = numpy.logical_or.reduce(msk).astype('uint8')

                    # Possibly overridden below.
                    img = msk

                # Read the band data unless the --mask option is given.
                if band:
                    if sampling == 1:
                        img = src.read(bidx, masked=False)
                    else:
                        img = numpy.zeros(
                            (src.height // sampling, src.width // sampling),
                            dtype=src.dtypes[src.indexes.index(bidx)])
                        img = src.read(bidx, img, masked=False)

                # If --as-mask option was given, convert the image
                # to a binary image. This reduces the number of shape
                # categories to 2 and likely reduces the number of
                # shapes.
                if as_mask:
                    tmp = numpy.ones_like(img, 'uint8') * 255
                    tmp[img == 0] = 0
                    img = tmp
                    if not with_nodata:
                        msk = tmp

                # Transform the raster bounds.
                bounds = src.bounds
                xs = [bounds[0], bounds[2]]
                ys = [bounds[1], bounds[3]]
                if projection == 'geographic':
                    xs, ys = rasterio.warp.transform(src.crs,
                                                     {'init': 'epsg:4326'}, xs,
                                                     ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                self._xs = xs
                self._ys = ys

                # Prepare keyword arguments for shapes().
                kwargs = {'transform': transform}
                if not with_nodata:
                    kwargs['mask'] = msk

                src_basename = os.path.basename(src.name)

                # Yield GeoJSON features.
                for i, (g, val) in enumerate(
                        rasterio.features.shapes(img, **kwargs)):
                    if projection == 'geographic':
                        g = rasterio.warp.transform_geom(
                            src.crs,
                            'EPSG:4326',
                            g,
                            antimeridian_cutting=True,
                            precision=precision)
                    xs, ys = zip(*coords(g))
                    yield {
                        'type': 'Feature',
                        'id': "{0}:{1}".format(src_basename, i),
                        'properties': {
                            'val': val,
                            'filename': src_basename
                        },
                        'bbox': [min(xs), min(ys),
                                 max(xs), max(ys)],
                        'geometry': g
                    }

    if not sequence:
        geojson_type = 'collection'

    try:
        with rasterio.drivers(CPL_DEBUG=(verbosity > 2)):
            write_features(stdout,
                           Collection(),
                           sequence=sequence,
                           geojson_type=geojson_type,
                           use_rs=use_rs,
                           **dump_kwds)
    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
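For reference, a minimal standalone sketch of the rasterio.features.shapes()
pattern that rio shapes wraps (assuming the sample path from the docstring
above; newer rasterio spells the georeferencing transform src.transform
rather than the src.affine used in this snippet):

import rasterio
import rasterio.features

with rasterio.open('tests/data/RGB.byte.tif') as src:
    band = src.read(1)
    msk = src.read_masks(1)  # 255 where band 1 has valid data, 0 at nodata
    for geom, val in rasterio.features.shapes(band, mask=msk,
                                              transform=src.transform):
        # geom is a GeoJSON-like mapping in the dataset's CRS; val is the
        # raster value of the polygonized region.
        print(val, geom['type'])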
Example #44
0
    def get_stream(self, verbosity: int = 0):
        if verbosity <= self.verbosity:
            return click.get_text_stream('stdout')
        else:
            return null_stream()
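The null_stream() helper is not shown in this snippet; a plausible minimal
stand-in (an assumption, not the project's actual code) could be:

import io

def null_stream():
    # A writable text stream whose output is simply discarded, used when
    # the requested verbosity exceeds the configured threshold.
    return io.StringIO()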
Example #45
0
def run_append_to_csv(code_length, codec, reader_delimiter, reader_fieldname,
                      reader_quotechar, writer_delimiter, writer_fieldname,
                      writer_quotechar, wkt, wkt_fieldname_bbox,
                      wkt_fieldname_centroid):
    # Look-up the codec module.
    codec_module = CODEC_MODULES_BY_INDEX_[codec]

    # Initialize the CSV reader for the standard-input stream.
    reader_kwargs = {
        'delimiter': reader_delimiter,
        'quotechar': reader_quotechar,
    }
    # NOTE Use of 'csv.DictReader' assumes presence of CSV header row.
    reader = csv.DictReader(click.get_text_stream('stdin'), **reader_kwargs)

    # Initialize the list of fields for the CSV writer.
    #
    # NOTE Use 'list' function to clone list of fields (e.g., so that changes do
    # not affect the behavior of the CSV reader).
    writer_fieldnames = list(reader.fieldnames)

    # Validate the fields for the CSV reader and writer.
    if reader_fieldname not in writer_fieldnames:
        raise click.ClickException(
            'Invalid value for "reader-fieldname": Field not found: {0}'.
            format(reader_fieldname))
    elif writer_fieldname in writer_fieldnames:
        raise click.ClickException(
            'Invalid value for "writer-fieldname": Duplicate field: {0}'.
            format(writer_fieldname))

    # Validation successful. Add the field for the CSV writer to the list
    # (safely).
    writer_fieldnames.append(writer_fieldname)

    if wkt:
        # Validate WKT fields.
        if wkt_fieldname_bbox in writer_fieldnames:
            raise click.ClickException(
                'Invalid value for "wkt-fieldname-bbox": Duplicate field: {0}'.
                format(wkt_fieldname_bbox))
        if wkt_fieldname_centroid in writer_fieldnames:
            raise click.ClickException(
                'Invalid value for "wkt-fieldname-centroid": Duplicate field: {0}'
                .format(wkt_fieldname_centroid))

        # Validation successful. Add the WKT fields for the CSV writer to the
        # list (safely).
        writer_fieldnames.append(wkt_fieldname_bbox)
        writer_fieldnames.append(wkt_fieldname_centroid)

    # Initialize the CSV writer for the standard-output stream.
    writer_kwargs = {
        'delimiter': writer_delimiter,
        'quotechar': writer_quotechar,
        # NOTE Use of 'csv.QUOTE_NONNUMERIC' ensures that UBID is quoted.
        'quoting': csv.QUOTE_NONNUMERIC,
    }
    writer = csv.DictWriter(click.get_text_stream('stdout'), writer_fieldnames,
                            **writer_kwargs)

    # NOTE If at least one row is written, then write the header row. Otherwise,
    # do not write the header row (to the standard-output stream).
    writer_writeheader_called = False

    # Initialize the CSV writer for the standard-error stream.
    err_writer = csv.DictWriter(click.get_text_stream('stderr'),
                                reader.fieldnames, **writer_kwargs)

    # NOTE If at least one row is written, then write the header row. Otherwise,
    # do not write the header row (to the standard-error stream).
    err_writer_writeheader_called = False

    for row in reader:
        # Look-up the value of the field.
        reader_fieldname_value = row[reader_fieldname]

        try:
            # Parse the value of the field, assuming Well-known Text (WKT)
            # format, and then encode the result as a UBID.
            writer_fieldname_value = codec_module.encode(
                *buildingid.wkt.parse(reader_fieldname_value),
                codeLength=code_length)

            if wkt:
                # Decode the UBID.
                writer_fieldname_value_CodeArea = codec_module.decode(
                    writer_fieldname_value)

                # Encode the UBID bounding box and centroid as WKT.
                row[wkt_fieldname_bbox] = str(
                    shapely.geometry.box(
                        writer_fieldname_value_CodeArea.longitudeLo,
                        writer_fieldname_value_CodeArea.latitudeLo,
                        writer_fieldname_value_CodeArea.longitudeHi,
                        writer_fieldname_value_CodeArea.latitudeHi))
                row[wkt_fieldname_centroid] = str(
                    shapely.geometry.box(
                        writer_fieldname_value_CodeArea.child.longitudeLo,
                        writer_fieldname_value_CodeArea.child.latitudeLo,
                        writer_fieldname_value_CodeArea.child.longitudeHi,
                        writer_fieldname_value_CodeArea.child.latitudeHi))
        except Exception:
            # If an exception is raised (and caught), then write the CSV header
            # row (to the standard-error stream).
            if not err_writer_writeheader_called:
                # Set the flag.
                err_writer_writeheader_called = True

                err_writer.writeheader()

            # Write the row (to the standard-error stream).
            err_writer.writerow(row)
        else:
            # Set the value of the field.
            row[writer_fieldname] = writer_fieldname_value

            # Write the CSV header row (to the standard-output stream).
            if not writer_writeheader_called:
                # Set the flag.
                writer_writeheader_called = True

                writer.writeheader()

            # Write the row (to the standard output stream).
            writer.writerow(row)

    # Done!
    return
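Stripped of the UBID specifics, the snippet above is the standard
read/validate/append/write CSV pipeline; a minimal sketch (the new_field
column and its derivation are hypothetical):

import csv
import sys

reader = csv.DictReader(sys.stdin)    # assumes a CSV header row
fieldnames = list(reader.fieldnames)  # clone, so the reader is unaffected
fieldnames.append('new_field')        # hypothetical output column

writer = csv.DictWriter(sys.stdout, fieldnames, quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for row in reader:
    row['new_field'] = row[fieldnames[0]].upper()  # hypothetical derived value
    writer.writerow(row)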
Example #46
0
def test_confirm_destructive_query_notty(executor):
    stdin = click.get_text_stream('stdin')
    assert stdin.isatty() is False

    sql = 'drop database foo;'
    assert confirm_destructive_query(sql) is None
Example #47
0
def cli(database, user, host, port, socket, password, dbname,
        version, verbose, prompt, logfile, defaults_group_suffix,
        defaults_file, login_path, auto_vertical_output, local_infile,
        ssl_ca, ssl_capath, ssl_cert, ssl_key, ssl_cipher,
        ssl_verify_server_cert, table, csv, warn, execute, myclirc, dsn,
        list_dsn, ssh_user, ssh_host, ssh_port, ssh_password,
        ssh_key_filename):
    """A MySQL terminal client with auto-completion and syntax highlighting.

    \b
    Examples:
      - mycli my_database
      - mycli -u my_user -h my_host.com my_database
      - mycli mysql://my_user@my_host.com:3306/my_database

    """

    if version:
        print('Version:', __version__)
        sys.exit(0)

    mycli = MyCli(prompt=prompt, logfile=logfile,
                  defaults_suffix=defaults_group_suffix,
                  defaults_file=defaults_file, login_path=login_path,
                  auto_vertical_output=auto_vertical_output, warn=warn,
                  myclirc=myclirc)
    if list_dsn:
        try:
            alias_dsn = mycli.config['alias_dsn']
        except KeyError:
            click.secho('Invalid DSNs found in the config file. '
                        'Please check the "[alias_dsn]" section in myclirc.',
                        err=True, fg='red')
            exit(1)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)
        for alias, value in alias_dsn.items():
            if verbose:
                click.secho("{} : {}".format(alias, value))
            else:
                click.secho(alias)
        sys.exit(0)
    # Choose which ever one has a valid value.
    database = dbname or database

    ssl = {
            'ca': ssl_ca and os.path.expanduser(ssl_ca),
            'cert': ssl_cert and os.path.expanduser(ssl_cert),
            'key': ssl_key and os.path.expanduser(ssl_key),
            'capath': ssl_capath,
            'cipher': ssl_cipher,
            'check_hostname': ssl_verify_server_cert,
            }

    # remove empty ssl options
    ssl = {k: v for k, v in ssl.items() if v is not None}

    dsn_uri = None

    # Treat the database argument as a DSN alias if we're missing
    # other connection information.
    if (mycli.config['alias_dsn'] and database and '://' not in database
            and not any([user, password, host, port, login_path])):
        dsn, database = database, ''

    if database and '://' in database:
        dsn_uri, database = database, ''

    if dsn:
        try:
            dsn_uri = mycli.config['alias_dsn'][dsn]
        except KeyError:
            click.secho('Could not find the specified DSN in the config file. '
                        'Please check the "[alias_dsn]" section in your '
                        'myclirc.', err=True, fg='red')
            exit(1)
        else:
            mycli.dsn_alias = dsn

    if dsn_uri:
        uri = urlparse(dsn_uri)
        if not database:
            database = uri.path[1:]  # ignore the leading fwd slash
        if not user and uri.username is not None:
            user = unquote(uri.username)
        if not password and uri.password is not None:
            password = unquote(uri.password)
        if not host:
            host = uri.hostname
        if not port:
            port = uri.port

    if not paramiko and ssh_host:
        click.secho(
            "Cannot use SSH transport because paramiko isn't installed, "
            "please install paramiko or don't use --ssh-host=",
            err=True, fg="red"
        )
        exit(1)

    ssh_key_filename = ssh_key_filename and os.path.expanduser(ssh_key_filename)

    mycli.connect(
        database=database,
        user=user,
        passwd=password,
        host=host,
        port=port,
        socket=socket,
        local_infile=local_infile,
        ssl=ssl,
        ssh_user=ssh_user,
        ssh_host=ssh_host,
        ssh_port=ssh_port,
        ssh_password=ssh_password,
        ssh_key_filename=ssh_key_filename
    )

    mycli.logger.debug('Launch Params: \n'
            '\tdatabase: %r'
            '\tuser: %r'
            '\thost: %r'
            '\tport: %r', database, user, host, port)

    #  --execute argument
    if execute:
        try:
            if csv:
                mycli.formatter.format_name = 'csv'
            elif not table:
                mycli.formatter.format_name = 'tsv'

            mycli.run_query(execute)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)

    if sys.stdin.isatty():
        mycli.run_cli()
    else:
        stdin = click.get_text_stream('stdin')
        try:
            stdin_text = stdin.read()
        except MemoryError:
            click.secho('Failed! Ran out of memory.', err=True, fg='red')
            click.secho('You might want to try the official mysql client.', err=True, fg='red')
            click.secho('Sorry... :(', err=True, fg='red')
            exit(1)

        try:
            sys.stdin = open('/dev/tty')
        except (IOError, OSError):
            mycli.logger.warning('Unable to open TTY as stdin.')

        if (mycli.destructive_warning and
                confirm_destructive_query(stdin_text) is False):
            exit(0)
        try:
            new_line = True

            if csv:
                mycli.formatter.format_name = 'csv'
            elif not table:
                mycli.formatter.format_name = 'tsv'

            mycli.run_query(stdin_text, new_line=new_line)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)
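The DSN branch above back-fills missing connection parameters from a URI;
the urlparse/unquote mechanics in isolation:

from urllib.parse import unquote, urlparse

uri = urlparse('mysql://my_user:p%40ss@my_host.com:3306/my_database')
print(unquote(uri.username))   # my_user
print(unquote(uri.password))   # p@ss
print(uri.hostname, uri.port)  # my_host.com 3306
print(uri.path[1:])            # my_database (leading slash stripped)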
Example #48
0
def bounds(ctx, input, precision, indent, compact, projection, sequence,
           use_rs, geojson_type):
    """Write bounding boxes to stdout as GeoJSON for use with, e.g.,
    geojsonio

      $ rio bounds *.tif | geojsonio

    """
    import rasterio.warp
    verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1
    logger = logging.getLogger('rio')
    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    stdout = click.get_text_stream('stdout')

    # This is the generator for (feature, bbox) pairs.
    class Collection(object):
        def __init__(self):
            self._xs = []
            self._ys = []

        @property
        def bbox(self):
            return min(self._xs), min(self._ys), max(self._xs), max(self._ys)

        def __call__(self):
            for i, path in enumerate(input):
                with rasterio.open(path) as src:
                    bounds = src.bounds
                    xs = [bounds[0], bounds[2]]
                    ys = [bounds[1], bounds[3]]
                    if projection == 'geographic':
                        xs, ys = rasterio.warp.transform(
                            src.crs, {'init': 'epsg:4326'}, xs, ys)
                    if projection == 'mercator':
                        xs, ys = rasterio.warp.transform(
                            src.crs, {'init': 'epsg:3857'}, xs, ys)
                if precision >= 0:
                    xs = [round(v, precision) for v in xs]
                    ys = [round(v, precision) for v in ys]
                bbox = [min(xs), min(ys), max(xs), max(ys)]

                yield {
                    'type': 'Feature',
                    'bbox': bbox,
                    'geometry': {
                        'type':
                        'Polygon',
                        'coordinates': [[[xs[0], ys[0]], [xs[1], ys[0]],
                                         [xs[1], ys[1]], [xs[0], ys[1]],
                                         [xs[0], ys[0]]]]
                    },
                    'properties': {
                        'id': str(i),
                        'title': path,
                        'filename': os.path.basename(path)
                    }
                }

                self._xs.extend(bbox[::2])
                self._ys.extend(bbox[1::2])

    col = Collection()
    # Use the generator defined above as input to the generic output
    # writing function.
    try:
        with rasterio.drivers(CPL_DEBUG=verbosity > 2):
            write_features(stdout,
                           col,
                           sequence=sequence,
                           geojson_type=geojson_type,
                           use_rs=use_rs,
                           **dump_kwds)
        sys.exit(0)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
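The callable Collection class exists so that the caller can still read
col.bbox after the feature generator has been consumed; the same
stateful-generator pattern in miniature:

class Running(object):
    def __init__(self):
        self._vals = []

    @property
    def total(self):
        return sum(self._vals)

    def __call__(self):
        for v in (1, 2, 3):
            self._vals.append(v)
            yield v

r = Running()
for _ in r():
    pass
print(r.total)  # 6 -- aggregate state is available once iteration finishes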
Example #49
0
def test(ctx):
    """ Test
    """
    stdin = click.get_text_stream('stdin').read()
    with click.progressbar(length=1, label='Unzipping archive') as count:
        click.echo(f'{count} {stdin}')
Example #50
0
def dump(ctx, input, encoding, precision, indent, compact, record_buffered,
         ignore_errors, with_ld_context, add_ld_context_item, layer):
    """Dump a dataset either as a GeoJSON feature collection (the default)
    or a sequence of GeoJSON features."""

    logger = logging.getLogger(__name__)
    sink = click.get_text_stream('stdout')

    dump_kwds = {'sort_keys': True}
    if indent:
        dump_kwds['indent'] = indent
    if compact:
        dump_kwds['separators'] = (',', ':')
    item_sep = ',' if compact else ', '

    open_kwds = {}
    if encoding:
        open_kwds['encoding'] = encoding
    if layer:
        open_kwds['layer'] = layer

    def transformer(crs, feat):
        tg = partial(transform_geom,
                     crs,
                     'EPSG:4326',
                     antimeridian_cutting=True,
                     precision=precision)
        feat['geometry'] = tg(feat['geometry'])
        return feat

    try:
        with fiona.open(input, **open_kwds) as source:
            meta = source.meta
            meta['fields'] = dict(source.schema['properties'].items())

            if record_buffered:
                # Buffer GeoJSON data at the feature level for smaller
                # memory footprint.
                indented = bool(indent)
                rec_indent = "\n" + " " * (2 * (indent or 0))

                collection = {
                    'type': 'FeatureCollection',
                    'fiona:schema': meta['schema'],
                    'fiona:crs': meta['crs'],
                    'features': []
                }
                if with_ld_context:
                    collection['@context'] = helpers.make_ld_context(
                        add_ld_context_item)

                head, tail = json.dumps(collection, **dump_kwds).split('[]')

                sink.write(head)
                sink.write("[")

                itr = iter(source)

                # Try the first record.
                try:
                    i, first = 0, next(itr)
                    first = transformer(source.crs, first)
                    if with_ld_context:
                        first = helpers.id_record(first)
                    if indented:
                        sink.write(rec_indent)
                    sink.write(
                        json.dumps(first,
                                   **dump_kwds).replace("\n", rec_indent))
                except StopIteration:
                    pass
                except Exception as exc:
                    # Ignoring errors is *not* the default.
                    if ignore_errors:
                        logger.error(
                            "failed to serialize file record %d (%s), "
                            "continuing", i, exc)
                    else:
                        # Log error and close up the GeoJSON, leaving it
                        # more or less valid no matter what happens above.
                        logger.critical(
                            "failed to serialize file record %d (%s), "
                            "quiting", i, exc)
                        sink.write("]")
                        sink.write(tail)
                        if indented:
                            sink.write("\n")
                        raise

                # Because trailing commas aren't valid in JSON arrays
                # we'll write the item separator before each of the
                # remaining features.
                for i, rec in enumerate(itr, 1):
                    rec = transformer(source.crs, rec)
                    try:
                        if with_ld_context:
                            rec = helpers.id_record(rec)
                        if indented:
                            sink.write(rec_indent)
                        sink.write(item_sep)
                        sink.write(
                            json.dumps(rec,
                                       **dump_kwds).replace("\n", rec_indent))
                    except Exception as exc:
                        if ignore_errors:
                            logger.error(
                                "failed to serialize file record %d (%s), "
                                "continuing", i, exc)
                        else:
                            logger.critical(
                                "failed to serialize file record %d (%s), "
                                "quiting", i, exc)
                            sink.write("]")
                            sink.write(tail)
                            if indented:
                                sink.write("\n")
                            raise

                # Close up the GeoJSON after writing all features.
                sink.write("]")
                sink.write(tail)
                if indented:
                    sink.write("\n")

            else:
                # Buffer GeoJSON data at the collection level. The default.
                collection = {
                    'type': 'FeatureCollection',
                    'fiona:schema': meta['schema'],
                    'fiona:crs': meta['crs']
                }
                if with_ld_context:
                    collection['@context'] = helpers.make_ld_context(
                        add_ld_context_item)
                    collection['features'] = [
                        helpers.id_record(transformer(rec)) for rec in source
                    ]
                else:
                    collection['features'] = [
                        transformer(source.crs, rec) for rec in source
                    ]
                json.dump(collection, sink, **dump_kwds)

    except Exception:
        logger.exception("Exception caught during processing")
        raise click.Abort()
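The record-buffered branch streams a JSON array one element at a time
instead of materializing the whole collection; the head/tail trick in
isolation:

import json
import sys

items = ({'i': i} for i in range(3))  # stand-in for the feature iterator

head, tail = json.dumps({'type': 'FeatureCollection',
                         'features': []}).split('[]')
sys.stdout.write(head)
sys.stdout.write('[')
for i, item in enumerate(items):
    if i > 0:
        sys.stdout.write(', ')  # separator precedes every item after the first
    sys.stdout.write(json.dumps(item))
sys.stdout.write(']')
sys.stdout.write(tail + '\n')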
Example #51
0
File: main.py Project: upsight/mycli
def cli(database, user, host, port, socket, password, dbname,
        version, prompt, logfile, defaults_group_suffix, defaults_file,
        login_path, auto_vertical_output, local_infile, ssl_ca, ssl_capath,
        ssl_cert, ssl_key, ssl_cipher, ssl_verify_server_cert, table, warn):

    if version:
        print('Version:', __version__)
        sys.exit(0)

    mycli = MyCli(prompt=prompt, logfile=logfile,
                  defaults_suffix=defaults_group_suffix,
                  defaults_file=defaults_file, login_path=login_path,
                  auto_vertical_output=auto_vertical_output, warn=warn)

    # Choose which ever one has a valid value.
    database = database or dbname

    ssl = {
            'ca': ssl_ca and os.path.expanduser(ssl_ca),
            'cert': ssl_cert and os.path.expanduser(ssl_cert),
            'key': ssl_key and os.path.expanduser(ssl_key),
            'capath': ssl_capath,
            'cipher': ssl_cipher,
            'check_hostname': ssl_verify_server_cert,
            }

    # remove empty ssl options
    ssl = {k: v for (k, v) in ssl.items() if v is not None}
    if database and '://' in database:
        mycli.connect_uri(database, local_infile, ssl)
    else:
        mycli.connect(database, user, password, host, port, socket,
                      local_infile=local_infile, ssl=ssl)

    mycli.logger.debug('Launch Params: \n'
            '\tdatabase: %r'
            '\tuser: %r'
            '\thost: %r'
            '\tport: %r', database, user, host, port)

    if sys.stdin.isatty():
        mycli.run_cli()
    else:
        stdin = click.get_text_stream('stdin')
        stdin_text = stdin.read()

        try:
            sys.stdin = open('/dev/tty')
        except FileNotFoundError:
            mycli.logger.warning('Unable to open TTY as stdin.')

        if (mycli.destructive_warning and
                confirm_destructive_query(stdin_text) is False):
            exit(0)
        try:
            results = mycli.sqlexecute.run(stdin_text)
            for result in results:
                title, cur, headers, status = result
                table_format = mycli.table_format if table else None
                output = format_output(title, cur, headers, None, table_format)
                for line in output:
                    click.echo(line)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)
Example #52
0
def get_input(ctx, param, value):
    if not value and not click.get_text_stream('stdin').isatty():
        return click.get_text_stream('stdin').read().strip()
    else:
        return value
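A hypothetical command wiring get_input as a click callback, so the
argument falls back to piped stdin when it is omitted:

import click

@click.command()
@click.argument('text', required=False, callback=get_input)
def shout(text):
    # text comes from the command line, or from piped stdin when absent.
    click.echo((text or '').upper())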
Example #53
0
def indexes(obj, limit, pretty_print, connections, format, verbose, output):
    ctx = obj
    ctx['pretty_print'] = pretty_print
    ctx['connections'] = connections
    ctx['verbose'] = verbose

    # Hack - only support first URL for now
    url = obj['URLs'][0]
    r = requests.get(url)
    r.raise_for_status()

    is_db = 'db_name' in r.json()
    if is_db:
        db_name = r.json()['db_name']
        all_dbs = [db_name]
        ctx['URL'] = url.replace("/" + db_name, "")
    else:
        all_dbs_resp = requests.get(url + '/_all_dbs')
        all_dbs = map(lambda n: urllib.quote(n, safe=''), all_dbs_resp.json())
        ctx['URL'] = obj['URLs'][0]

    ctx['session'] = requests.session()

    index_stats = get_index_data(ctx, all_dbs)

    if limit > 0:
        sorted_index_stats = index_stats[:limit]
    else:
        sorted_index_stats = index_stats

    if len(output) > 0:
        format = 'csv'

    if format == 'json':
        click.echo(json.dumps(sorted_index_stats))
    else:
        table_headers = ([
            'db name', 'type', 'ddoc', 'index name', 'size', 'dbcopy',
            'reduce', 'custom_reduce'
        ])
        table = map(partial(format_stats, ctx), sorted_index_stats)

        if format == 'table':
            if limit > 0 and len(index_stats) > limit:
                click.echo('Showing {0} of {1} indexes'.format(
                    limit, len(index_stats)))
            else:
                click.echo('Showing all {0} indexes'.format(len(index_stats)))

            click.echo('\n')
            click.echo(tabulate(table, headers=table_headers))
        elif format == 'csv':
            if not output:
                writer = csv.writer(click.get_text_stream('stdout'),
                                    dialect='excel')
                writer.writerow(table_headers)
                writer.writerows(table)
            else:
                with open(output, 'wb') as csvfile:
                    writer = csv.writer(csvfile, dialect='excel')
                    writer.writerow(table_headers)
                    writer.writerows(table)
Example #54
0
def progressbar(*args, **kwargs):
    stderr_fobj = click.get_text_stream("stderr")

    return click.progressbar(*args, file=stderr_fobj, **kwargs)
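A hypothetical use of the wrapper; drawing progress on stderr keeps stdout
clean for data, so the command's output can still be piped or redirected:

with progressbar(range(1000), label='Processing') as bar:
    for _ in bar:
        pass  # per-item work goes here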
Example #55
0
def read_entities_from_stdin(_ctx, _param, value):
    if not value and not click.get_text_stream('stdin').isatty():
        data = click.get_text_stream('stdin').read()
        return split_shell_arguments(data)
    else:
        return value
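split_shell_arguments is not shown here; a plausible minimal implementation
(an assumption, not the project's actual helper) is shlex-based:

import shlex

def split_shell_arguments(data):
    # Tokenize the piped text using shell rules (quotes and escapes
    # are respected).
    return shlex.split(data)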
Example #56
0
def searcher(pattern, filename, flag_u, flag_c, flag_l, flag_s, flag_o, flag_n, stat):
    """
    """
    text_lines = click.get_text_stream('stdin')
    if filename:
        text_lines = click.open_file(filename, 'r')

    counter_of_matches = Counter()
    list_of_matches = list()
    lines_with_matches = 0

    # save all matches
    for line in text_lines:
        matches = re.findall(pattern, line)
        list_of_matches += matches
        lines_with_matches += (len(matches) > 0)
    counter_of_matches.update(list_of_matches)

    # flag -l : total count of LINES with at least one match
    if flag_l:
        click.echo(lines_with_matches)

    # flag --stats: statistics of matches
    elif stat:
        #
        output_stat_with_sorting_options(
            counter_of_matches.items(),
            len(list_of_matches),
            stat=stat,
            flag_s=flag_s,
            flag_o=flag_o,
        )

    # other flags
    else:
        # flags -u and -c: print total count of unique matches
        if flag_u and flag_c:
            click.echo(len(counter_of_matches.keys()))

        # flag -c: print total count of found matches
        elif flag_c:
            click.echo(len(list_of_matches))

        # flag -u: print unique matches only
        elif flag_u:
            out_data = counter_of_matches

            if flag_s or flag_o:
                output_data_with_sorting_options(
                    out_data.items(),
                    flag_s,
                    flag_o,
                )
            else:
                output_data(out_data)

        # flag -n: print first N matches
        elif flag_n:
            if 0 < flag_n < len(list_of_matches):
                out_data = list_of_matches[:flag_n]
            else:
                out_data = list_of_matches

            if flag_s or flag_o:
                output_data_with_sorting_options(
                    Counter(out_data).items(),
                    flag_s,
                    flag_o,
                )
            else:
                output_data(out_data)

        # no flag: print list of all matches
        else:

            if flag_s or flag_o:
                output_data_with_sorting_options(
                    counter_of_matches.items(),
                    flag_s,
                    flag_o,
                )
            else:
                output_data(list_of_matches)
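All of the flag handling above derives from a single Counter built over
re.findall results; the core bookkeeping in isolation:

import re
from collections import Counter

lines = ['spam eggs spam', 'eggs and ham']
matches = []
for line in lines:
    matches += re.findall(r'\w+', line)
counter = Counter(matches)
print(len(matches))  # 6 -- total matches (-c)
print(len(counter))  # 4 -- unique matches (-u -c)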
Example #57
0
File: main.py Project: viveksinha/mycli
def cli(database, user, host, port, socket, password, dbname, version, prompt,
        logfile, defaults_group_suffix, defaults_file, login_path,
        auto_vertical_output, local_infile, ssl_ca, ssl_capath, ssl_cert,
        ssl_key, ssl_cipher, ssl_verify_server_cert, table, csv, warn, execute,
        myclirc):
    """A MySQL terminal client with auto-completion and syntax highlighting.

    \b
    Examples:
      - mycli my_database
      - mycli -u my_user -h my_host.com my_database
      - mycli mysql://my_user@my_host.com:3306/my_database

    """

    if version:
        print('Version:', __version__)
        sys.exit(0)

    mycli = MyCli(prompt=prompt,
                  logfile=logfile,
                  defaults_suffix=defaults_group_suffix,
                  defaults_file=defaults_file,
                  login_path=login_path,
                  auto_vertical_output=auto_vertical_output,
                  warn=warn,
                  myclirc=myclirc)

    # Choose which ever one has a valid value.
    database = database or dbname

    ssl = {
        'ca': ssl_ca and os.path.expanduser(ssl_ca),
        'cert': ssl_cert and os.path.expanduser(ssl_cert),
        'key': ssl_key and os.path.expanduser(ssl_key),
        'capath': ssl_capath,
        'cipher': ssl_cipher,
        'check_hostname': ssl_verify_server_cert,
    }

    # remove empty ssl options
    ssl = {k: v for k, v in ssl.items() if v is not None}
    if database and '://' in database:
        mycli.connect_uri(database, local_infile, ssl)
    else:
        mycli.connect(database,
                      user,
                      password,
                      host,
                      port,
                      socket,
                      local_infile=local_infile,
                      ssl=ssl)

    mycli.logger.debug(
        'Launch Params: \n'
        '\tdatabase: %r'
        '\tuser: %r'
        '\thost: %r'
        '\tport: %r', database, user, host, port)

    #  --execute argument
    if execute:
        try:
            if csv:
                mycli.formatter.format_name = 'csv'
            elif not table:
                mycli.formatter.format_name = 'tsv'

            mycli.run_query(execute)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)

    if sys.stdin.isatty():
        mycli.run_cli()
    else:
        stdin = click.get_text_stream('stdin')
        stdin_text = stdin.read()

        try:
            sys.stdin = open('/dev/tty')
        except FileNotFoundError:
            mycli.logger.warning('Unable to open TTY as stdin.')

        if (mycli.destructive_warning
                and confirm_destructive_query(stdin_text) is False):
            exit(0)
        try:
            new_line = True

            if csv:
                mycli.formatter.format_name = 'csv'
                new_line = False
            elif not table:
                mycli.formatter.format_name = 'tsv'

            mycli.run_query(stdin_text, new_line=new_line)
            exit(0)
        except Exception as e:
            click.secho(str(e), err=True, fg='red')
            exit(1)
Example #58
0
    def run(self, **kwargs):
        """Run SDK component server.

        Calling this method checks command line arguments before
        component server starts, and then blocks the caller script
        until component server finishes.

        """

        self._args = kwargs

        # Standard input is read only when action name is given
        message = {}
        if kwargs.get('action'):
            contents = click.get_text_stream('stdin', encoding='utf8').read()

            # Add the JSON contents read from stdin to the message
            try:
                message['payload'] = json.loads(contents)
            except Exception:
                LOG.exception('Stdin input value is not valid JSON')
                os._exit(EXIT_ERROR)

            # Add action name to message
            message['action'] = kwargs['action']

        # When compact names are disabled, use the long payload field names
        if not self.compact_names:
            katana.payload.DISABLE_FIELD_MAPPINGS = True

        # Create component server
        server = self.server_cls(
            self.callbacks,
            self.args,
            debug=self.debug,
            source_file=self.source_file,
            error_callback=self.__error_callback,
        )

        # Initialize component logging only when the `quiet` argument is
        # False (when an input message is given, init logging only when
        # debug is True)
        if not kwargs.get('quiet'):
            setup_katana_logging(
                self.server_cls.get_type(),
                server.component_name,
                server.component_version,
                server.framework_version,
                logging.DEBUG if self.debug else logging.INFO,
            )

        LOG.debug('Using PID: "%s"', os.getpid())

        if not message:
            # Create channel for TCP or IPC connections
            if self.tcp_port:
                channel = tcp('127.0.0.1:{}'.format(self.tcp_port))
            else:
                # Abstract domain unix socket
                channel = 'ipc://{}'.format(self.socket_name)

        # By default exit successfully
        exit_code = EXIT_OK

        # Call startup callback
        if self.__startup_callback:
            LOG.info('Running startup callback ...')
            try:
                self.__startup_callback(self.component)
            except Exception:
                LOG.exception('Startup callback failed')
                LOG.error('Component failed')
                exit_code = EXIT_ERROR

        # Run component server
        if exit_code != EXIT_ERROR:
            try:
                # Create a greenlet to run server
                if message:
                    greenlet = gevent.spawn(server.process_input, message)
                else:
                    greenlet = gevent.spawn(server.listen, channel)

                # Listen for SIGTERM and SIGINT
                gevent.signal(signal.SIGTERM, greenlet.kill)
                gevent.signal(signal.SIGINT, greenlet.kill)
                # Run server
                greenlet.join()
            except KatanaError as err:
                exit_code = EXIT_ERROR
                LOG.error(err)
                LOG.error('Component failed')
            except zmq.error.ZMQError as err:
                exit_code = EXIT_ERROR
                if err.errno == 98:
                    LOG.error('Address unavailable: "%s"', self.socket_name)
                else:
                    LOG.error(err.strerror)

                LOG.error('Component failed')
            except Exception:
                exit_code = EXIT_ERROR
                LOG.exception('Component failed')

        # Call shutdown callback
        if self.__shutdown_callback:
            LOG.info('Running shutdown callback ...')
            try:
                self.__shutdown_callback(self.component)
            except Exception:
                LOG.exception('Shutdown callback failed')
                LOG.error('Component failed')
                exit_code = EXIT_ERROR

        if exit_code == EXIT_OK:
            LOG.info('Operation complete')

        os._exit(exit_code)
Example #60
0
@click.option("--outmode",
              type=click.Choice(
                  ["verbose", "one-click-link", "short-link", "id"]),
              default="one-click-link",
              help="Which type of link to return")
@click.option("--outformat",
              "-f",
              type=click.Choice(["plain", "json"]),
              default="json",
              help="Return output in plain text or json format.")
@click.option("--expires",
              "-e",
              type=click.Choice(["1h", "1d", "1w"]),
              default="1h")
@click.argument("secret",
                type=click.STRING,
                default=click.get_text_stream('stdin'))
def send(secret, expires, outmode, outformat):
    """Submit secret to server. Optionally pipe secret via stdin.

    """
    backend = os.environ.get("YOPASS_BACKEND_URL")
    if backend is None:
        click.echo("""YOPASS_BACKEND_URL is not defined, run export
            YOPASS_BACKEND_URL=<your backend> first""")
        exit(1)
    frontend = os.environ.get("YOPASS_FRONTEND_URL")
    if frontend is None and outmode != "id":
        click.echo("""YOPASS_FRONTEND_URL is not defined, run export
            YOPASS_FRONTEND_URL=<your frontend> first""")
        exit(1)
    try: