Example #1
def main(profile, dry_run, router):
    """Live migrate ROUTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate HV to new POD'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    router = co.get_system_vm(name=router)
    if not router:
        sys.exit(1)

    source_host = co.get_host(id=router['hostid'])
    if not source_host:
        sys.exit(1)

    cluster = co.get_cluster(id=source_host['clusterid'])
    if not cluster:
        sys.exit(1)

    destination_host = cluster.find_migration_host(router)
    if not destination_host:
        sys.exit(1)

    if not router.migrate(destination_host):
        sys.exit(1)
Example #2
def main(address, port, provider, debug):
    # Silence the loggers before we print the port
    # We wouldn't be able to connect from emacs via epc otherwise
    epclogger = logging.getLogger("epc")
    click_log.basic_config(epclogger)
    epclogger.setLevel(logger.level)

    with logging_level([logger, epclogger], logging.ERROR):
        server = reddel_server.Server((address, port))
        server.print_port()

    provider = provider + ("reddel_server.RedBaronProvider", )
    logger.info("setting the following providers %s", provider)

    providers = load_providers(provider, server, debug)
    logger.debug("all providers initialized: %s", providers)

    logger.debug("creating chained provider")
    chainedprovider = reddel_server.ChainedProvider(server,
                                                    providers=providers)
    server.set_provider(chainedprovider)

    logger.debug("serve forever")
    server.serve_forever()
    server.logger.info("server shutdown")
Example #3
def main(profile, destination_dc, dry_run, host, cluster):
    """Migrate all VMs on HOST to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate HV to new POD'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    host = co.get_host(name=host)
    if not host:
        sys.exit(1)

    for vm in host.get_all_vms() + host.get_all_project_vms():
        live_migrate(co=co,
                     cs=cs,
                     cluster=cluster,
                     vm_name=vm['name'],
                     destination_dc=destination_dc,
                     add_affinity_group=None,
                     is_project_vm=None,
                     zwps_to_cwps=None,
                     log_to_slack=log_to_slack,
                     dry_run=dry_run)
Example #4
def cli(ctx, config_file, host, port, token, verbose):
    """Aria2 RPC Client"""
    logging.basicConfig(level=LOG_LEVELS.get(verbose, logging.INFO))
    # init logger
    logger = logging.getLogger(__name__)
    click_log.basic_config(logger)

    guess_paths = [
        Path('.'),  # current dir ./
        Path.home() / DEFAULT_CONFIG_PATH,  # ~/.aria2/
        Path(__file__).resolve().parent /
        DEFAULT_CONFIG_PATH,  # ${BIN_PATH}/.aria2/
    ]
    config = load_aria2_config(config_file, guess_paths=guess_paths)

    ctx.ensure_object(dict)
    ctx.obj['host'] = host
    ctx.obj['port'] = port
    ctx.obj['token'] = token
    ctx.obj['config'] = config
    ctx.obj['guess_paths'] = guess_paths
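    # Command-line options take precedence; fall back to config-file values.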
    ctx.obj['aria2'] = aria2p.API(
        aria2p.Client(host=host or config.get('host'),
                      port=port or config.get('port'),
                      secret=token or config.get('token')))
    ctx.obj['logger'] = logger
Example #5
def main(ctx, name, hostname):
    for logger in LOGGERS:
        click_log.basic_config(logger)
    if name and hostname:
        raise click.BadParameter(
            "Either name or hostname can be set not both.")
    ctx.obj = {"name": name, "hostname": hostname}
Example #6
def run(config_file, host, port, token, interval, verbose):
    max_level = max(LOG_LEVELS, key=int)
    logging.basicConfig(level=LOG_LEVELS.get(min(verbose, max_level),
                                             logging.INFO),
                        format=LOG_FORMAT)
    logger = logging.getLogger(__name__)
    logger.setLevel(LOG_LEVELS.get(min(verbose, max_level), logging.INFO))
    click_log.basic_config(logger)
    # requests logger
    if verbose < 3:
        logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)

    config = load_aria2_config(config_file)

    aria2 = aria2p.API(
        aria2p.Client(host=host or config.get('host'),
                      port=port or config.get('port'),
                      secret=token or config.get('token')))

    register_single()

    aria2_queue_manager = Aria2QueueManager(aria2, exit_event)

    logger.debug('Main loop.')
    while not exit_event.is_set():
        try:
            aria2_queue_manager.run()
        except requests.exceptions.ConnectTimeout as e:
            logger.warning('Connect Timeout: %s', str(e))
        logger.info(f'sleep {interval}s.')
        exit_event.wait(interval)
    click.secho('Program exit.', fg='green')
Example #7
def main(ctx, device, baudrate, flow_control):
    ctx.obj = {
        CONF_DEVICE: device,
        CONF_DEVICE_BAUDRATE: baudrate,
        CONF_FLOW_CONTROL: flow_control,
    }
    click_log.basic_config()
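
Most of these examples are variations on the pattern from click_log's own documentation: attach click_log's handler and color formatter to a logger with basic_config(), then expose a verbosity flag with simple_verbosity_option(). A minimal self-contained sketch of that baseline (the command name and log messages are placeholders, not taken from any example here):

import logging

import click
import click_log

logger = logging.getLogger(__name__)
click_log.basic_config(logger)  # install click_log's handler/formatter on this logger


@click.command()
@click_log.simple_verbosity_option(logger)  # adds a --verbosity/-v LEVEL option
def hello():
    """Emit one message per level; run with -v DEBUG to see them all."""
    logger.debug("debug message")
    logger.info("info message")
    logger.warning("warning message")


if __name__ == "__main__":
    hello()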
Example #8
def main(ctx, edit, create):
    """
    Simple command line tool to help manage environment variables stored in a S3-like system. Facilitates editing text
    files remotely stored, as well as downloading and uploading files.
    """
    # configs this module logger to behave properly
    # logger messages will go to stderr (check __init__.py/patch.py)
    # client output should be generated with click.echo() to go to stdout
    try:
        click_log.basic_config('s3conf')
        logger.debug('Running main entrypoint')
        if edit:
            if ctx.invoked_subcommand is None:
                logger.debug('Using config file %s', config.LOCAL_CONFIG_FILE)
                config.ConfigFileResolver(
                    config.LOCAL_CONFIG_FILE).edit(create=create)
                return
            else:
                raise UsageError(
                    'Edit should not be called with a subcommand.')
        # manually call help in case no relevant settings were defined
        if ctx.invoked_subcommand is None:
            click.echo(main.get_help(ctx))
    except exceptions.FileDoesNotExist as e:
        raise UsageError(
            'The file {} does not exist. Try "-c" option if you want to create it.'
            .format(str(e)))
Example #9
def main(profile, hostname, name_filter, non_running, plain_display):
    """Lists HA workers"""

    click_log.basic_config()

    logging.info(
        list_ha_workers(profile, hostname, name_filter, non_running,
                        plain_display))
Example #10
def main(profile, dry_run, skip_version, older_then, skip_zone, only_zone, restart_agent):
    """Destroys SVMs per zone and waits for a new one"""

    click_log.basic_config()

    if dry_run:
        logging.info('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run)
    if older_then:
        older_then = datetime.strptime(f"{older_then}T00:00:00+0200", '%Y%m%dT%H:%M:%S%z')

    svms = co.get_all_systemvms()
    zones = defaultdict(list)
    for svm in svms:
        if only_zone and co.get_host(name=svm['name']).get('zonename') != only_zone:
            continue
        if skip_zone and co.get_host(name=svm['name']).get('zonename') == skip_zone:
            continue
        if skip_version and co.get_host(name=svm['name']).get('version') == skip_version:
            continue
        if older_then and datetime.strptime(svm['created'], '%Y-%m-%dT%H:%M:%S%z') > older_then:
            continue
        zones[svm['zonename']].append(svm)

    for zone in zones:
        logging.info(f"Processing zone: {zone}")
        for vm in zones[zone]:
            if not vm.destroy():
                sys.exit(1)

            up = list()
            down = list(zones[zone])
            retries = 60
            zone_id = vm['zoneid']
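            # Poll every 5s (up to 60 tries) until all systemvms in the zone are Up again.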
            while len(up) < len(zones[zone]) or len(down) > 0:
                if not dry_run:
                    time.sleep(5)

                try:
                    systemvms = {x['name']: x for x in co.get_all_systemvms(zoneid=zone_id)}
                    host_status = {k: co.get_host(name=k) for k in systemvms}
                    up = list(filter(lambda x: x and x['state'] == 'Up' and x['resourcestate'] == 'Enabled', host_status.values()))
                    down = list(filter(lambda x: x and x['state'] != 'Up' and x['resourcestate'] == 'Enabled', host_status.values()))
                    retries -= 1
                    if retries == 0:
                        break
                    if down and restart_agent:
                        for d in down:
                            svm_object = list(filter(lambda x: x and x['name'].lower() == d['name'], systemvms.values()))[0]
                            svm_object.restart_agent()
                except KeyError:
                    # Ignore keyerror, systemvm is still not available as host
                    pass

            if retries == 0:
                logging.error("Exceeded retry count waiting for new systemvm")
                sys.exit(1)
Example #11
def entry():
    # Move flags to after the command
    ARGS = sorted(sys.argv[1:], key=lambda x: 1 if x.startswith("--") else -1)

    if ARGS and not ARGS[0].startswith("print-"):
        # print-* command output should not be polluted with logging.
        click_log.basic_config()

    main(args=ARGS)
Example #12
def main(profile, dry_run, instance_id):
    """Kills all jobs related to INSTANCE_ID"""

    click_log.basic_config()

    if dry_run:
        logging.warning('Running in dry-run mode, will only show changes')

    kill_jobs(profile, dry_run, instance_id)
Example #13
def get_root_logger():
    logger = metricq.get_logger()
    logger.setLevel(logging.WARN)
    click_log.basic_config(logger)
    # logger.handlers[0].formatter = logging.Formatter(
    #     fmt="%(asctime)s [%(levelname)-8s] [%(name)-20s] %(message)s"
    # )

    return logger
Example #14
def main(profile, shutdown, skip_disable, dry_run, host):
    """Empty HOST by migrating VMs to another host in the same cluster."""

    click_log.basic_config()

    if dry_run:
        logging.info('Running in dry-run mode, will only show changes')

    try:
        logging.info(empty_host(profile, shutdown, skip_disable, dry_run, host))
    except RuntimeError as err:
        logging.error(err)
        sys.exit(1)
Example #15
def main():
    """Run the click app."""
    click_log.basic_config(logger)

    # https://click.palletsprojects.com/en/7.x/exceptions/#what-if-i-don-t-want-that
    # TODO: do we need this for easier testing? or is invoke catch_exceptions=False enough?
    standalone_mode = os.environ.get("ARGOBYTES_CLICK_STANDALONE", "1") == "1"

    cli(
        obj={},
        auto_envvar_prefix="ARGOBYTES",
        prog_name="argobytes",
        standalone_mode=standalone_mode,
    )
Example #16
def no_debug_output(request):
    logger = click_log.basic_config('vdirsyncer')
    # Capture the previous level before silencing, so teardown can restore it.
    old = logger.level
    logger.setLevel(logging.WARNING)

    def teardown():
        logger.setLevel(old)

    request.addfinalizer(teardown)
Example #17
def hs_cli(ctx, verbose, cluster, config_file):
    click_log.basic_config(logging.root)
    # custom handler to print output into stdout
    log_handler = StdoutLogHandler()
    log_handler.formatter = click_log.ColorFormatter()
    logging.root.handlers = [log_handler]
    if verbose:
        logging.root.setLevel(logging.DEBUG)
    else:
        logging.root.setLevel(logging.INFO)
    logging.debug("CLI root command (hs_cli) initialized")
    if config_file:
        ctx.obj = config_file
    else:
        # note: didn't set as default in click,
        # since CONFIG_FILE is a special case and can be missing
        # and created later using `hs cluster add` command
        ctx.obj = CONFIG_PATH
    logging.debug(f"Working with {ctx.obj} config file")
Example #18
def main(profile, uuid, network_uuid, dry_run, vpc):
    """VPC restart script"""

    click_log.basic_config()

    log_to_slack = True

    if uuid and network_uuid:
        logging.error('You can not specify --uuid and --network-uuid together')
        sys.exit(1)

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    if uuid:
        vpc = co.get_vpc(id=uuid)
    elif network_uuid:
        network = co.get_network(id=network_uuid)
        if not network:
            sys.exit(1)

        vpc = co.get_vpc(id=network['vpcid'])
    else:
        vpc = co.get_vpc(name=vpc)

    if not vpc:
        sys.exit(1)

    logging.slack_title = 'Domain'
    logging.slack_value = vpc['domain']
    logging.instance_name = vpc['name']
    logging.zone_name = vpc['zonename']

    if not vpc.restart():
        sys.exit(1)

    logging.info(
        f"Successfully restarted VPC '{vpc['name']}' ({vpc['id']}) with clean up",
        log_to_slack)
Example #19
def main(profile, zwps_to_cwps, add_affinity_group, destination_dc, is_project_vm,
         skip_within_cluster, dry_run, vm, cluster):
    """Live migrate VM to CLUSTER"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate VM'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    # Work around migration issue: first in the same pod to limit possible hiccup
    vm_instance = co.get_vm(name=vm, is_project_vm=is_project_vm)

    if not vm_instance:
        logging.error(f"Cannot migrate, VM '{vm}' not found!")
        sys.exit(1)

    if vm_instance['state'] != 'Running':
        logging.error(f"Cannot migrate, VM has state: '{vm_instance['state']}'")
        sys.exit(1)

    source_host = co.get_host(id=vm_instance['hostid'])
    source_cluster = co.get_cluster(id=source_host['clusterid'])
    if not skip_within_cluster:
        if not vm_instance.migrate_within_cluster(vm=vm_instance, source_cluster=source_cluster,
                                                  source_host=source_host, instancename=vm_instance):
            logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
            sys.exit(1)

    if not live_migrate(co, cs, cluster, vm, destination_dc, add_affinity_group, is_project_vm, zwps_to_cwps,
                        log_to_slack, dry_run):
        logging.info(f"VM Migration failed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
        sys.exit(1)
    logging.info(f"VM Migration completed at {datetime.now().strftime('%d-%m-%Y %H:%M:%S')}\n")
Example #20
def main(profile, all_databases, mac_address):
    """Shows who uses MAC_ADDRESS"""

    click_log.basic_config()

    if not (profile or all_databases):
        logging.error("You must specify --profile or --all-databases")
        sys.exit(1)

    if profile and all_databases:
        logging.error(
            "The --profile and --all-databases options can't be used together")
        sys.exit(1)

    try:
        result = who_has_this_mac(profile, all_databases, mac_address)
    except RuntimeError as err:
        logging.error(err)
        sys.exit(1)

    logging.info(result)
Example #21
def main(profile, max_iops, zwps_to_cwps, is_project_vm, dry_run, vm,
         storage_pool):
    """Live migrate VM volumes to STORAGE_POOL"""

    click_log.basic_config()

    log_to_slack = True
    logging.task = 'Live Migrate VM Volumes'
    logging.slack_title = 'Domain'

    if dry_run:
        log_to_slack = False
        logging.warning('Running in dry-run mode, will only show changes')

    co = CosmicOps(profile=profile, dry_run=dry_run, log_to_slack=log_to_slack)

    cs = CosmicSQL(server=profile, dry_run=dry_run)

    if not live_migrate_volumes(storage_pool, co, cs, dry_run, is_project_vm,
                                log_to_slack, max_iops, vm, zwps_to_cwps):
        sys.exit(1)
Example #22
def setup_logging(verbosity):
    # explicitly set to stderr just in case
    # this is the new default for click_log it seems
    core.ClickHandler._use_stderr = True
    # adding color to INFO log messages as well
    core.ColorFormatter.colors['info'] = dict(fg='green')

    class NamedColorFormatter(core.ColorFormatter):
        def format(self, record):
            if not record.exc_info:
                level = record.levelname.lower()
                msg = record.getMessage()
                if level in self.colors:
                    prefix = click.style('{} [{}]: '.format(level, record.name),
                                         **self.colors[level])
                    msg = '\n'.join(prefix + x for x in msg.splitlines())
                return msg
            return logging.Formatter.format(self, record)

    # replace the formatter with our formatter so that it prints the logger name
    core._default_handler.formatter = NamedColorFormatter()

    if verbosity >= 3:
        log_level = logging.DEBUG
        # set the root logger and any other internal loggers to debug as well if -vvv is provided
        click_log.basic_config().setLevel(log_level)
    elif verbosity == 2:
        log_level = logging.DEBUG
    elif verbosity == 1:
        log_level = logging.INFO
    else:
        log_level = logging.WARNING
    click_log.basic_config('polyswarm').setLevel(log_level)
    click_log.basic_config('polyswarm_api').setLevel(log_level)
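
This example leans on two click_log internals that the other examples only use implicitly: click_log keeps a single module-level handler (core._default_handler) that every logger configured through basic_config() shares, so replacing its formatter changes the output of all of them; and basic_config() called with no argument configures and returns the root logger, which is what the -vvv branch relies on.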
Example #23
def main(
    ctx: click.Context,
    confluence_api: str,
    user: str,
    key: str,
    space: str,
    verbose: int,
) -> None:
    """Tools for managing and publishing to a Confleunce Cloud wiki using Markdown files."""
    logger = click_log.basic_config()
    logger.setLevel(__verbosity_count_to_log_level(verbose))
    context = CliContext()
    context.confluence = Confluence(confluence_api, user, key, space)
    ctx.obj = context
Example #24
def cli(ctx: click.Context, config, verbose: int) -> None:
    """This is the main command group for the click based CLI.

    Args:
        ctx (click.Context): Click context object.
        config ([type]): User adjustments to configuration for this run.
        verbose ([type]): User selected verbosity for this run.
    """
    logger = click_log.basic_config()
    logger.setLevel(__verbosity_count_to_log_level(verbose))

    context = CliContext()
    ctx.obj = context
    for key, value in config:
        ctx.obj.set_config(key, value)
Example #25
from policy_sentry.querying.arns import (
    get_arn_type_details,
    get_arn_types_for_service,
    get_raw_arns_for_service,
)
from policy_sentry.querying.actions import (
    get_actions_for_service, get_actions_with_access_level, get_action_data,
    get_actions_matching_condition_key,
    get_actions_with_arn_type_and_access_level, get_actions_matching_arn_type)
from policy_sentry.querying.conditions import (
    get_condition_keys_for_service,
    get_condition_key_details,
)
from policy_sentry.shared.constants import DATASTORE_FILE_PATH, LOCAL_DATASTORE_FILE_PATH

logger = logging.getLogger(__name__)
click_log.basic_config(logger)
iam_definition_path = DATASTORE_FILE_PATH


@click.group()
def query():
    """Allow users to query the IAM tables from command line"""


@query.command(
    short_help=
    "Query the action table based on access levels, conditions, or actions that only support wildcard "
    "resources.")
@click.option("--service",
              type=str,
              required=True,
Example #26
def setup_logging():
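    # basic_config() also accepts a logger name: it configures
    # logging.getLogger('vdirsyncer') and returns it, so setLevel() can be chained.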
    click_log.basic_config('vdirsyncer').setLevel(logging.DEBUG)
Example #27
import mckit as mk
from mckit.utils.resource import filename_resolver
from mckit.cli.runner import mckit,  __version__
from mckit.cli.commands.common import get_default_output_directory
from mckit.parser.mcnp_section_parser import is_comment_text
from mckit.utils.io import MCNP_ENCODING

# skip the pylint warning on fixture names
# pylint: disable=redefined-outer-name

# skip the pylint warning on long names: test names should be descriptive
# pylint: disable=invalid-name


test_logger = logging.getLogger(__name__)
click_log.basic_config(test_logger)
test_logger.level = logging.INFO


@pytest.fixture
def runner():
    return CliRunner()


data_filename_resolver = filename_resolver('tests')


def test_when_there_is_no_args(runner):
    with runner.isolated_filesystem():
        result = runner.invoke(mckit, args=['split'], catch_exceptions=False)
        assert result.exit_code != 0, "Should fail when no arguments provided"
"""Home Assistant CLI (hass-cli)."""
import logging
import os
import sys
from typing import List, Optional, Union, cast

import click
from click.core import Command, Context, Group
import click_log
import homeassistant_cli.autocompletion as autocompletion
from homeassistant_cli.config import Configuration
import homeassistant_cli.const as const
from homeassistant_cli.helper import debug_requests_on, to_tuples

click_log.basic_config()

_LOGGER = logging.getLogger(__name__)

CONTEXT_SETTINGS = dict(auto_envvar_prefix='HOMEASSISTANT')

pass_context = click.make_pass_decorator(  # pylint: disable=invalid-name
    Configuration, ensure=True
)


def run() -> None:
    """Run entry point.

    Wraps click for full control over exception handling in Click.
    """
    # A hack to see if exception details should be printed.
Example #29
import click_log
from launchpadlib.launchpad import Launchpad
from toolkit import CONTEXT_SETTINGS

# Workdir and other click defaults for this script
WORK_DIR_OPT = ['-w', '--workdir']
WORK_DIR_OPT_PARAMS = dict(default='/tmp/bugtriage',
                           type=click.Path(exists=True, file_okay=False,
                                           dir_okay=True, writable=True,
                                           resolve_path=True),
                           help='Work directory: Temporary workspace folder',
                           show_default=True)

# CODE STARTS HERE
LOGGER = logging.getLogger(__name__)
click_log.basic_config(LOGGER)

# STATIC VARS
STATES = ['New']
ORDERBY = '-datecreated'


@click.command(context_settings=CONTEXT_SETTINGS)
@click_log.simple_verbosity_option(LOGGER)
@click.option(*WORK_DIR_OPT, **WORK_DIR_OPT_PARAMS)
def generate_page(**kwargs):
    """ Generate a bug triage page to help the triaging process
    """

    cache_folder = kwargs['workdir'] + '/cache/'
    if not os.path.lexists(cache_folder):
Example #30
# -*- coding: utf-8 -*-
import os
import sys
import click
"""Log"""
import logging
import click_log
logger = click_log.basic_config(__name__)
from .logger.logger import Logger

CONTEXT_SETTINGS = dict(auto_envvar_prefix='COMPLEX')


class Context(object):
    def __init__(self):
        self.debug = False
        self.logLevel = 0
        self.sharedModule = None
        self.baseAPI = 'http://localhost:3000'
        self.log = Logger(logger)


pass_context = click.make_pass_decorator(Context, ensure=True)
cmd_folder = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'commands'))


class ComplexCLI(click.MultiCommand):
    def list_commands(self, ctx):
        rv = []
        for filename in os.listdir(cmd_folder):
Example #31
from tabulate import tabulate

from diffy.filters import AWSFilter

from diffy.config import CONFIG, configure_swag
from diffy.common.utils import install_plugins
from diffy._version import __version__
from diffy.plugins.base import plugins
from diffy.core import analysis, baseline
from diffy.exceptions import DiffyException
from diffy_cli.utils.dynamic_click import CORE_COMMANDS, func_factory, params_factory

log = logging.getLogger("diffy")
log.addFilter(AWSFilter())

click_log.basic_config(log)

install_plugins()


def plugin_command_factory():
    """Dynamically generate plugin groups for all plugins, and add all basic command to it"""
    for p in plugins.all():
        plugin_name = p.slug
        help = f"Options for '{plugin_name}'"
        group = click.Group(name=plugin_name, help=help)
        for name, description in CORE_COMMANDS.items():
            callback = func_factory(p, name)
            pretty_opt = click.Option(
                ["--pretty/--not-pretty"], help="Output a pretty version of the JSON"
            )
Example #33
File: cli.py Project: pdav/khal
from . import __version__, controllers, khalendar
from .exceptions import FatalError
from .settings import InvalidSettingsError, get_config
from .settings.exceptions import NoConfigFile
from .terminal import colored

try:
    from setproctitle import setproctitle
except ImportError:
    def setproctitle(x):
        pass


logger = logging.getLogger('khal')
click_log.basic_config('khal')

days_option = click.option('--days', default=None, type=int, help='How many days to include.')
week_option = click.option('--week', '-w', help='Include all events in one week.', is_flag=True)
events_option = click.option('--events', default=None, type=int, help='How many events to include.')
dates_arg = click.argument('dates', nargs=-1)


def time_args(f):
    return dates_arg(events_option(week_option(days_option(f))))


def multi_calendar_select(ctx, include_calendars, exclude_calendars):
    if include_calendars and exclude_calendars:
        raise click.UsageError('Can\'t use both -a and -d.')
    # if not isinstance(include_calendars, tuple):
Example #34
import pyudev
import subprocess
import sh
import json
from time import sleep
import re
import click
import logging
import click_log
import toml
import os

uuid_pattern = re.compile('UUID="[^"]+"')

log = logging.getLogger(__name__)
click_log.basic_config(log)


@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    """USB disk automatic mounter and umounter"""
    if not ctx.invoked_subcommand:
        auto()


def exec_actions(actions):
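    # Resolve each action's command through sh and invoke it with its parameters.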
    for action in actions:
        cmd = sh.Command(action['cmd'])
        cmd(action['params'])
Example #35
def _echo_item(x):
    if not x:
        return
    click.echo(x)




import os
import logging
import click
import click_log
from . import config

_logger = logging.getLogger(__name__)
click_log.basic_config(_logger)

@click.group()
def cli():
    pass




from . import blackboard

@cli.group(name='blackboard')
def cli_blackboard():
    pass

@cli_blackboard.command(name='download', help='Download')
Example #36
from glob import glob
from itertools import chain
from multiprocessing import cpu_count

import click
import click_log
from pandas import read_csv
from six import iteritems, itervalues
from tqdm import tqdm

from gp_align.analysis import analyze_run, PLATES
from gp_align.conversion import g2od


LOGGER = logging.getLogger(__name__.split(".", 1)[0])
click_log.basic_config(LOGGER)

try:
    NUM_CPU = min(4, cpu_count())
except NotImplementedError:
    LOGGER.warning("Could not detect the number of cores - assuming only one.")
    NUM_CPU = 1


@click.group()
@click.help_option("--help", "-h")
@click_log.simple_verbosity_option(
    LOGGER, default="INFO", show_default=True,
    type=click.Choice(["CRITICAL", "ERROR", "WARN", "INFO", "DEBUG"]))
def cli():
    """