Exemplo n.º 1
0
def test_optsudo(mocker):
    # First call: user is in the docker group -> no sudo prefix needed.
    # Second call: user is not in the group -> commands get a 'sudo ' prefix.
    patched = mocker.patch(TESTED + '.is_docker_user')
    patched.side_effect = [True, False]
    assert utils.optsudo() == ''
    assert utils.optsudo() == 'sudo '
Exemplo n.º 2
0
def add_node_red(force):
    """
    Create a service for Node-RED.
    """
    utils.check_config()
    utils.confirm_mode()

    name = 'node-red'
    sudo = utils.optsudo()
    host = utils.host_ip()
    port = utils.getenv(const.HTTPS_PORT_KEY)
    config = utils.read_compose()

    # Unless --force is used, refuse to overwrite an existing service entry
    if not force:
        check_duplicate(config, name)

    # Service definition added to docker-compose.yml.
    # The host's ./node-red dir is bind-mounted as the container's /data dir.
    config['services'][name] = {
        'image': 'brewblox/node-red:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'volumes': [{
            'type': 'bind',
            'source': f'./{name}',
            'target': '/data',
        }]
    }

    sh(f'mkdir -p ./{name}')
    # Ensure the data dir is owned by UID/GID 1000:1000.
    # The chown is skipped when the current user already is 1000:1000.
    if [getgid(), getuid()] != [1000, 1000]:
        sh(f'sudo chown -R 1000:1000 ./{name}')

    utils.write_compose(config)
    click.echo(f'Added Node-RED service `{name}`.')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
        click.echo(f'Visit https://{host}:{port}/{name} in your browser to load the editor.')
Exemplo n.º 3
0
def add_plaato(name, token, force):
    """
    Create a service for the Plaato airlock.

    This will periodically query the Plaato server for current state.
    An authentication token is required.

    See https://plaato.io/apps/help-center#!hc-auth-token on how to get one.
    """
    utils.check_config()
    utils.confirm_mode()

    sudo = utils.optsudo()
    config = utils.read_compose()

    if not force:
        check_duplicate(config, name)

    # Compose service definition for the Plaato polling service.
    # The auth token is passed through the environment.
    service = {
        'image': 'brewblox/brewblox-plaato:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'environment': {
            'PLAATO_AUTH': token,
        },
        'command': f'--name={name}',
    }
    config['services'][name] = service

    utils.write_compose(config)
    click.echo(f'Added Plaato service `{name}`.')
    click.echo('This service publishes history data, but does not have a UI component.')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
Exemplo n.º 4
0
def kill(zombies):
    """Stop and remove all containers on this host.

    This includes those not from Brewblox.

    If the --zombies flag is set,
    leftover processes that are still claiming a port will be forcibly removed.
    Use this if you get "port is already allocated" errors after your system crashed.
    """
    utils.confirm_mode()
    sudo = utils.optsudo()
    # Remove every container on the host. check=False: the command fails
    # harmlessly when `docker ps -aq` yields no containers.
    sh(f'{sudo}docker rm --force $({sudo}docker ps -aq)', check=False)

    if zombies:
        # We can't use psutil for this, as we need root rights to get pids
        if not utils.command_exists('netstat'):
            utils.warn(
                'Command `netstat` not found. Please install it by running:')
            utils.warn('')
            utils.warn(
                '    sudo apt-get update && sudo apt-get install net-tools')
            utils.warn('')
            return

        # Collect PIDs of docker-proxy processes that still hold a port
        procs = re.findall(r'(\d+)/docker-proxy',
                           sh('sudo netstat -pna', capture=True))

        if procs:
            utils.info(f'Removing {len(procs)} zombies...')
            # Stop the docker daemon before killing its proxy processes,
            # then restart it afterwards
            sh('sudo service docker stop')
            sh([f'sudo kill -9 {proc}' for proc in procs])
            sh('sudo service docker start')
Exemplo n.º 5
0
def down():
    """Stop all services.

    This wraps `docker-compose down --remove-orphans`
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()
    cmd = f'{sudo}docker-compose down --remove-orphans'
    sh(cmd)
Exemplo n.º 6
0
def makecert(dir, release: str = None):
    """Generate a self-signed SSL certificate in `dir` using the omgwtfssl image."""
    sudo = utils.optsudo()
    tag = utils.docker_tag(release)
    absdir = Path(dir).resolve()
    sh(f'mkdir -p "{absdir}"')
    sh(f'{sudo}docker run --rm --privileged --pull always'
       f' -v "{absdir}":/certs/ brewblox/omgwtfssl:{tag}')
    # Cert is world-readable, key is owner-only
    sh(f'sudo chmod 644 "{absdir}/brewblox.crt"')
    sh(f'sudo chmod 600 "{absdir}/brewblox.key"')
Exemplo n.º 7
0
def kill():
    """Stop and remove all containers on this computer.

    This includes those not from Brewblox.
    """
    utils.confirm_mode()
    sudo = utils.optsudo()
    # check=False: harmless failure when no containers exist
    sh(f'{sudo}docker rm --force $({sudo}docker ps -aq)', check=False)
Exemplo n.º 8
0
def down(compose_args):
    """Stop all services.

    This wraps `docker-compose down`
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()
    # Extra CLI arguments are forwarded verbatim to docker-compose
    args = ' '.join(compose_args)
    sh(f'{sudo}docker-compose down {args}')
Exemplo n.º 9
0
def up(compose_args):
    """Start all services.

    This wraps `docker-compose up -d`
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()
    # Extra CLI arguments are forwarded verbatim to docker-compose
    args = ' '.join(compose_args)
    sh(f'{sudo}docker-compose up -d {args}')
Exemplo n.º 10
0
def prepare_flasher(release, pull):
    """Optionally pull the flasher image, and stop any running services."""
    sudo = utils.optsudo()
    tag = utils.docker_tag(release)

    if pull:
        utils.info('Pulling flasher image...')
        sh(f'{sudo}docker pull brewblox/firmware-flasher:{tag}')

    # Services hold the serial device; stop them before flashing
    if utils.path_exists('./docker-compose.yml'):
        utils.info('Stopping services...')
        sh(f'{sudo}docker-compose down')
Exemplo n.º 11
0
def restart():
    """Stop and start all services.

    This wraps `docker-compose down --remove-orphans; docker-compose up -d`

    Note: `docker-compose restart` also exists -
    it restarts containers without recreating them.
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()
    sh(f'{sudo}docker-compose down --remove-orphans')
    sh(f'{sudo}docker-compose up -d')
Exemplo n.º 12
0
def restart(compose_args):
    """Recreates all services.

    This wraps `docker-compose up -d --force-recreate`

    Note: `docker-compose restart` also exists -
    it restarts containers without recreating them.
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()
    # Extra CLI arguments are forwarded verbatim to docker-compose
    args = ' '.join(compose_args)
    sh(f'{sudo}docker-compose up -d --force-recreate {args}')
Exemplo n.º 13
0
def run_particle_flasher(release: str, pull: bool, cmd: str):
    """Run `cmd` in the firmware flasher image for Particle-based Sparks."""
    sudo = utils.optsudo()
    tag = utils.docker_tag(release)

    # Privileged + /dev mount: the flasher needs raw access to the serial device
    pull_policy = 'always' if pull else 'missing'
    opts = f'-it --rm --privileged -v /dev:/dev --pull {pull_policy}'

    # Stop services first - they may be holding the device
    sh(f'{sudo}docker-compose --log-level CRITICAL down', check=False)
    sh(f'{sudo}docker run {opts} brewblox/firmware-flasher:{tag} {cmd}')
Exemplo n.º 14
0
def run_esp_flasher(release: str, pull: bool):
    """Flash the firmware of an ESP-based Spark."""
    sudo = utils.optsudo()
    tag = utils.docker_tag(release)

    # Privileged + /dev mount: the flash script needs raw access to the device
    pull_policy = 'always' if pull else 'missing'
    opts = ('-it --rm --privileged -v /dev:/dev '
            f'-w /app/firmware --entrypoint bash --pull {pull_policy}')

    # Stop services first - they may be holding the device
    sh(f'{sudo}docker-compose --log-level CRITICAL down', check=False)
    sh(f'{sudo}docker run {opts} brewblox/brewblox-devcon-spark:{tag} flash')
Exemplo n.º 15
0
def _influx_line_count(service: str, args: str) -> Optional[int]:
    """Query Influx for the number of points in a service's measurement.

    Returns None when the query yields no rows.
    Requires the 'influxdb-migrate' container to be running.
    """
    sudo = utils.optsudo()
    measurement = f'"brewblox"."downsample_1m"."{service}"'
    points_field = '"m_ Combined Influx points"'
    query = f'SELECT count({points_field}) FROM {measurement} {args}'
    json_result = sh(
        f'{sudo}docker exec influxdb-migrate influx '
        '-database brewblox '
        f"-execute '{query}' "
        '-format json',
        capture=True)

    result = json.loads(json_result)

    try:
        # First value of the first row: [timestamp, count]
        return result['results'][0]['series'][0]['values'][0][1]
    except (IndexError, KeyError):
        return None
Exemplo n.º 16
0
def follow(services):
    """Show logs for one or more services.

    This will start watching the logs for specified services.
    Call without arguments to show logs for all running services.

    Once started, press ctrl+C to stop.

    Service name will be equal to those specified in docker-compose.log,
    not the container name.

    To follow logs for service 'spark-one':

    \b
        GOOD: `brewblox-ctl follow spark-one`
         BAD: `brewblox-ctl follow brewblox_spark-one_1`
    """
    utils.check_config()
    sudo = utils.optsudo()
    names = ' '.join(services)
    sh(f'{sudo}docker-compose logs --follow {names}')
Exemplo n.º 17
0
def add_tilt(force):
    """
    Create a service for the Tilt hydrometer.

    The service listens for Bluetooth status updates from the Tilt,
    and requires the host to have a Bluetooth receiver.

    The empty ./tilt dir is created to hold calibration files.
    """
    utils.check_config()
    utils.confirm_mode()

    name = 'tilt'
    sudo = utils.optsudo()
    config = utils.read_compose()

    # Unless --force is used, refuse to overwrite an existing service entry
    if not force:
        check_duplicate(config, name)

    # privileged + host networking give the container access to the
    # host's Bluetooth receiver. ./tilt is mounted for calibration files.
    config['services'][name] = {
        'image': 'brewblox/brewblox-tilt:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'privileged': True,
        'network_mode': 'host',
        'volumes': [{
            'type': 'bind',
            'source': f'./{name}',
            'target': '/share',
        }],
        'labels': [
            'traefik.enable=false',
        ],
    }

    sh(f'mkdir -p ./{name}')

    utils.write_compose(config)
    click.echo(f'Added Tilt service `{name}`.')
    click.echo('It will automatically show up in the UI.\n')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
Exemplo n.º 18
0
def _influx_measurements() -> List[str]:
    """
    Fetch all known measurements from Influx
    This requires an InfluxDB docker container with name 'influxdb-migrate'
    to have been started.
    """
    sudo = utils.optsudo()

    lines = utils.sh_stream(f'{sudo}docker exec influxdb-migrate influx '
                            '-database brewblox '
                            "-execute 'SHOW MEASUREMENTS' "
                            '-format csv')

    measurements = []
    skipped_header = False
    for line in lines:
        if not skipped_header:  # first line holds the CSV headers
            skipped_header = True
            continue
        stripped = line.strip()
        if stripped:
            # Second CSV column holds the measurement name
            measurements.append(stripped.split(',')[1])

    return measurements
Exemplo n.º 19
0
def pull(ctx, services):
    """Pull one or more services without doing a full update."""
    sudo = utils.optsudo()
    names = ' '.join(services)
    sh(f'{sudo}docker-compose pull {names}')
    restart_services(ctx)
Exemplo n.º 20
0
def run_flasher(release, args):
    """Run the firmware flasher image with the given arguments."""
    sudo = utils.optsudo()
    tag = utils.docker_tag(release)
    # Privileged + /dev mount: the flasher needs raw access to the serial device
    opts = '-it --rm --privileged -v /dev:/dev'
    sh(f'{sudo}docker run {opts} brewblox/firmware-flasher:{tag} {args}')
Exemplo n.º 21
0
def update(update_ctl, update_ctl_done, pull, update_system, migrate, prune,
           from_version):
    """Download and apply updates.

    This is the one-stop-shop for updating your Brewblox install.
    You can use any of the options to fine-tune the update by enabling or disabling subroutines.

    By default, all options are enabled.

    --update-ctl/--no-update-ctl: Whether to download and install new versions
    of brewblox-ctl. If this flag is set, update will download the new version
    and then restart itself. This way, the migrate is done with the latest version of brewblox-ctl.

    If you're using dry run mode, you'll notice the hidden option --update-ctl-done.
    You can use it to watch the rest of the update: it\'s a flag to avoid endless loops.

    --pull/--no-pull. Whether to pull docker images.
    This is useful if any of your services is using a local image (not from Docker Hub).

    --update-system/--no-update-system determines whether system packages are updated.

    --migrate/--no-migrate. Updates regularly require changes to configuration.
    Required changes are applied here.

    --prune/--no-prune. Updates to docker images can leave unused old versions
    on your system. These can be pruned to free up disk space.
    This includes all images and volumes on your system, and not just those created by Brewblox.

    \b
    Steps:
        - Check whether any system fixes must be applied.
        - Update brewblox-ctl.
        - Stop services.
        - Update Avahi config.
        - Update system packages.
        - Migrate configuration files.
        - Pull Docker images.
        - Prune unused Docker images and volumes.
        - Start services.
        - Migrate service configuration.
        - Write version number to .env file.
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()

    # Reject updates from unsupported versions before touching anything
    prev_version = StrictVersion(from_version)
    check_version(prev_version)

    if update_ctl and not update_ctl_done:
        utils.info('Updating brewblox-ctl...')
        utils.pip_install('pip')
        actions.install_ctl_package()
        # Restart update - we just replaced the source code
        sh(' '.join(
            ['python3 -m brewblox_ctl', *const.ARGS[1:], '--update-ctl-done']))
        return

    if update_ctl:
        actions.uninstall_old_ctl_package()
        actions.deploy_ctl_wrapper()

    utils.info('Stopping services...')
    sh(f'{sudo}docker-compose down')

    if update_system:
        actions.update_system_packages()

    if migrate:
        # Config migrations that must run while services are stopped
        downed_migrate(prev_version)

    if pull:
        utils.info('Pulling docker images...')
        sh(f'{sudo}docker-compose pull')

    if prune:
        utils.info('Pruning unused images...')
        sh(f'{sudo}docker image prune -f > /dev/null')
        utils.info('Pruning unused volumes...')
        sh(f'{sudo}docker volume prune -f > /dev/null')

    utils.info('Starting services...')
    sh(f'{sudo}docker-compose up -d')

    if migrate:
        # Migrations that need running services, then persist the new version
        upped_migrate(prev_version)
        utils.info(
            f'Configuration version: {prev_version} -> {const.CURRENT_VERSION}'
        )
        utils.setenv(const.CFG_VERSION_KEY, const.CURRENT_VERSION)
Exemplo n.º 22
0
def load(archive, load_env, load_compose, load_datastore, load_spark,
         load_node_red, load_mosquitto, load_tilt, update):
    """Load and apply Brewblox settings backup.

    This function uses files generated by `brewblox-ctl backup save` as input.
    You can use the --load-XXXX options to partially load the backup.

    This does not attempt to merge data: it will overwrite current docker-compose.yml,
    datastore entries, and Spark blocks.

    Blocks on Spark services not in the backup file will not be affected.

    If dry-run is enabled, it will echo all configuration from the backup archive.

    Steps:
        - Write .env
        - Read .env values
        - Write docker-compose.yml, run `docker-compose up`.
        - Write all datastore files found in backup.
        - Write all Spark blocks found in backup.
        - Write Node-RED config files found in backup.
        - Write Mosquitto config files found in backup.
        - Run brewblox-ctl update
    """
    utils.check_config()
    utils.confirm_mode()
    urllib3.disable_warnings()

    sudo = utils.optsudo()
    host_url = utils.host_url()
    store_url = utils.datastore_url()

    # Index the archive contents per restore category
    yaml = YAML()
    zipf = zipfile.ZipFile(archive, 'r', zipfile.ZIP_DEFLATED)
    available = zipf.namelist()
    redis_file = 'global.redis.json'
    couchdb_files = [v for v in available if v.endswith('.datastore.json')]
    spark_files = [v for v in available if v.endswith('.spark.json')]
    node_red_files = [v for v in available if v.startswith('node-red/')]
    mosquitto_files = [v for v in available if v.startswith('mosquitto/')]
    tilt_files = [v for v in available if v.startswith('tilt/')]

    if load_env and '.env' in available:
        utils.info('Loading .env file')
        # Stage in a temp file so the copy into place is atomic-ish
        with NamedTemporaryFile('w') as tmp:
            data = zipf.read('.env').decode()
            utils.info('Writing .env')
            utils.show_data('.env', data)
            tmp.write(data)
            tmp.flush()
            sh(f'cp -f {tmp.name} .env')

        utils.info('Reading .env values')
        load_dotenv(Path('.env').resolve())

    if load_compose:
        if 'docker-compose.yml' in available:
            utils.info('Loading docker-compose.yml')
            config = yaml.load(zipf.read('docker-compose.yml').decode())
            # Older services may still depend on the `datastore` service
            # The `depends_on` config is useless anyway in a brewblox system
            for svc in config['services'].values():
                with suppress(KeyError):
                    del svc['depends_on']
            utils.write_compose(config)
            sh(f'{sudo}docker-compose up -d')
        else:
            utils.info('docker-compose.yml file not found in backup archive')

    if load_datastore:
        if redis_file in available or couchdb_files:
            utils.info('Waiting for the datastore...')
            sh(f'{const.CLI} http wait {store_url}/ping')
            # Wipe UI/Automation, but leave Spark files
            mdelete_cmd = '{} http post {}/mdelete --quiet -d \'{{"namespace":"{}", "filter":"*"}}\''
            sh(mdelete_cmd.format(const.CLI, store_url, 'brewblox-ui-store'))
            sh(mdelete_cmd.format(const.CLI, store_url, 'brewblox-automation'))
        else:
            utils.info('No datastore files found in backup archive')

        if redis_file in available:
            data = json.loads(zipf.read(redis_file).decode())
            utils.info(
                f'Loading {len(data["values"])} entries from Redis datastore')
            mset(data)

        # Backwards compatibility for UI/automation files from CouchDB
        # The IDs here are formatted as {moduleId}__{objId}
        # The module ID becomes part of the Redis namespace
        for db in ['brewblox-ui-store', 'brewblox-automation']:
            fname = f'{db}.datastore.json'
            if fname not in available:
                continue
            docs = json.loads(zipf.read(fname).decode())
            # Drop invalid names (not prefixed with module ID)
            docs[:] = [d for d in docs if len(d['_id'].split('__', 1)) == 2]
            # Add namespace / ID fields
            for d in docs:
                segments = d['_id'].split('__', 1)
                d['namespace'] = f'{db}:{segments[0]}'
                d['id'] = segments[1]
                del d['_id']
            utils.info(f'Loading {len(docs)} entries from database `{db}`')
            mset({'values': docs})

        # Backwards compatibility for Spark service files
        # There is no module ID field here
        spark_db = 'spark-service'
        spark_fname = f'{spark_db}.datastore.json'
        if spark_fname in available:
            docs = json.loads(zipf.read(spark_fname).decode())
            for d in docs:
                d['namespace'] = spark_db
                d['id'] = d['_id']
                del d['_id']
            utils.info(
                f'Loading {len(docs)} entries from database `{spark_db}`')
            mset({'values': docs})

    if load_spark:
        sudo = utils.optsudo()

        if not spark_files:
            utils.info('No Spark files found in backup archive')

        # Push each block backup to its service, then restart the service
        for f in spark_files:
            spark = f[:-len('.spark.json')]
            utils.info(f'Writing blocks to Spark service `{spark}`')
            with NamedTemporaryFile('w') as tmp:
                data = json.loads(zipf.read(f).decode())
                utils.show_data(spark, data)
                json.dump(data, tmp)
                tmp.flush()
                sh(f'{const.CLI} http post {host_url}/{spark}/blocks/backup/load -f {tmp.name}'
                   )
                sh(f'{sudo}docker-compose restart {spark}')

    if load_node_red and node_red_files:
        # Node-RED data must be owned by UID/GID 1000:1000;
        # sudo is needed for the chown/copy when the current user differs
        sudo = ''
        if [getgid(), getuid()] != [1000, 1000]:
            sudo = 'sudo '

        with TemporaryDirectory() as tmpdir:
            zipf.extractall(tmpdir, members=node_red_files)
            sh('mkdir -p ./node-red')
            sh(f'{sudo}chown 1000:1000 ./node-red/')
            sh(f'{sudo}chown -R 1000:1000 {tmpdir}')
            sh(f'{sudo}cp -rfp {tmpdir}/node-red/* ./node-red/')

    if load_mosquitto and mosquitto_files:
        zipf.extractall(members=mosquitto_files)

    if load_tilt and tilt_files:
        zipf.extractall(members=tilt_files)

    zipf.close()

    if update:
        utils.info('Updating brewblox...')
        sh(f'{const.CLI} update')

    utils.info('Done!')
Exemplo n.º 23
0
def migrate_couchdb():
    """Migrate datastore contents from CouchDB to Redis.

    Starts a temporary CouchDB container backed by the old ./couchdb/ data dir,
    copies all documents to the Redis datastore,
    and renames the old data dir when done.
    """
    urllib3.disable_warnings()
    sudo = utils.optsudo()
    opts = utils.ctx_opts()
    redis_url = utils.datastore_url()
    couch_url = 'http://localhost:5984'

    utils.info('Migrating datastore from CouchDB to Redis...')

    if opts.dry_run:
        utils.info('Dry run. Skipping migration...')
        return

    # Nothing to do if there is no old CouchDB data
    if not utils.path_exists('./couchdb/'):
        utils.info('couchdb/ dir not found. Skipping migration...')
        return

    utils.info('Starting a temporary CouchDB container on port 5984...')
    # Remove leftovers from a previously aborted migration
    sh(f'{sudo}docker rm -f couchdb-migrate', check=False)
    sh(f'{sudo}docker run --rm -d'
       ' --name couchdb-migrate'
       ' -v "$(pwd)/couchdb/:/opt/couchdb/data/"'
       ' -p "5984:5984"'
       ' treehouses/couchdb:2.3.1')
    # Wait until both source and destination respond
    sh(f'{const.CLI} http wait {couch_url}')
    sh(f'{const.CLI} http wait {redis_url}/ping')

    resp = requests.get(f'{couch_url}/_all_dbs')
    resp.raise_for_status()
    dbs = resp.json()

    # UI/automation doc IDs are formatted as {moduleId}__{objId}.
    # The module ID becomes part of the Redis namespace.
    for db in ['brewblox-ui-store', 'brewblox-automation']:
        if db in dbs:
            resp = requests.get(f'{couch_url}/{db}/_all_docs',
                                params={'include_docs': True})
            resp.raise_for_status()
            docs = [v['doc'] for v in resp.json()['rows']]
            # Drop invalid names
            docs[:] = [d for d in docs if len(d['_id'].split('__', 1)) == 2]
            for d in docs:
                segments = d['_id'].split('__', 1)
                d['namespace'] = f'{db}:{segments[0]}'
                d['id'] = segments[1]
                del d['_rev']
                del d['_id']
            resp = requests.post(f'{redis_url}/mset',
                                 json={'values': docs},
                                 verify=False)
            resp.raise_for_status()
            utils.info(f'Migrated {len(docs)} entries from {db}')

    # Spark docs have no module ID prefix; namespace is fixed
    if 'spark-service' in dbs:
        resp = requests.get(f'{couch_url}/spark-service/_all_docs',
                            params={'include_docs': True})
        resp.raise_for_status()
        docs = [v['doc'] for v in resp.json()['rows']]
        for d in docs:
            d['namespace'] = 'spark-service'
            d['id'] = d['_id']
            del d['_rev']
            del d['_id']
        resp = requests.post(f'{redis_url}/mset',
                             json={'values': docs},
                             verify=False)
        resp.raise_for_status()
        utils.info(f'Migrated {len(docs)} entries from spark-service')

    # Stop the temporary container, and archive the old data dir
    sh(f'{sudo}docker stop couchdb-migrate')
    sh('sudo mv couchdb/ couchdb-migrated-' +
       datetime.now().strftime('%Y%m%d'))
Exemplo n.º 24
0
def migrate_influxdb(
    target: str = 'victoria',
    duration: str = '',
    services: Optional[List[str]] = None,
    offsets: Optional[List[Tuple[str, int]]] = None,
):
    """Exports InfluxDB history data.

    The exported data is either immediately imported to the new history database,
    or saved to file.

    Args:
        target: Export destination: 'victoria' imports directly,
            'file' writes line-protocol files.
        duration: Optional Influx duration (eg. '30d') limiting the export range.
            Empty string exports everything.
        services: Measurements to export. All measurements are exported when empty.
        offsets: (service, line offset) pairs, used to resume a migration.
    """
    # Fix for the mutable-default-argument pitfall:
    # defaults are None sentinels, normalized to fresh lists here.
    services = list(services) if services is not None else []
    offsets = list(offsets) if offsets is not None else []

    opts = utils.ctx_opts()
    sudo = utils.optsudo()
    date = datetime.now().strftime('%Y%m%d_%H%M')

    utils.warn('Depending on the amount of data, this may take some hours.')
    utils.warn(
        'You can use your system as normal while the migration is in progress.'
    )
    utils.warn('The migration can safely be stopped and restarted or resumed.')
    utils.warn(
        'For more info, see https://brewblox.netlify.app/dev/migration/influxdb.html'
    )

    if opts.dry_run:
        utils.info('Dry run. Skipping migration...')
        return

    # Nothing to do if there is no old InfluxDB data
    if not utils.path_exists('./influxdb/'):
        utils.info('influxdb/ dir not found. Skipping migration...')
        return

    utils.info('Starting InfluxDB container...')

    # Stop container in case previous migration was cancelled
    sh(f'{sudo}docker stop influxdb-migrate > /dev/null', check=False)

    # Start standalone container
    # We'll communicate using 'docker exec', so no need to publish a port
    sh(f'{sudo}docker run '
       '--rm -d '
       '--name influxdb-migrate '
       '-v "$(pwd)/influxdb:/var/lib/influxdb" '
       'influxdb:1.8 '
       '> /dev/null')

    # Do a health check until startup is done
    inner_cmd = 'curl --output /dev/null --silent --fail http://localhost:8086/health'
    bash_cmd = f'until $({inner_cmd}); do sleep 1 ; done'
    sh(f"{sudo}docker exec influxdb-migrate bash -c '{bash_cmd}'")

    # Determine relevant measurement
    # Export all of them if not specified by user
    if not services:
        services = _influx_measurements()

    utils.info(f'Exporting services: {", ".join(services)}')

    # Export data and import to target
    for svc in services:
        offset = next((v for v in offsets if v[0] == svc), ('default', 0))[1]
        _copy_influx_measurement(svc, date, duration, target, offset)

    # Stop migration container
    sh(f'{sudo}docker stop influxdb-migrate > /dev/null', check=False)
Exemplo n.º 25
0
def _copy_influx_measurement(
    service: str,
    date: str,
    duration: str,
    target: str,
    offset: int = 0,
):
    """
    Export measurement from Influx, and copy/import to `target`.
    This requires an InfluxDB docker container with name 'influxdb-migrate'
    to have been started.
    """
    QUERY_BATCH_SIZE = 5000
    FILE_BATCH_SIZE = 50000
    FILE_DIR = './influxdb-export'
    sudo = utils.optsudo()
    measurement = f'"brewblox"."downsample_1m"."{service}"'
    # Optional time filter; empty string exports everything
    args = f'where time > now() - {duration}' if duration else ''

    total_lines = _influx_line_count(service, args)
    offset = max(offset, 0)
    offset -= (offset % QUERY_BATCH_SIZE
               )  # Round down to multiple of batch size
    num_lines = offset

    if target == 'file':
        sh(f'mkdir -p {FILE_DIR}')

    # No (countable) data -> nothing to export
    if total_lines is None:
        return

    while True:
        # Fetch the next batch as CSV. Only the first batch uses OFFSET;
        # later iterations reset offset to 0 and filter on time instead.
        generator = utils.sh_stream(
            f'{sudo}docker exec influxdb-migrate influx '
            '-database brewblox '
            f"-execute 'SELECT * FROM {measurement} {args} ORDER BY time LIMIT {QUERY_BATCH_SIZE} OFFSET {offset}' "
            '-format csv')

        headers = next(generator, '').strip()
        time = None

        # No header line means an empty result set: export is done
        if not headers:
            return

        fields = [
            f[2:].replace(' ', '\\ ')  # Remove 'm_' prefix and escape spaces
            for f in headers.split(',')[2:]  # Ignore 'name' and 'time' columns
        ]

        with NamedTemporaryFile('w') as tmp:
            for line in generator:
                if not line:
                    continue

                num_lines += 1
                values = line.strip().split(',')
                name = values[0]
                time = values[1]

                # Influx line protocol:
                # MEASUREMENT k1=1,k2=2,k3=3 TIMESTAMP
                tmp.write(f'{name} ')
                tmp.write(','.join(
                    (f'{f}={v}' for f, v in zip(fields, values[2:]) if v)))
                tmp.write(f' {time}\n')

            tmp.flush()

            if target == 'victoria':
                with open(tmp.name, 'rb') as rtmp:
                    url = f'{utils.host_url()}/victoria/write'
                    urllib3.disable_warnings()
                    # NOTE(review): this sends a GET with a request body to a
                    # write endpoint, and never checks the response.
                    # Verify whether this should be requests.post() with
                    # raise_for_status() - a silently dropped body would make
                    # the migration appear to succeed without importing data.
                    requests.get(url, data=rtmp, verify=False)

            elif target == 'file':
                idx = str(offset // FILE_BATCH_SIZE + 1).rjust(3, '0')
                fname = f'{FILE_DIR}/{service}__{date}__{duration or "all"}__{idx}.lines'
                sh(f'cat "{tmp.name}" >> "{fname}"')

            else:
                raise ValueError(f'Invalid target: {target}')

        # Resume the next batch from the last seen timestamp
        offset = 0
        args = f'where time > {time}'
        utils.info(f'{service}: exported {num_lines}/{total_lines} lines')
Exemplo n.º 26
0
def log(add_compose, add_system, upload):
    """Generate and share log file for bug reports.

    This command generates a comprehensive report on current system state and logs.
    When reporting bugs, a termbin link to the output is often the first thing asked for.

    For best results, run when the services are still active.
    Service logs are discarded after `brewblox-ctl down`.

    Care is taken to prevent accidental leaks of confidential information.
    Only known variables are read from .env,
    and the `--no-add-compose` flag allows skipping compose configuration.
    The latter is useful if the configuration contains passwords or tokens.

    To review or edit the output, use the `--no-upload` flag.
    The output will include instructions on how to manually upload the file.

    \b
    Steps:
        - Create ./brewblox.log file.
        - Append Brewblox .env variables.
        - Append software version info.
        - Append service logs.
        - Append content of docker-compose.yml (optional).
        - Append content of docker-compose.shared.yml (optional).
        - Append blocks from Spark services.
        - Append system diagnostics.
        - Upload file to termbin.com for shareable link (optional).
    """
    utils.check_config()
    utils.confirm_mode()
    sudo = utils.optsudo()

    # Create log
    utils.info(f"Log file: {Path('./brewblox.log').resolve()}")
    create()
    append('date')

    # Add .env values (only the known ENV_KEYS, to avoid leaking secrets)
    utils.info('Writing Brewblox .env values...')
    header('.env')
    for key in ENV_KEYS:
        append(f'echo "{key}={utils.getenv(key)}"')

    # Add version info
    utils.info('Writing software version info...')
    header('Versions')
    append('uname -a')
    append('python3 --version')
    append(f'{sudo}docker --version')
    append(f'{sudo}docker-compose --version')

    # Add active containers
    utils.info('Writing active containers...')
    header('Containers')
    append(f'{sudo}docker-compose ps -a')

    # Add service logs.
    # Reading the compose files can fail; in that case the error is logged
    # and the report continues.
    try:
        config_names = list(utils.read_compose()['services'].keys())
        shared_names = list(utils.read_shared_compose()['services'].keys())
        names = [n for n in config_names if n not in shared_names] + shared_names
        for name in names:
            utils.info(f'Writing {name} service logs...')
            header(f'Service: {name}')
            append(f'{sudo}docker-compose logs --timestamps --no-color --tail 200 {name}')
    except Exception as ex:
        append('echo ' + shlex.quote(type(ex).__name__ + ': ' + str(ex)))

    # Add compose config
    if add_compose:
        utils.info('Writing docker-compose configuration...')
        header('docker-compose.yml')
        append('cat docker-compose.yml')
        header('docker-compose.shared.yml')
        append('cat docker-compose.shared.yml')
    else:
        utils.info('Skipping docker-compose configuration...')

    # Add blocks from each Spark service
    host_url = utils.host_url()
    services = utils.list_services('brewblox/brewblox-devcon-spark')
    for svc in services:
        utils.info(f'Writing {svc} blocks...')
        header(f'Blocks: {svc}')
        append(f'{const.CLI} http post --pretty {host_url}/{svc}/blocks/all/read')

    # Add system diagnostics
    if add_system:
        utils.info('Writing system diagnostics...')
        header('docker info')
        append(f'{sudo}docker info')
        header('journalctl -u docker')
        append('sudo journalctl -u docker | tail -100')
        header('journalctl -u avahi-daemon')
        append('sudo journalctl -u avahi-daemon | tail -100')
        header('disk usage')
        append('df -hl')
        header('/var/log/syslog')
        append('sudo tail -n 500 /var/log/syslog')
        header('dmesg')
        append('dmesg -T')
    else:
        utils.info('Skipping system diagnostics...')

    # Upload
    if upload:
        click.echo(utils.file_netcat('termbin.com', 9999, Path('./brewblox.log')).decode())
    else:
        utils.info('Skipping upload. If you want to manually upload the log, run: ' +
                   click.style('brewblox-ctl termbin ./brewblox.log', fg='green'))
Exemplo n.º 27
0
def add_spark(name,
              discover_now,
              device_id,
              discovery_type,
              device_host,
              command,
              force,
              release,
              simulation):
    """
    Create or update a Spark service.

    If you run brewblox-ctl add-spark without any arguments,
    it will prompt you for required info, and then create a sensibly configured service.

    If you want to fine-tune your service configuration, multiple arguments are available.

    For a detailed explanation: https://brewblox.netlify.com/user/connect_settings.html
    """
    # utils.check_config()
    utils.confirm_mode()

    spark_image = 'brewblox/brewblox-devcon-spark'
    sudo = utils.optsudo()
    config = utils.read_compose()

    if not force:
        check_duplicate(config, name)

    # Warn for every *other* Spark service that has no connection settings at all.
    # Such a service connects to any controller it can find, which may conflict
    # with the service we are about to add.
    for other_name, other_svc in config['services'].items():
        other_img = other_svc.get('image', '')
        other_cmd = other_svc.get('command', '')
        is_unbound_spark = (other_name != name
                            and other_img.startswith(spark_image)
                            and '--device-id' not in other_cmd
                            and '--device-host' not in other_cmd
                            and '--simulation' not in other_cmd)
        if is_unbound_spark:
            utils.warn(f'The existing Spark service `{other_name}` does not have any connection settings.')
            utils.warn('It will connect to any controller it can find.')
            utils.warn('This may cause multiple services to connect to the same controller.')
            utils.warn(f'To reconfigure `{other_name}`, please run:')
            utils.warn('')
            utils.warn(f'    brewblox-ctl add-spark -f --name {other_name}')
            utils.warn('')
            utils.select('Press ENTER to continue or Ctrl-C to exit')

    # Resolve the device ID up front when discovery was requested.
    if discover_now and not simulation and not device_id:
        dev = (find_device_by_host(device_host)
               if device_host
               else choose_device(discovery_type, config))

        if not dev:
            # We have no device ID, and no device host. Avoid a wildcard service
            click.echo('No valid combination of device ID and device host.')
            raise SystemExit(1)

        device_id = dev['id']

    # Assemble the service command line from the selected options.
    service_args = [
        f'--name={name}',
        f'--discovery={discovery_type}',
    ]
    if device_id:
        service_args.append(f'--device-id={device_id}')
    if device_host:
        service_args.append(f'--device-host={device_host}')
    if simulation:
        service_args.append('--simulation')
    if command:
        service_args.append(command)

    service = {
        'image': f'{spark_image}:{utils.docker_tag(release)}',
        'privileged': True,
        'restart': 'unless-stopped',
        'command': ' '.join(service_args)
    }

    if simulation:
        # Simulators persist their state in a bind-mounted host directory.
        mount_dir = f'simulator__{name}'
        service['volumes'] = [{
            'type': 'bind',
            'source': f'./{mount_dir}',
            'target': '/app/simulator'
        }]
        sh(f'mkdir -m 777 -p {mount_dir}')

    config['services'][name] = service
    utils.write_compose(config)
    click.echo(f'Added Spark service `{name}`.')
    click.echo('It will automatically show up in the UI.\n')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
Exemplo n.º 28
0
def install(ctx: click.Context, snapshot_file):
    """Install Brewblox and its dependencies.

    Brewblox can be installed multiple times on the same computer.
    Settings and databases are stored in a Brewblox directory.

    This command also installs system-wide dependencies.
    A reboot is required after installing docker, or adding the user to the 'docker' group.

    By default, `brewblox-ctl install` attempts to download packages using the apt package manager.
    If you are using a system without apt (eg. Synology NAS), this step will be skipped.
    You will need to manually install any missing libraries.

    When using the `--snapshot ARCHIVE` option, no dir is created.
    Instead, the directory in the snapshot is extracted.
    It will be renamed to the desired name of the Brewblox directory.

    \b
    Steps:
        - Ask confirmation for installation steps.
        - Install apt packages.
        - Install docker.
        - Add user to 'docker' group.
        - Fix host IPv6 settings.
        - Disable host-wide mDNS reflection.
        - Set variables in .env file.
        - If snapshot provided:
            - Load configuration from snapshot.
        - Else:
            - Check for port conflicts.
            - Create docker-compose configuration files.
            - Create datastore (Redis) directory.
            - Create history (Victoria) directory.
            - Create gateway (Traefik) directory.
            - Create SSL certificates.
            - Create eventbus (Mosquitto) directory.
            - Set version number in .env file.
        - Pull docker images.
        - Reboot if needed.
    """
    utils.confirm_mode()
    user = utils.getenv('USER')
    opts = InstallOptions()

    # Decide which of the optional install steps below should run.
    opts.check_confirm_opts()
    opts.check_system_opts()
    opts.check_docker_opts()
    opts.check_reboot_opts()

    # Directory init questions are only relevant when not restoring a snapshot.
    if not snapshot_file:
        opts.check_init_opts()

    # Install Apt packages
    if opts.apt_install:
        utils.info('Installing apt packages...')
        apt_deps = ' '.join(const.APT_DEPENDENCIES)
        sh([
            'sudo apt-get update',
            'sudo apt-get upgrade -y',
            f'sudo apt-get install -y {apt_deps}',
        ])
    else:
        utils.info('Skipped: apt-get install.')

    # Install docker
    # check=False: a non-zero exit of the convenience script does not abort the install.
    if opts.docker_install:
        utils.info('Installing docker...')
        sh('curl -sL get.docker.com | sh', check=False)
    else:
        utils.info('Skipped: docker install.')

    # Add user to 'docker' group
    # Note: `$USER` is expanded by the shell; the `user` variable above is only
    # used in the log messages.
    if opts.docker_group_add:
        utils.info(f"Adding {user} to 'docker' group...")
        sh('sudo usermod -aG docker $USER')
    else:
        utils.info(f"Skipped: adding {user} to 'docker' group.")

    # Always apply actions
    actions.disable_ssh_accept_env()
    actions.fix_ipv6(None, False)
    actions.edit_avahi_config()
    actions.add_particle_udev_rules()
    actions.uninstall_old_ctl_package()
    actions.deploy_ctl_wrapper()

    # Set variables in .env file
    # Set version number to 0.0.0 until snapshot load / init is done
    utils.info('Setting .env values...')
    utils.setenv(const.CFG_VERSION_KEY, '0.0.0')
    utils.setenv(const.SKIP_CONFIRM_KEY, str(opts.skip_confirm))
    # Keep any values already present in .env; only fill in missing defaults.
    for key, default_val in const.ENV_DEFAULTS.items():
        utils.setenv(key, utils.getenv(key, default_val))

    # Install process splits here
    # Either load all config files from snapshot or run init
    sudo = utils.optsudo()
    if snapshot_file:
        ctx.invoke(snapshot.load, file=snapshot_file)
    else:
        release = utils.getenv('BREWBLOX_RELEASE')

        utils.info('Checking for port conflicts...')
        actions.check_ports()

        utils.info('Copying docker-compose.shared.yml...')
        sh(f'cp -f {const.CONFIG_DIR}/docker-compose.shared.yml ./')

        if opts.init_compose:
            utils.info('Copying docker-compose.yml...')
            sh(f'cp -f {const.CONFIG_DIR}/docker-compose.yml ./')

        # Stop after we're sure we have a compose file
        utils.info('Stopping services...')
        sh(f'{sudo}docker-compose down')

        # Each init step wipes and recreates its service's data directory.
        if opts.init_datastore:
            utils.info('Creating datastore directory...')
            sh('sudo rm -rf ./redis/; mkdir ./redis/')

        if opts.init_history:
            utils.info('Creating history directory...')
            sh('sudo rm -rf ./victoria/; mkdir ./victoria/')

        if opts.init_gateway:
            utils.info('Creating gateway directory...')
            sh('sudo rm -rf ./traefik/; mkdir ./traefik/')

            utils.info('Creating SSL certificate...')
            actions.makecert('./traefik', release)

        if opts.init_eventbus:
            utils.info('Creating mosquitto config directory...')
            sh('sudo rm -rf ./mosquitto/; mkdir ./mosquitto/')

        # Always copy cert config to traefik dir
        sh(f'cp -f {const.CONFIG_DIR}/traefik-cert.yaml ./traefik/')

        # Init done - now set CFG version
        utils.setenv(const.CFG_VERSION_KEY, const.CURRENT_VERSION)

    if opts.docker_pull:
        utils.info('Pulling docker images...')
        sh(f'{sudo}docker-compose pull')

    utils.info('All done!')

    # Reboot
    if opts.reboot_needed:
        if opts.prompt_reboot:
            utils.info('Press ENTER to reboot.')
            input()
        else:
            utils.info('Rebooting in 10 seconds...')
            sleep(10)
        sh('sudo reboot')