def add_plaato(name, token, force):
    """
    Create a service for the Plaato airlock.

    This will periodically query the Plaato server for current state. An
    authentication token is required.

    See https://plaato.io/apps/help-center#!hc-auth-token on how to get one.
    """
    utils.check_config()
    utils.confirm_mode()

    sudo = utils.optsudo()
    config = utils.read_compose()

    # Abort if the service name is already taken, unless --force was given
    if not force:
        check_duplicate(config, name)

    service = {
        'image': 'brewblox/brewblox-plaato:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'environment': {
            'PLAATO_AUTH': token,
        },
        'command': f'--name={name}',
    }
    config['services'][name] = service
    utils.write_compose(config)

    click.echo(f'Added Plaato service `{name}`.')
    click.echo(
        'This service publishes history data, but does not have a UI component.'
    )
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
def add_node_red(force):
    """
    Create a service for Node-RED.
    """
    utils.check_config()
    utils.confirm_mode()

    name = 'node-red'
    sudo = utils.optsudo()
    host = utils.host_ip()
    port = utils.getenv(const.HTTPS_PORT_KEY)
    config = utils.read_compose()

    # Abort if the service name is already taken, unless --force was given
    if not force:
        check_duplicate(config, name)

    config['services'][name] = {
        'image': 'brewblox/node-red:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'volumes': [f'./{name}:/data'],
    }

    sh(f'mkdir -p ./{name}')
    # The node-red container runs as UID/GID 1000; fix ownership if we differ
    if (getgid(), getuid()) != (1000, 1000):
        sh(f'sudo chown -R 1000:1000 ./{name}')

    utils.write_compose(config)
    click.echo(f'Added Node-RED service `{name}`.')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
        click.echo(
            f'Visit https://{host}:{port}/{name} in your browser to load the editor.'
        )
def apply_shared():
    """Copy docker-compose.shared.yml from the data directory into the cwd.

    Also syncs the compose file format version of the user config
    with that of the shared config.
    """
    sh(f'cp -f {const.CONFIG_DIR}/docker-compose.shared.yml ./')
    shared_config = utils.read_shared_compose()
    user_config = utils.read_compose()
    user_config['version'] = shared_config['version']
    utils.write_compose(user_config)
def apply_config():
    """Copy system-defined configuration files from the config dir.

    Installs the traefik certificate config and docker-compose.shared.yml,
    then syncs the compose file format version of the user config
    with that of the shared config.
    """
    sh(f'cp -f {const.CONFIG_DIR}/traefik-cert.yaml ./traefik/')
    sh(f'cp -f {const.CONFIG_DIR}/docker-compose.shared.yml ./')
    shared_config = utils.read_shared_compose()
    user_config = utils.read_compose()
    user_config['version'] = shared_config['version']
    utils.write_compose(user_config)
def downed_migrate(prev_version):
    """Migration commands to be executed without any running services.

    Steps are ordered oldest-to-newest and gated on `prev_version`,
    so a system skipping multiple releases runs every applicable step.
    """
    if prev_version < StrictVersion('0.2.0'):
        # Breaking changes: Influx downsampling model overhaul
        # Old data is completely incompatible
        utils.select(
            'Upgrading to version >=0.2.0 requires a complete reset of your history data. ' +
            "We'll be deleting it now")
        sh('sudo rm -rf ./influxdb')

    if prev_version < StrictVersion('0.3.0'):
        # Splitting compose configuration between docker-compose and docker-compose.shared.yml
        # Version pinning (0.2.2) will happen automatically
        utils.info('Moving system services to docker-compose.shared.yml...')
        config = utils.read_compose()
        # System-managed services: these move to the shared compose file,
        # only user-added services remain in docker-compose.yml
        sys_names = [
            'mdns',
            'eventbus',
            'influx',
            'datastore',
            'history',
            'ui',
            'traefik',
        ]
        usr_config = {
            'version': config['version'],
            'services': {
                key: svc
                for (key, svc) in config['services'].items()
                if key not in sys_names
            }
        }
        utils.write_compose(usr_config)

    if prev_version < StrictVersion('0.6.0'):
        # The datastore service is gone
        # Older services may still rely on it
        utils.info('Removing `depends_on` fields from docker-compose.yml...')
        config = utils.read_compose()
        for svc in config['services'].values():
            with suppress(KeyError):
                del svc['depends_on']
        utils.write_compose(config)

        # Init dir. It will be filled during upped_migrate
        utils.info('Creating redis/ dir...')
        sh('mkdir -p redis/')

    # Ensure all expected .env variables exist; keep current values if set
    utils.info('Checking .env variables...')
    for (key, default_value) in const.ENV_DEFAULTS.items():
        current_value = utils.getenv(key)
        if current_value is None:
            utils.setenv(key, default_value)
def migrate_compose_datastore():
    """Strip obsolete `depends_on` fields and prepare the Redis data dir."""
    # The couchdb datastore service is gone
    # Older services may still rely on it
    utils.info('Removing `depends_on` fields from docker-compose.yml...')
    config = utils.read_compose()
    for svc in config['services'].values():
        # pop() is a no-op when the key is absent
        svc.pop('depends_on', None)
    utils.write_compose(config)

    # Init dir. It will be filled during upped_migrate
    utils.info('Creating redis/ dir...')
    sh('mkdir -p redis/')
def remove(ctx, name):
    """Remove a service.

    Deletes the named service from docker-compose.yml and restarts
    the remaining services. Prints an error if the service does not exist.
    """
    utils.check_config()
    utils.confirm_mode()
    config = utils.read_compose()
    # Keep the try body minimal: the original wrapped write_compose() and
    # restart_services() too, so an unrelated KeyError raised there would be
    # silently misreported as "Service not found".
    try:
        del config['services'][name]
    except KeyError:
        click.echo("Service '{}' not found".format(name))
        return
    utils.info("Removing service '{}'".format(name))
    utils.write_compose(config)
    restart_services(ctx)
def remove(ctx, services):
    """Remove a service."""
    utils.check_config()
    utils.confirm_mode()

    config = utils.read_compose()
    for name in services:
        # LBYL variant of the original try/except KeyError
        if name in config['services']:
            del config['services'][name]
            utils.info(f"Removed service '{name}'")
        else:
            utils.warn(f"Service '{name}' not found")

    # Only write and restart when at least one name was given,
    # even if none of them matched an existing service
    if services:
        utils.write_compose(config)
        restart_services(ctx, compose_args=['--remove-orphans'])
def expose(ctx, delete, service, value):
    """Add exposed port to docker-compose.yml for backend service"""
    config = utils.read_compose()
    ports = nested_setdefault(config, [('services', {}), (service, {}), ('ports', [])])

    # Already in the desired state:
    # - adding, and the port is present
    # - deleting, and the port is absent
    exposed = value in ports
    if exposed != delete:
        return

    if delete:
        ports.remove(value)
    else:
        ports.append(value)

    # Drop services that are now empty dicts
    config['services'] = clean_empty(config['services'])
    utils.write_compose(config)
    restart_services(ctx)
def downed_migrate(prev_version):
    """Migration commands to be executed without any running services.

    Steps are ordered oldest-to-newest and gated on `prev_version`,
    so a system skipping multiple releases runs every applicable step.
    """
    if prev_version < StrictVersion('0.2.0'):
        # Breaking changes: Influx downsampling model overhaul
        # Old data is completely incompatible
        utils.select(
            'Upgrading to version >=0.2.0 requires a complete reset of your history data. ' +
            "We'll be deleting it now")
        sh('sudo rm -rf ./influxdb')

    if prev_version < StrictVersion('0.3.0'):
        # Splitting compose configuration between docker-compose and docker-compose.shared.yml
        # Version pinning (0.2.2) will happen automatically
        utils.info('Moving system services to docker-compose.shared.yml')
        config = utils.read_compose()
        # System-managed services: these move to the shared compose file,
        # only user-added services remain in docker-compose.yml
        sys_names = [
            'mdns',
            'eventbus',
            'influx',
            'datastore',
            'history',
            'ui',
            'traefik',
        ]
        usr_config = {
            'version': config['version'],
            'services': {
                key: svc
                for (key, svc) in config['services'].items()
                if key not in sys_names
            }
        }
        utils.write_compose(usr_config)

    # Persist all known .env variables, using current values where already set
    utils.info('Writing env values for all variables')
    for key in [
        const.COMPOSE_FILES_KEY,
        const.RELEASE_KEY,
        const.HTTP_PORT_KEY,
        const.HTTPS_PORT_KEY,
        const.MDNS_PORT_KEY,
    ]:
        utils.setenv(key, utils.getenv(key, const.ENV_DEFAULTS[key]))
def add_tilt(force):
    """
    Create a service for the Tilt hydrometer.

    The service listens for Bluetooth status updates from the Tilt,
    and requires the host to have a Bluetooth receiver.

    The empty ./tilt dir is created to hold calibration files.
    """
    utils.check_config()
    utils.confirm_mode()

    name = 'tilt'
    sudo = utils.optsudo()
    config = utils.read_compose()

    # Abort if the service name is already taken, unless --force was given
    if not force:
        check_duplicate(config, name)

    service = {
        'image': 'brewblox/brewblox-tilt:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        # Bluetooth access requires privileged mode and host networking
        'privileged': True,
        'network_mode': 'host',
        'volumes': [f'./{name}:/share'],
        # Host networking: keep this service out of the traefik gateway
        'labels': ['traefik.enable=false'],
    }
    config['services'][name] = service

    sh(f'mkdir -p ./{name}')
    utils.write_compose(config)

    click.echo(f'Added Tilt service `{name}`.')
    click.echo('It will automatically show up in the UI.\n')
    if utils.confirm('Do you want to run `brewblox-ctl up` now?'):
        sh(f'{sudo}docker-compose up -d')
def migrate_compose_split():
    """Move system services out of docker-compose.yml.

    Splits compose configuration between docker-compose
    and docker-compose.shared.yml.
    Version pinning (0.2.2) will happen automatically.
    """
    utils.info('Moving system services to docker-compose.shared.yml...')
    config = utils.read_compose()
    # System-managed service names; anything else was added by the user
    system_names = {
        'mdns',
        'eventbus',
        'influx',
        'datastore',
        'history',
        'ui',
        'traefik',
    }
    user_services = {
        key: svc
        for (key, svc) in config['services'].items()
        if key not in system_names
    }
    utils.write_compose({
        'version': config['version'],
        'services': user_services,
    })
def add_plaato(name, token, force):
    """
    Create a service for the Plaato airlock.

    This will periodically query the Plaato server for current state. An
    authentication token is required.

    See https://plaato.io/apps/help-center#!hc-auth-token on how to get one.
    """
    utils.check_config()
    utils.confirm_mode()

    sudo = utils.optsudo()
    config = utils.read_compose()

    # Refuse to overwrite an existing service unless --force was given
    if name in config['services'] and not force:
        click.echo(f'Service "{name}" already exists. Use the --force flag if you want to overwrite it')
        raise SystemExit(1)

    config['services'][name] = {
        'image': 'brewblox/brewblox-plaato:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'environment': {
            'PLAATO_AUTH': token,
        },
        'command': f'--name={name}',
    }
    utils.write_compose(config)

    click.echo(f"Added Plaato service '{name}'.")
    click.echo(
        'This service publishes history data, but does not have a UI component.'
    )
    if utils.confirm("Do you want to run 'brewblox-ctl up' now?"):
        sh(f'{sudo}docker-compose up -d --remove-orphans')
def add_node_red():
    """
    Create a service for Node-RED.
    """
    utils.check_config()
    utils.confirm_mode()

    name = 'node-red'
    sudo = utils.optsudo()
    host = utils.host_ip()
    port = utils.getenv(const.HTTPS_PORT_KEY)
    config = utils.read_compose()

    # This variant has no --force flag: an existing service is a hard error
    if name in config['services']:
        click.echo(f'The {name} service already exists')
        raise SystemExit(1)

    config['services'][name] = {
        'image': 'brewblox/node-red:${BREWBLOX_RELEASE}',
        'restart': 'unless-stopped',
        'volumes': [f'./{name}:/data'],
    }

    sh(f'mkdir -p ./{name}')
    # The node-red container runs as UID/GID 1000; fix ownership if we differ
    if (getgid(), getuid()) != (1000, 1000):
        sh(f'sudo chown 1000:1000 ./{name}')

    utils.write_compose(config)
    click.echo(f"Added Node-RED service '{name}'.")
    if utils.confirm("Do you want to run 'brewblox-ctl up' now?"):
        sh(f'{sudo}docker-compose up -d --remove-orphans')
        click.echo(f'Visit https://{host}:{port}/{name} in your browser to load the editor.')
def add_spark(name, discover_now, device_id, discovery, device_host, command,
              force, release, simulation, discovery_release):
    """
    Create or update a Spark service.

    If you run brewblox-ctl add-spark without any arguments,
    it will prompt you for required info, and then create a sensibly configured service.

    If you want to fine-tune your service configuration, multiple arguments are available.

    For a detailed explanation: https://brewblox.netlify.com/user/connect_settings.html
    """
    utils.check_config()
    utils.confirm_mode()

    sudo = utils.optsudo()
    config = utils.read_compose()

    # Refuse to overwrite an existing service unless --force was given
    if name in config['services'] and not force:
        click.echo(
            'Service "{}" already exists. Use the --force flag if you want to overwrite it'
            .format(name))
        raise SystemExit(1)

    # Try to discover a controller when no device ID was given explicitly.
    # Simulation services never connect to hardware, so discovery is skipped.
    if device_id is None and discover_now and not simulation:
        dev = find_device(discovery, discovery_release, device_host)

        if dev:
            # NOTE(review): assumes find_device() returns a space-separated
            # string whose second field is the device ID — confirm with helper
            device_id = dev.split(' ')[1]
        elif device_host is None:
            # We have no device ID, and no device host. Avoid a wildcard service
            click.echo('No valid combination of device ID and device host.')
            raise SystemExit(1)

    # Assemble the service command line from the selected options
    commands = [
        '--name=' + name,
        '--mdns-port=${BREWBLOX_PORT_MDNS}',
        '--discovery=' + discovery,
    ]

    if device_id:
        commands += ['--device-id=' + device_id]

    if device_host:
        commands += ['--device-host=' + device_host]

    if simulation:
        commands += ['--simulation']

    if command:
        commands += [command]

    config['services'][name] = {
        'image': 'brewblox/brewblox-devcon-spark:{}'.format(utils.docker_tag(release)),
        'privileged': True,
        'restart': 'unless-stopped',
        'labels': [
            'traefik.port=5000',
            'traefik.frontend.rule=PathPrefix: /{}'.format(name),
        ],
        'command': ' '.join(commands)
    }

    if simulation:
        # Simulator state is persisted in a service-specific host dir
        volume_dir = 'simulator__{}'.format(name)
        config['services'][name]['volumes'] = [
            './{}:/app/simulator'.format(volume_dir)
        ]
        sh('mkdir -m 777 -p {}'.format(volume_dir))

    utils.write_compose(config)

    click.echo("Added Spark service '{}'.".format(name))
    click.echo('It will automatically show up in the UI.\n')
    if utils.confirm("Do you want to run 'brewblox-ctl up' now?"):
        sh('{}docker-compose up -d --remove-orphans'.format(sudo))
def add_spark(name, discover_now, device_id, discovery_type, device_host, command,
              force, release, simulation):
    """
    Create or update a Spark service.

    If you run brewblox-ctl add-spark without any arguments,
    it will prompt you for required info, and then create a sensibly configured service.

    If you want to fine-tune your service configuration, multiple arguments are available.

    For a detailed explanation: https://brewblox.netlify.com/user/connect_settings.html
    """
    utils.check_config()
    utils.confirm_mode()

    image_name = 'brewblox/brewblox-devcon-spark'
    sudo = utils.optsudo()
    config = utils.read_compose()

    # Refuse to overwrite an existing service unless --force was given
    if name in config['services'] and not force:
        click.echo(
            'Service "{}" already exists. Use the --force flag if you want to overwrite it'
            .format(name))
        raise SystemExit(1)

    # Warn about OTHER Spark services that have no connection settings at all:
    # not any([...]) is true only when `nm` is a different service, its image
    # is a Spark image, and its command has no --device-id / --device-host /
    # --simulation. Such a service connects to any controller it can find.
    for (nm, svc) in config['services'].items():
        img = svc.get('image', '')
        cmd = svc.get('command', '')
        if not any([
            nm == name,
            not img.startswith(image_name),
            '--device-id' in cmd,
            '--device-host' in cmd,
            '--simulation' in cmd,
        ]):
            utils.warn(
                "The existing Spark service '{}' does not have any connection settings."
                .format(nm))
            utils.warn('It will connect to any controller it can find.')
            utils.warn(
                'This may cause multiple services to connect to the same controller.'
            )
            utils.warn("To reconfigure '{}', please run:".format(nm))
            utils.warn('')
            utils.warn(' brewblox-ctl add-spark -f --name {}'.format(nm))
            utils.warn('')
            utils.select('Press ENTER to continue or Ctrl-C to exit')

    # Try to discover a controller when no device ID was given explicitly.
    # Simulation services never connect to hardware, so discovery is skipped.
    if device_id is None and discover_now and not simulation:
        dev = find_device(discovery_type, device_host)

        if dev:
            device_id = dev['id']
        elif device_host is None:
            # We have no device ID, and no device host. Avoid a wildcard service
            click.echo('No valid combination of device ID and device host.')
            raise SystemExit(1)

    # Assemble the service command line from the selected options
    commands = [
        '--name=' + name,
        '--discovery=' + discovery_type,
    ]

    if device_id:
        commands += ['--device-id=' + device_id]

    if device_host:
        commands += ['--device-host=' + device_host]

    if simulation:
        commands += ['--simulation']

    if command:
        commands += [command]

    config['services'][name] = {
        'image': '{}:{}'.format(image_name, utils.docker_tag(release)),
        'privileged': True,
        'restart': 'unless-stopped',
        'command': ' '.join(commands)
    }

    if simulation:
        # Simulator state is persisted in a service-specific host dir
        volume_dir = 'simulator__{}'.format(name)
        config['services'][name]['volumes'] = [
            './{}:/app/simulator'.format(volume_dir)
        ]
        sh('mkdir -m 777 -p {}'.format(volume_dir))

    utils.write_compose(config)

    click.echo("Added Spark service '{}'.".format(name))
    click.echo('It will automatically show up in the UI.\n')
    if utils.confirm("Do you want to run 'brewblox-ctl up' now?"):
        sh('{}docker-compose up -d --remove-orphans'.format(sudo))
def load(archive, load_env, load_compose, load_datastore, load_spark, update):
    """Load and apply Brewblox settings backup.

    This function uses files generated by `brewblox-ctl backup save` as input.
    You can use the --load-XXXX options to partially load the backup.

    This does not attempt to merge data: it will overwrite current docker-compose.yml,
    datastore databases, and Spark blocks.

    Blocks on Spark services not in the backup file will not be affected.

    If dry-run is enabled, it will echo all configuration from the backup archive.

    Steps:
        - Write .env
        - Read .env values
        - Write docker-compose.yml, run `docker-compose up`.
        - Write all datastore files found in backup.
        - Write all Spark blocks found in backup.
        - Run brewblox-ctl update
    """
    utils.check_config()
    utils.confirm_mode()
    urllib3.disable_warnings()

    sudo = utils.optsudo()
    host_url = utils.host_url()
    store_url = utils.datastore_url()

    # Index the archive contents by file-name convention
    zipf = zipfile.ZipFile(archive, 'r', zipfile.ZIP_DEFLATED)
    available = zipf.namelist()
    datastore_files = [v for v in available if v.endswith('.datastore.json')]
    spark_files = [v for v in available if v.endswith('.spark.json')]

    if load_env and '.env' in available:
        # Stage the .env content in a temp file, then copy it into place
        with NamedTemporaryFile('w') as tmp:
            data = zipf.read('.env').decode()
            utils.info('Writing .env')
            utils.show_data(data)
            tmp.write(data)
            tmp.flush()
            sh('cp -f {} .env'.format(tmp.name))

        utils.info('Reading .env values')
        load_dotenv(path.abspath('.env'))

    if load_compose:
        if 'docker-compose.yml' in available:
            utils.info('Writing docker-compose.yml')
            utils.write_compose(yaml.safe_load(
                zipf.read('docker-compose.yml')))
            sh('{} docker-compose up -d --remove-orphans'.format(sudo))
        else:
            utils.info('docker-compose.yml file not found in backup archive')

    if load_datastore:
        if datastore_files:
            utils.info('Waiting for the datastore...')
            sh('{} http wait {}'.format(const.CLI, store_url))
        else:
            utils.info('No datastore files found in backup archive')

        for f in datastore_files:
            # Database name is the archive file name minus its suffix
            db = f[:-len('.datastore.json')]

            # Drop and recreate the database before bulk-importing documents
            utils.info('Recreating database {}'.format(db))
            sh('{} http delete {}/{} --allow-fail'.format(
                const.CLI, store_url, db))
            sh('{} http put {}/{}'.format(const.CLI, store_url, db))

            utils.info('Writing database {}'.format(db))
            with NamedTemporaryFile('w') as tmp:
                data = {'docs': json.loads(zipf.read(f).decode())}
                utils.show_data(data)
                json.dump(data, tmp)
                tmp.flush()
                sh('{} http post {}/{}/_bulk_docs -f {}'.format(
                    const.CLI, store_url, db, tmp.name))

    if load_spark:
        sudo = utils.optsudo()

        if not spark_files:
            utils.info('No Spark files found in backup archive')

        for f in spark_files:
            # Service name is the archive file name minus its suffix
            spark = f[:-len('.spark.json')]

            utils.info('Writing blocks to Spark service {}'.format(spark))
            with NamedTemporaryFile('w') as tmp:
                data = json.loads(zipf.read(f).decode())
                utils.show_data(data)
                json.dump(data, tmp)
                tmp.flush()
                sh('{} http post {}/{}/import_objects -f {}'.format(
                    const.CLI, host_url, spark, tmp.name))
                sh('{} docker-compose restart {}'.format(sudo, spark))

    zipf.close()

    if update:
        utils.info('Updating brewblox...')
        sh('{} update'.format(const.CLI))

    utils.info('Done!')
def load(archive, load_env, load_compose, load_datastore, load_spark, load_node_red, update):
    """Load and apply Brewblox settings backup.

    This function uses files generated by `brewblox-ctl backup save` as input.
    You can use the --load-XXXX options to partially load the backup.

    This does not attempt to merge data: it will overwrite current docker-compose.yml,
    datastore entries, and Spark blocks.

    Blocks on Spark services not in the backup file will not be affected.

    If dry-run is enabled, it will echo all configuration from the backup archive.

    Steps:
        - Write .env
        - Read .env values
        - Write docker-compose.yml, run `docker-compose up`.
        - Write all datastore files found in backup.
        - Write all Spark blocks found in backup.
        - Write Node-RED config files found in backup.
        - Run brewblox-ctl update
    """
    utils.check_config()
    utils.confirm_mode()
    urllib3.disable_warnings()

    sudo = utils.optsudo()
    host_url = utils.host_url()
    store_url = utils.datastore_url()

    # Index the archive contents by file-name convention
    zipf = zipfile.ZipFile(archive, 'r', zipfile.ZIP_DEFLATED)
    available = zipf.namelist()
    redis_file = 'global.redis.json'
    couchdb_files = [v for v in available if v.endswith('.datastore.json')]
    spark_files = [v for v in available if v.endswith('.spark.json')]
    node_red_files = [v for v in available if v.startswith('node-red/')]

    if load_env and '.env' in available:
        utils.info('Loading .env file')
        # Stage the .env content in a temp file, then copy it into place
        with NamedTemporaryFile('w') as tmp:
            data = zipf.read('.env').decode()
            utils.info('Writing .env')
            utils.show_data(data)
            tmp.write(data)
            tmp.flush()
            sh('cp -f {} .env'.format(tmp.name))

        utils.info('Reading .env values')
        load_dotenv(path.abspath('.env'))

    if load_compose:
        if 'docker-compose.yml' in available:
            utils.info('Loading docker-compose.yml')
            config = yaml.safe_load(zipf.read('docker-compose.yml'))
            # Older services may still depend on the `datastore` service
            # The `depends_on` config is useless anyway in a brewblox system
            for svc in config['services'].values():
                with suppress(KeyError):
                    del svc['depends_on']
            utils.write_compose(config)
            sh('{} docker-compose up -d --remove-orphans'.format(sudo))
        else:
            utils.info('docker-compose.yml file not found in backup archive')

    if load_datastore:
        if redis_file in available or couchdb_files:
            utils.info('Waiting for the datastore...')
            sh('{} http wait {}/ping'.format(const.CLI, store_url))
            # Wipe UI/Automation, but leave Spark files
            mdelete_cmd = '{} http post {}/mdelete --quiet -d \'{{"namespace":"{}", "filter":"*"}}\''
            sh(mdelete_cmd.format(const.CLI, store_url, 'brewblox-ui-store'))
            sh(mdelete_cmd.format(const.CLI, store_url, 'brewblox-automation'))
        else:
            utils.info('No datastore files found in backup archive')

        if redis_file in available:
            data = json.loads(zipf.read(redis_file).decode())
            utils.info('Loading {} entries from Redis datastore'.format(len(data['values'])))
            mset(data)

        # Backwards compatibility for UI/automation files from CouchDB
        # The IDs here are formatted as {moduleId}__{objId}
        # The module ID becomes part of the Redis namespace
        for db in ['brewblox-ui-store', 'brewblox-automation']:
            fname = '{}.datastore.json'.format(db)
            if fname not in available:
                continue
            docs = json.loads(zipf.read(fname).decode())
            # Drop invalid names (not prefixed with module ID)
            docs[:] = [d for d in docs if len(d['_id'].split('__', 1)) == 2]
            # Add namespace / ID fields
            for d in docs:
                segments = d['_id'].split('__', 1)
                d['namespace'] = '{}:{}'.format(db, segments[0])
                d['id'] = segments[1]
                del d['_id']
            utils.info('Loading {} entries from database `{}`'.format(len(docs), db))
            mset({'values': docs})

        # Backwards compatibility for Spark service files
        # There is no module ID field here
        spark_db = 'spark-service'
        spark_fname = '{}.datastore.json'.format(spark_db)
        if spark_fname in available:
            docs = json.loads(zipf.read(spark_fname).decode())
            for d in docs:
                d['namespace'] = spark_db
                d['id'] = d['_id']
                del d['_id']
            utils.info('Loading {} entries from database `{}`'.format(len(docs), spark_db))
            mset({'values': docs})

    if load_spark:
        sudo = utils.optsudo()

        if not spark_files:
            utils.info('No Spark files found in backup archive')

        for f in spark_files:
            # Service name is the archive file name minus its suffix
            spark = f[:-len('.spark.json')]

            utils.info('Writing blocks to Spark service {}'.format(spark))
            with NamedTemporaryFile('w') as tmp:
                data = json.loads(zipf.read(f).decode())
                utils.show_data(data)
                json.dump(data, tmp)
                tmp.flush()
                sh('{} http post {}/{}/blocks/backup/load -f {}'.format(const.CLI, host_url, spark, tmp.name))
                sh('{} docker-compose restart {}'.format(sudo, spark))

    if load_node_red and node_red_files:
        # The node-red container runs as UID/GID 1000;
        # use sudo for ownership fixes when we are not that user
        sudo = ''
        if [getgid(), getuid()] != [1000, 1000]:
            sudo = 'sudo '

        with TemporaryDirectory() as tmpdir:
            zipf.extractall(tmpdir, members=node_red_files)
            sh('mkdir -p ./node-red')
            sh('{}chown 1000:1000 ./node-red/'.format(sudo))
            sh('{}chown -R 1000:1000 {}'.format(sudo, tmpdir))
            sh('{}cp -rfp {}/node-red/* ./node-red/'.format(sudo, tmpdir))

    zipf.close()

    if update:
        utils.info('Updating brewblox...')
        sh('{} update'.format(const.CLI))

    utils.info('Done!')