def particle_wifi(dev: usb.core.Device):
    """Put a Particle-based Spark into listening mode and open a serial terminal.

    Sends the vendor-specific USB control requests that trigger listening mode,
    then attaches a `pyserial-miniterm` session so the user can configure Wifi
    interactively. In dry-run mode the USB control calls are skipped, but the
    serial terminal is still opened (`sh` presumably no-ops on dry runs —
    TODO confirm against the `sh` helper).

    Args:
        dev: the USB device handle for the Spark controller.
    """
    if utils.ctx_opts().dry_run:
        utils.info('Dry run: skipping activation of Spark listening mode')
    else:
        # Re-enumerate the device to get it into a known state first
        dev.reset()

        # Magic numbers for the USB control call
        HOST_TO_DEVICE = 0x40  # bmRequestType
        REQUEST_INIT = 1  # bRequest
        REQUEST_SEND = 3  # bRequest
        PARTICLE_LISTEN_INDEX = 70  # wIndex
        PARTICLE_LISTEN_VALUE = 0  # wValue
        PARTICLE_BUF_SIZE = 64  # wLength

        # Two-step handshake: INIT announces the request, SEND triggers it
        dev.ctrl_transfer(
            HOST_TO_DEVICE,
            REQUEST_INIT,
            PARTICLE_LISTEN_VALUE,
            PARTICLE_LISTEN_INDEX,
            PARTICLE_BUF_SIZE
        )
        dev.ctrl_transfer(
            HOST_TO_DEVICE,
            REQUEST_SEND,
            PARTICLE_LISTEN_VALUE,
            PARTICLE_LISTEN_INDEX,
            PARTICLE_BUF_SIZE
        )

    # Give the controller time to re-enumerate in listening mode
    sleep(LISTEN_MODE_WAIT_S)

    # Resolve the stable /dev/serial/by-id symlink for this device;
    # fall back to /dev/ttyACM0 if no matching symlink is found
    serial = usb.util.get_string(dev, dev.iSerialNumber)
    path = next(
        Path('/dev/serial/by-id').glob(f'*{serial}*'),
        Path('/dev/ttyACM0'))

    utils.info('Press w to start Wifi configuration.')
    utils.info('Press Ctrl + ] to cancel.')
    utils.info('The Spark must be restarted after canceling.')
    # stderr is discarded to hide miniterm's own exit chatter
    sh(f'pyserial-miniterm -q {path.resolve()} 2>/dev/null')
def disable_ipv6():
    """Disable IPv6 support on the host machine.

    Reason: https://github.com/docker/for-linux/issues/914
    Should only be used if your services are having stability issues
    """
    utils.confirm_mode()

    # /proc reports '1' when IPv6 is disabled, '0' when enabled
    status = sh('cat /proc/sys/net/ipv6/conf/all/disable_ipv6', capture=True).strip()

    if status == '1':
        utils.info('IPv6 is already disabled')
    elif status == '0' or utils.ctx_opts().dry_run:
        utils.info('Disabling IPv6...')
        # Persist the setting for all three interface scopes, then apply it
        for scope in ('all', 'default', 'lo'):
            sh(f'echo "net.ipv6.conf.{scope}.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf')
        sh('sudo sysctl -p')
    else:
        utils.info('Invalid result when checking IPv6 status: ' + status)
def load(file):
    """Create Brewblox directory from snapshot.

    This can be used to move Brewblox installations between hosts.
    To create a snapshot, use `brewblox-ctl snapshot save`
    """
    utils.check_config()
    utils.confirm_mode()
    # Named `target` to avoid shadowing the `dir` builtin
    target = Path('./').resolve()

    with TemporaryDirectory() as tmpdir:
        utils.info(f'Extracting snapshot to {target} directory...')
        sh(f'tar -xzf {file} -C {tmpdir}')
        content = list(Path(tmpdir).iterdir())
        if utils.ctx_opts().dry_run:
            # Nothing is extracted during a dry run;
            # pretend the archive held a single 'brewblox' directory
            content = ['brewblox']
        if len(content) != 1:
            # A valid snapshot holds exactly one root directory.
            # Message covers both the empty and the multiple-entries case
            # (the old text claimed 'Multiple files' even for an empty archive).
            raise ValueError(f'Expected a single root directory in snapshot, found: {content}')
        sh('sudo rm -rf ./*')
        # We need to explicitly include dotfiles in the mv glob
        src = content[0]
        sh(f'mv {src}/.[!.]* {src}/* {target}/')

    actions.install_ctl_package(download='missing')
def test_ctx_opts():
    """ctx_opts() must raise RuntimeError when called outside a click context."""
    # Callable form of pytest.raises: invokes utils.ctx_opts and
    # fails the test unless RuntimeError is raised
    pytest.raises(RuntimeError, utils.ctx_opts)
def migrate_couchdb():
    """Migrate datastore contents from a legacy CouchDB volume to Redis.

    Spins up a throwaway CouchDB container backed by the local ./couchdb/
    data directory, copies all documents into the Redis datastore through
    its HTTP API, and finally renames the couchdb/ directory so the
    migration is not repeated.

    No-op when running in dry-run mode or when ./couchdb/ does not exist.
    """
    # The datastore URL may use a self-signed certificate; requests are
    # made with verify=False below, so silence the resulting warnings
    urllib3.disable_warnings()
    sudo = utils.optsudo()
    opts = utils.ctx_opts()
    redis_url = utils.datastore_url()
    couch_url = 'http://localhost:5984'
    utils.info('Migrating datastore from CouchDB to Redis...')
    if opts.dry_run:
        utils.info('Dry run. Skipping migration...')
        return
    if not utils.path_exists('./couchdb/'):
        utils.info('couchdb/ dir not found. Skipping migration...')
        return
    utils.info('Starting a temporary CouchDB container on port 5984...')
    # Remove any leftover container from a previously aborted migration
    sh(f'{sudo}docker rm -f couchdb-migrate', check=False)
    sh(f'{sudo}docker run --rm -d'
       ' --name couchdb-migrate'
       ' -v "$(pwd)/couchdb/:/opt/couchdb/data/"'
       ' -p "5984:5984"'
       ' treehouses/couchdb:2.3.1')
    # Block until both source (CouchDB) and target (Redis) respond
    sh(f'{const.CLI} http wait {couch_url}')
    sh(f'{const.CLI} http wait {redis_url}/ping')
    resp = requests.get(f'{couch_url}/_all_dbs')
    resp.raise_for_status()
    dbs = resp.json()
    # UI/automation docs use a 'moduleId__objId' naming scheme:
    # the prefix becomes part of the Redis namespace, the rest the id
    for db in ['brewblox-ui-store', 'brewblox-automation']:
        if db in dbs:
            resp = requests.get(f'{couch_url}/{db}/_all_docs',
                                params={'include_docs': True})
            resp.raise_for_status()
            docs = [v['doc'] for v in resp.json()['rows']]
            # Drop invalid names
            docs[:] = [d for d in docs if len(d['_id'].split('__', 1)) == 2]
            for d in docs:
                segments = d['_id'].split('__', 1)
                d['namespace'] = f'{db}:{segments[0]}'
                d['id'] = segments[1]
                # CouchDB-internal fields have no meaning in Redis
                del d['_rev']
                del d['_id']
            resp = requests.post(f'{redis_url}/mset',
                                 json={'values': docs},
                                 verify=False)
            resp.raise_for_status()
            utils.info(f'Migrated {len(docs)} entries from {db}')
    # Spark docs keep their full _id as the Redis id; no prefix scheme
    if 'spark-service' in dbs:
        resp = requests.get(f'{couch_url}/spark-service/_all_docs',
                            params={'include_docs': True})
        resp.raise_for_status()
        docs = [v['doc'] for v in resp.json()['rows']]
        for d in docs:
            d['namespace'] = 'spark-service'
            d['id'] = d['_id']
            del d['_rev']
            del d['_id']
        resp = requests.post(f'{redis_url}/mset',
                             json={'values': docs},
                             verify=False)
        resp.raise_for_status()
        utils.info(f'Migrated {len(docs)} entries from spark-service')
    sh(f'{sudo}docker stop couchdb-migrate')
    # Rename rather than delete: keeps a dated backup of the old data
    sh('sudo mv couchdb/ couchdb-migrated-' +
       datetime.now().strftime('%Y%m%d'))
def migrate_influxdb(
    target: str = 'victoria',
    duration: str = '',
    services: List[str] = [],
    offsets: List[Tuple[str, int]] = [],
):
    """Exports InfluxDB history data.

    The exported data is either immediately imported to the new history database,
    or saved to file.
    """
    opts = utils.ctx_opts()
    sudo = utils.optsudo()
    date = datetime.now().strftime('%Y%m%d_%H%M')

    # Set user expectations before anything heavy starts
    for line in [
        'Depending on the amount of data, this may take some hours.',
        'You can use your system as normal while the migration is in progress.',
        'The migration can safely be stopped and restarted or resumed.',
        'For more info, see https://brewblox.netlify.app/dev/migration/influxdb.html',
    ]:
        utils.warn(line)

    if opts.dry_run:
        utils.info('Dry run. Skipping migration...')
        return

    if not utils.path_exists('./influxdb/'):
        utils.info('influxdb/ dir not found. Skipping migration...')
        return

    utils.info('Starting InfluxDB container...')

    # Stop container in case previous migration was cancelled
    sh(f'{sudo}docker stop influxdb-migrate > /dev/null', check=False)

    # Start standalone container
    # We'll communicate using 'docker exec', so no need to publish a port
    sh(f'{sudo}docker run '
       '--rm -d '
       '--name influxdb-migrate '
       '-v "$(pwd)/influxdb:/var/lib/influxdb" '
       'influxdb:1.8 '
       '> /dev/null')

    # Poll the InfluxDB health endpoint inside the container until startup is done
    health_check = 'curl --output /dev/null --silent --fail http://localhost:8086/health'
    wait_loop = f'until $({health_check}); do sleep 1 ; done'
    sh(f"{sudo}docker exec influxdb-migrate bash -c '{wait_loop}'")

    # Export all measurements unless the user selected a subset
    if not services:
        services = _influx_measurements()

    utils.info(f'Exporting services: {", ".join(services)}')

    # Export data and import to target, resuming from the per-service
    # offset when one was given (first matching entry wins, default 0)
    for svc in services:
        svc_offset = 0
        for name, value in offsets:
            if name == svc:
                svc_offset = value
                break
        _copy_influx_measurement(svc, date, duration, target, svc_offset)

    # Stop migration container
    sh(f'{sudo}docker stop influxdb-migrate > /dev/null', check=False)