Example #1
def edit_avahi_config():
    conf = Path(const.AVAHI_CONF)

    if not conf.exists():
        return

    config = ConfigObj(str(conf), file_error=True)
    copy = deepcopy(config)
    config.setdefault('server', {}).setdefault('use-ipv6', 'no')
    config.setdefault('publish', {}).setdefault('publish-aaaa-on-ipv4', 'no')
    config.setdefault('reflector', {}).setdefault('enable-reflector', 'yes')

    if config == copy:
        return

    utils.show_data(conf, config.dict())

    with NamedTemporaryFile('w') as tmp:
        config.filename = None
        lines = config.write()
        # avahi-daemon.conf requires a 'key=value' syntax
        tmp.write('\n'.join(lines).replace(' = ', '=') + '\n')
        tmp.flush()
        sh(f'sudo chmod --reference={conf} {tmp.name}')
        sh(f'sudo cp -fp {tmp.name} {conf}')

    if utils.command_exists('systemctl'):
        utils.info('Restarting avahi-daemon service...')
        sh('sudo systemctl restart avahi-daemon')
    else:
        utils.warn(
            '"systemctl" command not found. Please restart your machine to enable Wifi discovery.'
        )
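
For reference, if none of these keys were previously set, the sections written to avahi-daemon.conf would include the following (illustrative; setdefault never overwrites existing keys or values):

[server]
use-ipv6=no

[publish]
publish-aaaa-on-ipv4=no

[reflector]
enable-reflector=yes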
Example #2
def check_ports():
    if utils.path_exists('./docker-compose.yml'):
        utils.info('Stopping services...')
        sh(f'{utils.optsudo()}docker-compose down')

    ports = [
        int(utils.getenv(key, const.ENV_DEFAULTS[key])) for key in [
            const.HTTP_PORT_KEY,
            const.HTTPS_PORT_KEY,
            const.MQTT_PORT_KEY,
        ]
    ]

    try:
        port_connections = [
            conn for conn in psutil.net_connections()
            if conn.laddr.ip in ['::', '0.0.0.0'] and conn.laddr.port in ports
        ]
    except psutil.AccessDenied:
        utils.warn(
            'Unable to read network connections. You need to run `netstat` or `lsof` manually.'
        )
        port_connections = []

    if port_connections:
        port_str = ', '.join(
            set(str(conn.laddr.port) for conn in port_connections))
        utils.warn(f'Port(s) {port_str} already in use.')
        utils.warn(
            'Run `brewblox-ctl service ports` to configure Brewblox ports.')
        if not utils.confirm('Do you want to continue?'):
            raise SystemExit(1)
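
A minimal standalone sketch of the same check, assuming only psutil is available (utils and const are brewblox-ctl helpers):

import psutil

def ports_in_use(ports):
    # Wildcard listeners only, mirroring the filter above.
    # conn.laddr can be an empty tuple for some sockets, hence the guard.
    return sorted({
        conn.laddr.port
        for conn in psutil.net_connections()
        if conn.laddr
        and conn.laddr.ip in ('::', '0.0.0.0')
        and conn.laddr.port in ports
    })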
Example #3
def disable_ssh_accept_env():
    """Disable the 'AcceptEnv LANG LC_*' setting in sshd_config

    This setting is default on the Raspberry Pi,
    but leads to locale errors when an unsupported LANG is sent.

    Given that the Pi by default only includes the en_GB locale,
    the chances of being sent an unsupported locale are very real.
    """
    file = Path('/etc/ssh/sshd_config')
    if not file.exists():
        return

    content = file.read_text()
    updated = re.sub(r'^AcceptEnv LANG LC',
                     '#AcceptEnv LANG LC',
                     content,
                     flags=re.MULTILINE)

    if content == updated:
        return

    with NamedTemporaryFile('w') as tmp:
        tmp.write(updated)
        tmp.flush()
        utils.info('Updating SSHD config to disable AcceptEnv...')
        utils.show_data('/etc/ssh/sshd_config', updated)
        sh(f'sudo chmod --reference={file} {tmp.name}')
        sh(f'sudo cp -fp {tmp.name} {file}')

    if utils.command_exists('systemctl'):
        utils.info('Restarting SSH service...')
        sh('sudo systemctl restart ssh')
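
The substitution depends on re.MULTILINE, which anchors '^' at every line start instead of only at the start of the string. A quick sanity check, assuming a typical sshd_config fragment:

import re

content = 'Port 22\nAcceptEnv LANG LC_*\n'
updated = re.sub(r'^AcceptEnv LANG LC', '#AcceptEnv LANG LC',
                 content, flags=re.MULTILINE)
assert updated == 'Port 22\n#AcceptEnv LANG LC_*\n'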
Example #4
def choose_device(discovery_type: str, compose_config: dict = None):
    id_services = match_id_services(compose_config)
    table = tabular.Table(
        keys=['index', 'connect', 'hw', 'id', 'host', 'service'],
        headers={
            'index': 'Index',
            'connect': 'Type',
            'hw': 'Model'.ljust(HW_LEN),
            'id': 'Device ID'.ljust(MAX_ID_LEN),
            'host': 'Device host'.ljust(HOST_LEN),
            'service': 'Service',
        })
    devs = []

    utils.info('Discovering devices...')
    table.print_headers()
    for i, dev in enumerate(discover_device(discovery_type)):
        devs.append(dev)
        table.print_row({
            **dev,
            'index': i + 1,
            'service': id_services.get(dev['id'], ''),
        })

    if not devs:
        click.echo('No devices discovered')
        return None

    idx = click.prompt('Which device do you want to use?',
                       type=click.IntRange(1, len(devs)),
                       default=1)

    return devs[idx - 1]
Example #5
def kill(zombies):
    """Stop and remove all containers on this host.

    This includes those not from Brewblox.

    If the --zombies flag is set,
    leftover processes that are still claiming a port will be forcibly removed.
    Use this if you get "port is already allocated" errors after your system crashed.
    """
    utils.confirm_mode()
    sudo = utils.optsudo()
    sh(f'{sudo}docker rm --force $({sudo}docker ps -aq)', check=False)

    if zombies:
        # We can't use psutil for this, as we need root rights to get pids
        if not utils.command_exists('netstat'):
            utils.warn(
                'Command `netstat` not found. Please install it by running:')
            utils.warn('')
            utils.warn(
                '    sudo apt-get update && sudo apt-get install net-tools')
            utils.warn('')
            return

        procs = re.findall(r'(\d+)/docker-proxy',
                           sh('sudo netstat -pna', capture=True))

        if procs:
            utils.info(f'Removing {len(procs)} zombies...')
            sh('sudo service docker stop')
            sh([f'sudo kill -9 {proc}' for proc in procs])
            sh('sudo service docker start')
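
The regex extracts PIDs from the program column of netstat output. An illustrative line it would match (PID hypothetical):

tcp6       0      0 :::80                   :::*                    LISTEN      1234/docker-proxy

For this line, re.findall returns ['1234'], and each listed PID is killed while the Docker service is stopped.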
Example #6
def add_particle_udev_rules():
    rules_dir = '/etc/udev/rules.d'
    target = f'{rules_dir}/50-particle.rules'
    if not utils.path_exists(target) and utils.command_exists('udevadm'):
        utils.info('Adding udev rules for Particle devices...')
        sh(f'sudo mkdir -p {rules_dir}')
        sh(f'sudo cp {const.CONFIG_DIR}/50-particle.rules {target}')
        sh('sudo udevadm control --reload-rules && sudo udevadm trigger')
Example #7
def read_fields(policy, measurement, keys):
    prefix = 'm_' * POLICIES.index(policy)
    fields = ','.join(['"{}{}"'.format(prefix, k)
                       for k in keys])

    utils.info('Reading {} {}'.format(measurement, policy))
    sh('docker-compose exec influx influx -format csv ' +
       "-execute 'SELECT {} from brewblox.{}.\"{}\"'".format(fields, policy, measurement) +
       '> /tmp/influx_rename_{}.csv'.format(policy))
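
For illustration: assuming POLICIES = ['autogen', 'downsample_1m', ...] (hypothetical), calling read_fields('downsample_1m', 'spark-one', ['temp', 'setting']) yields prefix='m_' and executes the query:

SELECT "m_temp","m_setting" from brewblox.downsample_1m."spark-one"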
Example #8
def apply_config_files():
    """Apply system-defined configuration from config dir"""
    utils.info('Updating configuration files...')
    sh(f'cp -f {const.CONFIG_DIR}/traefik-cert.yaml ./traefik/')
    sh(f'cp -f {const.CONFIG_DIR}/docker-compose.shared.yml ./')
    shared_cfg = utils.read_shared_compose()
    usr_cfg = utils.read_compose()

    usr_cfg['version'] = shared_cfg['version']
    utils.write_compose(usr_cfg)
Example #9
def prepare_flasher(release, pull):
    tag = utils.docker_tag(release)
    sudo = utils.optsudo()

    if pull:
        utils.info('Pulling flasher image...')
        sh('{}docker pull brewblox/firmware-flasher:{}'.format(sudo, tag))

    if utils.path_exists('./docker-compose.yml'):
        utils.info('Stopping services...')
        sh('{}docker-compose down'.format(sudo))
Example #10
def check_automation_ui():
    # The automation service is deprecated, and its editor is removed from the UI.
    # The service was always optional - only add the automation-ui service if automation is present.
    config = utils.read_compose()
    services = config['services']
    if 'automation' in services and 'automation-ui' not in services:
        utils.info('Adding automation-ui service...')
        services['automation-ui'] = {
            'image': 'brewblox/brewblox-automation-ui:${BREWBLOX_RELEASE}',
            'restart': 'unless-stopped',
        }
        utils.write_compose(config)
Example #11
    def check_system_opts(self):
        self.apt_install = True

        apt_deps = ' '.join(const.APT_DEPENDENCIES)
        if not utils.command_exists('apt-get'):
            utils.info(
                '`apt-get` is not available. You may need to find another way to install dependencies.'
            )
            utils.info(f'Apt packages: "{apt_deps}"')
            self.apt_install = False
        elif not self.use_defaults:
            self.apt_install = utils.confirm(
                f'Do you want to install apt packages "{apt_deps}"?')
Example #12
def migrate_compose_datastore():
    # The couchdb datastore service is gone
    # Older services may still rely on it
    utils.info('Removing `depends_on` fields from docker-compose.yml...')
    config = utils.read_compose()
    for svc in config['services'].values():
        with suppress(KeyError):
            del svc['depends_on']
    utils.write_compose(config)

    # Init dir. It will be filled during upped_migrate
    utils.info('Creating redis/ dir...')
    sh('mkdir -p redis/')
Example #13
def particle_wifi(dev: usb.core.Device):
    if utils.ctx_opts().dry_run:
        utils.info('Dry run: skipping activation of Spark listening mode')
    else:
        dev.reset()

        # Magic numbers for the USB control call
        HOST_TO_DEVICE = 0x40  # bmRequestType
        REQUEST_INIT = 1  # bRequest
        REQUEST_SEND = 3  # bRequest
        PARTICLE_LISTEN_INDEX = 70  # wIndex
        PARTICLE_LISTEN_VALUE = 0  # wValue
        PARTICLE_BUF_SIZE = 64  # wLength

        dev.ctrl_transfer(HOST_TO_DEVICE, REQUEST_INIT, PARTICLE_LISTEN_VALUE,
                          PARTICLE_LISTEN_INDEX, PARTICLE_BUF_SIZE)

        dev.ctrl_transfer(HOST_TO_DEVICE, REQUEST_SEND, PARTICLE_LISTEN_VALUE,
                          PARTICLE_LISTEN_INDEX, PARTICLE_BUF_SIZE)

    sleep(LISTEN_MODE_WAIT_S)

    serial = usb.util.get_string(dev, dev.iSerialNumber)
    path = next(
        Path('/dev/serial/by-id').glob(f'*{serial}*'), Path('/dev/ttyACM0'))

    utils.info('Press w to start Wifi configuration.')
    utils.info('Press Ctrl + ] to cancel.')
    utils.info('The Spark must be restarted after canceling.')
    sh(f'pyserial-miniterm -q {path.resolve()} 2>/dev/null')
Example #14
def find_device_by_host(device_host: str):
    utils.info(f'Discovering device with address {device_host}...')
    match = next(
        (dev
         for dev in discover_device('lan') if dev.get('host') == device_host),
        None)
    if match:
        id = match['id']
        hw = match['hw']
        utils.info(f'Discovered a {hw} with ID {id}')
        return match
    else:
        click.echo('No devices discovered')
        return None
Example #15
def ports(http, https, mqtt):
    """Update used ports"""
    utils.check_config()
    utils.confirm_mode()

    cfg = {
        const.HTTP_PORT_KEY: http,
        const.HTTPS_PORT_KEY: https,
        const.MQTT_PORT_KEY: mqtt,
    }

    utils.info('Writing port settings to .env...')
    for key, val in cfg.items():
        utils.setenv(key, val)
Example #16
def remove(ctx, services):
    """Remove a service."""
    utils.check_config()
    utils.confirm_mode()

    config = utils.read_compose()
    for name in services:
        try:
            del config['services'][name]
            utils.info(f"Removed service '{name}'")
        except KeyError:
            utils.warn(f"Service '{name}' not found")

    if services:
        utils.write_compose(config)
        restart_services(ctx, compose_args=['--remove-orphans'])
Example #17
def particle(release, pull, command):
    """Start a Docker container with access to the Particle CLI.

    This requires the Spark to be connected over USB.

    \b
    Steps:
        - Stop running services.
        - Pull flasher image.
        - Start flasher image.
    """
    utils.confirm_mode()

    utils.info('Starting Particle image...')
    utils.info("Type 'exit' and press enter to exit the shell")
    run_particle_flasher(release, pull, command)
Example #18
def list_devices(discovery_type: str, compose_config: dict = None):
    id_services = match_id_services(compose_config)
    table = tabular.Table(keys=['connect', 'hw', 'id', 'host', 'service'],
                          headers={
                              'connect': 'Type',
                              'hw': 'Model'.ljust(HW_LEN),
                              'id': 'Device ID'.ljust(MAX_ID_LEN),
                              'host': 'Device host'.ljust(HOST_LEN),
                              'service': 'Service',
                          })

    utils.info('Discovering devices...')
    table.print_headers()
    for dev in discover_device(discovery_type):
        table.print_row({
            **dev,
            'service': id_services.get(dev['id'], ''),
        })
Example #19
def fix_ipv6(config_file=None, restart=True):
    utils.info('Fixing Docker IPv6 settings...')

    if utils.is_wsl():
        utils.info('WSL environment detected. Skipping IPv6 config changes.')
        return

    # Config is either provided, or parsed from active daemon process
    if not config_file:
        default_config_file = '/etc/docker/daemon.json'
        dockerd_proc = sh('ps aux | grep dockerd', capture=True)
        proc_match = re.match(r'.*--config-file[\s=](?P<file>.*\.json).*',
                              dockerd_proc,
                              flags=re.MULTILINE)
        config_file = (proc_match.group('file')
                       if proc_match else default_config_file)

    utils.info(f'Using Docker config file {config_file}')

    # Read config. Create file if not exists
    sh(f"sudo touch '{config_file}'")
    config = sh(f"sudo cat '{config_file}'", capture=True)

    if 'fixed-cidr-v6' in config:
        utils.info('IPv6 settings are already present. Making no changes.')
        return

    # Edit and write. Do not overwrite existing values
    config = json.loads(config or '{}')
    config.setdefault('ipv6', False)
    config.setdefault('fixed-cidr-v6', '2001:db8:1::/64')
    config_str = json.dumps(config, indent=2)
    sh(f"echo '{config_str}' | sudo tee '{config_file}' > /dev/null")

    # Restart daemon
    if restart:
        if utils.command_exists('service'):
            utils.info('Restarting Docker service...')
            sh('sudo service docker restart')
        else:
            utils.warn(
                '"service" command not found. Please restart your machine to apply config changes.'
            )
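
On a host with no pre-existing daemon.json, the config written above is exactly:

{
  "ipv6": false,
  "fixed-cidr-v6": "2001:db8:1::/64"
}

Existing files keep their other keys, since setdefault never overwrites a present value.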
Example #20
def flash(release, pull):
    """Flash firmware on Spark.

    This requires the Spark to be connected over USB.

    After the first install, firmware updates can also be installed using the UI.

    \b
    Steps:
        - Stop running services.
        - Pull flasher image.
        - Run flash command.
    """
    utils.confirm_mode()
    utils.confirm_usb()
    prepare_flasher(release, pull)

    utils.info('Flashing Spark...')
    run_flasher(release, 'flash')
Example #21
def test_logs(mocker):
    m_opts = mocker.patch(TESTED + '.ctx_opts').return_value
    m_secho = mocker.patch(TESTED + '.click.secho')

    m_opts.quiet = True
    utils.info('test')
    assert m_secho.call_count == 0
    utils.warn('warning')
    assert m_secho.call_count == 1
    utils.error('error')
    assert m_secho.call_count == 2

    m_opts.quiet = False
    utils.info('test')
    assert m_secho.call_count == 3
    utils.warn('warning')
    assert m_secho.call_count == 4
    utils.error('error')
    assert m_secho.call_count == 5
Example #22
def coredump(upload):
    """Read and upload a core dump file for the Spark 4.

    This requires the Spark to be connected over USB.
    Not compatible with the Spark 2 or 3.

    If the Spark 4 crashes, it stores what it was doing at the time of the crash.
    This command exports and uploads this data.

    The `esptool` python package is required, and will be installed if not found.
    """
    if not utils.command_exists('esptool.py'):
        sh('python3 -m pip install esptool')
    sh('sudo -E env "PATH=$PATH" esptool.py --chip esp32 --baud 115200 read_flash 0xA10000 81920 coredump.bin')
    sh('base64 coredump.bin > coredump.b64')

    if upload:
        click.echo(utils.file_netcat('termbin.com', 9999, Path('./coredump.b64')).decode())
    else:
        utils.info('Skipping upload. If you want to manually upload the file, run: ' +
                   click.style('brewblox-ctl termbin ./coredump.b64', fg='green'))
Example #23
def flash(release, pull):
    """Flash Spark firmware over USB.

    This requires the Spark to be connected over USB.

    After the first install, firmware updates can also be installed using the UI.

    \b
    Steps:
        - Stop running services.
        - Pull flasher image.
        - Run flash command.
    """
    utils.confirm_mode()
    dev = find_usb_spark()

    if dev.idProduct == const.PID_PHOTON:
        utils.info('Flashing Spark 2...')
        run_particle_flasher(release, pull, 'flash')
    elif dev.idProduct == const.PID_P1:
        utils.info('Flashing Spark 3...')
        run_particle_flasher(release, pull, 'flash')
    elif dev.idProduct == const.PID_ESP32:
        utils.info('Flashing Spark 4...')
        run_esp_flasher(release, pull)
    else:
        raise ValueError('Unknown USB device')
Example #24
def migrate_compose_split():
    # Splitting compose configuration between docker-compose and docker-compose.shared.yml
    # Version pinning (0.2.2) will happen automatically
    utils.info('Moving system services to docker-compose.shared.yml...')
    config = utils.read_compose()
    sys_names = [
        'mdns',
        'eventbus',
        'influx',
        'datastore',
        'history',
        'ui',
        'traefik',
    ]
    usr_config = {
        'version': config['version'],
        'services': {
            key: svc
            for (key, svc) in config['services'].items()
            if key not in sys_names
        }
    }
    utils.write_compose(usr_config)
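
As an illustration, a compose file with services 'spark-one', 'history', and 'ui' (version '3.7', hypothetical) would leave this docker-compose.yml behind, with 'history' and 'ui' provided by docker-compose.shared.yml instead:

version: '3.7'
services:
  spark-one:
    # service definition unchanged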
Example #25
    def check_docker_opts(self):
        self.docker_install = True
        self.docker_group_add = True
        self.docker_pull = True

        if utils.command_exists('docker'):
            utils.info('Docker is already installed.')
            self.docker_install = False
        elif not self.use_defaults:
            self.docker_install = utils.confirm(
                'Do you want to install docker?')

        if utils.is_docker_user():
            user = utils.getenv('USER')
            utils.info(f'{user} already belongs to the docker group.')
            self.docker_group_add = False
        elif not self.use_defaults:
            self.docker_group_add = utils.confirm(
                'Do you want to run docker commands without sudo?')

        if not self.use_defaults:
            self.docker_pull = utils.confirm(
                'Do you want to pull the docker images for your services?')
Example #26
def write_fields(policy, keys, pattern, replace):
    prefix = 'm_' * POLICIES.index(policy)
    fields = [re.sub(pattern, replace, k, count=1) for k in keys]
    fields = [re.sub(r' ', r'\\ ', k) for k in fields]

    infile = '/tmp/influx_rename_{}.csv'.format(policy)
    outfile = '/tmp/influx_rename_{}.line'.format(policy)
    sh('rm {}'.format(outfile), check=False)

    with open(infile) as f_in:
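        # Read the CSV header line; empty output means no values were exported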
        if not f_in.readline():
            utils.info('No values found in policy "{}"'.format(policy))
            return

        with open(outfile, 'w') as f_out:
            f_out.write('# DML\n')
            f_out.write('# CONTEXT-DATABASE: brewblox\n')
            f_out.write('# CONTEXT-RETENTION-POLICY: {}\n'.format(policy))
            f_out.write('\n')

            while True:
                line = f_in.readline().strip()
                if not line:
                    break
                values = line.split(',')
                measurement = values.pop(0)
                time = values.pop(0)
                data = ','.join(['{}{}={}'.format(prefix, field, value)
                                 for (field, value) in zip(fields, values)
                                 if value and value != '0'])
                if data:
                    f_out.write('{} {} {}\n'.format(measurement, data, time))

    utils.info('Writing lines for policy "{}"'.format(policy))
    sh('docker cp {} $(docker-compose ps -q influx):/rename'.format(outfile))
    sh('docker-compose exec influx influx -import -path=/rename || true')
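
For illustration, with prefix='m_' and renamed fields ['temp', 'setting'], a CSV row 'spark-one,1626096000000000000,21.5,65.2' (values hypothetical) becomes the line protocol entry:

spark-one m_temp=21.5,m_setting=65.2 1626096000000000000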
Example #27
def load(file):
    """Create Brewblox directory from snapshot.

    This can be used to move Brewblox installations between hosts.
    To create a snapshot, use `brewblox-ctl snapshot save`
    """
    utils.check_config()
    utils.confirm_mode()
    dir = Path('./').resolve()

    with TemporaryDirectory() as tmpdir:
        utils.info(f'Extracting snapshot to {dir} directory...')
        sh(f'tar -xzf {file} -C {tmpdir}')
        content = list(Path(tmpdir).iterdir())
        if utils.ctx_opts().dry_run:
            content = ['brewblox']
        if len(content) != 1:
            raise ValueError(f'Multiple files found in snapshot: {content}')
        sh('sudo rm -rf ./*')
        # We need to explicitly include dotfiles in the mv glob
        src = content[0]
        sh(f'mv {src}/.[!.]* {src}/* {dir}/')

    actions.install_ctl_package(download='missing')
Example #28
def wifi(release, pull):
    """DISABLED: Configure Spark Wifi settings.

    This requires the Spark to be connected over USB.

    \b
    Steps:
        - Stop running services.
        - Pull flasher image.
        - Run wifi command.
    """
    utils.info('This command is temporarily disabled')
    utils.info('To set up Wifi, connect to the Spark over USB')
    utils.info('On the Spark service page (actions, top right), you can configure Wifi settings')
Example #29
def disable_ipv6():
    """Disable IPv6 support on the host machine.

    Reason: https://github.com/docker/for-linux/issues/914
    Should only be used if your services are having stability issues
    """
    utils.confirm_mode()
    is_disabled = sh('cat /proc/sys/net/ipv6/conf/all/disable_ipv6', capture=True).strip()
    if is_disabled == '1':
        utils.info('IPv6 is already disabled')
    elif is_disabled == '0' or utils.ctx_opts().dry_run:
        utils.info('Disabling IPv6...')
        sh('echo "net.ipv6.conf.all.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf')
        sh('echo "net.ipv6.conf.default.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf')
        sh('echo "net.ipv6.conf.lo.disable_ipv6 = 1" | sudo tee -a /etc/sysctl.conf')
        sh('sudo sysctl -p')
    else:
        utils.info('Invalid result when checking IPv6 status: ' + is_disabled)
Example #30
def save(save_compose, ignore_spark_error):
    """Create a backup of Brewblox settings.

    A zip archive containing JSON/YAML files is created in the ./backup/ directory.
    The archive name will include current date and time to ensure uniqueness.

    Restrictions:
    - The backup is not exported to any kind of remote/cloud storage.
    - The backup does not include history data.
    - The backup does not include Docker images.
    - The backup does not include custom configuration for third-party services.

    To use this command in scripts, run it as `brewblox-ctl --quiet backup save`.
    Its only output to stdout will be the absolute path to the created backup.

    The command will fail if any of the Spark services could not be contacted.

    As it does not make any destructive changes to configuration,
    this command is not affected by --dry-run.

    \b
    Stored data:
    - .env
    - docker-compose.yml.   (Optional)
    - Datastore databases.
    - Spark service blocks.
    - Node-RED data.
    - Mosquitto config files.
    - Tilt config files.

    \b
    NOT stored:
    - History data.

    """
    utils.check_config()
    urllib3.disable_warnings()

    file = f'backup/brewblox_backup_{datetime.now().strftime("%Y%m%d_%H%M")}.zip'
    with suppress(FileExistsError):
        mkdir(Path('backup/').resolve())

    store_url = utils.datastore_url()

    utils.info('Waiting for the datastore...')
    http.wait(store_url + '/ping', info_updates=True)

    config = utils.read_compose()
    sparks = [
        k for k, v in config['services'].items()
        if v.get('image', '').startswith('brewblox/brewblox-devcon-spark')
    ]
    zipf = zipfile.ZipFile(file, 'w', zipfile.ZIP_DEFLATED)

    # Always save .env
    utils.info('Exporting .env')
    zipf.write('.env')

    # Always save datastore
    utils.info('Exporting datastore')
    resp = requests.post(store_url + '/mget',
                         json={
                             'namespace': '',
                             'filter': '*'
                         },
                         verify=False)
    resp.raise_for_status()
    zipf.writestr('global.redis.json', resp.text)

    if save_compose:
        utils.info('Exporting docker-compose.yml')
        zipf.write('docker-compose.yml')

    for spark in sparks:
        utils.info(f'Exporting Spark blocks from `{spark}`')
        resp = requests.post(f'{utils.host_url()}/{spark}/blocks/backup/save',
                             verify=False)
        try:
            resp.raise_for_status()
            zipf.writestr(spark + '.spark.json', resp.text)
        except Exception as ex:
            if ignore_spark_error:
                utils.info(f'Skipping Spark `{spark}` due to error: {str(ex)}')
            else:
                raise ex

    for fname in [
            *glob('node-red/*.js*'),
            *glob('node-red/lib/**/*.js*'),
            *glob('mosquitto/*.conf'),
            *glob('tilt/*'),
    ]:
        zipf.write(fname)

    zipf.close()
    click.echo(Path(file).resolve())
    utils.info('Done!')
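
For illustration, a resulting archive for a single Spark service named 'spark-one' (name and timestamp hypothetical) would contain:

backup/brewblox_backup_20230101_1200.zip
    .env
    global.redis.json
    docker-compose.yml      (only with save_compose)
    spark-one.spark.json
    node-red/*.js*, node-red/lib/**/*.js*
    mosquitto/*.conf
    tilt/*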