Example #1
    async def update(self, job, attrs=None):
        """
        Downloads (if not already cached) and applies an update.
        """
        attrs = attrs or {}
        train = attrs.get('train') or (await self.middleware.call('update.get_trains'))['selected']
        location = await self.middleware.call('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job)

        update = Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        if update is False:
            raise ValueError('No update available')

        new_manifest = Manifest.Manifest(require_signature=True)
        new_manifest.LoadPath('{}/MANIFEST'.format(location))

        Update.ApplyUpdate(
            location,
            install_handler=handler.install_handler,
        )
        await self.middleware.call('cache.put', 'update.applied', True)

        if attrs.get('reboot'):
            await self.middleware.call('system.reboot', {'delay': 10})
        return True
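
A note on `job` in all of these examples: it is a middleware job object whose real class lives in the FreeNAS/TrueNAS middleware and is not shown on this page. As a rough mental model only, here is a minimal stand-in that accepts the `(percent, description, extra)` call shapes used throughout — the class and its printing behavior are assumptions for illustration, not the middleware's API:

    from typing import Any, Optional

    class Job:
        """Minimal stand-in for a middleware job object (illustrative only)."""

        def __init__(self) -> None:
            self.progress = {'percent': 0, 'description': '', 'extra': None}

        def set_progress(self, percent: Optional[float] = None,
                         description: Optional[str] = None,
                         extra: Any = None) -> None:
            # Passing percent=None keeps the last reported percent, matching
            # the set_progress(None, msg) calls seen in these examples.
            if percent is not None:
                self.progress['percent'] = percent
            if description is not None:
                self.progress['description'] = description
            self.progress['extra'] = extra
            print(f"[{self.progress['percent']}%] {self.progress['description']}")

    job = Job()
    job.set_progress(0, 'Retrieving update manifest')
    job.set_progress(None, 'still on the same step')  # percent stays at 0
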
Example #2
    def run(self, job, id):
        """
        Job to run cronjob task of `id`.
        """
        def __cron_log(line):
            job.logs_fd.write(line)
            syslog.syslog(syslog.LOG_INFO, line.decode())

        cron_task = self.middleware.call_sync('cronjob._get_instance', id)
        cron_cmd = ' '.join(
            self.middleware.call_sync('cronjob.construct_cron_command',
                                      cron_task['schedule'], cron_task['user'],
                                      cron_task['command'],
                                      cron_task['stdout'],
                                      cron_task['stderr'])[7:])

        job.set_progress(10, 'Executing Cron Task')

        syslog.openlog('cron', facility=syslog.LOG_CRON)

        syslog.syslog(syslog.LOG_INFO,
                      f'({cron_task["user"]}) CMD ({cron_cmd})')

        cp = run_command_with_user_context(cron_cmd, cron_task['user'],
                                           __cron_log)

        syslog.closelog()

        job.set_progress(85, 'Executed Cron Task')

        if cp.stdout:
            email = (self.middleware.call_sync(
                'user.query', [['username', '=', cron_task['user']]],
                {'get': True}))['email']
            stdout = cp.stdout.decode()
            if email:
                mail_job = self.middleware.call_sync('mail.send', {
                    'subject': 'CronTask Run',
                    'text': stdout,
                    'to': [email]
                })

                job.set_progress(95, 'Sending mail for Cron Task output')

                mail_job.wait_sync()
                if mail_job.error:
                    job.logs_fd.write(
                        f'Failed to send email for CronTask run: {mail_job.error}'
                        .encode())
            else:
                job.set_progress(
                    95,
                    'Email for root user not configured. Skipping sending mail.'
                )

            job.logs_fd.write(
                f'Executed CronTask - {cron_cmd}: {stdout}'.encode())

        job.set_progress(100, 'Execution of Cron Task complete.')
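
`run_command_with_user_context` is a middleware helper whose real implementation (including the user impersonation) is not shown here. Ignoring the user-switching part, a simplified sketch of its "run a shell command and feed each output line to a callback" behavior might look like this:

    import subprocess

    def run_with_line_callback(cmd, line_callback):
        """Run `cmd` in a shell, passing each output line (as bytes) to `line_callback`."""
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        output = b''
        for line in iter(proc.stdout.readline, b''):
            line_callback(line)
            output += line
        proc.wait()
        return subprocess.CompletedProcess(cmd, proc.returncode, stdout=output)

    cp = run_with_line_callback('echo hello; echo world',
                                lambda line: print('LOG:', line.decode(), end=''))
    print(cp.returncode, len(cp.stdout))
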
Example #3
    def upgrade(self, job, jail, options):
        """Upgrades specified jail to specified RELEASE."""
        verrors = ValidationErrors()
        release = options.get('release', None)
        plugin = options['plugin']

        if release is None and not plugin:
            verrors.add(
                'options.release',
                'Must not be None if options.plugin is False.'
            )
            raise verrors

        job.set_progress(0, f'Upgrading {jail}')
        msg_queue = deque(maxlen=10)

        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if plugin:
                plugin_progress(job, msg)
            else:
                jail_progress(job, msg)

            job.set_progress(None, description=final_msg)

        def plugin_progress(job, msg):
            if 'Snapshotting' in msg:
                job.set_progress(20)
            elif 'Updating plugin INDEX' in msg:
                job.set_progress(40)
            elif 'Running upgrade' in msg:
                job.set_progress(70)
            elif 'Installing plugin packages' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        def jail_progress(job, msg):
            if 'Inspecting system' in msg:
                job.set_progress(20)
            elif 'Preparing to download files' in msg:
                job.set_progress(50)
            elif 'Applying patches' in msg:
                job.set_progress(75)
            elif 'Installing updates' in msg:
                job.set_progress(90)
            elif f'{jail} successfully upgraded' in msg:
                job.set_progress(100)

        _, _, iocage = self.check_jail_existence(
            jail,
            callback=progress_callback
        )
        iocage.upgrade(release=release)

        return True
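
The `deque(maxlen=10)` above turns the job description into a rolling window of the last ten output lines; older lines fall off the front automatically. The idiom in isolation (with a window of 3 to keep the demo short):

    from collections import deque

    msg_queue = deque(maxlen=3)
    for i in range(5):
        msg_queue.append(f'line {i}')
    # Only the last three survive:
    print('\n'.join(msg_queue))  # line 2 / line 3 / line 4
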
Example #4
 def do_update():
     try:
         job.set_progress(30, 'Extracting uploaded file')
         ExtractFrozenUpdate(destfile, dest_extracted, verbose=True)
         job.set_progress(50, 'Applying update')
         ApplyUpdate(dest_extracted)
     except Exception as e:
         raise CallError(str(e))
Example #5
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        release = options.get('release', None)
        https = options.pop('https', False)
        name = options.pop('name')
        jail_name = options.pop('jail_name', None)

        def progress_callback(content, exception):
            msg = content['message'].strip('\r\n')
            rel_up = f'* Updating {release} to the latest patch level... '

            if 'Downloading : base.txz' in msg and '100%' in msg:
                job.set_progress(5, msg)
            elif 'Downloading : lib32.txz' in msg and '100%' in msg:
                job.set_progress(10, msg)
            elif 'Downloading : doc.txz' in msg and '100%' in msg:
                job.set_progress(15, msg)
            elif 'Downloading : src.txz' in msg and '100%' in msg:
                job.set_progress(20, msg)
            elif 'Extracting: base.txz' in msg:
                job.set_progress(25, msg)
            elif 'Extracting: lib32.txz' in msg:
                job.set_progress(50, msg)
            elif 'Extracting: doc.txz' in msg:
                job.set_progress(75, msg)
            elif 'Extracting: src.txz' in msg:
                job.set_progress(90, msg)
            elif rel_up in msg:
                job.set_progress(95, msg)
            else:
                job.set_progress(None, msg)

        self.check_dataset_existence()  # Make sure our datasets exist.
        start_msg = f'{release} being fetched'
        final_msg = f'{release} fetched'

        if name is None and https:
            if 'https' not in options['server']:
                options['server'] = f'https://{options["server"]}'

        if name is not None:
            # we want to create a plugin in this case
            plugin_job = self.middleware.call_sync(
                'plugin.create', {
                    'jail_name': jail_name,
                    'plugin_name': name,
                    'props': options['props'],
                })
            return self.middleware.call_sync('core.job_wait',
                                             plugin_job,
                                             job=True)
        else:
            # We are fetching a release in this case
            iocage = ioc.IOCage(callback=progress_callback, silent=False)
            job.set_progress(0, start_msg)
            iocage.fetch(**options)
            job.set_progress(100, final_msg)
            return True
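
The long if/elif ladder maps known substrings of iocage's output to progress percentages. The same idea can be written in a data-driven form that is easier to extend — a sketch, not the middleware's actual code:

    MILESTONES = [
        ('Downloading : base.txz', 5),
        ('Downloading : lib32.txz', 10),
        ('Extracting: base.txz', 25),
        ('Extracting: lib32.txz', 50),
    ]

    def percent_for(msg):
        """Return the milestone percent for `msg`, or None to keep the previous value."""
        for needle, percent in MILESTONES:
            if needle in msg:
                return percent
        return None

    print(percent_for('Extracting: base.txz done'))  # 25
    print(percent_for('some unrelated line'))        # None
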
Example #6
    def create_job(self, job, options):
        verrors = ValidationErrors()

        try:
            self.check_jail_existence(options['uuid'], skip=False)
        except CallError:
            # A jail does not exist with the provided uuid, we can create one now
            pass
        else:
            # Raise outside the try block: ValidationErrors subclasses CallError,
            # so raising it inside would be swallowed by the except clause above
            verrors.add(
                'uuid',
                f'A jail with uuid {options["uuid"]} already exists'
            )
            raise verrors

        self.validate_ips(verrors, options)
        job.set_progress(20, 'Initial validation complete')

        iocage = ioc.IOCage(skip_jails=True)

        release = options["release"]
        template = options.get("template", False)
        pkglist = options.get("pkglist", None)
        uuid = options["uuid"]
        basejail = options["basejail"]
        empty = options["empty"]
        short = options["short"]
        props = options["props"]
        pool = IOCJson().json_get_value("pool")
        iocroot = IOCJson(pool).json_get_value("iocroot")

        if template:
            release = template

        if (
                not os.path.isdir(f'{iocroot}/releases/{release}') and
                not template and
                not empty
        ):
            self.middleware.call_sync(
                'jail.fetch', {"release": release}, job=True
            )

        err, msg = iocage.create(
            release,
            props,
            0,
            pkglist,
            template=template,
            short=short,
            _uuid=uuid,
            basejail=basejail,
            empty=empty
        )

        if err:
            raise CallError(msg)

        return True
Example #7
    def _do_create(self, job, data):
        self.middleware.call_sync('jail.check_dataset_existence')
        verrors = ValidationErrors()
        branch = data.pop('branch') or self.get_version()
        install_notes = ''
        plugin_name = data.pop('plugin_name')
        jail_name = data.pop('jail_name')
        plugin_repository = data.pop('plugin_repository')
        post_install = False

        job.set_progress(0, f'Creating plugin: {plugin_name}')
        if jail_name in [
                j['id'] for j in self.middleware.call_sync('jail.query')
        ]:
            verrors.add('plugin_create.jail_name',
                        f'A jail with name {jail_name} already exists')
        else:
            verrors = common_validation(self.middleware,
                                        data,
                                        schema='plugin_create')

        verrors.check()

        job.set_progress(20, 'Initial validation complete')

        def progress_callback(content, exception):
            msg = content['message'].strip('\r\n')
            nonlocal install_notes, post_install

            if post_install and msg:
                install_notes += f'\n{msg}'

            if '  These pkgs will be installed:' in msg:
                job.set_progress(50, msg)
            elif 'Installing plugin packages:' in msg:
                job.set_progress(75, msg)
            elif 'Running post_install.sh' in msg:
                job.set_progress(90, msg)
                # Sets each message going forward as important to the user
                post_install = True
            else:
                job.set_progress(None, msg)

        ioc.IOCage(callback=progress_callback, silent=False).fetch(
            **{
                'accept': True,
                'name': jail_name,
                'plugin_name': plugin_name,
                'git_repository': plugin_repository,
                'props': data['props'],
                'branch': branch,
            })

        new_plugin = self.middleware.call_sync('plugin._get_instance',
                                               jail_name)
        new_plugin['install_notes'] = install_notes.strip()

        return new_plugin
Example #8
    def __rsync_worker(self, line, user, job):
        proc_stdout = tempfile.TemporaryFile(mode='w+b', buffering=0)
        try:
            rsync_proc = subprocess.Popen(line,
                                          shell=True,
                                          stdout=proc_stdout.fileno(),
                                          stderr=subprocess.PIPE,
                                          bufsize=0,
                                          preexec_fn=demote(user))
            seek = 0
            old_seek = 0
            progress = 0
            message = 'Starting rsync copy job...'
            while rsync_proc.poll() is None:
                job.set_progress(progress, message)
                proc_op = ''
                proc_stdout.seek(seek)
                try:
                    while True:
                        op_byte = proc_stdout.read(1).decode('utf8')
                        if op_byte == '':
                            # In this case break before incrementing `seek`
                            break
                        # Each 1-byte read advances the stream position by one
                        seek += 1
                        if op_byte == '\r':
                            break
                        proc_op += op_byte
                    if old_seek != seek:
                        old_seek = seek
                        message = proc_op.strip()
                        try:
                            progress = int([
                                x for x in message.split(' ') if '%' in x
                            ][0][:-1])
                        except (IndexError, ValueError):
                            pass
                except BaseException as err:
                    # Catch IOError errno 9 (Bad File Descriptor), which
                    # usually arises because an already-closed file object
                    # is used here. In that case break, and the outer while
                    # loop will check rsync_proc.poll() and do the right
                    # thing.
                    if hasattr(err, 'errno') and err.errno == 9:
                        break
                    logger.debug('Error whilst parsing rsync progress',
                                 exc_info=True)

        except BaseException as e:
            raise CallError(f'Rsync copy job id: {job.id} failed due to: {e}',
                            errno.EIO)

        if rsync_proc.returncode != 0:
            job.set_progress(None, 'Rsync copy job failed')
            raise CallError(
                f'Rsync copy job id: {job.id} returned non-zero exit code. Command used was: {line}. Error: {rsync_proc.stderr.read()}'
            )
Example #9
 def fetch_hookreport(self, blocknum, blocksize, totalsize, job, file_name):
     """Hook to report the download progress."""
     readchunk = blocknum * blocksize
     if totalsize > 0:
         percent = readchunk * 1e2 / totalsize
         job.set_progress(int(percent), 'Downloading', {
             'downloaded': readchunk,
             'total': totalsize
         })
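
The `(blocknum, blocksize, totalsize)` triple matches the `reporthook` callback signature of `urllib.request.urlretrieve`, so a hook like this is presumably bound to its extra `job` and `file_name` arguments with `functools.partial`. A hedged, self-contained sketch (the binding is an assumption; the percent arithmetic mirrors the example):

    import functools
    import urllib.request

    def fetch_hookreport(blocknum, blocksize, totalsize, job=None, file_name=None):
        """Report download progress; `job` and `file_name` are bound via partial."""
        readchunk = blocknum * blocksize
        if totalsize > 0:
            percent = min(readchunk * 1e2 / totalsize, 100)
            print(f'{file_name}: {percent:.0f}% ({readchunk}/{totalsize} bytes)')

    hook = functools.partial(fetch_hookreport, job=None, file_name='index.html')
    hook(10, 8192, 1048576)  # simulated callback: index.html: 8% (81920/1048576 bytes)
    # urllib.request.urlretrieve(url, 'index.html', reporthook=hook)  # real usage
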
Example #10
    async def wipe(self, job, dev, mode, sync):
        """
        Performs a wipe of a disk `dev`.
        It can be one of the following modes:
          - QUICK: wipe the first and last few megabytes of every partition and of the disk
          - FULL: overwrite the whole disk with zeros
          - FULL_RANDOM: overwrite the whole disk with random bytes
        """
        await self.middleware.call('disk.swaps_remove_disks', [dev])
        # FIXME: Please implement appropriate alternative for removal of disk from graid in linux
        if not IS_LINUX:
            await self.middleware.call('disk.remove_disk_from_graid', dev)

        # First do a quick wipe of every partition to clean things like zfs labels
        if mode == 'QUICK':
            for part in await self.middleware.call('disk.list_partitions', dev):
                await self.wipe_quick(part['name'], part['size'])

        await self.middleware.call('disk.destroy_partitions', dev)

        if mode == 'QUICK':
            await self.wipe_quick(dev)
        else:
            size = await self.middleware.call('disk.get_dev_size', dev) or 1

            proc = await Popen([
                'dd',
                'if=/dev/{}'.format('zero' if mode == 'FULL' else 'random'),
                f'of=/dev/{dev}',
                'bs=1M',
            ], stdout=subprocess.DEVNULL, stderr=subprocess.PIPE)

            async def dd_wait():
                while True:
                    if proc.returncode is not None:
                        break
                    os.kill(proc.pid, signal.SIGUSR1 if IS_LINUX else signal.SIGINFO)
                    await asyncio.sleep(1)

            asyncio.ensure_future(dd_wait())

            while True:
                line = await proc.stderr.readline()
                if line == b'':
                    break
                line = line.decode()
                reg = RE_DD.search(line)
                if reg:
                    speed = float(reg.group(2)) if IS_LINUX else int(reg.group(2))
                    if IS_LINUX:
                        mapping = {'gb': 1024 * 1024 * 1024, 'mb': 1024 * 1024, 'kb': 1024, 'b': 1}
                        speed = int(speed * mapping[reg.group(3).lower()])
                    job.set_progress((int(reg.group(1)) / size) * 100, extra={'speed': speed})

        if sync:
            await self.middleware.call('disk.sync', dev)
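
`RE_DD` is defined elsewhere in the module and is not shown here. A plausible reconstruction for the Linux `dd` status lines emitted on SIGUSR1 is sketched below; the actual pattern in the source may differ:

    import re

    # Assumed pattern: "<bytes> bytes (...) copied, <seconds> s, <speed> <unit>/s"
    RE_DD = re.compile(r'(\d+) bytes .* copied, [\d.]+ s, ([\d.]+) ([kMG]?B)/s')

    line = '1073741824 bytes (1.1 GB, 1.0 GiB) copied, 4.0 s, 268 MB/s'
    reg = RE_DD.search(line)
    if reg:
        mapping = {'gb': 1024 ** 3, 'mb': 1024 ** 2, 'kb': 1024, 'b': 1}
        written = int(reg.group(1))
        speed = int(float(reg.group(2)) * mapping[reg.group(3).lower()])
        print(written, speed)  # 1073741824 281018368
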
Example #11
    def decompress_hookreport(self, dst_file, job):
        totalsize = 4756340736  # XXX: It will be parsed from a sha256 file.
        fd = os.open(dst_file, os.O_RDONLY)
        try:
            size = os.lseek(fd, 0, os.SEEK_END)
        finally:
            os.close(fd)

        percent = (size / totalsize) * 100
        job.set_progress(int(percent), 'Decompress', {'decompressed': size, 'total': totalsize})
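
`os.lseek(fd, 0, os.SEEK_END)` returns the current size of the still-growing destination file; `os.stat` would report the same number. A tiny self-contained check of the equivalence:

    import os
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as f:
        f.write(b'x' * 1234)
        path = f.name

    fd = os.open(path, os.O_RDONLY)
    try:
        size = os.lseek(fd, 0, os.SEEK_END)
    finally:
        os.close(fd)

    assert size == os.stat(path).st_size == 1234
    os.unlink(path)
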
Example #12
 def do_update():
     try:
         job.set_progress(30, 'Extracting uploaded file')
         ExtractFrozenUpdate(destfile, dest_extracted, verbose=True)
         job.set_progress(50, 'Applying update')
         if ApplyUpdate(dest_extracted) is None:
             raise ValueError(
                 'Uploaded file is not a manual update file')
     except Exception as e:
         raise CallError(str(e))
Example #13
async def pool_pre_export(middleware, pool, options, job):
    sysds = await middleware.call('systemdataset.config')
    if sysds['pool'] == pool:
        job.set_progress(40, 'Reconfiguring system dataset')
        sysds_job = await middleware.call('systemdataset.update', {
            'pool': None, 'pool_exclude': pool,
        })
        await sysds_job.wait()
        if sysds_job.error:
            raise CallError(sysds_job.error)
Example #14
    def __rsync_worker(self, line, user, job):
        proc_stdout = tempfile.TemporaryFile(mode='w+b', buffering=0)
        try:
            rsync_proc = subprocess.Popen(
                line,
                shell=True,
                stdout=proc_stdout.fileno(),
                stderr=subprocess.PIPE,
                bufsize=0,
                preexec_fn=demote(user)
            )
            seek = 0
            old_seek = 0
            progress = 0
            message = 'Starting rsync copy job...'
            while rsync_proc.poll() is None:
                job.set_progress(progress, message)
                proc_op = ''
                proc_stdout.seek(seek)
                try:
                    while True:
                        op_byte = proc_stdout.read(1).decode('utf8')
                        if op_byte == '':
                            # In this case break before incrementing `seek`
                            break
                        # Each 1-byte read advances the stream position by one
                        seek += 1
                        if op_byte == '\r':
                            break
                        proc_op += op_byte
                    if old_seek != seek:
                        old_seek = seek
                        message = proc_op.strip()
                        try:
                            progress = int([x for x in message.split(' ') if '%' in x][0][:-1])
                        except (IndexError, ValueError):
                            pass
                except BaseException as err:
                    # Catch IOError errno 9 (Bad File Descriptor), which
                    # usually arises because an already-closed file object
                    # is used here. In that case break, and the outer while
                    # loop will check rsync_proc.poll() and do the right
                    # thing.
                    if hasattr(err, 'errno') and err.errno == 9:
                        break
                    logger.debug('Error whilst parsing rsync progress', exc_info=True)

        except BaseException as e:
            raise CallError(f'Rsync copy job id: {job.id} failed due to: {e}', errno.EIO)

        if rsync_proc.returncode != 0:
            job.set_progress(None, 'Rsync copy job failed')
            raise CallError(
                f'Rsync copy job id: {job.id} returned non-zero exit code. Command used was: {line}. Error: {rsync_proc.stderr.read()}'
            )
Example #15
    def download(self, job):
        train = self.middleware.call_sync('update.get_trains')['selected']
        location = self.middleware.call_sync('notifier.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job, 100)

        Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        update = Update.CheckForUpdates(train=train, cache_dir=location)

        if not update:
            return False

        notified = False
        try:
            if self.middleware.call_sync('cache.has_key', 'update.notified'):
                notified = self.middleware.call_sync('cache.get', 'update.notified')
        except Exception:
            pass

        if not notified:
            self.middleware.call_sync('cache.put', 'update.notified', True)
            conf = Configuration.Configuration()
            sys_mani = conf.SystemManifest()
            if sys_mani:
                sequence = sys_mani.Sequence()
            else:
                sequence = ''

            changelog = get_changelog(train, start=sequence, end=update.Sequence())
            hostname = socket.gethostname()

            try:
                # FIXME: Translation
                self.middleware.call_sync('mail.send', {
                    'subject': '{}: {}'.format(hostname, 'Update Available'),
                    'text': '''A new update is available for the %(train)s train.
Version: %(version)s
Changelog:
%(changelog)s
''' % {
                        'train': train,
                        'version': update.Version(),
                        'changelog': changelog,
                    },
                }).wait_sync()
            except Exception:
                self.logger.warning('Failed to send email about new update', exc_info=True)
        return True
Example #16
    async def upgrade_chart_release(self, job, release, options):
        release_orig = copy.deepcopy(release)
        release_name = release['name']

        catalog_item = await self.get_version(release, options)
        await self.middleware.call('catalog.version_supported_error_check', catalog_item)

        config = await self.middleware.call('chart.release.upgrade_values', release, catalog_item['location'])
        release_orig['config'] = config

        # We validate the values specified by the user. We allow the user to specify values here because the
        # upgraded catalog item version might have a different schema, which potentially means that the
        # upgrade won't work, or that even if the new k8s resources are created/deployed, they won't
        # necessarily function as they should because of changed or newly expected params.
        # One tricky bit to account for first is removing from the currently configured values any key
        # which the upgraded release will potentially not support. We can safely remove those, as otherwise
        # validation would fail because the new schema does not expect those keys.
        config = clean_values_for_upgrade(config, catalog_item['schema']['questions'])
        config.update(options['values'])

        config, context = await self.middleware.call(
            'chart.release.normalise_and_validate_values', catalog_item, config, False, release['dataset'],
            release_orig,
        )
        job.set_progress(50, 'Initial validation complete for upgrading chart version')

        # We have validated configuration now

        chart_path = os.path.join(release['path'], 'charts', catalog_item['version'])
        await self.middleware.run_in_thread(shutil.rmtree, chart_path, ignore_errors=True)
        await self.middleware.run_in_thread(shutil.copytree, catalog_item['location'], chart_path)

        await self.middleware.call('chart.release.perform_actions', context)

        # Let's update the context options to reflect that an upgrade is taking place, and from which
        # version to which version it's happening.
        # Helm considers a simple config change to be an upgrade as well, and we have no way of determining
        # the old/new chart versions during a helm upgrade in the helm template, hence the context object.
        config = await add_context_to_configuration(config, {
            CONTEXT_KEY_NAME: {
                **get_action_context(release_name),
                'operation': 'UPGRADE',
                'isUpgrade': True,
                'upgradeMetadata': {
                    'oldChartVersion': release['chart_metadata']['version'],
                    'newChartVersion': catalog_item['version'],
                    'preUpgradeRevision': release['version'],
                }
            }
        }, self.middleware)

        job.set_progress(60, 'Upgrading chart release version')

        await self.middleware.call('chart.release.helm_action', release_name, chart_path, config, 'upgrade')
        await self.middleware.call('chart.release.refresh_events_state', release_name)
Example #17
 def jail_progress(job, msg):
     if 'Inspecting system' in msg:
         job.set_progress(20)
     elif 'Preparing to download files' in msg:
         job.set_progress(50)
     elif 'Applying patches' in msg:
         job.set_progress(75)
     elif 'Installing updates' in msg:
         job.set_progress(90)
     elif f'{jail} successfully upgraded' in msg:
         job.set_progress(100)
Example #18
 def plugin_progress(job, msg):
     if 'Snapshotting' in msg:
         job.set_progress(20)
     elif 'Updating plugin INDEX' in msg:
         job.set_progress(40)
     elif 'Running upgrade' in msg:
         job.set_progress(70)
     elif 'Installing plugin packages' in msg:
         job.set_progress(90)
     elif f'{jail} successfully upgraded' in msg:
         job.set_progress(100)
Example #19
async def rclone_check_progress(job, proc):
    while True:
        read = (await proc.stdout.readline()).decode()
        job.logs_fd.write(read.encode("utf-8", "ignore"))
        if read == "":
            break
        reg = RE_TRANSF.search(read)
        if reg:
            transferred = reg.group(1).strip()
            if not transferred.isdigit():
                job.set_progress(None, transferred)
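
`RE_TRANSF` is not shown in this snippet, but a later example on this page defines it as `re.compile(r'Transferred:\s*?(.+)$', re.S)`. Applied to a sample rclone stats line, the captured text becomes the new job description whenever it is not a bare digit count:

    import re

    RE_TRANSF = re.compile(r'Transferred:\s*?(.+)$', re.S)

    read = 'Transferred:   1.217 MBytes (162.341 kBytes/s)\n'
    reg = RE_TRANSF.search(read)
    if reg:
        transferred = reg.group(1).strip()
        if not transferred.isdigit():
            print('new description:', transferred)
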
Example #20
        def progress_callback(content, exception):
            msg = content['message'].strip('\n')
            msg_queue.append(msg)
            final_msg = '\n'.join(msg_queue)

            if plugin:
                plugin_progress(job, msg)
            else:
                jail_progress(job, msg)

            job.set_progress(None, description=final_msg)
Example #21
    async def remove_storage_class_and_dataset(self, release_name, job=None):
        storage_class_name = get_storage_class_name(release_name)
        if await self.middleware.call(
                'k8s.storage_class.query',
            [['metadata.name', '=', storage_class_name]]):
            if job:
                job.set_progress(85,
                                 f'Removing {release_name!r} storage class')
            try:
                await self.middleware.call('k8s.storage_class.delete',
                                           storage_class_name)
            except Exception as e:
                self.logger.error('Failed to remove %r storage class: %s',
                                  storage_class_name, e)

        k8s_config = await self.middleware.call('kubernetes.config')
        release_ds = os.path.join(k8s_config['dataset'], 'releases',
                                  release_name)

        # If the chart release was consuming any PVs, they have to be removed from the k8s database manually
        # because the chart release reclaim policy is "retain"
        pvc_volume_ds = os.path.join(release_ds, 'volumes')
        for pv in await self.middleware.call('k8s.pv.query', [[
                'spec.csi.volume_attributes.openebs\\.io/poolname', '=',
                pvc_volume_ds
        ]]):
            await self.middleware.call('k8s.pv.delete', pv['metadata']['name'])

        failed_zfs_volumes = []
        # Delete the openebs zfs volume objects (not actual zfs volumes) in the openebs namespace
        for zfs_volume in await self.middleware.call(
                'k8s.zv.query', [['spec.poolName', '=', pvc_volume_ds]]):
            try:
                await self.middleware.call('k8s.zv.delete',
                                           zfs_volume['metadata']['name'])
            except Exception:
                # It's perfectly fine if this fails, as this change is functionally just cosmetic:
                # it essentially cleans up leftover zfs volume entries from the k8s db
                failed_zfs_volumes.append(zfs_volume['metadata']['name'])

        if failed_zfs_volumes:
            self.logger.error(
                'Failed to delete %r zfs volumes when deleting %r chart release',
                ', '.join(failed_zfs_volumes), release_name)

        if await self.middleware.call('zfs.dataset.query',
                                      [['id', '=', release_ds]]):
            if job:
                job.set_progress(95, f'Removing {release_ds!r} dataset')
            await self.middleware.call('zfs.dataset.delete', release_ds, {
                'recursive': True,
                'force': True
            })
Example #22
    def install_manual_impl(self, job, path, dest_extracted):
        p = subprocess.run(['file', path], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, encoding='ascii',
                           errors='ignore')
        if 'Squashfs filesystem' in p.stdout:
            return self._install_scale(job, path)

        job.set_progress(30, 'Extracting file')
        Update.ExtractFrozenUpdate(path, dest_extracted, verbose=True)

        job.set_progress(50, 'Applying update')
        if self.install_impl(job, dest_extracted) is None:
            raise CallError('Uploaded file is not a manual update file')
Example #23
    def fetch(self, job, options):
        """Fetches a release or plugin."""
        fetch_output = {'error': False, 'install_notes': []}

        verrors = ValidationErrors()

        self.validate_ips(verrors, options)

        def progress_callback(content):
            level = content['level']
            msg = content['message'].strip('\n')

            if job.progress['percent'] == 90:
                for split_msg in msg.split('\n'):
                    fetch_output['install_notes'].append(split_msg)

            if level == 'EXCEPTION':
                fetch_output['error'] = True
                raise CallError(msg)

            job.set_progress(None, msg)

            if '  These pkgs will be installed:' in msg:
                job.set_progress(50, msg)
            elif 'Installing plugin packages:' in msg:
                job.set_progress(75, msg)
            elif 'Command output:' in msg:
                job.set_progress(90, msg)

        self.check_dataset_existence()  # Make sure our datasets exist.
        start_msg = None
        final_msg = None

        if options["name"] is not None:
            options["plugin_file"] = True
            start_msg = 'Starting plugin install'
            final_msg = f"Plugin: {options['name']} installed"

        options["accept"] = True

        iocage = ioc.IOCage(callback=progress_callback, silent=False)

        job.set_progress(0, start_msg)
        iocage.fetch(**options)

        if options['name'] is not None:
            # This is to get the admin URL and such
            fetch_output['install_notes'] += job.progress['description'].split(
                '\n')

        job.set_progress(100, final_msg)

        return fetch_output
Example #24
    async def sync_all(self, job):
        """
        Refresh all available catalogs from upstream.
        """
        catalogs = await self.middleware.call('catalog.query')
        catalog_len = len(catalogs)
        for index, catalog in enumerate(catalogs):
            job.set_progress((index / catalog_len) * 100, f'Syncing {catalog["id"]} catalog')
            sync_job = await self.middleware.call('catalog.sync', catalog['id'])
            await sync_job.wait()

        if await self.middleware.call('service.started', 'kubernetes'):
            asyncio.ensure_future(self.middleware.call('chart.release.chart_releases_update_checks_internal'))
Example #25
 async def sync_all(self, job):
     """
     Refresh all available catalogs from upstream.
     """
     catalogs = await self.middleware.call('catalog.query')
     catalog_len = len(catalogs)
     for index, catalog in enumerate(catalogs):
         job.set_progress(((index + 1) / catalog_len) * 100,
                          f'Syncing {catalog["id"]} catalog')
         try:
             await self.middleware.call('catalog.sync', catalog['id'])
         except Exception as e:
             self.logger.error('Failed to sync %r catalog: %s',
                               catalog['id'], e)
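
Note the off-by-one difference from the earlier `sync_all` variant: `index / catalog_len` reports 0% while the first catalog syncs and never reaches 100%, whereas `(index + 1) / catalog_len` ends at exactly 100%. A quick check of both formulas:

    catalogs = ['OFFICIAL', 'CHARTS', 'COMMUNITY']
    n = len(catalogs)
    for index, catalog in enumerate(catalogs):
        print(f'{catalog}: index/n = {index / n * 100:.0f}%, '
              f'(index+1)/n = {(index + 1) / n * 100:.0f}%')
    # OFFICIAL: index/n = 0%, (index+1)/n = 33%
    # CHARTS: index/n = 33%, (index+1)/n = 67%
    # COMMUNITY: index/n = 67%, (index+1)/n = 100%
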
Example #26
    async def upgrade(self, job, release_name, options):
        """
        Upgrade `release_name` chart release.

        `upgrade_options.item_version` specifies the item version to which the chart release should be upgraded.

        The system will update the container images used by the `release_name` chart release, as a chart
        release upgrade is not considered complete until the images in use have also been updated to their
        latest versions.

        During upgrade, `upgrade_options.values` can be specified to apply configuration changes
        to the chart release in question.

        When the chart version is upgraded, the system will automatically take a snapshot of the
        `ix_volumes` in question, which can be used to roll back later on.
        """
        await self.middleware.call('kubernetes.validate_k8s_setup')
        release = await self.middleware.call('chart.release.get_instance', release_name)
        if not release['update_available'] and not release['container_images_update_available']:
            raise CallError('No update is available for chart release')

        # We need to update container images before upgrading chart version as it's possible that the chart version
        # in question needs newer image hashes.
        job.set_progress(10, 'Updating container images')
        await (
            await self.middleware.call('chart.release.pull_container_images', release_name, {'redeploy': False})
        ).wait(raise_error=True)
        job.set_progress(30, 'Updated container images')

        await self.scale_down_workloads_before_snapshot(job, release)

        # If a snapshot of the volumes already exist with the same name in case of a failed upgrade, we will remove
        # it as we want the current point in time being reflected in the snapshot
        # TODO: Remove volumes/ix_volumes check in next release as we are going to do a recursive snapshot
        #  from parent volumes ds moving on
        for filesystem in ('volumes', 'volumes/ix_volumes'):
            volumes_ds = os.path.join(release['dataset'], filesystem)
            snap_name = f'{volumes_ds}@{release["version"]}'
            if await self.middleware.call('zfs.snapshot.query', [['id', '=', snap_name]]):
                await self.middleware.call('zfs.snapshot.delete', snap_name, {'recursive': True})

        await self.middleware.call(
            'zfs.snapshot.create', {
                'dataset': os.path.join(release['dataset'], 'volumes'), 'name': release['version'], 'recursive': True
            }
        )
        job.set_progress(50, 'Created snapshot for upgrade')

        if release['update_available']:
            await self.upgrade_chart_release(job, release, options)
        else:
            await (await self.middleware.call('chart.release.redeploy', release_name)).wait(raise_error=True)

        chart_release = await self.middleware.call('chart.release.get_instance', release_name)
        self.middleware.send_event('chart.release.query', 'CHANGED', id=release_name, fields=chart_release)

        await self.chart_releases_update_checks_internal([['id', '=', release_name]])

        job.set_progress(100, 'Upgrade complete for chart release')

        return chart_release
Example #27
    def setacl_posix1e(self, job, data):
        job.set_progress(0, 'Preparing to set acl.')
        if osc.IS_FREEBSD:
            raise CallError(
                "POSIX1e brand ACLs not supported on the FreeBSD-based TrueNAS platform",
                errno.EOPNOTSUPP)

        options = data['options']
        recursive = options.get('recursive')
        dacl = data.get('dacl', [])
        path = data['path']

        aclcheck = ACLType.POSIX1E.validate(data)

        if not aclcheck['is_valid']:
            raise CallError(
                f"POSIX1e ACL is invalid: {' '.join(aclcheck['errors'])}")

        stripacl = subprocess.run(
            ['setfacl', '-bR' if recursive else '-b', path],
            check=False,
            capture_output=True)
        if stripacl.returncode != 0:
            raise CallError(f"Failed to remove POSIX1e ACL from [{path}]: "
                            f"{stripacl.stderr.decode()}")

        if options['stripacl']:
            job.set_progress(100, "Finished removing POSIX1e ACL")
            return

        job.set_progress(50, 'Reticulating splines.')

        aclstring = ""  # initialize up front so an empty `dacl` cannot leave it undefined
        for idx, ace in enumerate(dacl):
            if idx != 0:
                aclstring += ","

            if ace['id'] == -1:
                ace['id'] = ''

            ace['tag'] = ace['tag'].rstrip('_OBJ').lower()

            if ace['default']:
                aclstring += "default:"

            aclstring += f"{ace['tag']}:{ace['id']}:"
            aclstring += 'r' if ace['perms']['READ'] else '-'
            aclstring += 'w' if ace['perms']['WRITE'] else '-'
            aclstring += 'x' if ace['perms']['EXECUTE'] else '-'

        setacl = subprocess.run(
            ['setfacl', '-mR' if recursive else '-m', aclstring, path],
            check=False,
            capture_output=True)
        if setacl.returncode != 0:
            raise CallError(f'Failed to set ACL on path [{path}]: '
                            f'{setacl.stderr.decode()}')

        job.set_progress(100, 'Finished setting POSIX1e ACL.')
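
The loop assembles a `setfacl -m` entry string of the form `[default:]tag:id:rwx`, comma-joined across ACEs. Here is the same string-building exercised standalone on hypothetical ACE dicts (note that the original's `rstrip('_OBJ')` strips a character set and only happens to work for these tag names; `removesuffix`, Python 3.9+, states the intent directly):

    dacl = [
        {'tag': 'USER_OBJ', 'id': -1, 'default': False,
         'perms': {'READ': True, 'WRITE': True, 'EXECUTE': True}},
        {'tag': 'GROUP_OBJ', 'id': -1, 'default': True,
         'perms': {'READ': True, 'WRITE': False, 'EXECUTE': True}},
    ]

    parts = []
    for ace in dacl:
        tag = ace['tag'].removesuffix('_OBJ').lower()
        ace_id = '' if ace['id'] == -1 else ace['id']
        perms = ''.join(char if ace['perms'][key] else '-' for char, key in
                        (('r', 'READ'), ('w', 'WRITE'), ('x', 'EXECUTE')))
        parts.append(f"{'default:' if ace['default'] else ''}{tag}:{ace_id}:{perms}")

    print(','.join(parts))  # user::rwx,default:group::r-x
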
Example #28
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')
        if not os.path.exists(data['path']):
            raise CallError(f"Path {data['path']} not found.", errno.ENOENT)

        if not os.path.realpath(data['path']).startswith('/mnt/'):
            raise CallError(
                f"Changing ownership on path {data['path']} is not permitted.",
                errno.EPERM)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished changing owner.')
        else:
            job.set_progress(10,
                             f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')
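
The `-1` defaults lean on `os.chown`'s POSIX semantics: an owner or group of `-1` leaves that attribute unchanged, which is how an absent or None `uid`/`gid` becomes a no-op. A minimal demonstration (no privileges needed, since nothing actually changes):

    import os
    import tempfile

    with tempfile.NamedTemporaryFile() as f:
        before = os.stat(f.name)
        os.chown(f.name, -1, -1)  # -1/-1: change neither owner nor group
        after = os.stat(f.name)
        assert (before.st_uid, before.st_gid) == (after.st_uid, after.st_gid)
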
Example #29
    def do_delete(self, job, id, force):
        """
        Delete certificate of `id`.

        If the certificate is an ACME-based certificate, the certificate service will try to
        revoke the certificate by updating its status with the ACME server. If that fails, an exception
        is raised and the certificate is not deleted from the system. However, if `force` is set to True,
        the certificate is deleted from the system even if an error occurred while revoking it with the
        ACME server.

        .. examples(websocket)::

          Delete certificate of `id`

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "certificate.delete",
                "params": [
                    1,
                    true
                ]
            }
        """
        check_dependencies(self.middleware, 'CERT', id)

        certificate = self.middleware.call_sync('certificate.get_instance', id)

        if certificate.get('acme'):
            client, key = self.middleware.call_sync(
                'acme.get_acme_client_and_key',
                certificate['acme']['directory'], True)

            try:
                client.revoke(
                    jose.ComparableX509(
                        crypto.load_certificate(crypto.FILETYPE_PEM,
                                                certificate['certificate'])),
                    0)
            except (errors.ClientError, messages.Error) as e:
                if not force:
                    raise CallError(f'Failed to revoke certificate: {e}')

        response = self.middleware.call_sync('datastore.delete',
                                             self._config.datastore, id)

        self.middleware.call_sync('service.start', 'ssl')

        job.set_progress(100)
        return response
Example #30
    def chown(self, job, data):
        """
        Change owner or group of file at `path`.

        `uid` and `gid` specify new owner of the file. If either
        key is absent or None, then existing value on the file is not
        changed.

        `recursive` performs action recursively, but does
        not traverse filesystem mount points.

        If `traverse` and `recursive` are specified, then the chown
        operation will traverse filesystem mount points.
        """
        job.set_progress(0, 'Preparing to change owner.')

        self._common_perm_path_validate(data['path'])

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']
        options = data['options']

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished changing owner.')
        else:
            job.set_progress(10, f'Recursively changing owner of {data["path"]}.')
            self._winacl(data['path'], 'chown', uid, gid, options)
            job.set_progress(100, 'Finished changing owner.')
Example #31
    async def create_imported_csr(self, job, data):

        # TODO: Should we validate the CSR against the private key?

        data['type'] = CERT_TYPE_CSR

        job.set_progress(80)

        if 'passphrase' in data:
            data['privatekey'] = export_private_key(data['privatekey'],
                                                    data['passphrase'])

        job.set_progress(90, 'Finalizing changes')

        return data
Example #32
    async def sync_all(self, job):
        """
        Refresh all available catalogs from upstream.
        """
        catalogs = await self.middleware.call('catalog.query')
        catalog_len = len(catalogs)
        for index, catalog in enumerate(catalogs):
            job.set_progress(((index + 1) / catalog_len) * 100, f'Syncing {catalog["id"]} catalog')
            try:
                await self.middleware.call('catalog.sync', catalog['id'])
            except Exception as e:
                self.logger.error('Failed to sync %r catalog: %s', catalog['id'], e)

        if await self.middleware.call('service.started', 'kubernetes'):
            asyncio.ensure_future(self.middleware.call('chart.release.chart_releases_update_checks_internal'))
Example #33
    async def update(self, job, attrs=None):
        """
        Downloads (if not already cached) and applies an update.
        """
        attrs = attrs or {}

        trains = await self.middleware.call('update.get_trains')
        train = attrs.get('train') or trains['selected']

        if attrs.get('train'):
            await self.middleware.run_in_thread(self.__set_train, attrs.get('train'), trains)

        location = await self.middleware.call('update.get_update_location')

        job.set_progress(0, 'Retrieving update manifest')

        handler = UpdateHandler(self, job)

        update = Update.DownloadUpdate(
            train,
            location,
            check_handler=handler.check_handler,
            get_handler=handler.get_handler,
        )
        if update is False:
            raise ValueError('No update available')

        new_manifest = Manifest.Manifest(require_signature=True)
        new_manifest.LoadPath('{}/MANIFEST'.format(location))

        Update.ApplyUpdate(
            location,
            install_handler=handler.install_handler,
        )
        await self.middleware.call('cache.put', 'update.applied', True)

        if (
            await self.middleware.call('system.is_freenas') or
            (
                await self.middleware.call('failover.licensed') and
                await self.middleware.call('failover.status') != 'BACKUP'
            )
        ):
            await self.middleware.call('update.take_systemdataset_samba4_snapshot')

        if attrs.get('reboot'):
            await self.middleware.call('system.reboot', {'delay': 10})
        return True
Example #34
 async def check_progress(job, proc):
     RE_TRANSF = re.compile(r'Transferred:\s*?(.+)$', re.S)
     read_buffer = ''
     while True:
         read = (await proc.stderr.readline()).decode()
         if read == '':
             break
         read_buffer += read
         if len(read_buffer) > 10240:
             read_buffer = read_buffer[-10240:]
         reg = RE_TRANSF.search(read)
         if reg:
             transferred = reg.group(1).strip()
             if not transferred.isdigit():
                 job.set_progress(None, transferred)
     return read_buffer
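
Keeping only the last 10 KiB (`read_buffer[-10240:]`) bounds memory on chatty transfers while preserving the tail of the output for the caller. The slicing idiom in isolation:

    read_buffer = ''
    for chunk in ('a' * 6000, 'b' * 6000, 'c' * 1000):
        read_buffer += chunk
        if len(read_buffer) > 10240:
            read_buffer = read_buffer[-10240:]

    print(len(read_buffer))                 # 10240
    print(read_buffer[0], read_buffer[-1])  # a c
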
Example #35
        def watch():
            while True:
                scrub = pool.scrub
                if scrub.function != libzfs.ScanFunction.SCRUB:
                    break

                if scrub.state == libzfs.ScanState.FINISHED:
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub.state == libzfs.ScanState.CANCELED:
                    break

                if scrub.state == libzfs.ScanState.SCANNING:
                    job.set_progress(scrub.percentage, 'Scrubbing')
                time.sleep(1)
Example #36
        def watch():
            while True:
                with libzfs.ZFS() as zfs:
                    scrub = zfs.get(name).scrub.__getstate__()
                if scrub['function'] != 'SCRUB':
                    break

                if scrub['state'] == 'FINISHED':
                    job.set_progress(100, 'Scrub finished')
                    break

                if scrub['state'] == 'CANCELED':
                    break

                if scrub['state'] == 'SCANNING':
                    job.set_progress(scrub['percentage'], 'Scrubbing')
                time.sleep(1)
Example #37
    async def execute_init_tasks(self, job, when):

        tasks = await self.middleware.call(
            'initshutdownscript.query', [
                ['enabled', '=', True],
                ['when', '=', when]
            ])

        for i, task in enumerate(tasks):
            try:
                await asyncio.wait_for(self.execute_task(task), timeout=task['timeout'])
            except asyncio.TimeoutError:
                self.middleware.logger.debug(
                    f'{task["type"]} {task["command"] if task["type"] == "COMMAND" else task["script"]} timed out'
                )
            finally:
                job.set_progress((100 / len(tasks)) * (i + 1))

        job.set_progress(100, f'Completed tasks for {when}')
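
`asyncio.wait_for` cancels the awaited task and raises `asyncio.TimeoutError` once the per-task timeout elapses, and the `finally` block advances the progress bar either way. The timeout mechanics in isolation:

    import asyncio

    async def slow_task():
        await asyncio.sleep(10)

    async def main():
        try:
            await asyncio.wait_for(slow_task(), timeout=0.1)
        except asyncio.TimeoutError:
            print('task timed out')
        finally:
            print('progress still advances')

    asyncio.run(main())
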
Example #38
    async def sync(self, job, id):
        """
        Run the cloud_sync job `id`, syncing the local data to remote.
        """

        cloud_sync = await self._get_instance(id)

        credentials = cloud_sync["credentials"]

        local_path = cloud_sync["path"]
        local_direction = FsLockDirection.READ if cloud_sync["direction"] == "PUSH" else FsLockDirection.WRITE

        remote_path = get_remote_path(REMOTES[credentials["provider"]], cloud_sync["attributes"])
        remote_direction = FsLockDirection.READ if cloud_sync["direction"] == "PULL" else FsLockDirection.WRITE

        directions = {
            FsLockDirection.READ: "reading",
            FsLockDirection.WRITE: "writing",
        }

        job.set_progress(0, f"Locking local path {local_path!r} for {directions[local_direction]}")
        async with self.local_fs_lock_manager.lock(local_path, local_direction):
            job.set_progress(0, f"Locking remote path {remote_path!r} for {directions[remote_direction]}")
            async with self.remote_fs_lock_manager.lock(f"{credentials['id']}/{remote_path}", remote_direction):
                job.set_progress(0, "Starting")
                try:
                    await rclone(self.middleware, job, cloud_sync)
                    await self.middleware.call("alert.oneshot_delete", "CloudSyncTaskFailed", cloud_sync["id"])
                except Exception:
                    await self.middleware.call("alert.oneshot_create", "CloudSyncTaskFailed", {
                        "id": cloud_sync["id"],
                        "name": cloud_sync["description"],
                    })
                    raise
Example #39
    def debug(self, job):
        """
        Generate system debug file.

        Result value will be the absolute path of the file.
        """
        system_dataset_path = self.middleware.call_sync('systemdataset.config')['path']
        if system_dataset_path is not None:
            direc = os.path.join(system_dataset_path, 'ixdiagnose')
        else:
            direc = '/var/tmp/ixdiagnose'
        dump = os.path.join(direc, 'ixdiagnose.tgz')

        # Be extra safe in case we have left over from previous run
        if os.path.exists(direc):
            shutil.rmtree(direc)

        cp = subprocess.Popen(
            ['ixdiagnose', '-d', direc, '-s', '-F', '-p'],
            text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
            bufsize=1
        )

        for line in iter(cp.stdout.readline, ''):
            line = line.rstrip()

            if line.startswith('**'):
                percent, help = line.split(':', 1)  # maxsplit guards against colons in the message
                job.set_progress(
                    int(percent.split()[-1].strip('%')),
                    help.lstrip()
                )
        _, stderr = cp.communicate()

        if cp.returncode != 0:
            raise CallError(f'Failed to generate debug file: {stderr}')

        job.set_progress(100, 'Debug generation finished')

        return dump
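
The parsing assumes `ixdiagnose` prefixes progress lines with `**` and a percentage (inferred from this code, not from ixdiagnose documentation). The split logic on its own, with sample lines:

    lines = [
        '** 10%: Collecting system information',
        'some other output',
        '** 95%: Compressing archive',
    ]

    for line in lines:
        line = line.rstrip()
        if line.startswith('**'):
            percent, help = line.split(':', 1)
            print(int(percent.split()[-1].strip('%')), help.lstrip())
    # 10 Collecting system information
    # 95 Compressing archive
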
Example #40
    async def file(self, job, options):
        """
        Updates the system using the uploaded .tar file.

        Use null `destination` to create a temporary location.
        """

        dest = options.get('destination')

        if not dest:
            try:
                await self.middleware.call('notifier.create_upload_location')
                dest = '/var/tmp/firmware'
            except Exception as e:
                raise CallError(str(e))
        elif not dest.startswith('/mnt/'):
            raise CallError('Destination must reside within a pool')

        if not os.path.isdir(dest):
            raise CallError('Destination is not a directory')

        destfile = os.path.join(dest, 'manualupdate.tar')
        dest_extracted = os.path.join(dest, '.update')

        try:
            job.set_progress(10, 'Writing uploaded file to disk')
            with open(destfile, 'wb') as f:
                await self.middleware.run_in_io_thread(
                    shutil.copyfileobj, job.pipes.input.r, f, 1048576,
                )

            def do_update():
                try:
                    job.set_progress(30, 'Extracting uploaded file')
                    ExtractFrozenUpdate(destfile, dest_extracted, verbose=True)
                    job.set_progress(50, 'Applying update')
                    ApplyUpdate(dest_extracted)
                except Exception as e:
                    raise CallError(str(e))

            await self.middleware.run_in_io_thread(do_update)

            job.set_progress(95, 'Cleaning up')

        finally:
            if os.path.exists(destfile):
                os.unlink(destfile)

            if os.path.exists(dest_extracted):
                shutil.rmtree(dest_extracted, ignore_errors=True)

        if dest == '/var/tmp/firmware':
            await self.middleware.call('notifier.destroy_upload_location')

        job.set_progress(100, 'Update completed')
Example #41
    def run(self, job, id):
        """
        Job to run cronjob task of `id`.
        """
        def __cron_log(line):
            job.logs_fd.write(line)
            syslog.syslog(syslog.LOG_INFO, line.decode())

        cron_task = self.middleware.call_sync('cronjob._get_instance', id)
        cron_cmd = ' '.join(
            self.middleware.call_sync(
                'cronjob.construct_cron_command', cron_task['schedule'], cron_task['user'],
                cron_task['command'], cron_task['stdout'], cron_task['stderr']
            )[7:]
        )

        job.set_progress(
            10,
            'Executing Cron Task'
        )

        syslog.openlog('cron', facility=syslog.LOG_CRON)

        syslog.syslog(syslog.LOG_INFO, f'({cron_task["user"]}) CMD ({cron_cmd})')

        cp = run_command_with_user_context(
            cron_cmd, cron_task['user'], __cron_log
        )

        syslog.closelog()

        job.set_progress(
            85,
            'Executed Cron Task'
        )

        if cp.stdout:
            email = (
                self.middleware.call_sync('user.query', [['username', '=', cron_task['user']]], {'get': True})
            )['email']
            stdout = cp.stdout.decode()
            if email:
                mail_job = self.middleware.call_sync(
                    'mail.send', {
                        'subject': 'CronTask Run',
                        'text': stdout,
                        'to': [email]
                    }
                )

                job.set_progress(
                    95,
                    'Sending mail for Cron Task output'
                )

                mail_job.wait_sync()
                if mail_job.error:
                    job.logs_fd.write(f'Failed to send email for CronTask run: {mail_job.error}'.encode())
            else:
                job.set_progress(
                    95,
                    'Email for root user not configured. Skipping sending mail.'
                )

            job.logs_fd.write(f'Executed CronTask - {cron_cmd}: {stdout}'.encode())

        job.set_progress(
            100,
            'Execution of Cron Task complete.'
        )
Exemplo n.º 44
0
    def copy(self, job, rcopy):
        """
        Starts an rsync copy task between the current FreeNAS machine
        and a specified remote host (it can also perform a local copy),
        reporting the progress of the copy task.
        """

        # Pull the task parameters out of the rcopy payload
        user = rcopy.get('user')
        path = rcopy.get('path')
        mode = rcopy.get('mode')
        remote_path = rcopy.get('remote_path')
        remote_host = rcopy.get('remote_host')
        remote_module = rcopy.get('remote_module')
        remote_user = rcopy.get('remote_user', rcopy.get('user'))
        remote_address = remote_host if '@' in remote_host else f'"{remote_user}"@{remote_host}'
        remote_password = rcopy.get('remote_password', None)
        password_file = None
        properties = rcopy.get('properties', defaultdict(bool))

        # Briefly validate all of the user-provided parameters
        if not path:
            raise ValueError('The path is required')
        elif not os.path.exists(path):
            raise CallError(f'The specified path: {path} does not exist', errno.ENOENT)

        if not remote_host:
            raise ValueError('The remote host is required')

        if mode == 'SSH' and not remote_path:
            raise ValueError('The remote path is required')
        elif mode == 'MODULE' and not remote_module:
            raise ValueError('The remote module is required')

        try:
            pwd.getpwnam(user)
        except KeyError:
            raise CallError(f'User: {user} does not exist', errno.ENOENT)
        if (
            mode == 'SSH' and
            rcopy.get('remote_host') in ['127.0.0.1', 'localhost'] and
            not os.path.exists(remote_path)
        ):
            raise CallError(f'The specified path: {remote_path} does not exist', errno.ENOENT)

        # Phew! With that out of the way, let's begin the transfer

        line = f'{RSYNC_PATH} --info=progress2 -h'
        if properties:
            if properties.get('recursive'):
                line += ' -r'
            if properties.get('times'):
                line += ' -t'
            if properties.get('compress'):
                line += ' -z'
            if properties.get('archive'):
                line += ' -a'
            if properties.get('preserve_permissions'):
                line += ' -p'
            if properties.get('preserve_attributes'):
                line += ' -X'
            if properties.get('delete'):
                line += ' --delete-delay'
            if properties.get('delay_updates'):
                line += ' --delay-updates'

        if mode == 'MODULE':
            if rcopy.get('direction') == 'PUSH':
                line += f' "{path}" {remote_address}::"{remote_module}"'
            else:
                line += f' {remote_address}::"{remote_module}" "{path}"'
            if remote_password:
                password_file = tempfile.NamedTemporaryFile(mode='w')

                password_file.write(remote_password)
                password_file.flush()
                shutil.chown(password_file.name, user=user)
                os.chmod(password_file.name, 0o600)
                line += f' --password-file={password_file.name}'
        else:
            # There is some code duplication here, but it is simple. If you
            # find a way to deduplicate it that does NOT break localhost-based
            # rsync copies, please go for it.
            if rcopy.get('remote_host') in ['127.0.0.1', 'localhost']:
                if rcopy['direction'] == 'PUSH':
                    line += f' "{path}" "{remote_path}"'
                else:
                    line += f' "{remote_path}" "{path}"'
            else:
                line += ' -e "ssh -p {0} -o BatchMode=yes -o StrictHostKeyChecking=yes"'.format(
                    rcopy.get('remote_ssh_port', 22)
                )
                if rcopy['direction'] == 'PUSH':
                    line += f' "{path}" {remote_address}:\\""{remote_path}"\\"'
                else:
                    line += f' {remote_address}:\\""{remote_path}"\\" "{path}"'

        logger.debug(f'Executing rsync job id: {job.id} with the following command {line}')
        try:
            t = threading.Thread(target=self.__rsync_worker, args=(line, user, job), daemon=True)
            t.start()
            t.join()
        finally:
            if password_file:
                password_file.close()

        job.set_progress(100, 'Rsync copy job successfully completed')
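For reference, a hypothetical `rcopy` payload for the SSH push path (all values are invented; the keys are exactly the ones the method reads above):

rcopy = {
    'user': 'backup',
    'path': '/mnt/tank/dataset',
    'mode': 'SSH',
    'direction': 'PUSH',
    'remote_host': '192.168.1.50',
    'remote_user': 'backup',
    'remote_ssh_port': 22,
    'remote_path': '/backups/dataset',
    'properties': {'recursive': True, 'times': True, 'compress': True},
}

With these values the method builds roughly `rsync --info=progress2 -h -r -t -z -e "ssh -p 22 -o BatchMode=yes -o StrictHostKeyChecking=yes" "/mnt/tank/dataset" "backup"@192.168.1.50:\"/backups/dataset\"`.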
Exemplo n.º 45
0
    def fetch_hookreport(self, blocknum, blocksize, totalsize, job, file_name):
        """Hook to report the download progress."""
        readchunk = blocknum * blocksize
        if totalsize > 0:
            percent = readchunk * 1e2 / totalsize
            job.set_progress(int(percent), 'Downloading', {'downloaded': readchunk, 'total': totalsize})
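Since `urllib.request.urlretrieve` invokes its reporthook as `hook(blocknum, blocksize, totalsize)`, the extra `job` and `file_name` parameters have to be pre-bound. One plausible wiring, assuming `self` and `job` come from the surrounding service (the URL and filename are placeholders):

import functools
import urllib.request

# Bind the trailing parameters so the hook matches urlretrieve's signature.
hook = functools.partial(self.fetch_hookreport, job=job, file_name='update.tar')
urllib.request.urlretrieve('https://example.org/update.tar', '/tmp/update.tar', hook)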
Exemplo n.º 46
0
    def debug_download(self, job):
        """
        Job to stream debug file.

        This method is meant to be used in conjunction with `core.download` so the debug
        can be downloaded via HTTP.
        """
        job.set_progress(0, 'Generating debug file')
        debug_job = self.middleware.call_sync('system.debug')

        standby_debug = None
        is_freenas = self.middleware.call_sync('system.is_freenas')
        if not is_freenas and self.middleware.call_sync('failover.licensed'):
            try:
                standby_debug = self.middleware.call_sync(
                    'failover.call_remote', 'system.debug', [], {'job': True}
                )
            except Exception:
                self.logger.warning('Failed to get debug from standby node', exc_info=True)
            else:
                remote_ip = self.middleware.call_sync('failover.remote_ip')
                url = self.middleware.call_sync(
                    'failover.call_remote', 'core.download', ['filesystem.get', [standby_debug], 'debug.txz'],
                )[1]

                url = f'http://{remote_ip}:6000{url}'
                standby_debug = io.BytesIO()
                with requests.get(url, stream=True) as r:
                    for i in r.iter_content(chunk_size=1048576):
                        if standby_debug.tell() > 20971520:
                            raise CallError('Standby debug file is bigger than 20MiB.')
                        standby_debug.write(i)

        debug_job.wait_sync()
        if debug_job.error:
            raise CallError(debug_job.error)

        job.set_progress(90, 'Preparing debug file for streaming')

        if standby_debug:
            # Debug file cannot be big on HA because we put both debugs in memory
            # so they can be downloaded at once.
            try:
                if os.stat(debug_job.result).st_size > 20971520:
                    raise CallError('Debug file is bigger than 20MiB.')
            except FileNotFoundError:
                raise CallError('Debug file was not found, try again.')

            network = self.middleware.call_sync('network.configuration.config')
            node = self.middleware.call_sync('failover.node')

            tario = io.BytesIO()
            with tarfile.open(fileobj=tario, mode='w') as tar:

                if node == 'A':
                    my_hostname = network['hostname']
                    remote_hostname = network['hostname_b']
                else:
                    my_hostname = network['hostname_b']
                    remote_hostname = network['hostname']

                tar.add(debug_job.result, f'{my_hostname}.txz')

                tarinfo = tarfile.TarInfo(f'{remote_hostname}.txz')
                tarinfo.size = standby_debug.tell()
                standby_debug.seek(0)
                tar.addfile(tarinfo, fileobj=standby_debug)

            tario.seek(0)
            shutil.copyfileobj(tario, job.pipes.output.w)
        else:
            with open(debug_job.result, 'rb') as f:
                shutil.copyfileobj(f, job.pipes.output.w)
        job.pipes.output.w.close()
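The in-memory tar dance above (set `TarInfo.size` from `tell()`, rewind, then `addfile`) is easy to get wrong. A self-contained sketch of bundling two in-memory blobs, with hypothetical member names:

import io
import tarfile

def bundle(blobs):
    """blobs: dict mapping member name -> io.BytesIO. Returns a tar stream."""
    tario = io.BytesIO()
    with tarfile.open(fileobj=tario, mode='w') as tar:
        for name, buf in blobs.items():
            info = tarfile.TarInfo(name)
            info.size = buf.tell()   # size must be set before addfile()
            buf.seek(0)              # rewind so addfile reads from the start
            tar.addfile(info, fileobj=buf)
    tario.seek(0)  # rewind the archive itself before streaming it out
    return tario

a, b = io.BytesIO(), io.BytesIO()
a.write(b'node A debug')
b.write(b'node B debug')
stream = bundle({'node-a.txz': a, 'node-b.txz': b})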
Exemplo n.º 47
0
    async def new_ticket(self, job, data):
        """
        Creates a new ticket for support.
        This is done using the support proxy API.
        For FreeNAS it will be created on Redmine and for TrueNAS on SupportSuite.

        For FreeNAS `criticality`, `environment`, `phone`, `name` and `email` attributes are not required.
        For TrueNAS `username`, `password` and `type` attributes are not required.
        """

        job.set_progress(1, 'Gathering data')

        sw_name = 'freenas' if await self.middleware.call('system.is_freenas') else 'truenas'

        if sw_name == 'freenas':
            required_attrs = ('type', 'username', 'password')
        else:
            required_attrs = ('phone', 'name', 'email', 'criticality', 'environment')
            data['serial'] = (await (await Popen(
                ['/usr/local/sbin/dmidecode', '-s', 'system-serial-number'],
                stdout=subprocess.PIPE,
            )).communicate())[0].decode().split('\n')[0].upper()
            data['company'] = 'Unknown'

        for i in required_attrs:
            if i not in data:
                raise CallError(f'{i} is required', errno.EINVAL)

        data['version'] = (await self.middleware.call('system.version')).split('-', 1)[-1]
        data['user'] = data.pop('username')
        debug = data.pop('attach_debug')

        type_ = data.get('type')
        if type_:
            data['type'] = type_.lower()

        job.set_progress(20, 'Submitting ticket')

        try:
            r = await self.middleware.threaded(lambda: requests.post(
                f'https://{ADDRESS}/{sw_name}/api/v1.0/ticket',
                data=json.dumps(data),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            ))
            result = r.json()
        except simplejson.JSONDecodeError:
            self.logger.debug(f'Failed to decode ticket creation response: {r.text}')
            raise CallError('Invalid proxy server response', errno.EBADMSG)
        except requests.ConnectionError as e:
            raise CallError(f'Connection error {e}', errno.EBADF)
        except requests.Timeout:
            raise CallError('Connection timed out', errno.ETIMEDOUT)

        if r.status_code != 200:
            self.logger.debug(f'Support Ticket failed ({r.status_code}): {r.text}')
            raise CallError('Ticket creation failed, try again later.', errno.EINVAL)

        if result['error']:
            raise CallError(result['message'], errno.EINVAL)

        ticket = result.get('ticketnum')
        url = result.get('message')
        if not ticket:
            raise CallError('New ticket number was not provided', errno.EINVAL)
        job.set_progress(50, f'Ticket created: {ticket}', extra={'ticket': ticket})

        if debug:
            # FIXME: generate debug from middleware
            mntpt, direc, dump = await self.middleware.threaded(debug_get_settings)

            job.set_progress(60, 'Generating debug file')
            await self.middleware.threaded(debug_generate)

            not_freenas = not (await self.middleware.call('system.is_freenas'))
            if not_freenas:
                not_freenas &= await self.middleware.call('notifier.failover_licensed')
            if not_freenas:
                debug_file = f'{direc}/debug.tar'
                debug_name = 'debug-{}.tar'.format(time.strftime('%Y%m%d%H%M%S'))
            else:
                debug_file = dump
                debug_name = 'debug-{}-{}.txz'.format(
                    socket.gethostname().split('.')[0],
                    time.strftime('%Y%m%d%H%M%S'),
                )

            job.set_progress(80, 'Attaching debug file')

            tjob = await self.middleware.call('support.attach_ticket', {
                'ticket': ticket,
                'filename': debug_name,
                'username': data.get('user'),
                'password': data.get('password'),
            })

            def writer():
                with open(debug_file, 'rb') as f:
                    while True:
                        read = f.read(10240)
                        if read == b'':
                            break
                        os.write(tjob.write_fd, read)
                    os.close(tjob.write_fd)
            await self.middleware.threaded(writer)
            await tjob.wait()
        else:
            job.set_progress(100)

        return {
            'ticket': ticket,
            'url': url,
        }
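For reference, a hypothetical FreeNAS-side payload (values invented; only the keys this method itself validates and pops are shown — note `attach_debug` is popped unconditionally, so it must be present):

data = {
    'type': 'BUG',
    'username': 'jdoe',
    'password': 'hunter2',
    'attach_debug': False,
}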
Exemplo n.º 48
0
    async def import_disk(self, job, volume, fs_type, fs_options, dst_path):
        job.set_progress(None, description="Mounting")

        src = os.path.join('/var/run/importcopy/tmpdir', os.path.relpath(volume, '/'))

        if os.path.exists(src):
            os.rmdir(src)

        try:
            os.makedirs(src)

            async with KernelModuleContextManager({"msdosfs": "msdosfs_iconv",
                                                   "ntfs": "fuse"}.get(fs_type)):
                async with MountFsContextManager(self.middleware, volume, src, fs_type, fs_options, ["ro"]):
                    job.set_progress(None, description="Importing")

                    line = [
                        '/usr/local/bin/rsync',
                        '--info=progress2',
                        '--modify-window=1',
                        '-rltvh',
                        '--no-perms',
                        src + '/',
                        dst_path
                    ]
                    rsync_proc = await Popen(
                        line, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=0, preexec_fn=os.setsid,
                    )
                    try:
                        progress_buffer = JobProgressBuffer(job)
                        while True:
                            line = await rsync_proc.stdout.readline()
                            job.logs_fd.write(line)
                            if line:
                                try:
                                    line = line.decode("utf-8", "ignore").strip()
                                    bits = re.split(r"\s+", line)
                                    if len(bits) == 6 and bits[1].endswith("%") and bits[1][:-1].isdigit():
                                        progress_buffer.set_progress(int(bits[1][:-1]))
                                    elif not line.endswith('/'):
                                        if (
                                            line not in ['sending incremental file list'] and
                                            'xfr#' not in line
                                        ):
                                            progress_buffer.set_progress(None, extra=line)
                                except Exception:
                                    logger.warning('Parsing error in rsync task', exc_info=True)
                            else:
                                break

                        progress_buffer.flush()
                        await rsync_proc.wait()
                        if rsync_proc.returncode != 0:
                            raise Exception("rsync failed with exit code %r" % rsync_proc.returncode)
                    except asyncio.CancelledError:
                        rsync_proc.kill()
                        raise

                    job.set_progress(100, description="Done", extra="")
        finally:
            os.rmdir(src)
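The six-field check above matches rsync's --info=progress2 output once the trailing "(xfr#N, to-chk=N/N)" part appears. A quick standalone demonstration of the same parse, using a sample line of that shape (the figures are invented):

import re

line = '1.23G  45%  103.20MB/s  0:00:10 (xfr#1, to-chk=5/12)'
bits = re.split(r'\s+', line.strip())
if len(bits) == 6 and bits[1].endswith('%') and bits[1][:-1].isdigit():
    print('progress:', int(bits[1][:-1]))  # -> progress: 45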
Exemplo n.º 49
0
    async def wipe(self, job, dev, mode):
        """
        Performs a wipe of a disk `dev`.
        It can be of the following modes:
          - QUICK: clean the first few and last megabytes of every partition and disk
          - FULL: write whole disk with zero's
          - FULL_RANDOM: write whole disk with random bytes
        """
        await self.swaps_remove_disks([dev])

        # First do a quick wipe of every partition to clean things like zfs labels
        if mode == 'QUICK':
            await self.middleware.threaded(geom.scan)
            klass = geom.class_by_name('PART')
            for g in klass.xml.findall(f'./geom[name=\'{dev}\']'):
                for p in g.findall('./provider'):
                    size = p.find('./mediasize')
                    if size is not None:
                        try:
                            size = int(size.text)
                        except ValueError:
                            size = None
                    name = p.find('./name')
                    await self.wipe_quick(name.text, size=size)

        await run('gpart', 'destroy', '-F', f'/dev/{dev}', check=False)

        # Wipe out the partition table by doing an additional create/destroy cycle
        await run('gpart', 'create', '-s', 'gpt', f'/dev/{dev}')
        await run('gpart', 'destroy', '-F', f'/dev/{dev}')

        if mode == 'QUICK':
            await self.wipe_quick(dev)
        else:
            cp = await run('diskinfo', dev)
            size = int(re.sub(r'\s+', ' ', cp.stdout.decode()).split()[2])

            proc = await Popen([
                'dd',
                'if=/dev/{}'.format('zero' if mode == 'FULL' else 'random'),
                f'of=/dev/{dev}',
                'bs=1m',
            ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            async def dd_wait():
                while True:
                    if proc.returncode is not None:
                        break
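                    # On FreeBSD, SIGINFO makes dd print its transfer status
                    # to stderr; the read loop below parses it for progress.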
                    os.kill(proc.pid, signal.SIGINFO)
                    await asyncio.sleep(1)

            asyncio.ensure_future(dd_wait())

            while True:
                line = await proc.stderr.readline()
                if line == b'':
                    break
                line = line.decode()
                reg = RE_DD.search(line)
                if reg:
                    # set_progress expects a 0-100 percentage, not a fraction
                    job.set_progress(int(reg.group(1)) / size * 100, extra={'speed': int(reg.group(2))})

        await self.sync(dev)
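RE_DD is defined elsewhere in the module; given the dd status lines it has to match (bytes transferred in group 1, bytes/sec in group 2), it is presumably something along these lines (a guess for illustration, not the module's actual definition):

import re

# Matches FreeBSD dd SIGINFO status output, e.g.:
# "1048576000 bytes transferred in 5.1 secs (205603921 bytes/sec)"
RE_DD = re.compile(r'^(\d+) bytes transferred .*\((\d+) bytes/sec\)')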