Example #1
 def common_load_dataset_checks(self, ds):
     self.common_encryption_checks(ds)
     if ds.key_loaded:
         raise CallError(f'{ds.name} key is already loaded')
Example #2
    def _install(self, path, progress_callback):
        with open(SCALE_MANIFEST_FILE) as f:
            old_manifest = json.load(f)

        progress_callback(0, "Reading update file")
        with mount_update(path) as mounted:
            with open(os.path.join(mounted, "manifest.json")) as f:
                manifest = json.load(f)

            old_version = old_manifest["version"]
            new_version = manifest["version"]
            if not can_update(old_version, new_version):
                raise CallError(
                    f'Unable to downgrade from {old_version} to {new_version}')

            for file, checksum in manifest["checksums"].items():
                progress_callback(0, f"Verifying {file}")
                our_checksum = subprocess.run(
                    ["sha1sum", os.path.join(mounted, file)],
                    **run_kw).stdout.split()[0]
                if our_checksum != checksum:
                    raise CallError(
                        f"Checksum mismatch for {file!r}: {our_checksum} != {checksum}"
                    )

            command = {
                "disks": self.middleware.call_sync("boot.get_disks"),
                "json": True,
                "old_root": "/",
                "pool_name": self.middleware.call_sync("boot.pool_name"),
                "src": mounted,
            }

            p = subprocess.Popen(
                ["python3", "-m", "truenas_install"],
                cwd=mounted,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                encoding="utf-8",
                errors="ignore",
            )
            p.stdin.write(json.dumps(command))
            p.stdin.close()
            stderr = ""
            error = None
            for line in iter(p.stdout.readline, ""):
                try:
                    data = json.loads(line)
                except ValueError:
                    stderr += line
                else:
                    if "progress" in data and "message" in data:
                        progress_callback(data["progress"], data["message"])
                    elif "error" in data:
                        error = data["error"]
                    else:
                        raise ValueError(
                            f"Invalid truenas_install JSON: {data!r}")
            p.wait()
            if p.returncode != 0:
                if error is not None:
                    raise CallError(error)
                else:
                    raise CallError(stderr)
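
Note: the installer subprocess above reports progress as one JSON object per line on stdout. A minimal sketch of a conforming emitter, assuming nothing about the real truenas_install internals (the helper names are hypothetical):

import json
import sys

def report_progress(progress, message):
    # The parent matches lines that carry both "progress" and "message".
    sys.stdout.write(json.dumps({"progress": progress, "message": message}) + "\n")
    sys.stdout.flush()

def report_error(error):
    # An "error" object is remembered by the parent and raised on a non-zero exit code.
    sys.stdout.write(json.dumps({"error": error}) + "\n")
    sys.stdout.flush()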
Example #3
    def get_datastores(self, data):
        """
        Get datastores from VMWare.
        """
        try:
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
            server_instance = connect.SmartConnect(
                host=data['hostname'],
                user=data['username'],
                pwd=data['password'],
                sslContext=ssl_context,
            )
        except (vim.fault.InvalidLogin, vim.fault.NoPermission,
                vim.fault.RestrictedVersion) as e:
            raise CallError(e.msg, errno.EPERM)
        except vmodl.RuntimeFault as e:
            raise CallError(e.msg)
        except (socket.gaierror, socket.error, OSError) as e:
            raise CallError(str(e), e.errno)

        content = server_instance.RetrieveContent()
        objview = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.HostSystem], True)

        esxi_hosts = objview.view
        objview.Destroy()

        datastores = {}
        for esxi_host in esxi_hosts:
            storage_system = esxi_host.configManager.storageSystem
            datastores_host = {}

            if storage_system.fileSystemVolumeInfo is None:
                continue

            for host_mount_info in storage_system.fileSystemVolumeInfo.mountInfo:
                if host_mount_info.volume.type == 'VMFS':
                    datastores_host[host_mount_info.volume.name] = {
                        'type': host_mount_info.volume.type,
                        'uuid': host_mount_info.volume.uuid,
                        'capacity': host_mount_info.volume.capacity,
                        'vmfs_version': host_mount_info.volume.version,
                        'local': host_mount_info.volume.local,
                        'ssd': host_mount_info.volume.ssd
                    }
                elif host_mount_info.volume.type == 'NFS':
                    datastores_host[host_mount_info.volume.name] = {
                        'type': host_mount_info.volume.type,
                        'capacity': host_mount_info.volume.capacity,
                        'remote_host': host_mount_info.volume.remoteHost,
                        'remote_path': host_mount_info.volume.remotePath,
                        'remote_hostnames': host_mount_info.volume.remoteHostNames,
                        'username': host_mount_info.volume.userName,
                    }
                elif host_mount_info.volume.type in ('OTHER', 'VFFS'):
                    # Ignore VFFS type, it does not store VMs
                    # Ignore OTHER type, it does not seem to be meaningful
                    pass
                else:
                    self.logger.debug(
                        f'Unknown volume type "{host_mount_info.volume.type}": {host_mount_info.volume}'
                    )
                    continue
            datastores[esxi_host.name] = datastores_host

        connect.Disconnect(server_instance)
        return datastores
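
Note: illustrative usage; the connection details are placeholders. The result maps each ESXi host name to its VMFS/NFS datastores:

result = self.get_datastores({
    'hostname': 'esxi01.example.com',
    'username': 'root',
    'password': 'secret',
})
# e.g. {'esxi01': {'datastore1': {'type': 'VMFS', 'uuid': '...', ...}}}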
Example #4
 def do_delete(self, name, options):
     try:
         with libzfs.ZFS() as zfs:
             zfs.destroy(name, force=options['force'])
     except libzfs.ZFSException as e:
         raise CallError(str(e))
Example #5
 def get_devices(self, name):
     try:
         with libzfs.ZFS() as zfs:
             return [i.replace('/dev/', '') for i in zfs.get(name).disks]
     except libzfs.ZFSException as e:
         raise CallError(str(e), errno.ENOENT)
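
Note: for example, with a pool named 'tank' (the name is illustrative), the wrapper returns member disks with the '/dev/' prefix stripped:

devices = self.get_devices('tank')
# e.g. ['sda1', 'sdb1'] on Linux, or ['ada0p2', 'ada1p2'] on FreeBSD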
Example #6
    def setacl_nfs4(self, job, data):
        job.set_progress(0, 'Preparing to set acl.')
        options = data['options']
        dacl = data.get('dacl', [])

        # pathconf name 64 is _PC_ACL_NFS4 on FreeBSD
        if osc.IS_LINUX or not os.pathconf(data['path'], 64):
            raise CallError(
                f"NFSv4 ACLS are not supported on path {data['path']}",
                errno.EOPNOTSUPP)

        self._common_perm_path_validate(data['path'])

        if dacl and options['stripacl']:
            raise CallError(
                'Setting ACL and stripping ACL are not permitted simultaneously.',
                errno.EINVAL)

        uid = -1 if data.get('uid', None) is None else data['uid']
        gid = -1 if data.get('gid', None) is None else data['gid']
        if options['stripacl']:
            a = acl.ACL(file=data['path'])
            a.strip()
            a.apply(data['path'])
        else:
            inheritable_is_present = False
            cleaned_acl = []
            lockace_is_present = False
            for entry in dacl:
                ace = {
                    'tag': acl.ACLWho(entry['tag']).name,
                    'id': entry['id'],
                    'type': entry['type'],
                    'perms': (self.__convert_to_adv_permset(entry['perms']['BASIC'])
                              if 'BASIC' in entry['perms'] else entry['perms']),
                    'flags': (self.__convert_to_adv_flagset(entry['flags']['BASIC'])
                              if 'BASIC' in entry['flags'] else entry['flags']),
                }
                if ace['flags'].get('INHERIT_ONLY') and not ace['flags'].get(
                        'DIRECTORY_INHERIT', False) and not ace['flags'].get(
                            'FILE_INHERIT', False):
                    raise CallError(
                        'Invalid flag combination. DIRECTORY_INHERIT or FILE_INHERIT must be set if INHERIT_ONLY is set.',
                        errno.EINVAL)
                if ace['tag'] == 'EVERYONE' and self.__convert_to_basic_permset(
                        ace['perms']) == 'NOPERMS':
                    lockace_is_present = True
                elif ace['flags'].get('DIRECTORY_INHERIT') or ace['flags'].get(
                        'FILE_INHERIT'):
                    inheritable_is_present = True

                cleaned_acl.append(ace)

            if not inheritable_is_present:
                raise CallError(
                    'At least one inheritable ACL entry is required',
                    errno.EINVAL)

            if options['canonicalize']:
                cleaned_acl = self.canonicalize_acl_order(cleaned_acl)

            if not lockace_is_present:
                locking_ace = {
                    'tag': 'EVERYONE',
                    'id': None,
                    'type': 'ALLOW',
                    'perms': self.__convert_to_adv_permset('NOPERMS'),
                    'flags': self.__convert_to_adv_flagset('INHERIT')
                }
                cleaned_acl.append(locking_ace)

            a = acl.ACL()
            a.__setstate__(cleaned_acl)
            a.apply(data['path'])

        if not options['recursive']:
            os.chown(data['path'], uid, gid)
            job.set_progress(100, 'Finished setting NFS4 ACL.')
            return

        job.set_progress(10, f'Recursively setting ACL on {data["path"]}.')
        self._winacl(data['path'], 'clone', uid, gid, options)
        job.set_progress(100, 'Finished setting NFS4 ACL.')
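
Note: a sketch of a plausible `data` payload for this method; the path and ACL values are illustrative, not a full schema:

data = {
    'path': '/mnt/tank/share',
    'uid': None,   # None leaves ownership unchanged (mapped to -1 above)
    'gid': None,
    'dacl': [{
        'tag': 'owner@',                     # consumed via acl.ACLWho(entry['tag'])
        'id': None,
        'type': 'ALLOW',
        'perms': {'BASIC': 'FULL_CONTROL'},  # expanded by __convert_to_adv_permset
        'flags': {'BASIC': 'INHERIT'},       # expanded by __convert_to_adv_flagset
    }],
    'options': {'stripacl': False, 'canonicalize': True, 'recursive': False},
}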
Example #7
    def setperm(self, job, data):
        """
        Remove extended ACL from specified path.

        If `mode` is specified then the mode will be applied to the
        path and files and subdirectories depending on which `options` are
        selected. Mode should be formatted as string representation of octal
        permissions bits.

        `uid` the desired UID of the file user. If set to None (the default), then user is not changed.

        `gid` the desired GID of the file group. If set to None (the default), then group is not changed.

        `stripacl` setperm will fail if an extended ACL is present on `path`,
        unless `stripacl` is set to True.

        `recursive` remove ACLs recursively, but do not traverse dataset
        boundaries.

        `traverse` remove ACLs from child datasets.

        If no `mode` is set, and `stripacl` is True, then non-trivial ACLs
        will be converted to trivial ACLs. An ACL is trivial if it can be
        expressed as a file mode without losing any access rules.

        """
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial',
                                                   data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL)

        if mode is not None:
            mode = int(mode, 8)

        a = acl.ACL(file=data['path'])
        a.strip()
        a.apply(data['path'])

        if mode:
            os.chmod(data['path'], mode)

        os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(
            10, f'Recursively setting permissions on {data["path"]}.')
        self._winacl(data['path'], action, uid, gid, options)
        job.set_progress(100, 'Finished setting permissions.')
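
Note: a sketch of a valid `data` argument following the docstring; path and mode are illustrative:

data = {
    'path': '/mnt/tank/share',
    'mode': '755',      # string representation of octal permission bits
    'uid': None,        # None: do not change the file user
    'gid': None,        # None: do not change the file group
    'options': {'stripacl': True, 'recursive': True, 'traverse': False},
}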
Example #8
 def setacl_nfs4(self, job, data):
     raise CallError('NFSv4 ACLs are not yet implemented.', errno.ENOTSUP)
Example #9
    def setperm(self, job, data):
        job.set_progress(0, 'Preparing to set permissions.')
        options = data['options']
        mode = data.get('mode', None)

        uid = -1 if data['uid'] is None else data['uid']
        gid = -1 if data['gid'] is None else data['gid']

        self._common_perm_path_validate(data['path'])

        acl_is_trivial = self.middleware.call_sync('filesystem.acl_is_trivial', data['path'])
        if not acl_is_trivial and not options['stripacl']:
            raise CallError(
                f'Non-trivial ACL present on [{data["path"]}]. Option "stripacl" required to change permission.',
                errno.EINVAL
            )

        if mode is not None:
            mode = int(mode, 8)

        stripacl = subprocess.run(['setfacl', '-b', data['path']],
                                  check=False, capture_output=True)
        if stripacl.returncode != 0:
            raise CallError(f"Failed to remove POSIX1e ACL from [{data['path']}]: "
                            f"{stripacl.stderr.decode()}")

        if mode:
            os.chmod(data['path'], mode)

        os.chown(data['path'], uid, gid)

        if not options['recursive']:
            job.set_progress(100, 'Finished setting permissions.')
            return

        action = 'clone' if mode else 'strip'
        job.set_progress(10, f'Recursively setting permissions on {data["path"]}.')
        if action == 'strip':
            stripacl = subprocess.run(['setfacl', '-bR', data['path']],
                                      check=False, capture_output=True)
            if stripacl.returncode != 0:
                raise CallError(f"Failed to remove POSIX1e ACL from [{data['path']}]: "
                                f"{stripacl.stderr.decode()}")

        if uid != -1 or gid != -1:
            if gid == -1:
                chown = subprocess.run(['chown', '-R', str(uid), data['path']],
                                       check=False, capture_output=True)
            elif uid == -1:
                chown = subprocess.run(['chgrp', '-R', str(gid), data['path']],
                                       check=False, capture_output=True)
            else:
                chown = subprocess.run(['chown', '-R', f'{uid}:{gid}', data['path']],
                                       check=False, capture_output=True)

            if chown.returncode != 0:
                raise CallError(f"Failed to chown [{data['path']}]: "
                                f"{chown.stderr.decode()}")

        if mode:
            # Only chmod when a mode was actually supplied; pass the original
            # octal string through to the chmod binary.
            chmod = subprocess.run(['chmod', '-R', data['mode'], data['path']],
                                   check=False, capture_output=True)
            if chmod.returncode != 0:
                raise CallError(f"Failed to chmod [{data['path']}]: "
                                f"{chmod.stderr.decode()}")

        job.set_progress(100, 'Finished setting permissions.')
Example #10
    def restore_backup(self, job, backup_name, options):
        """
        Restore `backup_name` chart releases backup.

        It should be noted that a rollback will be initiated which will destroy any newer snapshots/clones
        of the `ix-applications` dataset than the snapshot in question of `backup_name`.
        """
        self.middleware.call_sync('kubernetes.validate_k8s_setup')
        backup = self.middleware.call_sync('kubernetes.list_backups').get(
            backup_name)
        if not backup:
            raise CallError(f'Backup {backup_name!r} does not exist',
                            errno=errno.ENOENT)

        job.set_progress(5, 'Basic validation complete')

        # Add taint to force stop pods
        self.middleware.call_sync('k8s.node.add_taints', [{
            'key': 'ix-stop-cluster',
            'effect': 'NoExecute'
        }])

        job.set_progress(10, 'Removing old containers')
        for container in self.middleware.call_sync('docker.container.query'):
            try:
                self.middleware.call_sync('docker.container.delete',
                                          container['id'])
            except CallError:
                # This is okay - we just want to make sure there are no leftover datasets and it's possible that
                # because of the taint, we have containers being removed
                pass

        self.middleware.call_sync('service.stop', 'kubernetes')
        job.set_progress(15, 'Stopped kubernetes')
        shutil.rmtree('/etc/rancher', True)
        db_config = self.middleware.call_sync('datastore.config',
                                              'services.kubernetes')
        self.middleware.call_sync('datastore.update', 'services.kubernetes',
                                  db_config['id'], {'cni_config': {}})

        k8s_config = self.middleware.call_sync('kubernetes.config')
        job.set_progress(20, f'Rolling back {backup["snapshot_name"]}')
        self.middleware.call_sync(
            'zfs.snapshot.rollback', backup['snapshot_name'], {
                'force': True,
                'recursive': True,
                'recursive_clones': True,
                'recursive_rollback': True,
            })

        # FIXME: Remove this sleep, sometimes the k3s dataset fails to umount
        #  After discussion with mav, it sounds like a bug to him in zfs, so until that is fixed, we have this sleep
        time.sleep(20)

        k3s_ds = os.path.join(k8s_config['dataset'], 'k3s')
        self.middleware.call_sync('zfs.dataset.delete', k3s_ds, {
            'force': True,
            'recursive': True
        })
        self.middleware.call_sync('zfs.dataset.create', {
            'name': k3s_ds,
            'type': 'FILESYSTEM'
        })
        self.middleware.call_sync('zfs.dataset.mount', k3s_ds)

        job.set_progress(25, 'Initializing new kubernetes cluster')
        self.middleware.call_sync('service.start', 'kubernetes')

        while True:
            config = self.middleware.call_sync('k8s.node.config')
            if (config['node_configured'] and not config['spec']['taints'] and
                (not options['wait_for_csi']
                 or self.middleware.call_sync('k8s.csi.config')['csi_ready'])):
                break

            time.sleep(5)

        job.set_progress(30, 'Kubernetes cluster re-initialized')

        backup_dir = backup['backup_path']
        releases_datasets = set(
            ds['id'].split('/', 3)[-1].split('/', 1)[0]
            for ds in self.middleware.call_sync(
                'zfs.dataset.get_instance',
                f'{k8s_config["dataset"]}/releases')['children'])

        releases = os.listdir(backup_dir)
        len_releases = len(releases)
        restored_chart_releases = collections.defaultdict(
            lambda: {'pv_info': {}})

        for index, release_name in enumerate(releases):
            job.set_progress(
                30 + ((index + 1) / len_releases) * 60,
                f'Restoring helm configuration for {release_name!r} chart release'
            )

            if release_name not in releases_datasets:
                self.logger.error(
                    'Skipping restore of %r chart release due to missing chart release dataset',
                    release_name)
                continue

            r_backup_dir = os.path.join(backup_dir, release_name)
            if any(not os.path.exists(os.path.join(r_backup_dir, f))
                   for f in ('namespace.yaml', 'secrets')) or not os.listdir(
                       os.path.join(r_backup_dir, 'secrets')):
                self.logger.error(
                    'Skipping restore of %r chart release due to missing configuration files',
                    release_name)
                continue

            # First we will restore namespace and then the secrets
            with open(os.path.join(r_backup_dir, 'namespace.yaml'), 'r') as f:
                namespace_body = yaml.load(f.read(), Loader=yaml.FullLoader)
                self.middleware.call_sync('k8s.namespace.create',
                                          {'body': namespace_body})

            secrets_dir = os.path.join(r_backup_dir, 'secrets')
            for secret in sorted(os.listdir(secrets_dir)):
                with open(os.path.join(secrets_dir, secret)) as f:
                    self.middleware.call_sync(
                        'k8s.secret.create', {
                            'namespace': namespace_body['metadata']['name'],
                            'body': yaml.load(f.read(),
                                              Loader=yaml.FullLoader),
                        })

            with open(os.path.join(r_backup_dir, 'workloads_replica_counts.json'), 'r') as f:
                restored_chart_releases[release_name]['replica_counts'] = json.loads(f.read())

            pv_info_path = os.path.join(r_backup_dir, 'pv_info.json')
            if os.path.exists(pv_info_path):
                with open(pv_info_path, 'r') as f:
                    restored_chart_releases[release_name]['pv_info'] = json.loads(f.read())

        # Now helm will recognise the releases as valid, however we don't have any actual k8s resources
        # deployed. That will be adjusted by updating the chart releases with their existing values;
        # helm will see that the k8s resources don't exist and will create them for us.
        job.set_progress(92, 'Creating kubernetes resources')
        update_jobs = []
        datasets = {
            d['id'] for d in self.middleware.call_sync(
                'zfs.dataset.query',
                [['id', '^', f'{os.path.join(k8s_config["dataset"], "releases")}/']],
                {'extra': {'retrieve_properties': False}},
            )
        }
        for chart_release in restored_chart_releases:
            # Before we have resources created for the chart releases, we will restore PVs if possible and then
            # restore the chart release, so if there is any PVC expecting a PV, it will be able to claim it as soon
            # as it is created. If this is not done in this order, PVC will request a new dataset and we will lose
            # the mapping with the old dataset.
            self.middleware.call_sync(
                'chart.release.create_update_storage_class_for_chart_release',
                chart_release,
                os.path.join(k8s_config['dataset'], 'releases', chart_release,
                             'volumes'))
            failed_pv_restores = []
            for pvc, pv in restored_chart_releases[chart_release]['pv_info'].items():
                if pv['dataset'] not in datasets:
                    failed_pv_restores.append(
                        f'Unable to locate PV dataset {pv["dataset"]!r} for {pvc!r} PVC.'
                    )
                    continue

                zv_details = pv['zv_details']
                try:
                    self.middleware.call_sync(
                        'k8s.zv.create', {
                            'metadata': {
                                'name': zv_details['metadata']['name'],
                            },
                            'spec': {
                                'capacity': zv_details['spec']['capacity'],
                                'poolName': zv_details['spec']['poolName'],
                            },
                        })
                except Exception as e:
                    failed_pv_restores.append(
                        f'Unable to create ZFS Volume for {pvc!r} PVC: {e}')
                    continue

                pv_spec = pv['pv_details']['spec']
                try:
                    self.middleware.call_sync(
                        'k8s.pv.create', {
                            'metadata': {
                                'name': pv['name'],
                            },
                            'spec': {
                                'capacity': {
                                    'storage': pv_spec['capacity']['storage'],
                                },
                                'claimRef': {
                                    'name': pv_spec['claim_ref']['name'],
                                    'namespace': pv_spec['claim_ref']['namespace'],
                                },
                                'csi': {
                                    'volumeAttributes': {
                                        'openebs.io/poolname':
                                            pv_spec['csi']['volume_attributes']['openebs.io/poolname'],
                                    },
                                    'volumeHandle': pv_spec['csi']['volume_handle'],
                                },
                                'storageClassName': pv_spec['storage_class_name'],
                            },
                        })
                except Exception as e:
                    failed_pv_restores.append(
                        f'Unable to create PV for {pvc!r} PVC: {e}')

            if failed_pv_restores:
                self.logger.error(
                    'Failed to restore PVC(s) for %r chart release:\n%s',
                    chart_release, '\n'.join(failed_pv_restores))

            update_jobs.append(
                self.middleware.call_sync('chart.release.update',
                                          chart_release, {'values': {}}))

        for update_job in update_jobs:
            update_job.wait_sync()

        # We should have k8s resources created now. A new PVC will be created as k8s won't retain the
        # original information which was in its state at backup time. We will get the current dataset
        # mapping and then rename the old datasets which were mapped to the same PVC to the new name.
        chart_releases = {
            c['name']: c for c in self.middleware.call_sync(
                'chart.release.query', [], {'extra': {'retrieve_resources': True}})
        }

        for release_name in list(restored_chart_releases):
            if release_name not in chart_releases:
                restored_chart_releases.pop(release_name)
            else:
                restored_chart_releases[release_name]['resources'] = \
                    chart_releases[release_name]['resources']

        job.set_progress(97, 'Scaling scalable workloads')

        for chart_release in restored_chart_releases.values():
            self.middleware.call_sync(
                'chart.release.scale_release_internal',
                chart_release['resources'],
                None,
                chart_release['replica_counts'],
                True,
            )

        job.set_progress(100, f'Restore of {backup_name!r} backup complete')
Example #11
    def getacl(self, path, simplified=True):
        if not os.path.exists(path):
            raise CallError('Path not found.', errno.ENOENT)

        return self.getacl_posix1e(path, simplified)
Example #12
    def check_available(self, attrs=None):
        """
        Checks if there is an update available from update server.

        status:
          - REBOOT_REQUIRED: an update has already been applied
          - AVAILABLE: an update is available
          - UNAVAILABLE: no update available

        .. examples(websocket)::

          Check available update using default train:

            :::javascript
            {
                "id": "6841f242-840a-11e6-a437-00e04d680384",
                "msg": "method",
                "method": "update.check_available"
            }
        """

        try:
            applied = self.middleware.call_sync('cache.get', 'update.applied')
        except Exception:
            applied = False
        if applied is True:
            return {'status': 'REBOOT_REQUIRED'}

        if (not self.middleware.call_sync('system.is_freenas')
                and self.middleware.call_sync('failover.licensed')):
            # If it's HA and the standby is running an old version, we assume
            # a legacy upgrade and check for updates on the standby.
            try:
                self.middleware.call_sync(
                    'failover.call_remote',
                    'failover.upgrade_version',
                )
            except CallError as e:
                if e.errno != CallError.ENOMETHOD:
                    raise
                return self.middleware.call_sync(
                    'failover.call_remote',
                    'update.check_available',
                    [attrs],
                )

        trains = self.middleware.call_sync('update.get_trains')
        train = (attrs or {}).get('train')
        if not train:
            train = trains['selected']
        elif train not in trains['trains']:
            raise CallError('Invalid train name.', errno.ENOENT)

        handler = CheckUpdateHandler()
        manifest = CheckForUpdates(
            diff_handler=handler.diff_call,
            handler=handler.call,
            train=train,
        )

        if not manifest:
            return {'status': 'UNAVAILABLE'}

        data = {
            'status': 'AVAILABLE',
            'changes': handler.changes,
            'notice': manifest.Notice(),
            'notes': manifest.Notes(),
        }

        conf = Configuration.Configuration()
        sys_mani = conf.SystemManifest()
        if sys_mani:
            sequence = sys_mani.Sequence()
        else:
            sequence = ''
        data['changelog'] = get_changelog(train,
                                          start=sequence,
                                          end=manifest.Sequence())

        data['version'] = manifest.Version()
        return data
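
Note: when an update is found, the returned dict has this shape (values are illustrative):

{
    'status': 'AVAILABLE',
    'changes': [...],              # accumulated by CheckUpdateHandler
    'notice': '...',               # manifest.Notice()
    'notes': '...',                # manifest.Notes()
    'changelog': '...',            # get_changelog(train, start=sequence, end=manifest.Sequence())
    'version': 'FreeNAS-11.3-U5',  # manifest.Version(); example string
}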
Example #13
    def get_quota(self, ds, quota_type):
        if quota_type == 'dataset':
            dataset = self.query([('id', '=', ds)], {'get': True})
            return [{
                'quota_type': 'DATASET',
                'id': ds,
                'name': ds,
                'quota': int(dataset['properties']['quota']['rawvalue']),
                'refquota': int(dataset['properties']['refquota']['rawvalue']),
                'used_bytes': int(dataset['properties']['used']['rawvalue']),
            }]

        quota_list = []
        quota_get = subprocess.run(
            ['zfs', f'{quota_type}space', '-H', '-n', '-p', '-o', 'name,used,quota,objquota,objused', ds],
            capture_output=True,
            check=False,
        )
        if quota_get.returncode != 0:
            raise CallError(
                f'Failed to get {quota_type} quota for {ds}: [{quota_get.stderr.decode()}]'
            )

        for quota in quota_get.stdout.decode().splitlines():
            m = quota.split('\t')
            if len(m) != 5:
                self.logger.debug('Invalid %s quota: %s',
                                  quota_type.lower(), quota)
                continue

            entry = {
                'quota_type': quota_type.upper(),
                'id': int(m[0]),
                'name': None,
                'quota': int(m[2]),
                'used_bytes': int(m[1]),
                'used_percent': 0,
                'obj_quota': int(m[3]) if m[3] != '-' else 0,
                'obj_used': int(m[4]) if m[4] != '-' else 0,
                'obj_used_percent': 0,
            }
            if entry['quota'] > 0:
                entry['used_percent'] = entry['used_bytes'] / entry['quota'] * 100

            if entry['obj_quota'] > 0:
                entry['obj_used_percent'] = entry['obj_used'] / entry['obj_quota'] * 100

            try:
                if entry['quota_type'] == 'USER':
                    entry['name'] = (
                        self.middleware.call_sync('user.get_user_obj',
                                                  {'uid': entry['id']})
                    )['pw_name']
                else:
                    entry['name'] = (
                        self.middleware.call_sync('group.get_group_obj',
                                                  {'gid': entry['id']})
                    )['gr_name']

            except Exception:
                self.logger.debug('Unable to resolve %s id %d to name',
                                  quota_type.lower(), entry['id'])

            quota_list.append(entry)

        return quota_list
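
Note: with the flags used above, the `zfs {user,group}space` output is tab-separated in the requested column order (name, used, quota, objquota, objused). One parsed line, with invented values:

line = '1000\t52428800\t104857600\t-\t120'
m = line.split('\t')
# -> {'quota_type': 'USER', 'id': 1000, 'quota': 104857600,
#     'used_bytes': 52428800, 'used_percent': 50.0,
#     'obj_quota': 0, 'obj_used': 120, 'obj_used_percent': 0}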
Example #14
 def common_encryption_checks(self, ds):
     if not ds.encrypted:
         raise CallError(f'{ds.name} is not encrypted')
Example #15
 async def get_value(self, sysctl_name):
     cp = await run(['sysctl', sysctl_name], check=False)
     if cp.returncode:
         raise CallError(f'Unable to retrieve value of "{sysctl_name}" sysctl: {cp.stderr.decode()}')
     return cp.stdout.decode().split('=')[-1].strip()
Example #16
 def setacl_posix1e(self, job, data):
     raise CallError(
         "POSIX1e brand ACLs not supported on the FreeBSD-based TrueNAS platform",
         errno.EOPNOTSUPP)
Example #17
 async def get_pagesize(self):
     cp = await run(['getconf', 'PAGESIZE'], check=False)
     if cp.returncode:
         raise CallError(f'Unable to retrieve pagesize value: {cp.stderr.decode()}')
     return int(cp.stdout.decode().strip())
Example #18
    async def __geli_resize(self, pool, geli_resize, options):
        failed_rollback = []

        lock_job = await self.middleware.call('pool.lock', pool['id'],
                                              options['geli']['passphrase'])
        await lock_job.wait()
        if lock_job.error:
            logger.warning('Error locking pool: %s', lock_job.error)

            for geli_resize_cmd, rollback_cmd in geli_resize:
                if not await self.__run_rollback_cmd(rollback_cmd):
                    failed_rollback.append(rollback_cmd)

            if failed_rollback:
                raise CallError(
                    'Locking your encrypted pool failed and rolling back changes failed too. '
                    'You\'ll need to run the following commands manually:\n%s'
                    % '\n'.join(map(join_commandline, failed_rollback)))
        else:
            for geli_resize_cmd, rollback_cmd in geli_resize:
                try:
                    await run(*geli_resize_cmd,
                              encoding='utf-8',
                              errors='ignore')
                except subprocess.CalledProcessError as geli_resize_error:
                    if geli_resize_error.stderr.strip() == 'geli: Size hasn\'t changed.':
                        logger.info('%s: %s',
                                    join_commandline(geli_resize_cmd),
                                    geli_resize_error.stderr.strip())
                    else:
                        logger.error('%r failed: %s. Resizing partition back',
                                     join_commandline(geli_resize_cmd),
                                     geli_resize_error.stderr.strip())
                        if not await self.__run_rollback_cmd(rollback_cmd):
                            failed_rollback.append(rollback_cmd)

            if failed_rollback:
                raise CallError(
                    'Resizing partitions of your encrypted pool failed and rolling back '
                    'changes failed too. You\'ll need to run the following commands manually:\n%s'
                    % '\n'.join(map(join_commandline, failed_rollback)))

            if options['geli']['passphrase']:
                unlock_job = await self.middleware.call(
                    'pool.unlock', pool['id'],
                    {'passphrase': options['geli']['passphrase']})
            else:
                unlock_job = await self.middleware.call(
                    'pool.unlock',
                    pool['id'], {'recoverykey': True},
                    pipes=Pipes(input=self.middleware.pipe()))

                def copy():
                    with open(pool['encryptkey_path'], 'rb') as f:
                        shutil.copyfileobj(f, unlock_job.pipes.input.w)

                try:
                    await self.middleware.run_in_thread(copy)
                finally:
                    await self.middleware.run_in_thread(
                        unlock_job.pipes.input.w.close)

            await unlock_job.wait()
            if unlock_job.error:
                raise CallError(unlock_job.error)
Example #19
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
          acl(bool): extended ACL is present on file
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_symlink():
                etype = 'SYMLINK'
            elif entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'acl': not self.acl_is_trivial(data["realpath"]),
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'acl': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})
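
Note: `filter_list` applies the middleware's standard query-filter syntax, so a call might look like this (the path is illustrative):

entries = self.listdir(
    '/mnt/tank',
    filters=[('type', '=', 'DIRECTORY')],
    options={'order_by': ['name']},
)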
Example #20
    async def expand(self, job, id, options):
        """
        Expand pool to fit all available disk space.
        """
        pool = await self.middleware.call('pool.get_instance', id)
        if osc.IS_LINUX:
            if options.get('passphrase'):
                raise CallError(
                    'Passphrase should not be supplied for this platform.')
            # FIXME: We have issues in ZoL where when pool is created with partition uuids, we are unable
            #  to expand pool where all pool related options error out saying I/O error
            #  https://github.com/zfsonlinux/zfs/issues/9830
            raise CallError(
                'Expand is not supported on this platform yet because of underlying ZFS issues.'
            )
        else:
            if pool['encrypt']:
                if not pool['is_decrypted']:
                    raise CallError('You can only expand decrypted pool')

                for error in (await self.middleware.call(
                        'pool.pool_lock_pre_check', pool,
                        options['geli']['passphrase'])).errors:
                    raise CallError(error.errmsg)

        all_partitions = {
            p['name']: p
            for p in await self.middleware.call('disk.list_all_partitions')
        }

        try:
            if osc.IS_FREEBSD:
                sysctl.filter('kern.geom.debugflags')[0].value = 16
            geli_resize = []
            vdevs = []
            try:
                for vdev in sum(pool['topology'].values(), []):
                    if vdev['status'] != 'ONLINE':
                        logger.debug('Not expanding vdev(%r) that is %r',
                                     vdev['guid'], vdev['status'])
                        continue

                    c_vdevs = []
                    disks = vdev['children'] if vdev['type'] != 'DISK' else [vdev]
                    skip_vdev = None
                    for child in disks:
                        if child['status'] != 'ONLINE':
                            skip_vdev = f'Device "{child["device"]}" status is not ONLINE ' \
                                        f'(Reported status is {child["status"]})'
                            break

                        part_data = all_partitions.get(child['device'])
                        if not part_data:
                            skip_vdev = f'Unable to find partition data for {child["device"]}'
                        elif not part_data['partition_number']:
                            skip_vdev = f'Could not parse partition number from {child["device"]}'
                        elif part_data['disk'] != child['disk']:
                            skip_vdev = f'Retrieved partition data for device {child["device"]} ' \
                                        f'({part_data["disk"]}) does not match with disk ' \
                                        f'reported by ZFS ({child["disk"]})'
                        if skip_vdev:
                            break
                        else:
                            c_vdevs.append((child['guid'], part_data))

                    if skip_vdev:
                        logger.debug('Not expanding vdev(%r): %r',
                                     vdev['guid'], skip_vdev)
                        continue

                    for guid, part_data in c_vdevs:
                        await self._resize_disk(part_data, pool['encrypt'],
                                                geli_resize)
                        vdevs.append(guid)
            finally:
                if osc.IS_FREEBSD and geli_resize:
                    await self.__geli_resize(pool, geli_resize, options)
        finally:
            if osc.IS_FREEBSD:
                sysctl.filter('kern.geom.debugflags')[0].value = 0

        # spare/cache devices cannot be expanded
        # We resize them anyways, for cache devices, whenever we are going to import the pool
        # next, it will register the new capacity. For spares, whenever that spare is going to
        # be used, it will register the new capacity as desired.
        for topology_type in filter(
                lambda t: t not in ('spare', 'cache') and pool['topology'][t],
                pool['topology']):
            for vdev in pool['topology'][topology_type]:
                children = vdev['children'] if vdev['type'] != 'DISK' else [vdev]
                for c_vd in filter(lambda v: v['guid'] in vdevs, children):
                    await self.middleware.call('zfs.pool.online', pool['name'],
                                               c_vd['guid'], True)
Example #21
    def getacl_posix1e(self, path, simplified):
        st = os.stat(path)
        ret = {
            'uid': st.st_uid,
            'gid': st.st_gid,
            'acl': [],
            'flags': {
                'setuid': bool(st.st_mode & pystat.S_ISUID),
                'setgid': bool(st.st_mode & pystat.S_ISGID),
                'sticky': bool(st.st_mode & pystat.S_ISVTX),
            },
            'acltype': ACLType.POSIX1E.name
        }

        gfacl = subprocess.run(
            ['getfacl', '-c' if osc.IS_LINUX else '-q', '-n', path],
            check=False,
            capture_output=True)
        if gfacl.returncode != 0:
            raise CallError(
                f"Failed to get POSIX1e ACL on path [{path}]: {gfacl.stderr.decode()}"
            )

        # Linux getfacl output has an extra line for absolute paths and a trailing newline at the end.
        entries = gfacl.stdout.decode().splitlines()
        if osc.IS_LINUX:
            entries = entries[:-1]

        for entry in entries:
            if entry.startswith("#"):
                continue
            ace = {
                "default": False,
                "tag": None,
                "id": -1,
                "perms": {
                    "READ": False,
                    "WRITE": False,
                    "EXECUTE": False,
                }
            }

            tag, id, perms = entry.rsplit(":", 2)
            ace['perms'].update({
                "READ": perms[0].casefold() == "r",
                "WRITE": perms[1].casefold() == "w",
                "EXECUTE": perms[2].casefold() == "x",
            })
            if tag.startswith('default'):
                ace['default'] = True
                tag = tag[8:]  # strip the leading 'default:' prefix

            ace['tag'] = tag.upper()
            if id.isdigit():
                ace['id'] = int(id)
            ret['acl'].append(ace)

        return ret
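
Note: for reference, a getfacl entry such as 'default:user:1000:rwx' (an invented example) splits via rsplit(':', 2) into tag 'default:user', id '1000' and perms 'rwx', which yields:

ace = {
    'default': True,    # 'default' prefix detected, then stripped via tag[8:]
    'tag': 'USER',
    'id': 1000,
    'perms': {'READ': True, 'WRITE': True, 'EXECUTE': True},
}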
Example #22
File: mail.py  Project: bmhughes/freenas
    def send_raw(self, job, message, config):
        config = dict(self.middleware.call_sync('mail.config'), **config)

        if config['fromname']:
            from_addr = Header(config['fromname'], 'utf-8')
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr.append(f'<{config["fromemail"]}>', 'utf-8')
            else:
                from_addr.append(f'<{config["fromemail"]}>', 'ascii')
        else:
            try:
                config['fromemail'].encode('ascii')
            except UnicodeEncodeError:
                from_addr = Header(config['fromemail'], 'utf-8')
            else:
                from_addr = Header(config['fromemail'], 'ascii')

        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.version').split('-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(
                    os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError(
                    'This message was already sent in the given interval')

        verrors = self.__password_verify(config['pass'], 'mail-config.pass')
        if verrors:
            raise verrors
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        if message.get('attachments'):
            job.check_pipe("input")

            def read_json():
                f = job.pipes.input.r
                data = b''
                i = 0
                while True:
                    read = f.read(1048576)  # 1MiB
                    if read == b'':
                        break
                    data += read
                    i += 1
                    if i > 50:
                        raise ValueError(
                            'Attachments bigger than 50MB not allowed yet')
                if data == b'':
                    return None
                return json.loads(data)

            attachments = read_json()
        else:
            attachments = None

        if 'html' in message or attachments:
            msg = MIMEMultipart()
            msg.preamble = 'This is a multi-part message in MIME format.'
            if 'html' in message:
                msg2 = MIMEMultipart('alternative')
                msg2.attach(
                    MIMEText(message['text'], 'plain', _charset='utf-8'))
                msg2.attach(MIMEText(message['html'], 'html',
                                     _charset='utf-8'))
                msg.attach(msg2)
            if attachments:
                for attachment in attachments:
                    m = Message()
                    m.set_payload(attachment['content'])
                    for header in attachment.get('headers'):
                        m.add_header(header['name'], header['value'],
                                     **(header.get('params') or {}))
                    msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        msg['Subject'] = message['subject']

        msg['From'] = from_addr
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = self.middleware.call_sync('system.hostname')

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (
            sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            # We already have "Content-Type: multipart/mixed" and setting "Content-Type: text/plain"
            # like some scripts do will break the Python email module.
            if key.lower() == "content-type":
                continue

            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        try:
            if config['oauth']:
                self.middleware.call_sync('mail.gmail_send', msg, config)
            else:
                server = self._get_smtp_server(config,
                                               message['timeout'],
                                               local_hostname=local_hostname)
                # NOTE: Don't do this.
                #
                # If smtplib.SMTP* tells you to run connect() first, it's because the
                # mailserver it tried connecting to via the outgoing server argument
                # was unreachable and it tried to connect to 'localhost' and barfed.
                # This is because FreeNAS doesn't run a full MTA.
                # else:
                #    server.connect()
                headers = '\n'.join([f'{k}: {v}' for k, v in msg._headers])
                syslog.syslog(f"sending mail to {', '.join(to)}\n{headers}")
                server.sendmail(from_addr.encode(), to, msg.as_string())
                server.quit()
        except Exception as e:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            # We are only interested in ValueError, not subclasses.
            if e.__class__ is ValueError:
                raise CallError(str(e))
            syslog.syslog(f'Failed to send email to {", ".join(to)}: {str(e)}')
            if isinstance(e, smtplib.SMTPAuthenticationError):
                raise CallError(
                    f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                    errno.EAUTH if osc.IS_FREEBSD else errno.EPERM)
            self.logger.warn('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True
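
Note: a minimal `message` payload for this method; addresses and values are placeholders, and `config` entries merge over the stored `mail.config`:

message = {
    'subject': 'Disk alert',
    'text': 'Pool tank is degraded.',
    'to': ['admin@example.com'],   # falls back to root's configured email when omitted
    'timeout': 300,                # passed to _get_smtp_server()
    'queue': True,                 # on failure, append the message to MailQueue
}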
Example #23
 def upgrade(self, pool):
     try:
         with libzfs.ZFS() as zfs:
             zfs.get(pool).upgrade()
     except libzfs.ZFSException as e:
         raise CallError(str(e))
Example #24
    async def _kinit(self):
        """
        There are two ways of performing the kinit:
        1) username / password combination. In this case, password must be written
           to file or received via STDIN
        2) kerberos keytab.

        For now we only check for kerberos realms explicitly configured in AD and LDAP.
        """
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        await self.middleware.call('etc.generate', 'kerberos')
        if ad['enable']:
            if ad['kerberos_principal']:
                ad_kinit = await run(
                    ['/usr/bin/kinit', '--renewable', '-k', ad['kerberos_principal']],
                    check=False,
                )
                if ad_kinit.returncode != 0:
                    raise CallError(
                        f"kinit for domain [{ad['domainname']}] with principal [{ad['kerberos_principal']}] failed: {ad_kinit.stderr.decode()}"
                    )
            else:
                principal = f'{ad["bindname"]}@{ad["domainname"].upper()}'
                ad_kinit = await Popen(
                    ['/usr/bin/kinit', '--renewable', '--password-file=STDIN', principal],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
                )
                output = await ad_kinit.communicate(input=ad['bindpw'].encode())
                if ad_kinit.returncode != 0:
                    raise CallError(
                        f"kinit for domain [{ad['domainname']}] with password failed: {output[1].decode()}"
                    )
        if ldap['enable'] and ldap['kerberos_realm']:
            if ldap['kerberos_principal']:
                ldap_kinit = await run(
                    ['/usr/bin/kinit', '--renewable', '-k', ldap['kerberos_principal']],
                    check=False,
                )
                if ldap_kinit.returncode != 0:
                    raise CallError(
                        f"kinit for realm {ldap['kerberos_realm']} with keytab failed: {ldap_kinit.stderr.decode()}"
                    )
            else:
                krb_realm = await self.middleware.call(
                    'kerberos.realm.query',
                    [('id', '=', ldap['kerberos_realm'])], {'get': True})
                bind_cn = (ldap['binddn'].split(','))[0].split("=")
                principal = f'{bind_cn[1]}@{krb_realm["realm"]}'
                ldap_kinit = await Popen(
                    ['/usr/bin/kinit', '--renewable', '--password-file=STDIN', principal],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE,
                )
                output = await ldap_kinit.communicate(input=ldap['bindpw'].encode())
                if ldap_kinit.returncode != 0:
                    raise CallError(
                        f"kinit for realm {krb_realm['realm']} with password failed: {output[1].decode()}"
                    )
Example #25
    def send(self, job, message, config=None):
        """
        Sends mail using configured mail settings.

        If `attachments` is true, a list composed of the following dicts is required
        via HTTP upload:
          - headers(list)
            - name(str)
            - value(str)
            - params(dict)
          - content (str)

        [
         {
          "headers": [
           {
            "name": "Content-Transfer-Encoding",
            "value": "base64"
           },
           {
            "name": "Content-Type",
            "value": "application/octet-stream",
            "params": {
             "name": "test.txt"
            }
           }
          ],
          "content": "dGVzdAo="
         }
        ]
        """

        syslog.openlog(logoption=syslog.LOG_PID, facility=syslog.LOG_MAIL)
        interval = message.get('interval')
        if interval is None:
            interval = timedelta()
        else:
            interval = timedelta(seconds=interval)

        sw_name = self.middleware.call_sync('system.info')['version'].split(
            '-', 1)[0]

        channel = message.get('channel')
        if not channel:
            channel = sw_name.lower()
        if interval > timedelta():
            channelfile = '/tmp/.msg.%s' % (channel)
            last_update = datetime.now() - interval
            try:
                last_update = datetime.fromtimestamp(
                    os.stat(channelfile).st_mtime)
            except OSError:
                pass
            timediff = datetime.now() - last_update
            if (timediff >= interval) or (timediff < timedelta()):
                # Make sure mtime is modified
                # We could use os.utime but this is simpler!
                with open(channelfile, 'w') as f:
                    f.write('!')
            else:
                raise CallError(
                    'This message was already sent in the given interval')

        if not config:
            config = self.middleware.call_sync('mail.config')
        to = message.get('to')
        if not to:
            to = [
                self.middleware.call_sync('user.query',
                                          [('username', '=', 'root')],
                                          {'get': True})['email']
            ]
            if not to[0]:
                raise CallError('Email address for root is not configured')

        def read_json():
            f = os.fdopen(job.read_fd, 'rb')
            data = b''
            i = 0
            while True:
                read = f.read(1048576)  # 1MiB
                if read == b'':
                    break
                data += read
                i += 1
                if i > 50:
                    raise ValueError(
                        'Attachments bigger than 50MB not allowed yet')
            if data == b'':
                return None
            return json.loads(data)

        attachments = read_json() if message.get('attachments') else None
        if attachments:
            msg = MIMEMultipart()
            msg.preamble = message['text']
            for attachment in attachments:
                m = Message()
                m.set_payload(attachment['content'])
                for header in attachment.get('headers'):
                    m.add_header(header['name'], header['value'],
                                 **(header.get('params') or {}))
                msg.attach(m)
        else:
            msg = MIMEText(message['text'], _charset='utf-8')

        subject = message.get('subject')
        if subject:
            msg['Subject'] = subject

        msg['From'] = config['fromemail']
        msg['To'] = ', '.join(to)
        if message.get('cc'):
            msg['Cc'] = ', '.join(message.get('cc'))
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()

        msg['Message-ID'] = "<%s-%s.%s@%s>" % (
            sw_name.lower(), datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            # decode() so a bytes repr (b'...') doesn't leak into the header
            base64.urlsafe_b64encode(os.urandom(3)).decode(), local_hostname)

        extra_headers = message.get('extra_headers') or {}
        for key, val in list(extra_headers.items()):
            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val

        try:
            server = self._get_smtp_server(config,
                                           message['timeout'],
                                           local_hostname=local_hostname)
            # NOTE: Don't do this.
            #
            # If smtplib.SMTP* tells you to run connect() first, it's because the
            # mailserver it tried connecting to via the outgoing server argument
            # was unreachable and it tried to connect to 'localhost' and barfed.
            # This is because FreeNAS doesn't run a full MTA.
            # else:
            #    server.connect()
            syslog.syslog("sending mail to " + ','.join(to) +
                          msg.as_string()[0:140])
            server.sendmail(config['fromemail'], to, msg.as_string())
            server.quit()
        except ValueError as ve:
            # Don't spam syslog with these messages. They should only end up in the
            # test-email pane.
            raise CallError(str(ve))
        except smtplib.SMTPAuthenticationError as e:
            raise CallError(
                f'Authentication error ({e.smtp_code}): {e.smtp_error}',
                errno.EAUTH)
        except Exception as e:
            self.logger.warning('Failed to send email: %s', str(e), exc_info=True)
            if message['queue']:
                with MailQueue() as mq:
                    mq.append(msg)
            raise CallError(f'Failed to send email: {e}')
        return True
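The attachments structure documented above is plain JSON, so a caller can assemble it before uploading. A minimal sketch (hypothetical helper and file contents) that builds one attachment of the documented shape:

    import base64
    import json

    def make_attachment(filename, raw_bytes):
        # One attachment dict in the documented shape: a list of MIME
        # headers (name/value/params) plus base64-encoded content.
        return {
            'headers': [
                {'name': 'Content-Transfer-Encoding', 'value': 'base64'},
                {'name': 'Content-Type', 'value': 'application/octet-stream',
                 'params': {'name': filename}},
            ],
            'content': base64.b64encode(raw_bytes).decode(),
        }

    # b'test\n' encodes to 'dGVzdAo=', as in the docstring example
    payload = json.dumps([make_attachment('test.txt', b'test\n')])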
Example #26
    async def _get_cached_klist(self):
        """
        Try to retrieve cached kerberos TGT info. If it hasn't been cached,
        run klist, parse the output, cache it, then return it.
        """
        if await self.middleware.call('cache.has_key', 'KRB_TGT_INFO'):
            return (await self.middleware.call('cache.get', 'KRB_TGT_INFO'))
        ad = await self.middleware.call('activedirectory.config')
        ldap = await self.middleware.call('ldap.config')
        ad_TGT = []
        ldap_TGT = []
        if not ad['enable'] and not ldap['enable']:
            return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}
        if not ad['enable'] and not ldap['kerberos_realm']:
            return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}

        if not await self.status():
            await self.start()

        try:
            klist = await asyncio.wait_for(
                run(['/usr/bin/klist', '-v'], check=False,
                    stdout=subprocess.PIPE),
                timeout=10.0)
        except Exception as e:
            await self.stop()
            raise CallError(
                f"Attempt to list kerberos tickets failed with error: {e}")

        if klist.returncode != 0:
            await self.stop()
            raise CallError(
                f'klist failed with error: {klist.stderr.decode()}')

        klist_output = klist.stdout.decode()
        tkts = klist_output.split('\n\n')
        for tkt in tkts:
            s = tkt.splitlines()
            if len(s) > 4:
                for entry in s:
                    if "Auth time" in entry:
                        issued = time.strptime(
                            (entry.split('Auth time: '))[1].lstrip().replace(
                                '  ', ' '), '%b %d %H:%M:%S %Y')
                    elif "End time" in entry:
                        expires = time.strptime(
                            (entry.split('End time: '))[1].lstrip().replace(
                                '  ', ' '), '%b %d %H:%M:%S %Y')
                    elif "Server" in entry:
                        server = (entry.split('Server: '))[1]
                    elif "Client" in entry:
                        client = (entry.split('Client: '))[1]
                    elif 'Ticket etype' in entry:
                        etype = (entry.split('Ticket etype: '))[1]
                    elif 'Ticket flags' in entry:
                        flags = (entry.split('Ticket flags: '))[1].split(',')

                if (ad['enable'] and ad['kerberos_realm']
                        and ad['domainname'] in client):
                    ad_TGT.append({
                        'issued': issued,
                        'expires': expires,
                        'client': client,
                        'server': server,
                        'etype': etype,
                        'flags': flags,
                    })

                elif ldap['enable'] and ldap['kerberos_realm']:
                    if ldap['kerberos_realm']['krb_realm'] in client:
                        ldap_TGT.append({
                            'issued': issued,
                            'expires': expires,
                            'client': client,
                            'server': server,
                            'etype': etype,
                            'flags': flags,
                        })

        if ad_TGT or ldap_TGT:
            await self.middleware.call('cache.put', 'KRB_TGT_INFO', {
                'ad_TGT': ad_TGT,
                'ldap_TGT': ldap_TGT
            })
        return {'ad_TGT': ad_TGT, 'ldap_TGT': ldap_TGT}
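The loop above splits klist -v output on blank lines and pattern-matches labelled fields; the one subtlety is the .replace('  ', ' ') that collapses the double space Heimdal prints before single-digit day numbers. A small sketch of the same extraction on one hypothetical ticket line:

    import time

    entry = 'Auth time: Jan  2 10:15:00 2021'
    # Strip the label, collapse the padding space, then parse
    issued = time.strptime(
        entry.split('Auth time: ')[1].lstrip().replace('  ', ' '),
        '%b %d %H:%M:%S %Y')
    assert issued.tm_year == 2021 and issued.tm_mday == 2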
Example #27
    async def setup(self, mount, exclude_pool=None):

        # FIXME: corefile for LINUX
        if osc.IS_FREEBSD:
            # We default kern.corefile value
            await run('sysctl', "kern.corefile='/var/tmp/%N.core'")

        config = await self.config()
        dbconfig = await self.middleware.call(
            'datastore.config', self._config.datastore,
            {'prefix': self._config.datastore_prefix})

        boot_pool = await self.middleware.call('boot.pool_name')
        if (not await self.middleware.call('system.is_freenas')
                and await self.middleware.call('failover.status') == 'BACKUP'
                and config.get('basename')
                and config['basename'] != f'{boot_pool}/.system'):
            try:
                os.unlink(SYSDATASET_PATH)
            except OSError:
                pass
            return

        # If the system dataset is configured in a data pool we need to make sure it exists.
        # In case it does not we need to use another one.
        if config['pool'] != boot_pool and not await self.middleware.call(
                'pool.query', [('name', '=', config['pool'])]):
            job = await self.middleware.call('systemdataset.update', {
                'pool': None,
                'pool_exclude': exclude_pool,
            })
            await job.wait()
            if job.error:
                raise CallError(job.error)
            return

        # If we don't have a pool configured in the database, try to find the
        # first data pool to put it on.
        if not dbconfig['pool']:
            pool = None
            for p in await self.middleware.call('pool.query',
                                                [('encrypt', '!=', '2')],
                                                {'order_by': ['encrypt']}):
                if (exclude_pool and p['name']
                        == exclude_pool) or await self.middleware.call(
                            'pool.dataset.query',
                            [['name', '=', p['name']],
                             [
                                 'OR',
                                 [['key_format.value', '=', 'PASSPHRASE'],
                                  ['locked', '=', True]]
                             ]]):
                    continue
                if p['is_decrypted']:
                    pool = p
                    break
            if pool:
                job = await self.middleware.call('systemdataset.update',
                                                 {'pool': pool['name']})
                await job.wait()
                if job.error:
                    raise CallError(job.error)
                return

        if not config['basename']:
            if os.path.exists(SYSDATASET_PATH):
                try:
                    os.rmdir(SYSDATASET_PATH)
                except Exception:
                    self.logger.debug('Failed to remove system dataset dir',
                                      exc_info=True)
            return config

        if not config['is_decrypted']:
            return

        if await self.__setup_datasets(config['pool'], config['uuid']):
            # There is no need to wait this to finish
            # Restarting rrdcached will ensure that we start/restart collectd as well
            asyncio.ensure_future(
                self.middleware.call('service.restart', 'rrdcached'))

        if not os.path.isdir(SYSDATASET_PATH):
            if os.path.exists(SYSDATASET_PATH):
                os.unlink(SYSDATASET_PATH)
            os.makedirs(SYSDATASET_PATH)

        # Make sure ACLs stay disabled on the system dataset
        acltype = await self.middleware.call('zfs.dataset.query',
                                             [('id', '=', config['basename'])])
        if acltype and acltype[0]['properties']['acltype']['value'] != 'off':
            await self.middleware.call(
                'zfs.dataset.update',
                config['basename'],
                {'properties': {
                    'acltype': {
                        'value': 'off'
                    }
                }},
            )

        if mount:
            await self.__mount(config['pool'], config['uuid'])

            corepath = f'{SYSDATASET_PATH}/cores'
            if os.path.exists(corepath):
                # FIXME: corefile for LINUX
                if osc.IS_FREEBSD:
                    # FIXME: sysctl module not working
                    await run('sysctl', f"kern.corefile='{corepath}/%N.core'")
                os.chmod(corepath, 0o775)

            await self.__nfsv4link(config)
            await self.middleware.call('smb.configure')
            await self.middleware.call('dscache.initialize')

        return config
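The pool-selection condition above is dense. Read as a predicate, a pool qualifies when it is not the excluded pool, is decrypted, and its root dataset is neither passphrase-keyed nor locked; a hypothetical restatement over already-fetched pool and dataset dicts:

    def eligible(pool, root_dataset, exclude_pool=None):
        # Hypothetical restatement of the filter in the loop above:
        # skip the excluded pool, skip pools whose root dataset is
        # passphrase-keyed or locked, and require a decrypted pool.
        if exclude_pool and pool['name'] == exclude_pool:
            return False
        if root_dataset['key_format']['value'] == 'PASSPHRASE':
            return False
        if root_dataset['locked']:
            return False
        return bool(pool['is_decrypted'])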
Example #28
 async def legacy_validate(self, keytab):
     err = await self._validate({'file': keytab})
     try:
         err.check()
     except Exception as e:
         raise CallError(e)
Example #29
 async def will_perform_activity(self, name):
     if not await self.middleware.call(
             'network.general.can_perform_activity', name):
         raise CallError(
             f'Network activity "{self.activities[name]}" is disabled')
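This is intended as a guard at the top of any method that generates network traffic: call it first, and a disabled activity aborts the request with a CallError before anything goes out on the wire. A hypothetical call site:

    async def check_available(self):
        # Bail out before any traffic is generated if the 'update'
        # network activity has been disabled by the administrator.
        await self.will_perform_activity('update')
        ...  # network access is allowed past this point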
Example #30
    def statfs(self, path):
        """
        Return stats from the filesystem of a given path.

        Paths on clustered volumes may be specified with the path prefix
        `CLUSTER:<volume name>`. For example, to list directories
        in the directory 'data' in the clustered volume `smb01`, the
        path should be specified as `CLUSTER:smb01/data`.

        Raises:
            CallError(ENOENT) - Path not found
        """
        # check to see if this is a clustered path and if it is
        # resolve it to an absolute path
        # NOTE: this converts path prefixed with 'CLUSTER:' to '/cluster/...'
        path = self.resolve_cluster_path(path, ignore_ctdb=True)

        allowed_prefixes = ('/mnt/', FuseConfig.FUSE_PATH_BASE.value)
        if not path.startswith(allowed_prefixes):
            # if path doesn't start with '/mnt/' bail early
            raise CallError(
                f'Path must start with {" or ".join(allowed_prefixes)}')
        elif path == '/mnt/':
            # means the path given to us was a literal '/mnt/' which is incorrect.
            # NOTE: if the user provided 'CLUSTER:' as the literal path then
            # self.resolve_cluster_path() will raise a similar error
            raise CallError('Path must include more than "/mnt/"')

        try:
            st = os.statvfs(path)
        except FileNotFoundError:
            raise CallError('Path not found.', errno.ENOENT)

        # get the closest mountpoint to the path provided
        mountpoint = pathlib.Path(path)
        while not mountpoint.is_mount():
            mountpoint = mountpoint.parent.absolute()

        # strip the `/mnt/` or `/cluster/` prefix from the mountpoint
        device = mountpoint.as_posix().removeprefix('/mnt/')
        device = device.removeprefix('/cluster/')

        # we only look for /mnt/ or /cluster/ paths and, currently,
        # those 2 paths are limited to zfs and/or fuse.glusterfs
        fstype = 'zfs' if path.startswith('/mnt/') else 'fuse.glusterfs'

        return {
            'flags': [],
            'fstype': fstype,
            'source': device,
            'dest': mountpoint.as_posix(),
            'blocksize': st.f_frsize,
            'total_blocks': st.f_blocks,
            'free_blocks': st.f_bfree,
            'avail_blocks': st.f_bavail,
            'files': st.f_files,
            'free_files': st.f_ffree,
            'name_max': st.f_namemax,
            'fsid': [],
            'total_bytes': st.f_blocks * st.f_frsize,
            'free_bytes': st.f_bfree * st.f_frsize,
            'avail_bytes': st.f_bavail * st.f_frsize,
        }
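Since blocksize is st.f_frsize, the byte figures at the end are simply block counts multiplied by the fragment size, so a consumer can derive usage percentages directly. A hypothetical caller (assuming the method is exposed as filesystem.statfs and a middleware handle is in scope):

    # Hypothetical consumer of the dict returned above
    stats = middleware.call_sync('filesystem.statfs', '/mnt/tank')
    used = stats['total_bytes'] - stats['free_bytes']
    print(f"{stats['source']} mounted at {stats['dest']}: "
          f"{100 * used / stats['total_bytes']:.1f}% used")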