Example #1
File: zfs.py Project: freenas/freenas
 def query(self, filters=None, options=None):
     """
     Query all ZFS Snapshots with `query-filters` and `query-options`.
     """
     # Special case for faster listing of snapshot names (#53149)
     if (
         options and options.get('select') == ['name'] and (
             not filters or
             filter_getattrs(filters).issubset({'name', 'pool'})
         )
     ):
         # Using zfs list -o name is dozens of times faster than py-libzfs
         cmd = ['zfs', 'list', '-H', '-o', 'name', '-t', 'snapshot']
         order_by = options.get('order_by')
         # -s name makes it even faster
         if not order_by or order_by == ['name']:
             cmd += ['-s', 'name']
         cp = subprocess.run(
             cmd,
             stdout=subprocess.PIPE,
             stderr=subprocess.PIPE,
             universal_newlines=True,
         )
         if cp.returncode != 0:
             raise CallError(f'Failed to retrieve snapshots: {cp.stderr}')
         snaps = [
             {'name': i, 'pool': i.split('/', 1)[0]}
             # splitlines() yields no rows for empty output, avoiding a bogus '' entry
             for i in cp.stdout.strip().splitlines()
         ]
         if filters:
             return filter_list(snaps, filters, options)
         return snaps
     with libzfs.ZFS() as zfs:
         # Handle `id` filter to avoid getting all snapshots first
         snapshots = []
         if filters and len(filters) == 1 and list(filters[0][:2]) == ['id', '=']:
             try:
                 snapshots.append(zfs.get_snapshot(filters[0][2]).__getstate__())
             except libzfs.ZFSException as e:
                 if e.code != libzfs.Error.NOENT:
                     raise
         else:
             for i in zfs.snapshots:
                 try:
                     snapshots.append(i.__getstate__())
                 except libzfs.ZFSException as e:
                     # snapshot may have been deleted while this is running
                     if e.code != libzfs.Error.NOENT:
                         raise
     # FIXME: awful performance with hundreds/thousands of snapshots
     return filter_list(snapshots, filters, options)
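Every example on this page funnels rows through filter_list, which applies `query-filters` (a list of [field, operator, value] triples, with OR groupings also supported) and `query-options` (a dict with keys such as select, order_by and get). As a minimal sketch of the convention, using invented snapshot rows and an assumed import path:

    from middlewared.utils import filter_list  # import path assumed

    snaps = [
        {'name': 'tank/data@daily-0', 'pool': 'tank'},
        {'name': 'tank/data@daily-1', 'pool': 'tank'},
        {'name': 'boot/grub@backup', 'pool': 'boot'},
    ]

    # Keep only `tank` snapshots, select just the name key, sort descending.
    result = filter_list(
        snaps,
        [['pool', '=', 'tank']],                      # query-filters
        {'select': ['name'], 'order_by': ['-name']},  # query-options
    )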
Example #2
    async def query(self, filters, options):
        peers = []
        if await self.middleware.call('service.started', 'glusterd'):
            peers = await self.middleware.call('gluster.peer.status')
            peers = list(map(lambda i: dict(i, id=i['uuid']), peers))

        return filter_list(peers, filters, options)
Example #3
    def query(self, filters=None, options=None):
        """
        In `query-options` we can provide `extra` arguments which control which data should be retrieved
        for a dataset.

        `query-options.extra.top_level_properties` is a list of properties which we would like to include in the
        top level dict of the dataset. It defaults to adding only the mountpoint key, keeping legacy behavior.
        If no top level properties are desired, an empty list should be passed. If null is specified, the
        mountpoint key is added to the top level dict whenever it is present in `query-options.extra.properties`
        or that is null as well.

        `query-options.extra.properties` is a list of properties which should be retrieved. If null (the
        default), all properties are retrieved; if empty, no properties are retrieved (`mountpoint` is special
        in this case and is controlled by the `query-options.extra.mountpoint` attribute).

        We provide two ways in which zfs.dataset.query returns a dataset's data. The first is a flat structure
        (the default), where all the datasets in the system are returned as separate objects which also contain
        all the data there is for their children. This retrieval type is slightly slower because of the
        duplicates present in each object.
        The second type is hierarchical, where only top level datasets are returned in the list and each
        contains all of its children under the `children` key. This retrieval type is slightly faster.
        These options are controlled by the `query-options.extra.flat` attribute, which defaults to true.

        `query-options.extra.user_properties` controls whether user defined properties of datasets should be
        retrieved.

        While we provide a way to exclude all properties from data retrieval, there is also a single attribute,
        `query-options.extra.retrieve_properties`, which if set to false ensures that no properties are
        retrieved at all, overriding any other property retrieval attribute.
        """
        options = options or {}
        extra = options.get('extra', {}).copy()
        top_level_props = None if extra.get(
            'top_level_properties'
        ) is None else extra['top_level_properties'].copy()
        props = extra.get('properties', None)
        flat = extra.get('flat', True)
        user_properties = extra.get('user_properties', True)
        retrieve_properties = extra.get('retrieve_properties', True)
        if not retrieve_properties:
            # This is a short hand version where consumer can specify that they don't want any property to
            # be retrieved
            user_properties = False
            props = []

        with libzfs.ZFS() as zfs:
            # Handle `id` filter specially to avoid getting all datasets
            kwargs = dict(props=props,
                          top_level_props=top_level_props,
                          user_props=user_properties)
            if filters and len(filters) == 1 and list(
                    filters[0][:2]) == ['id', '=']:
                kwargs['datasets'] = [filters[0][2]]

            datasets = zfs.datasets_serialized(**kwargs)
            if flat:
                datasets = self.flatten_datasets(datasets)
            else:
                datasets = list(datasets)

        return filter_list(datasets, filters, options)
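To make the `extra` knobs described in the docstring concrete, a hypothetical invocation could look like this (the client object and call style are assumptions for illustration):

    # Hypothetical middleware client call; names are illustrative only.
    datasets = middleware.call_sync('zfs.dataset.query', [['id', '=', 'tank/apps']], {
        'extra': {
            'flat': False,               # hierarchical: children nested under `children`
            'properties': ['used'],      # retrieve only the `used` property
            'top_level_properties': [],  # copy no properties into the top level dict
            'user_properties': False,    # skip user defined properties
        },
    })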
Example #4
File: zfs.py Project: tejp/freenas
 def query(self, filters=None, options=None):
     # If we are only filtering by name, pool and type we can use
     # zfs(8) which is much faster than py-libzfs
     if (
         options and options.get('select') and set(options['select']).issubset({'name', 'pool', 'type'}) and
         filter_getattrs(filters).issubset({'name', 'pool', 'type'})
     ):
         cp = subprocess.run([
             'zfs', 'list', '-H', '-o', 'name,type', '-t', 'filesystem,volume',
         ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8')
         datasets = []
         # splitlines() yields no rows for empty output
         for i in cp.stdout.strip().splitlines():
             name, type_ = i.split('\t')
             pool = name.split('/', 1)[0]
             datasets.append({
                 'name': name,
                 'pool': pool,
                 'type': type_.upper(),
             })
     else:
         with libzfs.ZFS() as zfs:
             # Handle `id` filter specially to avoid getting all datasets
             if filters and len(filters) == 1 and list(filters[0][:2]) == ['id', '=']:
                 try:
                     datasets = [zfs.get_dataset(filters[0][2]).__getstate__()]
                 except libzfs.ZFSException:
                     datasets = []
             else:
                 datasets = [i.__getstate__() for i in zfs.datasets]
     return filter_list(datasets, filters, options)
Example #5
 def query(self, filters, options):
     data = []
     for name, iface in netif.list_interfaces().items():
         if name in ('lo0', 'pfsync0', 'pflog0'):
             continue
         data.append(self.iface_extend(iface.__getstate__()))
     return filter_list(data, filters, options)
Example #6
    def query(self, filters, options):
        enclosures = []
        for enc in self.__get_enclosures():
            enclosure = {
                "id": enc.encid,
                "name": enc.name,
                "model": enc.model,
                "controller": enc.controller,
                "label": enc.label,
                "elements": [],
            }

            for name, elems in enc.iter_by_name().items():
                header = None
                elements = []
                has_slot_status = False

                for elem in elems:
                    header = list(elem.get_columns().keys())
                    element = {
                        "slot": elem.slot,
                        "data": dict(zip(elem.get_columns().keys(), elem.get_values())),
                        "name": elem.name,
                        "descriptor": elem.descriptor,
                        "status": elem.status,
                        "value_raw": hex(elem.value_raw),
                    }
                    if hasattr(elem, "device_slot_set"):
                        has_slot_status = True
                        element["fault"] = elem.fault
                        element["identify"] = elem.identify

                    elements.append(element)

                if header is not None and elements:
                    enclosure["elements"].append({
                        "name": name,
                        "descriptor": enc.descriptors.get(name, ""),
                        "header": header,
                        "elements": elements,
                        "has_slot_status": has_slot_status,
                    })

            enclosures.append(enclosure)

        return filter_list(enclosures,
                           filters=filters or [],
                           options=options or {})
Example #7
 async def query(self, filters=None, options=None):
     async with api_client() as (api, context):
         return filter_list([
             d.to_dict() for d in (
                 await context['core_api'].list_persistent_volume_claim_for_all_namespaces()
             ).items
         ], filters, options)
Example #8
File: images.py Project: Qapf/freenas
    async def query(self, filters=None, options=None):
        results = []
        if not await self.middleware.call('service.started', 'docker'):
            return results

        update_cache = await self.middleware.call(
            'container.image.image_update_cache')
        system_images = await self.middleware.call(
            'container.image.get_system_images_tags')

        async with aiodocker.Docker() as docker:
            for image in await docker.images.list():
                repo_tags = image['RepoTags'] or []
                system_image = any(tag in system_images for tag in repo_tags)
                results.append({
                    'id': image['Id'],
                    'labels': image['Labels'],
                    'repo_tags': repo_tags,
                    'size': image['Size'],
                    'created': datetime.fromtimestamp(int(image['Created'])),
                    'dangling': len(repo_tags) == 1 and repo_tags[0] == '<none>:<none>',
                    'update_available': not system_image and any(update_cache[r] for r in repo_tags),
                    'system_image': system_image,
                })
        return filter_list(results, filters, options)
Example #9
    async def catalog_extend(self, catalog, context):
        catalog.update({
            'location': os.path.join(
                context['catalogs_dir'],
                convert_repository_to_path(catalog['repository'], catalog['branch'])
            ),
            'id': catalog['label'],
        })
        extra = context['extra']
        if extra.get('item_details'):
            catalog.update(await self.normalize_data_from_item_job(
                catalog['id'], context['item_jobs'][catalog['id']]))
            if catalog['cached']:
                return catalog

            # We would like to report progress here for catalogs which have not been cached and
            # whose data has therefore not been retrieved yet
            caching_job = filter_list(context['all_jobs'], [[
                'arguments', '=', [catalog['id'], context['item_sync_params']]
            ]])
            if caching_job:
                catalog.update({
                    'caching_job': caching_job[0],
                    'caching_progress': caching_job[0]['progress'],
                })
        return catalog
Example #10
 async def query(self, filters=None, options=None):
     """
     Use query-filters to search the SMB share ACLs present on the server.
     """
     share_acls = await self._view_all({'resolve_sids': True})
     ret = filter_list(share_acls, filters, options)
     return ret
Example #11
File: iscsi.py Project: freenas/freenas
    def sessions(self, filters, options):
        """
        Get a list of currently running iSCSI sessions. This includes initiator and target names
        and the unique connection IDs.
        """
        def transform(tag, text):
            if tag in (
                'target_portal_group_tag', 'max_data_segment_length', 'max_burst_length',
                'first_burst_length',
            ) and text.isdigit():
                return int(text)
            if tag in ('immediate_data', 'iser'):
                return bool(int(text))
            if tag in ('header_digest', 'data_digest', 'offload') and text == 'None':
                return None
            return text

        cp = subprocess.run(['ctladm', 'islist', '-x'], capture_output=True, text=True)
        connections = etree.fromstring(cp.stdout)
        sessions = []
        for connection in connections.xpath("//connection"):
            sessions.append({
                i.tag: transform(i.tag, i.text) for i in connection.iterchildren()
            })
        return filter_list(sessions, filters, options)
Example #12
 def system_routes(self, filters, options):
     """
     Get current/applied network routes.
     """
     rtable = netif.RoutingTable()
      return filter_list([r.__getstate__() for r in rtable.routes], filters, options)
Example #13
    def query(self, filters, options):
        """
        Query all ZFS Snapshots with `query-filters` and `query-options`.
        """
        # Special case for faster listing of snapshot names (#53149)
        if (
            options and options.get('select') == ['name'] and (
                not filters or
                filter_getattrs(filters).issubset({'name', 'pool'})
            )
        ):
            with libzfs.ZFS() as zfs:
                snaps = zfs.snapshots_serialized(['name'])

            if filters or len(options) > 1:
                return filter_list(snaps, filters, options)
            return snaps

        extra = copy.deepcopy(options['extra'])
        properties = extra.get('properties')
        with libzfs.ZFS() as zfs:
            # Handle `id` filter to avoid getting all snapshots first
            kwargs = dict(holds=False, mounted=False, props=properties)
            if filters and len(filters) == 1 and len(filters[0]) == 3 and filters[0][0] in (
                'id', 'name'
            ) and filters[0][1] == '=':
                kwargs['datasets'] = [filters[0][2]]

            snapshots = zfs.snapshots_serialized(**kwargs)

        # FIXME: awful performance with hundreds/thousands of snapshots
        select = options.pop('select', None)
        result = filter_list(snapshots, filters, options)

        if not select or 'retention' in select:
            if isinstance(result, list):
                result = self.middleware.call_sync('zettarepl.annotate_snapshots', result)
            elif isinstance(result, dict):
                result = self.middleware.call_sync('zettarepl.annotate_snapshots', [result])[0]

        if select:
            if isinstance(result, list):
                result = [{k: v for k, v in item.items() if k in select} for item in result]
            elif isinstance(result, dict):
                result = {k: v for k, v in result.items() if k in select}

        return result
Example #14
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({
                    'size': None,
                    'mode': None,
                    'uid': None,
                    'gid': None
                })
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})
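Since listdir funnels its entries through filter_list as well, the same filter/option conventions apply. A hypothetical call (path invented, `fs` assumed to be an instance of this service):

    # Only subdirectories of /mnt/tank, sorted by name.
    dirs = fs.listdir(
        '/mnt/tank',
        filters=[['type', '=', 'DIRECTORY']],
        options={'order_by': ['name']},
    )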
Example #15
    async def query(self, objtype='USERS', filters=None, options=None):
        """
        Query User / Group cache with `query-filters` and `query-options`.

        `objtype`: 'USERS' or 'GROUPS'

        Each directory service, when enabled, will generate a user and group cache using its
        respective 'fill_cache' method (ex: ldap.fill_cache). The cache entry is formatted
        as follows:

        The cache can be refreshed by calling 'dscache.refresh'. The actual cache fill
        will run in the background (potentially for a long time). The exact duration of the
        fill process depends on factors such as the number of users and groups, and network
        performance. In environments with a large number of users (over a few thousand),
        administrators may consider disabling caching. In the case of active directory,
        the dscache will continue to be filled using entries from samba's gencache (the end
        result in this case will be that only users and groups actively accessing the share
        will be populated in UI dropdowns). In the case of other directory services, the
        users and groups will simply not appear in query results (UI features).

        """
        ds_enabled = {}
        res = []

        is_name_check = bool(filters and len(filters) == 1
                             and filters[0][0] in ['username', 'groupname'])

        for ds in ['activedirectory', 'ldap', 'nis']:
            ds_enabled[ds] = await self.middleware.call(f'{ds}.get_state') != 'DISABLED'

        res.extend(await self.middleware.call(f'{objtype.lower()[:-1]}.query', filters, options))

        for dstype, enabled in ds_enabled.items():
            if enabled:
                # Avoid iteration here if possible: perform a direct key lookup when the
                # single "=" filter is on a username or groupname.
                if is_name_check and filters[0][1] == '=':
                    cache = (await self.middleware.call(f'{dstype}.get_cache'))[objtype.lower()]
                    name = filters[0][2]
                    return [cache.get(name)] if cache.get(name) else []

                else:
                    res.extend(filter_list(
                        list((await self.middleware.call(f'{dstype}.get_cache'))[objtype.lower()].values()),
                        filters, options,
                    ))

        return res
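As the comment above notes, a single `=` filter on `username` or `groupname` short-circuits into a direct cache key lookup instead of filtering the entire cache. A hypothetical call that would hit that fast path:

    # Hypothetical call; takes the direct-lookup branch because the only
    # filter is an equality check on `username`.
    user = await self.middleware.call(
        'dscache.query', 'USERS', [['username', '=', 'bob']], {}
    )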
Example #16
 async def query(self, filters, options):
     async with api_client() as (api, context):
          return filter_list([
              d for d in (await context['custom_object_api'].list_cluster_custom_object(
                  group=self.GROUP, version=self.VERSION, plural=self.PLURAL
              ))['items']
          ], filters, options)
Example #17
    async def validate_certificate_authority(self, verrors, value, question, schema_name, release_data):
        if not value:
            return

        if not filter_list(
            await self.middleware.call('chart.release.certificate_authority_choices'), [['id', '=', value]]
        ):
            verrors.add(schema_name, 'Unable to locate certificate authority.')
Example #18
File: ipmi.py Project: bmhughes/freenas
 async def query_sel(self, filters, options):
     """
     Query IPMI System Event Log
     """
     return filter_list([
         record._asdict() for record in parse_ipmitool_output(await run(
             'ipmitool', '-c', 'sel', 'elist'))
     ], filters, options)
Example #19
 def get_nfs3_clients(self, filters, options):
     """
     Read contents of rmtab. This information may not
     be accurate due to stale entries. This is ultimately
     a limitation of the NFSv3 protocol.
     """
     rmtab = self.get_rmtab()
     return filter_list(rmtab, filters, options)
Example #20
    async def query(self, filters, options):
        baseboard_manufacturer = ((await run(
            ["dmidecode", "-s", "baseboard-manufacturer"],
            check=False)).stdout.decode(errors="ignore")).strip()

        failover_hardware = await self.middleware.call("failover.hardware")

        is_gigabyte = baseboard_manufacturer == "GIGABYTE"
        is_m_series = baseboard_manufacturer == "Supermicro" and failover_hardware == "ECHOWARP"

        if not (is_gigabyte or is_m_series):
            return []

        sensors = await self._sensor_list()
        if is_m_series:
            for sensor in sensors:
                ps_match = re.match("(PS[0-9]+) Status", sensor["name"])
                if ps_match:
                    ps = ps_match.group(1)

                    if sensor["value"] == 0:
                        # PMBus (which reports the PSU status) cannot be probed concurrently because it is not a
                        # shared bus.
                        # HA systems show a false positive "No presence detected" more often because both
                        # controllers randomly probe the status of the PSUs at the same time.
                        for i in range(3):
                            self.logger.info("%r Status = 0x0, rereading", ps)
                            await asyncio.sleep(random.uniform(1, 3))

                            found = False
                            for sensor_2 in await self._sensor_list():
                                ps_match_2 = re.match("(PS[0-9]+) Status", sensor_2["name"])
                                if ps_match_2:
                                    ps_2 = ps_match_2.group(1)
                                    if ps == ps_2:
                                        if sensor_2["value"] != 0:
                                            sensor.update(sensor_2)
                                            found = True
                                            break
                            if found:
                                break

                    sensor["notes"] = []
                    ps_failures = [
                        (0x2, "Failure detected"),
                        (0x4, "Predictive failure"),
                        (0x8, "Power Supply AC lost"),
                        (0x10, "AC lost or out-of-range"),
                        (0x20, "AC out-of-range, but present"),
                    ]
                    if not (sensor["value"] & 0x1):
                        sensor["notes"].append("No presence detected")
                    for b, title in ps_failures:
                        if sensor["value"] & b:
                            sensor["notes"].append(title)

        return filter_list(sensors, filters, options)
Example #21
    def sessions(self, filters, options):
        sessions = []
        global_info = self.middleware.call_sync('iscsi.global.config')
        base_path = '/sys/kernel/scst_tgt/targets/iscsi'
        for target_dir in glob.glob(f'{base_path}/{global_info["basename"]}*'):
            target = target_dir.rsplit('/', 1)[-1]
            for session in os.listdir(os.path.join(target_dir, 'sessions')):
                session_dir = os.path.join(target_dir, 'sessions', session)
                ip_file = glob.glob(f'{session_dir}/*/ip')
                if not ip_file:
                    continue

                # The initiator alias is another name sent by the initiator, but we are unable to retrieve it in scst
                session_dict = {
                    'initiator': session.rsplit('#', 1)[0],
                    'initiator_alias': None,
                    'target': target,
                    'target_alias': target.rsplit(':', 1)[-1],
                    'header_digest': None,
                    'data_digest': None,
                    'max_data_segment_length': None,
                    'max_receive_data_segment_length': None,
                    'max_xmit_data_segment_length': None,
                    'max_burst_length': None,
                    'first_burst_length': None,
                    'immediate_data': False,
                    'iser': False,
                    'offload': False,  # Chelsio NIC iSCSI offload driver; we are not using it so far
                }
                with open(ip_file[0], 'r') as f:
                    session_dict['initiator_addr'] = f.read().strip()
                for k, f, op in (
                    ('header_digest', 'HeaderDigest', None),
                    ('data_digest', 'DataDigest', None),
                    ('max_burst_length', 'MaxBurstLength', int),
                    ('max_receive_data_segment_length', 'MaxRecvDataSegmentLength', int),
                    ('max_xmit_data_segment_length', 'MaxXmitDataSegmentLength', int),
                    ('first_burst_length', 'FirstBurstLength', int),
                    ('immediate_data', 'ImmediateData', lambda i: i == 'Yes'),
                ):
                    f_path = os.path.join(session_dir, f)
                    if os.path.exists(f_path):
                        with open(f_path, 'r') as fd:
                            data = fd.read().strip()
                            if data != 'None':
                                if op:
                                    data = op(data)
                                session_dict[k] = data

                # We get separate recv/xmit data segment lengths; to stay consistent with FreeBSD we take
                # the maximum of the two and report it as max_data_segment_length
                if session_dict['max_xmit_data_segment_length'] and session_dict['max_receive_data_segment_length']:
                    session_dict['max_data_segment_length'] = max(
                        session_dict['max_receive_data_segment_length'], session_dict['max_xmit_data_segment_length']
                    )

                sessions.append(session_dict)
        return filter_list(sessions, filters, options)
Example #22
    async def catalog_extend_context(self, rows, extra):
        k8s_dataset = (await self.middleware.call('kubernetes.config'))['dataset']
        catalogs_dir = os.path.join(
            '/mnt', k8s_dataset, 'catalogs'
        ) if k8s_dataset else f'{TMP_IX_APPS_DIR}/catalogs'
        context = {
            'catalogs_dir': catalogs_dir,
            'extra': extra or {},
            'catalogs_context': {},
        }
        if extra.get('item_details'):
            item_sync_params = await self.middleware.call(
                'catalog.sync_items_params')
            item_jobs = await self.middleware.call(
                'core.get_jobs',
                [['method', '=', 'catalog.items'], ['state', '=', 'RUNNING']])
            for row in rows:
                label = row['label']
                catalog_info = {
                    'item_job': await self.middleware.call('catalog.items', label, {
                        'cache': True,
                        'cache_only': await self.official_catalog_label() != row['label'],
                        'retrieve_all_trains': extra.get('retrieve_all_trains', True),
                        'trains': extra.get('trains', []),
                    }),
                    'cached': label == OFFICIAL_LABEL or await self.middleware.call(
                        'catalog.cached', label, False
                    ) or await self.middleware.call('catalog.cached', label, True),
                    'normalized_progress': None,
                }
                if not catalog_info['cached']:
                    caching_job = filter_list(
                        item_jobs, [['arguments', '=', [row['label'], item_sync_params]]]
                    )
                    if not caching_job:
                        caching_job_obj = await self.middleware.call(
                            'catalog.items', label, item_sync_params)
                        caching_job = caching_job_obj.__encode__()
                    else:
                        caching_job = caching_job[0]

                    catalog_info['normalized_progress'] = {
                        'caching_job': caching_job,
                        'caching_progress': caching_job['progress'],
                    }
                context['catalogs_context'][label] = catalog_info

        return context
Example #23
 def query(self, filters=None, options=None):
     """
     Query all Boot Environments with `query-filters` and `query-options`.
     """
     results = []
     for clone in Update.ListClones():
         clone['id'] = clone['name']
         results.append(clone)
     return filter_list(results, filters, options)
Example #24
    async def query(self, filters, options):
        async with api_client() as (api, context):
            replica_sets = [
                d.to_dict() for d in (
                    await context['apps_api'].list_replica_set_for_all_namespaces()
                ).items
            ]

        return filter_list(replica_sets, filters, options)
Example #25
    async def query(self, filters, options):
        vols = []
        if await self.middleware.call('service.started', 'glusterd'):
            method = volume.status_detail
            # Don't shadow the `options` argument that filter_list() consumes below
            method_options = {'kwargs': {'group_subvols': True}}
            vols = await self.middleware.call('gluster.method.run', method, method_options['kwargs'])
            vols = list(map(lambda i: dict(i, id=i['name']), vols))

        return filter_list(vols, filters, options)
Example #26
 async def query(self, filters, options):
     options = options or {}
     label_selector = options.get('extra', {}).get('label_selector')
     kwargs = {k: v for k, v in [('label_selector', label_selector)] if v}
     async with api_client() as (api, context):
         return filter_list(
             [d.to_dict() for d in (await context['core_api'].list_secret_for_all_namespaces(**kwargs)).items],
             filters, options
         )
Example #27
File: rrd.py Project: binzyw/freenas
 def get_disk_description(self, name):
     disk_desc = ''
     try:
         disk_desc = filter_list(get_disks(), [('name', '=', name)], {'get': True})['description']
      except BaseException:
          # It would be lame to fail just because we could not get the disk description,
          # but let's log it
          log.debug(f'Failed to get disk description of disk: {name}', exc_info=True)
     return f'Disk description: {disk_desc}' if disk_desc else ''
Example #28
File: pool.py Project: razzfazz/freenas
 def query(self, filters, options):
      # Optimization for cases in which filters can be applied directly at zfs.dataset.query
     zfsfilters = []
     for f in filters:
         if len(f) == 3:
             if f[0] in ('id', 'name', 'pool', 'type'):
                 zfsfilters.append(f)
     datasets = self.middleware.call_sync('zfs.dataset.query', zfsfilters, None)
     return filter_list(self.__transform(datasets), filters, options)
Example #29
    def generate_failover_data(self):

        # only care about name, guid, and status
        volumes = self.run_call('pool.query', [],
                                {'select': ['name', 'guid', 'status']})

        # get list of all services on system
        # we query db directly since on SCALE calling `service.query`
        # actually builds a list of all services and includes if they're
        # running or not. Probing all services on the system to see if
        # they're running takes longer than what we need since failover
        # needs to be as fast as possible.
        services = self.run_call('datastore.query', 'services_services')

        failovercfg = self.run_call('failover.config')
        interfaces = self.run_call('interface.query')
        internal_ints = self.run_call('failover.internal_interfaces')

        data = {
            'services': services,
            'disabled': failovercfg['disabled'],
            'master': failovercfg['master'],
            'timeout': failovercfg['timeout'],
            'groups': defaultdict(list),
            'volumes': volumes,
            'non_crit_interfaces': [
                i['id'] for i in filter_list(interfaces, [
                    ('failover_critical', '!=', True),
                ])
            ],
            'internal_interfaces': internal_ints,
        }

        for i in filter_list(interfaces, [('failover_critical', '=', True)]):
            data['groups'][i['failover_group']].append(i['id'])

        return data
Example #30
    def query(self, filters, options):
        # logic is as follows:
        #   1. if the ctdb daemon is started,
        #       ctdb just reads the /etc public ip file and loads
        #       the ips written there into the cluster. However,
        #       if a public ip is added/removed, it doesn't
        #       mean the ctdb cluster has been reloaded to
        #       see the changes in the file. So return what
        #       the daemon sees.
        #   2. if the ctdb shared volume is mounted and the /etc public
        #       ip file exists and is a symlink and the symlink points
        #       to the /cluster public ip file, then read it and
        #       return the contents
        ips = []
        if self.middleware.call_sync('service.started', 'ctdb'):
            ips = self.middleware.call_sync('ctdb.general.ips')
            ips = list(map(lambda i: dict(i, id=i['pnn']), ips))
        else:
            try:
                shared_vol = Path(CTDBConfig.CTDB_LOCAL_MOUNT.value)
                mounted = shared_vol.is_mount()
            except Exception:
                # can happen when mounted but glusterd service
                # is stopped/crashed etc
                mounted = False

            if mounted:
                pub_ip_file = Path(CTDBConfig.GM_PUB_IP_FILE.value)
                etc_ip_file = Path(CTDBConfig.ETC_PUB_IP_FILE.value)
                if pub_ip_file.exists():
                    if etc_ip_file.is_symlink() and etc_ip_file.resolve() == pub_ip_file:
                        with open(pub_ip_file) as f:
                            for idx, i in enumerate(f.read().splitlines()):
                                # we build a list of dicts that match what the
                                # ctdb daemon returns if it's running to keep
                                # things consistent
                                if not i.startswith('#'):
                                    enabled = True
                                    public_ip = i.split('/')[0]
                                else:
                                    enabled = False
                                    public_ip = i.split('#')[1]

                                ips.append({
                                    'id': idx,
                                    'pnn': idx,
                                    'enabled': enabled,
                                    'public_ip': public_ip,
                                    'interfaces': [{
                                        'name': i.split()[-1],
                                        'active': False,
                                        'available': False,
                                    }]
                                })

        return filter_list(ips, filters, options)
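For reference, a hypothetical public ip file and the rows the loop above would build from it (addresses invented):

    # file contents
    192.168.0.10/24 eth0
    #192.168.0.11/24 eth0

    # resulting rows
    [
        {'id': 0, 'pnn': 0, 'enabled': True, 'public_ip': '192.168.0.10',
         'interfaces': [{'name': 'eth0', 'active': False, 'available': False}]},
        {'id': 1, 'pnn': 1, 'enabled': False, 'public_ip': '192.168.0.11/24 eth0',
         'interfaces': [{'name': 'eth0', 'active': False, 'available': False}]},
    ]

Note that for a commented-out line the code keeps everything after the '#' (mask and interface included) as public_ip, exactly as written in the file.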
Example #31
    def get_attrs_to_skip(self, data):
        skip_attrs = defaultdict(set)
        check_data = self.get_defaults(data, {}, ValidationErrors(), False) if not self.update else data
        for attr, attr_data in filter(
            lambda k: not filter_list([check_data], k[1]['filters']), self.conditional_defaults.items()
        ):
            for k in attr_data['attrs']:
                skip_attrs[k].update({attr})

        return skip_attrs
Example #32
File: zfs.py Project: razzfazz/freenas
 def query(self, filters, options):
     with libzfs.ZFS() as zfs:
          # Handle `id` filter specially to avoid getting all pools
         if filters and len(filters) == 1 and list(filters[0][:2]) == ['id', '=']:
             try:
                 pools = [zfs.get(filters[0][2]).__getstate__()]
             except libzfs.ZFSException:
                 pools = []
         else:
             pools = [i.__getstate__() for i in zfs.pools]
     return filter_list(pools, filters, options)
Example #33
File: zfs.py Project: freenas/freenas
 def query(self, filters, options):
     # We should not get datasets, there is zfs.dataset.query for that
     state_kwargs = {'datasets_recursive': False}
     with libzfs.ZFS() as zfs:
          # Handle `id` filter specially to avoid getting all pools
         if filters and len(filters) == 1 and list(filters[0][:2]) == ['id', '=']:
             try:
                 pools = [zfs.get(filters[0][2]).__getstate__(**state_kwargs)]
             except libzfs.ZFSException:
                 pools = []
         else:
             pools = [i.__getstate__(**state_kwargs) for i in zfs.pools]
     return filter_list(pools, filters, options)
Example #34
    def listdir(self, path, filters=None, options=None):
        """
        Get the contents of a directory.

        Each entry of the list consists of:
          name(str): name of the file
          path(str): absolute path of the entry
          realpath(str): absolute real path of the entry (if SYMLINK)
          type(str): DIRECTORY | FILE | SYMLINK | OTHER
          size(int): size of the entry
          mode(int): file mode/permission
          uid(int): user id of entry owner
          gid(int): group id of entry owner
        """
        if not os.path.exists(path):
            raise CallError(f'Directory {path} does not exist', errno.ENOENT)

        if not os.path.isdir(path):
            raise CallError(f'Path {path} is not a directory', errno.ENOTDIR)

        rv = []
        for entry in os.scandir(path):
            if entry.is_dir():
                etype = 'DIRECTORY'
            elif entry.is_file():
                etype = 'FILE'
            elif entry.is_symlink():
                etype = 'SYMLINK'
            else:
                etype = 'OTHER'

            data = {
                'name': entry.name,
                'path': entry.path,
                'realpath': os.path.realpath(entry.path) if etype == 'SYMLINK' else entry.path,
                'type': etype,
            }
            try:
                stat = entry.stat()
                data.update({
                    'size': stat.st_size,
                    'mode': stat.st_mode,
                    'uid': stat.st_uid,
                    'gid': stat.st_gid,
                })
            except FileNotFoundError:
                data.update({'size': None, 'mode': None, 'uid': None, 'gid': None})
            rv.append(data)
        return filter_list(rv, filters=filters or [], options=options or {})
Example #35
File: jail.py Project: razzfazz/freenas
    def query(self, filters=None, options=None):
        options = options or {}
        jail_identifier = None
        jails = []

        if filters and len(filters) == 1 and list(filters[0][:2]) == ['host_hostuuid', '=']:
            jail_identifier = filters[0][2]

        recursive = jail_identifier != 'default'

        try:
            jail_dicts = ioc.IOCage(
                jail=jail_identifier).get('all', recursive=recursive)

            if jail_identifier == 'default':
                jail_dicts['host_hostuuid'] = 'default'
                jails.append(jail_dicts)
            else:
                for jail in jail_dicts:
                    jail = list(jail.values())[0]
                    jail['id'] = jail['host_hostuuid']
                    if jail['dhcp'] == 'on':
                        uuid = jail['host_hostuuid']

                        if jail['state'] == 'up':
                            interface = jail['interfaces'].split(',')[0].split(':')[0]
                            if interface == 'vnet0':
                                # Inside jails they are epair0b
                                interface = 'epair0b'
                            ip4_cmd = ['jexec', f'ioc-{uuid}', 'ifconfig',
                                       interface, 'inet']
                            out = su.check_output(ip4_cmd)
                            jail['ip4_addr'] = f'{interface}|' \
                                f'{out.splitlines()[2].split()[1].decode()}'
                        else:
                            jail['ip4_addr'] = 'DHCP (not running)'  # keep the key consistent with the running branch
                    jails.append(jail)
        except BaseException:
            # Brandon is working on fixing this generic except, till then I
            # am not going to make the perfect the enemy of the good enough!
            self.logger.debug('iocage failed to fetch jails', exc_info=True)

        return filter_list(jails, filters, options)
Example #36
    async def query(self, filters=None, options=None):
        """
        Query all system services with `query-filters` and `query-options`.
        """
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        if jobs:
            done, pending = await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the coroutines.
            In case of error or timeout, provide UNKNOWN state.
            """
            result = None
            try:
                if task in done:
                    result = task.result()
            except Exception:
                pass
            if result is None:
                entry = jobs.get(task)
                self.logger.warn('Failed to get status for %s', entry['service'])
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)
Example #37
    async def query(self, filters=None, options=None):
        if options is None:
            options = {}
        options['prefix'] = 'srv_'

        services = await self.middleware.call('datastore.query', 'services.services', filters, options)

        # In case a single service has been requested
        if not isinstance(services, list):
            services = [services]

        jobs = {
            asyncio.ensure_future(self._get_status(entry)): entry
            for entry in services
        }
        if jobs:
            await asyncio.wait(list(jobs.keys()), timeout=15)

        def result(task):
            """
            Method to handle results of the greenlets.
            In case a greenlet has timed out, provide UNKNOWN state
            """
            try:
                result = task.result()
            except Exception:
                result = None
                self.logger.warn('Failed to get status', exc_info=True)
            if result is None:
                entry = jobs.get(task)
                entry['state'] = 'UNKNOWN'
                entry['pids'] = []
                return entry
            else:
                return result

        services = list(map(result, jobs))
        return filter_list(services, filters, options)
Example #38
File: ipmi.py Project: binzyw/freenas
    async def query(self, filters=None, options=None):
        result = []
        for channel in await self.channels():
            try:
                cp = await run('ipmitool', 'lan', 'print', str(channel))
            except subprocess.CalledProcessError as e:
                raise CallError(f'Failed to get details from channel {channel}: {e}')

            output = cp.stdout.decode()
            data = {'channel': channel, 'id': channel}
            for line in output.split('\n'):
                if ':' not in line:
                    continue

                name, value = line.split(':', 1)
                if not name:
                    continue

                name = name.strip()
                value = value.strip()

                if name == 'IP Address':
                    data['ipaddress'] = value
                elif name == 'Subnet Mask':
                    data['netmask'] = value
                elif name == 'Default Gateway IP':
                    data['gateway'] = value
                elif name == '802.1q VLAN ID':
                    if value == 'Disabled':
                        data['vlan'] = None
                    else:
                        data['vlan'] = value
                elif name == 'IP Address Source':
                    data['dhcp'] = value != 'Static Address'
            result.append(data)
        return filter_list(result, filters, options)
Example #39
def test__filter_list_regex_contains():
    assert len(filter_list(DATA, [['foo', '~', '.*foo.*']])) == 3
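These tests reference a module-level DATA fixture that this page does not show. A minimal sketch consistent with every filter_list assertion in the surrounding tests would be:

    # Hypothetical fixture; the real one may differ, but these rows satisfy
    # all of the assertion counts in the tests on this page.
    DATA = [
        {'foo': 'foo_', 'number': 1, 'list': [1]},
        {'foo': 'foo1', 'number': 2, 'list': [2]},
        {'foo': '1foo', 'number': 3, 'list': []},
    ]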
Example #40
def test__filter_list_gte():
    assert len(filter_list(DATA, [['number', '>=', 1]])) == 3
Example #41
def test__filter_list_lte():
    assert len(filter_list(DATA, [['number', '<=', 3]])) == 3
Example #42
def test__filter_list_nin():
    assert len(filter_list(DATA, [['number', 'nin', [1, 3]]])) == 1
Example #43
def test__filter_list_rnin():
    assert len(filter_list(DATA, [['list', 'rnin', 1]])) == 2
Example #44
def test__filter_list_OR_eq2():
    assert len(filter_list(DATA, [['OR', [
        ['number', '=', 1],
        ['number', '=', 2],
    ]]])) == 2
Example #45
def test__filter_list_regex_begins():
    assert len(filter_list(DATA, [['foo', '~', '^foo']])) == 2
Example #46
 async def query(self, filters, options):
     data = []
     resolvconf = (await run('resolvconf', '-l')).stdout.decode()
     for nameserver in RE_NAMESERVER.findall(resolvconf):
         data.append({'nameserver': nameserver})
     return filter_list(data, filters, options)
Example #47
def test__filter_list_ends():
    assert len(filter_list(DATA, [['foo', '$', '_']])) == 1
Example #48
def test__filter_list_starts():
    assert len(filter_list(DATA, [['foo', '^', 'foo']])) == 2
Example #49
 def query(filters=None, options=None):
     return filter_list(l, filters, options)
Example #50
File: zfs.py Project: razzfazz/freenas
 def query(self, filters, options):
     with libzfs.ZFS() as zfs:
          snapshots = [i.__getstate__() for i in zfs.snapshots]
     # FIXME: awful performance with hundreds/thousands of snapshots
     return filter_list(snapshots, filters, options)
Example #51
 def graphs(self, filters, options):
     return filter_list([i.__getstate__() for i in self.__rrds.values()], filters, options)
Example #52
 def get_jobs(self, filters=None, options=None):
     """Get the long running jobs."""
     jobs = filter_list([
         i.__encode__() for i in self.middleware.get_jobs().all().values()
     ], filters, options)
     return jobs