Example #1
 def get_default_routes(self):
     routes = self.query_routes()
     default_ipv4 = first_or_default(lambda r: r.netmask == ipaddress.ip_address('0.0.0.0'), routes)
     default_ipv6 = first_or_default(lambda r: r.netmask == ipaddress.ip_address('::'), routes)
     return {
         'ipv4': default_ipv4.gateway if default_ipv4 else None,
         'ipv6': default_ipv6.gateway if default_ipv6 else None
     }
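Every example on this page relies on the same helper, which the listing itself never defines. As a point of reference, here is a minimal sketch of what first_or_default presumably does; the signature is inferred from the calls below, and the actual implementation these projects import may differ in details:

def first_or_default(f, iterable, default=None):
    # Return the first item of 'iterable' for which the predicate 'f' is truthy,
    # or 'default' if nothing matches.
    # Passing None as the predicate (see Example #48) makes filter() fall back to
    # truthiness, so the first truthy element is returned.
    matches = list(filter(f, iterable))
    if matches:
        return matches[0]
    return default

With that in mind, Example #1 above simply picks the first route whose netmask is the all-zeros (unspecified) address for each address family and returns its gateway, falling back to None when no default route exists.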
Example #2
    def get_task(self, id):
        self.distribution_lock.acquire()
        t = first_or_default(lambda x: x.id == id, self.task_list)
        if not t:
            t = first_or_default(lambda x: x.id == id, self.task_queue.queue)

        self.distribution_lock.release()
        return t
Example #3
 def get_default_routes(self):
     routes = self.query_routes()
     default_ipv4 = first_or_default(lambda r: r['netmask'] == '0.0.0.0', routes)
     default_ipv6 = first_or_default(lambda r: r['netmask'] == '::', routes)
     return {
         'ipv4': default_ipv4['gateway'] if default_ipv4 else None,
         'ipv6': default_ipv6['gateway'] if default_ipv6 else None
     }
Example #4
    def run(self, id, dataset=None, snapshot=None):
        backup = self.datastore.get_by_id('backup', id)
        if not backup:
            raise TaskException(errno.ENOENT, 'Backup {0} not found'.format(id))

        manifest = self.run_subtask_sync('backup.query', id)
        if not manifest:
            raise TaskException(errno.ENOENT, 'No valid backup found in specified location')

        if not dataset:
            dataset = manifest['dataset']

        created_datasets = []
        snapshots = manifest['snapshots']
        unique_datasets = list(set(map(lambda s: s['name'].split('@')[0], snapshots)))
        unique_datasets.sort(key=lambda d: d.count('/'))
        provider = backup['provider']

        total = len(snapshots)
        done = 0

        for i in unique_datasets:
            snaps = list(filter(lambda s: s['name'].split('@')[0] == i, snapshots))
            snap = first_or_default(lambda s: not s['incremental'], snaps)
            local_dataset = i.replace(manifest['dataset'], dataset, 1)

            while True:
                self.set_progress(done / total * 100, 'Receiving {0} into {1}'.format(snap['name'], local_dataset))

                if local_dataset != dataset and local_dataset not in created_datasets:
                    self.run_subtask_sync(
                        'zfs.create_dataset', local_dataset, 'FILESYSTEM'
                    )

                    created_datasets.append(local_dataset)

                rfd, wfd = os.pipe()
                self.join_subtasks(
                    self.run_subtask(
                        'backup.{0}.get'.format(provider),
                        backup['properties'],
                        snap['filename'],
                        FileDescriptor(wfd)
                    ),
                    self.run_subtask('zfs.receive', local_dataset, FileDescriptor(rfd), True)
                )

                if snap['name'] == snapshot:
                    break

                snap = first_or_default(lambda s: '{0}@{1}'.format(i, s['anchor']) == snap['name'], snaps)
                if not snap:
                    break

                done += 1
Example #5
    def get(self, name_or_id):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        return job.__getstate__()
Example #6
    def send_signal(self, name_or_id, signo):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        job.send_signal(signo)
Example #7
    def run(self, context, args, kwargs, opargs):
        if not args and not kwargs:
            raise CommandException(_("Command invalidate_token requires more arguments."))
        if len(args) > 1:
            raise CommandException(_("Wrong syntax for invalidate_token."))

        if len(args) == 1 and not kwargs.get('token'):
            kwargs['token'] = args.pop(0)

        if 'token' not in kwargs:
            raise CommandException(_('Please specify a valid token'))
        else:
            token = str(kwargs.pop('token'))

        try:
            if token.index('*') < 2:
                raise CommandException(_('You have to enter at least first two digits of a valid token.'))
        except ValueError:
            pass

        match = first_or_default(
            lambda c: c['code'][:2] == token[:2],
            context.call_sync('peer.freenas.get_auth_codes')
        )
        if not match:
            return Sequence(
                'No matching code found. You might have entered wrong token, or it has already expired.'
            )
        else:
            if match['code'] == token:
                token = match

            context.call_sync('peer.freenas.invalidate_code', token)
Example #8
    def configure_directory(self, id):
        ds_d = self.context.datastore.get_by_id('directories', id)
        directory = first_or_default(lambda d: d.id == id, self.context.directories)
        if not directory:
            try:
                directory = Directory(self.context, ds_d)
                self.context.directories.append(directory)
            except BaseException as err:
                raise RpcException(errno.ENXIO, str(err))

        if not ds_d:
            # Directory was removed
            directory.enabled = False
            directory.configure()
            self.context.directories.remove(directory)
            return

        if ds_d['enabled'] and not directory.enabled:
            self.logger.info('Enabling directory {0}'.format(id))

        if not ds_d['enabled'] and directory.enabled:
            self.logger.info('Disabling directory {0}'.format(id))

        if ds_d['uid_range']:
            directory.min_uid, directory.max_uid = ds_d['uid_range']

        if ds_d['gid_range']:
            directory.min_gid, directory.max_gid = ds_d['gid_range']

        directory.enabled = ds_d['enabled']
        directory.parameters = ds_d['parameters']
        directory.configure()
Example #9
def auth_with_code(code):
    code_data = first_or_default(lambda c: c['code'] == code, auth_codes)
    if code_data:
        invalidate_code(code)
        return True
    else:
        return False
Example #10
    def run(self, context, args, kwargs, opargs):
        if not args and not kwargs:
            raise CommandException(_("Command invalidate_token requires more arguments."))
        if len(args) > 1:
            raise CommandException(_("Wrong syntax for invalidate_token."))

        if len(args) == 1 and not kwargs.get('token'):
            kwargs['token'] = args.pop(0)

        if 'token' not in kwargs:
            raise CommandException(_('Please specify a valid token'))
        else:
            token = str(kwargs.pop('token'))

        try:
            if token.index('*') < 2:
                raise CommandException(_('You have to enter at least first two digits of a valid token.'))
        except ValueError:
            pass

        match = first_or_default(
            lambda c: c['code'][:2] == token[:2],
            context.call_sync('peer.freenas.get_auth_codes')
        )
        if not match:
            return Sequence(
                'No matching code found. You might have entered wrong token, or it has already expired.'
            )
        else:
            if match['code'] == token:
                token = match

            context.call_sync('peer.freenas.invalidate_code', token)
Example #11
def invalidate_code(code):
    code_data = first_or_default(lambda c: c['code'] == code, auth_codes)
    if code_data:
        try:
            auth_codes.remove(code_data)
        except ValueError:
            pass
Example #12
    def send_signal(self, name_or_id, signo):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        job.send_signal(signo)
Example #13
    def run(self, context, args, kwargs, opargs):
        if 'vdev' not in kwargs:
            raise CommandException(_("Please specify a vdev to mirror to."))
        vdev_ident = correct_disk_path(kwargs.pop('vdev'))
        if len(args) < 1:
            raise CommandException(_("Please specify a disk to add to the vdev."))
        elif len(args) > 1:
            raise CommandException(_("Invalid input: {0}".format(args)))

        disk = correct_disk_path(args[0])
        if disk not in context.call_sync('volume.get_available_disks'):
            raise CommandException(_("Disk {0} is not available".format(disk)))

        vdev = first_or_default(lambda v:
                                v['path'] == vdev_ident or
                                vdev_ident in [i['path'] for i in v['children']],
                                self.parent.entity['topology']['data']
                                )

        if vdev['type'] == 'disk':
            vdev['type'] = 'mirror'
            vdev['children'].append({
                'type': 'disk',
                'path': vdev_ident
                })

        vdev['children'].append({
            'type': 'disk',
            'path': disk
        })

        self.parent.modified = True
        self.parent.save()
Example #14
    def get_default_interface(self):
        routes = self.query_routes()
        default = first_or_default(lambda r: r['netmask'] == '0.0.0.0', routes)
        if default:
            return default['interface']

        return None
Example #15
    def get(self, name_or_id):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        return job.__getstate__()
Example #16
    def get_default_interface(self):
        routes = self.query_routes()
        default = first_or_default(lambda r: r.netmask == ipaddress.ip_address('0.0.0.0'), routes)
        if default:
            return default.interface

        return None
Example #17
    def oneOf_discriminator(validator, oneOf, instance, schema):
        subschemas = enumerate(oneOf)
        all_errors = []

        if 'discriminator' in schema:
            discriminator = schema['discriminator']
            if discriminator in instance:
                subschema = first_or_default(lambda s: s['$ref'] == instance[discriminator], oneOf)
                if subschema:
                    for err in validator.descend(instance, subschema):
                        yield err

                return

        for index, subschema in subschemas:
            errs = list(validator.descend(instance, subschema, schema_path=index))
            if not errs:
                first_valid = subschema
                break
            all_errors.extend(errs)
        else:
            yield ValidationError(
                "%r is not valid under any of the given schemas" % (instance,),
                context=all_errors,
            )

        more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
        if more_valid:
            more_valid.append(first_valid)
            reprs = ", ".join(repr(schema) for schema in more_valid)
            yield ValidationError(
                "%r is valid under each of %s" % (instance, reprs)
            )
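In this example (presumably an extension of jsonschema's oneOf validator), first_or_default serves as a shortcut: when the schema declares a discriminator property and the instance carries it, the $ref subschema named by that property is validated directly instead of trying every oneOf candidate in turn.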
Example #18
    def verify(self, guid, name=None, properties=None):
        zfs = libzfs.ZFS()
        pool = first_or_default(lambda p: str(p.guid) == guid, zfs.find_import())
        if not pool:
            raise VerifyException(errno.ENOENT, 'Pool with GUID {0} not found'.format(guid))

        return get_disk_names(self.dispatcher, pool)
Example #19
    def run(self, context, args, kwargs, opargs):
        if 'vdev' not in kwargs:
            raise CommandException(_("Please specify a vdev to mirror to."))
        vdev_ident = correct_disk_path(kwargs.pop('vdev'))
        if len(args) < 1:
            raise CommandException(_("Please specify a disk to add to the vdev."))
        elif len(args) > 1:
            raise CommandException(_("Invalid input: {0}".format(args)))

        disk = correct_disk_path(args[0])
        if disk not in context.call_sync('volume.get_available_disks'):
            raise CommandException(_("Disk {0} is not available".format(disk)))

        vdev = first_or_default(lambda v:
                                v['path'] == vdev_ident or
                                vdev_ident in [i['path'] for i in v['children']],
                                self.parent.entity['topology']['data']
                                )

        if vdev['type'] == 'disk':
            vdev['type'] = 'mirror'
            vdev['children'].append({
                'type': 'disk',
                'path': vdev_ident
                })

        vdev['children'].append({
            'type': 'disk',
            'path': disk
        })

        self.parent.modified = True
        self.parent.save()
Example #20
    def init_nat(self):
        default_if = self.client.call_sync('networkd.configuration.get_default_interface')
        if not default_if:
            self.logger.warning('No default route interface; not configuring NAT')
            return

        p = pf.PF()

        # Try to find and remove existing NAT rules for the same subnet
        oldrule = first_or_default(
            lambda r: r.src.address.address == MGMT_ADDR.network.network_address,
            p.get_rules('nat')
        )

        if oldrule:
            p.delete_rule('nat', oldrule.index)

        rule = pf.Rule()
        rule.src.address.address = MGMT_ADDR.network.network_address
        rule.src.address.netmask = MGMT_ADDR.netmask
        rule.action = pf.RuleAction.NAT
        rule.af = socket.AF_INET
        rule.ifname = default_if
        rule.redirect_pool.append(pf.Address(ifname=default_if))
        rule.proxy_ports = [50001, 65535]
        p.append_rule('nat', rule)

        try:
            p.enable()
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise err
Example #21
    def configure_directory(self, id):
        ds_d = self.context.datastore.get_by_id('directories', id)
        directory = first_or_default(lambda d: d.id == id, self.context.directories)
        if not directory:
            try:
                directory = Directory(self.context, ds_d)
                self.context.directories.append(directory)
            except BaseException as err:
                raise RpcException(errno.ENXIO, str(err))

        if not ds_d:
            # Directory was removed
            directory.enabled = False
            directory.configure()
            self.context.directories.remove(directory)
            return

        if ds_d['enabled'] and not directory.enabled:
            self.logger.info('Enabling directory {0}'.format(id))

        if not ds_d['enabled'] and directory.enabled:
            self.logger.info('Disabling directory {0}'.format(id))

        if ds_d['uid_range']:
            directory.min_uid, directory.max_uid = ds_d['uid_range']

        if ds_d['gid_range']:
            directory.min_gid, directory.max_gid = ds_d['gid_range']

        directory.enabled = ds_d['enabled']
        directory.parameters = ds_d['parameters']
        directory.configure()
Example #22
def invalidate_code(code):
    code_data = first_or_default(lambda c: c['code'] == code, auth_codes)
    if code_data:
        try:
            auth_codes.remove(code_data)
        except ValueError:
            pass
Example #23
def auth_with_code(code):
    code_data = first_or_default(lambda c: c['code'] == code, auth_codes)
    if code_data:
        invalidate_code(code)
        return True
    else:
        return False
Example #24
        def extend(obj):
            ses = first_or_default(lambda s: s.config.target == obj['name'], sessions)
            obj['status'] = {
                'connected': ses.connected if ses else False,
                'status': ses.reason if ses else 'Unknown'
            }

            return obj
Example #25
 def save(self, this, new=False):
     if new:
         self.parent.entity['extents'].append(this.entity)
     else:
         entity = first_or_default(lambda a: a['name'] == this.entity['name'], self.parent.entity['extents'])
         entity.update(this.entity)
         
     self.parent.save()
Example #26
 def run(self, guid, name=None, properties=None):
     zfs = libzfs.ZFS()
     opts = properties or {}
     try:
         pool = first_or_default(lambda p: str(p.guid) == guid, zfs.find_import())
         zfs.import_pool(pool, name, opts)
     except libzfs.ZFSException as err:
         raise TaskException(errno.EFAULT, str(err))
Example #27
        def extend(obj):
            ses = first_or_default(lambda s: s.config.target == obj['name'], sessions)
            obj['status'] = {
                'connected': ses.connected if ses else False,
                'status': ses.reason if ses else 'Unknown'
            }

            return obj
Example #28
    def get_status(self, id):
        directory = first_or_default(lambda d: d.id == id, self.context.directories)
        if not directory:
            raise RpcException(errno.ENOENT, 'Directory {0} not found'.format(id))

        return {}
Example #29
    def get_connected_clients(self, blah=None):
        result = []
        shares = self.dispatcher.call_sync('share.query', [('type', '=', 'afp')])
        for proc in bsd.getprocs(bsd.ProcessLookupPredicate.PROC):
            if proc.command != 'afpd':
                continue

            def test_descriptor(d):
                if d.type != bsd.DescriptorType.SOCKET:
                    return False

                if not d.local_address:
                    return False

                return d.local_address[1] == 548

            cnid_pid = None
            path = proc.cwd
            share = first_or_default(lambda s: s['filesystem_path'] == path, shares)
            sock = first_or_default(test_descriptor, proc.files)
            if not share or not sock:
                continue

            # Look up the cnid_dbd process too
            for p in bsd.getprocs(bsd.ProcessLookupPredicate.PROC):
                if p.command == 'cnid_dbd' and p.cwd == os.path.join(path, '.AppleDB'):
                    cnid_pid = p.pid

            try:
                u = pwd.getpwuid(proc.uid)
                user = u.pw_name
            except KeyError:
                user = str(proc.uid)

            result.append({
                'host': str(sock.peer_address[0]),
                'share': share['name'],
                'user': user,
                'connected_at': proc.started_at,
                'extra': {
                    'pid': proc.pid,
                    'cnid_dbd_pid': cnid_pid
                }
            })

        return result
Example #30
    def run(self, task):
        self.result = AsyncResult()
        self.task = task
        self.task.set_state(TaskState.EXECUTING)

        filename = None
        module_name = inspect.getmodule(task.clazz).__name__
        for dir in self.balancer.dispatcher.plugin_dirs:
            found = False
            try:
                for root, _, files in os.walk(dir):
                    file = first_or_default(lambda f: module_name in f, files)
                    if file:
                        filename = os.path.join(root, file)
                        found = True
                        break

                if found:
                    break
            except FileNotFoundError:
                continue

        self.conn.call_client_sync('taskproxy.run', {
            'id': task.id,
            'class': task.clazz.__name__,
            'filename': filename,
            'args': task.args,
            'debugger': task.debugger,
            'environment': task.environment
        })

        try:
            self.result.get()
        except BaseException as e:
            if not isinstance(e, TaskException):
                self.balancer.dispatcher.report_error(
                    'Task {0} raised exception other than TaskException'.format(self.task.name),
                    e
                )

            if isinstance(e, TaskAbortException):
                self.task.set_state(TaskState.ABORTED, TaskStatus(0, 'aborted'))
            else:
                self.task.error = serialize_error(e)
                self.task.set_state(TaskState.FAILED, TaskStatus(0, str(e), extra={
                    "stacktrace": traceback.format_exc()
                }))

            self.task.ended.set()
            self.balancer.task_exited(self.task)
            self.state = WorkerState.IDLE
            return

        self.task.result = self.result.value
        self.task.set_state(TaskState.FINISHED, TaskStatus(100, ''))
        self.task.ended.set()
        self.balancer.task_exited(self.task)
        self.state = WorkerState.IDLE
Example #31
    def wait(self, name_or_id, states):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        with job.cv:
            job.cv.wait_for(lambda: job.state in states)
            return job.state
Example #32
    def wait(self, name_or_id, states):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        with job.cv:
            job.cv.wait_for(lambda: job.state in states)
            return job.state
Example #33
    def stop(self, name_or_id, wait=False):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        job.stop()
        if wait:
            self.wait(name_or_id, (JobState.STOPPED, JobState.ERROR))
Example #34
    def kick_session(self, session_id, sender):
        session = first_or_default(lambda s: s.session_id == session_id,
                                   self.dispatcher.ws_server.connections)

        if not session:
            raise RpcException(errno.ENOENT,
                               'Session {0} not found'.format(session_id))

        session.logout('Kicked out by {0}'.format(sender.user.name))
Example #35
    def stop(self, name_or_id, wait=False):
        with self.context.lock:
            job = first_or_default(lambda j: j.label == name_or_id or j.id == name_or_id, self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT, 'Job {0} not found'.format(name_or_id))

        job.stop()
        if wait:
            self.wait(name_or_id, (JobState.STOPPED, JobState.ERROR))
Example #36
    def kick_session(self, session_id, sender):
        session = first_or_default(
            lambda s: s.session_id == session_id,
            self.dispatcher.ws_server.connections)

        if not session:
            raise RpcException(errno.ENOENT, 'Session {0} not found'.format(session_id))

        session.logout('Kicked out by {0}'.format(sender.user.name))
Example #37
    def get_status(self, id):
        directory = first_or_default(lambda d: d.id == id, self.context.directories)
        if not directory:
            raise RpcException(errno.ENOENT, 'Directory {0} not found'.format(id))

        return {
            'state': directory.state.name,
            'status_code': directory.status_code,
            'status_message': directory.status_message
        }
Example #38
    def save(self, this, new=False):
        if new:
            self.parent.entity['extents'].append(this.entity)
        else:
            entity = first_or_default(
                lambda a: a['number'] == this.entity['number'],
                self.parent.entity['extents'])
            entity.update(this.entity)

        return self.parent.save()
Example #39
    def save(self, this, new=False):
        if new:
            if self.parent.entity['users'] is None:
                self.parent.entity['users'] = []
            self.parent.entity['users'].append(this.entity)
        else:
            entity = first_or_default(lambda a: a['name'] == this.entity['name'], self.parent.entity['users'])
            entity.update(this.entity)

        return self.parent.save()
Example #40
def get_methods(client, method):
    service, method_name = method.rsplit('.', 1)
    methods = first_or_default(
        lambda m: m['name'] == method_name,
        client.call_sync('discovery.get_methods', service)
    )
    if not methods:
        raise ValueError('Method {0} does not exist'.format(method))

    return methods
Example #41
    def release(self, identifier, sender):
        if identifier not in transactions:
            raise RpcException(errno.ENOENT, 'Transaction not found')

        t = transactions[identifier]
        s = first_or_default(lambda s: s.sid == sender.session_id, t)

        if not s:
            raise RpcException(errno.EINVAL, 'Transaction is not held by current session')

        t.purge(s)
Example #42
    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories
            )

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local', self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories
            )
Example #43
    def run(self, disk):
        boot_pool_name = self.configstore.get('system.boot_pool_name')
        pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')
        vdev = first_or_default(
            lambda v: os.path.join('/dev', disk + 'p2') == v['path'],
            q.get(pool, 'groups.data.0.children')
        )
        if not vdev:
            raise TaskException(errno.ENOENT, 'Disk {0} not found in the boot pool'.format(disk))

        self.run_subtask_sync('zfs.pool.detach', boot_pool_name, vdev['guid'])
Example #44
    def run(self, disk):
        boot_pool_name = self.configstore.get('system.boot_pool_name')
        pool = self.dispatcher.call_sync('zfs.pool.get_boot_pool')
        vdev = first_or_default(
            lambda v: os.path.join('/dev', disk + 'p2') == v['path'],
            q.get(pool, 'groups.data.0.children')
        )
        if not vdev:
            raise TaskException(errno.ENOENT, 'Disk {0} not found in the boot pool'.format(disk))

        self.run_subtask_sync('zfs.pool.detach', boot_pool_name, vdev['guid'])
Example #45
    def release(self, identifier, sender):
        if identifier not in transactions:
            raise RpcException(errno.ENOENT, 'Transaction not found')

        t = transactions[identifier]
        s = first_or_default(lambda s: s.sid == sender.session_id, t)

        if not s:
            raise RpcException(errno.EINVAL, 'Transaction is not held by current session')

        t.purge(s)
Example #46
    def get_directory_for_id(self, uid=None, gid=None):
        if uid is not None:
            if uid == 0:
                # Special case for root user
                return first_or_default(lambda d: d.plugin_type == 'local',
                                        self.directories)

            return first_or_default(
                lambda d: d.max_uid and d.max_uid >= uid >= d.min_uid,
                self.directories)

        if gid is not None:
            if gid == 0:
                # Special case for wheel group
                return first_or_default(lambda d: d.plugin_type == 'local',
                                        self.directories)

            return first_or_default(
                lambda d: d.max_gid and d.max_gid >= gid >= d.min_gid,
                self.directories)
Example #47
    def save(self, this, new=False):
        if new:
            if self.parent.entity['users'] is None:
                self.parent.entity['users'] = []
            self.parent.entity['users'].append(this.entity)
        else:
            entity = first_or_default(
                lambda a: a['name'] == this.entity['name'],
                self.parent.entity['users'])
            entity.update(this.entity)

        return self.parent.save()
Example #48
    def search_dn(self, dn, attributes=None):
        if self.ldap.closed:
            self.ldap.bind()

        return first_or_default(None, self.ldap.extend.standard.paged_search(
            search_base=dn,
            search_filter='(objectclass=*)',
            search_scope=ldap3.BASE,
            attributes=attributes or ldap3.ALL_ATTRIBUTES,
            paged_size=1,
            generator=False
        ))
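Note the None predicate here: with the sketch shown under Example #1, first_or_default(None, results) simply returns the first truthy entry of the paged search, i.e. the single object found at the given DN, or None when the search comes back empty.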
Example #49
    def save(self, this, new=False):
        if new:
            if self.parent.entity[self.parent_path] is None:
                self.parent.entity[self.parent_path] = []

            self.parent.entity[self.parent_path].append(this.entity)
        else:
            entity = first_or_default(
                lambda a: a[self.primary_key_name] == this.entity['name'],
                self.parent.entity[self.parent_path])
            entity.update(this.entity)

        self.parent.save()
Example #50
    def invalidate_code(self, code):
        # 'code' can be either a full auth-code object or a partially masked code string
        minimal_match = lambda k: k['expires_at'] == code['expires_at'] and k['code'].startswith(code['code'][:2])
        code_match = lambda k: str(k['code']).startswith(str(code)[:str(code).find('*')])
        m_funct = minimal_match

        if isinstance(code, (str, int)):
            m_funct = code_match

        match = first_or_default(m_funct, auth_codes)
        if match:
            invalidate_code(match['code'])
Example #51
    def save(self, this, new=False):
        if new:
            if not q.contains(self.parent.entity, self.parent_path):
                q.set(self.parent.entity, self.parent_path, [])

            q.get(self.parent.entity, self.parent_path).append(this.entity)
        else:
            entity = first_or_default(
                lambda a: a[self.primary_key_name] == this.entity['name'],
                q.get(self.parent.entity, self.parent_path))
            entity.update(this.entity)

        return self.parent.save()
Example #52
        def set_transport_option(obj, type, property, value):
            opt = first_or_default(lambda i: i['name'] == type, obj['args'][2])

            if value:
                if opt:
                    opt[property] = value
                else:
                    obj['args'][2].append({
                        'name': type,
                        property: value
                    })
            else:
                obj['args'][2].remove(opt)

            obj['args'] = copy.copy(obj['args'])
Example #53
    def get_mapping_by_field(self, field):
        rest = None

        while True:
            ret = first_or_default(lambda p: p.get == field,
                                   self.property_mappings)
            if ret:
                if ret.ns:
                    return ret.ns(self).get_mapping_by_field(rest)
                return ret

            if '.' not in field:
                break

            field, rest = field.rsplit('.', 1)
Example #54
def apply(obj, ds):
    partners = obj.pop('partners')
    slave_address = first_or_default(lambda o: o != obj['master'], partners)

    slave = ds.get_one('peers', ('type', '=', 'freenas'),
                       ('address', '=', slave_address))
    master = ds.get_one('peers', ('type', '=', 'freenas'),
                        ('address', '=', obj['master']))

    if not master or not slave:
        return None

    obj['master'] = master['id']
    obj['slave'] = slave['id']

    return obj
Example #55
    def acquire(self, identifier, timeout, sender):
        t = transactions.setdefault(identifier, Transaction(self.dispatcher, identifier))
        s = first_or_default(lambda s: s.sid == sender.session_id, t)

        if s:
            raise RpcException(errno.EBUSY, 'Transaction is already held by current session')

        s = Session(timeout, sender.user.name, sender.session_id)
        t.sessions.append(s)
        gevent.spawn(t.purge, s)

        self.dispatcher.dispatch_event('ui.transaction.acquired', {
            'identifier': identifier,
            'sid': sender.session_id,
            'user': sender.user.name
        })
Example #56
    def run(self, id):
        target = self.datastore.get_by_id('iscsi_initiator.targets', id)
        if not target:
            raise TaskException(errno.ENOENT, 'Target {0} doesn\'t exist'.format(id))

        self.datastore.delete('iscsi_initiator.targets', id)

        ctx = iscsi.ISCSIInitiator()
        session = first_or_default(lambda s: s.config.target == target['name'], ctx.sessions)
        if session:
            ctx.remove_session(session)

        self.dispatcher.emit_event('disk.iscsi.target.changed', {
            'operation': 'delete',
            'ids': [id]
        })
Example #57
    def get_entity_commands(self, this):
        commands = {
            'start': StartVMCommand(this),
            'stop': StopVMCommand(this),
            'kill': KillVMCommand(this),
            'reboot': RebootVMCommand(this),
            'console': ConsoleCommand(this),
            'readme': ReadmeCommand(this, 'config'),
            'guest_info': ShowGuestInfoCommand(this)
        }

        if hasattr(this, 'entity') and this.entity is not None:
            if first_or_default(lambda d: d['type'] == 'GRAPHICS', this.entity['devices']):
                commands['console_vga'] = ConsoleVGACommand(this)

        return commands
Example #58
    def get_by_pid(self, pid, fuzzy=False):
        with self.context.lock:

            def match(j):
                return j.pid == pid

            def fuzzy_match(j):
                if j.parent and j.parent.pid == pid:
                    return True

                return j.pid == pid

            job = first_or_default(fuzzy_match if fuzzy else match,
                                   self.context.jobs.values())
            if not job:
                raise RpcException(errno.ENOENT,
                                   'Job {0} not found'.format(pid))

        return job.__getstate__()
Example #59
        def set_graphics(o, v):
            if v:
                if get_graphics(o):
                    return

                o['devices'].append({
                    'name': 'framebuffer',
                    'type': 'GRAPHICS',
                    'properties': {
                        'resolution': '1024x768'
                    }
                })
            else:
                if not get_graphics(o):
                    return

                dev = first_or_default(lambda d: d['type'] == 'GRAPHICS',
                                       o['devices'])
                o['devices'].remove(dev)