Example #1
    def __init__(self, context):
        self.context = context
        self.passwd = wrap([])
        self.group = wrap([])
        self.passwd_filename = None
        self.group_filename = None
        self.watch_thread = None
Example #2
    def run(self, context, args, kwargs, opargs):
        ns = SingleItemNamespace(None, self.parent)
        ns.orig_entity = wrap(copy.deepcopy(self.parent.skeleton_entity))
        ns.entity = wrap(copy.deepcopy(self.parent.skeleton_entity))

        if len(args) > 0:
            prop = self.parent.primary_key
            kwargs[prop.name] = args.pop(0)

        for k, v in list(kwargs.items()):
            if not self.parent.has_property(k):
                output_msg('Property {0} not found'.format(k))
                return
            mapping = self.parent.get_mapping(k)
            if mapping.set is None or not mapping.createsetable:
                output_msg('Property {0} is not writable'.format(k))
                return
            if mapping.regex is not None and not re.match(mapping.regex, str(v)):
                output_msg('Invalid input {0} for property {1}.'.format(v, k))
                return

        if self.parent.required_props:
            missing_args = []
            for prop in self.parent.required_props:
                if isinstance(prop, list):
                    has_arg = False
                    for p in prop:
                        if p in kwargs.keys():
                            has_arg = True
                    if not has_arg:
                        missing_args.append("{0}".format(' or '.join(prop)))
                else:
                    if prop not in kwargs.keys():
                        missing_args.append(prop)
            if self.parent.extra_required_props:
                for prop_set in self.parent.extra_required_props:
                    found_one = False
                    missing = False
                    for prop in prop_set:
                        if prop in kwargs.keys():
                            found_one = True
                        else:
                            if found_one:
                                missing = True
                    if found_one and missing:
                        missing_args.append(' and '.join(prop_set))
            if len(missing_args) > 0:
                output_msg('Required properties not met, still missing: {0}'.format(', '.join(missing_args)))
                return
        else:
            if not args and not kwargs:
                return

        for k, v in kwargs.items():
            prop = self.parent.get_mapping(k)
            prop.do_set(ns.entity, v)

        self.parent.save(ns, new=True)
Example #3
    def run(self, context, args, kwargs, opargs):
        if not args and not kwargs:
            raise CommandException(_("Create requires more arguments, see 'help create' for more information"))
        if len(args) > 1:
            raise CommandException(_("Wrong syntax for create, see 'help create' for more information"))

        if len(args) == 1:
            kwargs[self.parent.primary_key.name] = args.pop(0)

        if 'name' not in kwargs:
            raise CommandException(_('Please specify a name for your replication link'))
        else:
            name = kwargs.pop('name')

        master = kwargs.pop('master')
        slave = kwargs.pop('slave')
        partners = [master, slave]

        for ip in context.call_sync('network.config.get_my_ips'):
            if ip in partners:
                break
        else:
            raise CommandException(_(
                'None of provided replication link partners {0}, {1} match any of machine\'s IPs'.format(master, slave)
            ))

        datasets = kwargs.pop('datasets', [])
        if isinstance(datasets, six.string_types):
            datasets = [datasets]
        bidirectional = read_value(kwargs.pop('bidirectional', False), ValueType.BOOLEAN)
        recursive = read_value(kwargs.pop('recursive', False), ValueType.BOOLEAN)
        replicate_services = read_value(kwargs.pop('replicate_services', False), ValueType.BOOLEAN)

        if replicate_services and not bidirectional:
            raise CommandException(_(
                'Replication of services is available only when bi-directional replication is selected'
            ))

        ns = SingleItemNamespace(None, self.parent)
        ns.orig_entity = query.wrap(copy.deepcopy(self.parent.skeleton_entity))
        ns.entity = query.wrap(copy.deepcopy(self.parent.skeleton_entity))

        ns.entity['name'] = name
        ns.entity['master'] = master
        ns.entity['partners'] = partners
        ns.entity['datasets'] = datasets
        ns.entity['bidirectional'] = bidirectional
        ns.entity['recursive'] = recursive
        ns.entity['replicate_services'] = replicate_services

        context.submit_task(
            self.parent.create_task,
            ns.entity,
            callback=lambda s, t: post_save(ns, s, t)
        )
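The for/else over context.call_sync('network.config.get_my_ips') above relies on Python's loop-else clause: the else branch (raising CommandException) runs only when the loop finishes without hitting break, i.e. when none of the machine's addresses matches a replication partner. A standalone sketch of the same control flow, with hypothetical helper and variable names:

def check_partners(my_ips, partners):
    # The else block runs only when the loop was not exited via break.
    for ip in my_ips:
        if ip in partners:
            print("{0} is one of this machine's addresses".format(ip))
            break
    else:
        print('no partner address matches this machine')

check_partners(['10.0.0.1'], ['10.0.0.2'])  # no partner address matches this machine
check_partners(['10.0.0.1'], ['10.0.0.1'])  # 10.0.0.1 is one of this machine's addresses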
Example #4
    def __load(self):
        try:
            with open(self.passwd_filename, 'r') as f:
                self.passwd = wrap(load(f))
        except (IOError, ValueError) as err:
            logger.warn('Cannot read {0}: {1}'.format(self.passwd_filename, str(err)))

        try:
            with open(self.group_filename, 'r') as f:
                self.group = wrap(load(f))
        except (IOError, ValueError) as err:
            logger.warn('Cannot read {0}: {1}'.format(self.group_filename, str(err)))
Example #5
    def query(self, filter=None, params=None):
        boot_pool = self.configstore.get('system.boot_pool_name')

        def extend(snapshot):
            dataset, _, name = snapshot['name'].partition('@')
            pool = dataset.partition('/')[0]

            if pool == boot_pool:
                return None

            return {
                'id': snapshot['name'],
                'pool': pool,
                'dataset': dataset,
                'name': name,
                'properties': include(
                    snapshot['properties'],
                    'used', 'referenced', 'compressratio', 'clones'
                ),
                'holds': snapshot['holds']
            }

        return wrap(self.dispatcher.call_sync('zfs.snapshot.query')).query(
            *(filter or []),
            callback=extend,
            **(params or {})
        )
Example #6
    def find_media(self):
        result = []

        for disk in wrap(self.dispatcher.call_sync('disks.query', [('path', 'in', self.get_available_disks())])):
            # Try whole disk first
            typ, label = fstyp(disk['path'])
            if typ:
                result.append({
                    'path': disk['path'],
                    'size': disk['mediasize'],
                    'fstype': typ,
                    'label': label or disk['description']
                })
                continue

            for part in disk['status.partitions']:
                path = part['paths'][0]
                typ, label = fstyp(path)
                if typ:
                    result.append({
                        'path': path,
                        'size': part['mediasize'],
                        'fstype': typ,
                        'label': label or disk['description']
                    })

        return result
Example #7
    def query(self, filter=None, params=None):
        try:
            zfs = libzfs.ZFS()
            result = [o.__getstate__(recursive=False) for o in list(zfs.snapshots)]
            return wrap(result).query(*(filter or []), **(params or {}))
        except libzfs.ZFSException as err:
            raise RpcException(errno.EFAULT, str(err))
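Most providers in these examples forward their optional arguments through wrap(...).query(*(filter or []), **(params or {})), so callers may pass filter rules and params or leave both as None. A minimal, self-contained sketch of that forwarding idiom follows; toy_query is a stand-in written for illustration, not the real freenas wrap() API, and it only understands ('field', '=', value) rules and the 'single' option.

def toy_query(items, *rules, single=False):
    # Apply simple equality rules, then optionally collapse to a single result.
    for field, op, value in rules:
        if op == '=':
            items = [i for i in items if i.get(field) == value]
    return (items[0] if items else None) if single else items

snapshots = [{'name': 'tank@daily-1'}, {'name': 'tank@daily-2'}]
filter = [('name', '=', 'tank@daily-2')]
params = {'single': True}

# The unpacking is None-safe: it also works when filter and params are omitted.
print(toy_query(snapshots, *(filter or []), **(params or {})))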
Example #8
def modes_to_oct(modes):
    # Accumulate the stat permission bits selected by the wrapped flag paths.
    modes = wrap(modes)
    result = 0

    if modes['user.read']:
        result |= stat.S_IRUSR

    if modes['user.write']:
        result |= stat.S_IWUSR

    if modes['user.execute']:
        result |= stat.S_IXUSR

    if modes['group.read']:
        result |= stat.S_IRGRP

    if modes['group.write']:
        result |= stat.S_IWGRP

    if modes['group.execute']:
        result |= stat.S_IXGRP

    if modes['others.read']:
        result |= stat.S_IROTH

    if modes['others.write']:
        result |= stat.S_IWOTH

    if modes['others.execute']:
        result |= stat.S_IXOTH

    return result
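For reference, a sketch of the inverse mapping (a numeric mode back to nested read/write/execute flags). The nested dict layout is an assumption inferred from the dotted 'user.read', 'group.write', ... paths the function above reads through wrap(); oct_to_modes itself is hypothetical and not part of the original code.

import stat

def oct_to_modes(mode):
    # Decompose a numeric mode into the nested flag layout used above.
    def triplet(r, w, x):
        return {
            'read': bool(mode & r),
            'write': bool(mode & w),
            'execute': bool(mode & x)
        }

    return {
        'user': triplet(stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR),
        'group': triplet(stat.S_IRGRP, stat.S_IWGRP, stat.S_IXGRP),
        'others': triplet(stat.S_IROTH, stat.S_IWOTH, stat.S_IXOTH)
    }

print(oct_to_modes(0o644)['user'])  # {'read': True, 'write': True, 'execute': False}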
Example #9
            def select_fn(fn, obj):
                obj = fn(obj) if fn else obj
                obj = wrap(obj)

                if isinstance(select, (list, tuple)):
                    return [obj.get(i) for i in select]

                if isinstance(select, str):
                    return obj.get(select)
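The select handling above projects either a list of fields or a single field out of a wrapped object, using dotted paths. Below is a self-contained sketch of the same behaviour with a plain-dict path getter; get_path is a stand-in, and the real wrap(obj).get() resolver may behave differently.

def get_path(obj, path):
    # Walk a dotted path through nested dicts, returning None on a missing key.
    for part in path.split('.'):
        if not isinstance(obj, dict):
            return None
        obj = obj.get(part)
    return obj

row = {'id': 1, 'status': {'state': 'RUNNING'}}

select = ['id', 'status.state']            # list select -> list of values
print([get_path(row, i) for i in select])  # [1, 'RUNNING']

select = 'status.state'                    # string select -> single value
print(get_path(row, select))               # RUNNING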
Example #10
    def convert_group(self, entry):
        entry = wrap(dict(entry['attributes']))
        return {
            'id': entry['ipaUniqueID.0'],
            'gid': int(entry['gidNumber.0']),
            'name': entry['cn.0'],
            'builtin': False,
            'sudo': False
        }
Example #11
    def getgrent(self, filter=None, params=None):
        logger.debug('getgrent(filter={0}, params={1})'.format(filter, params))
        if not self.__joined():
            return []

        return wrap(self.convert_group(i) for i in self.wbc.query_groups(self.domain_name)).query(
            *(filter or []),
            **(params or {})
        )
Example #12
    def query(self, filter=None, params=None):
        def extend(obj):
            nr = obj['active']
            obj['active'] = 'N' in nr
            obj['on_reboot'] = 'R' in nr
            obj['id'] = obj.pop('name')
            return obj

        clones = list(map(extend, ListClones()))
        return wrap(clones).query(*(filter or []), **(params or {}))
Example #13
    def on_pool_change(args):
        ids = [i for i in args['ids'] if i != boot_pool['guid']]
        if args['operation'] == 'delete':
            for i in ids:
                logger.info('Volume {0} is going away'.format(i))
                dispatcher.datastore.delete('volumes', i)

        if args['operation'] in ('create', 'update'):
            for i in ids:
                if args['operation'] == 'update' and dispatcher.datastore.exists('volumes', ('id', '=', i)):
                    # Disable for now to avoid confusing clients with multiple "changed" events
                    #
                    # dispatcher.dispatch_event('volumes.changed', {
                    #     'operation': 'update',
                    #     'ids': [i]
                    # })
                    continue

                pool = wrap(dispatcher.call_sync(
                    'zfs.pool.query',
                    [('guid', '=', i)],
                    {'single': True}
                ))

                if not pool:
                    continue

                logger.info('New volume {0} <{1}>'.format(pool['name'], i))
                with dispatcher.get_lock('volumes'):
                    try:
                        dispatcher.datastore.insert('volumes', {
                            'id': i,
                            'name': pool['name'],
                            'type': 'zfs',
                            'attributes': {}
                        })
                    except DuplicateKeyException:
                        # already inserted by task
                        continue

                    # Set correct mountpoint
                    dispatcher.call_task_sync('zfs.configure', pool['name'], pool['name'], {
                        'mountpoint': {'value': os.path.join(VOLUMES_ROOT, pool['name'])}
                    })

                    if pool['properties.altroot.source'] != 'DEFAULT':
                        # Ouch. That pool was created or imported with altroot.
                        # We need to export and re-import it to remove the altroot property
                        dispatcher.call_task_sync('zfs.pool.export', pool['name'])
                        dispatcher.call_task_sync('zfs.pool.import', pool['guid'], pool['name'])

                    dispatcher.dispatch_event('volumes.changed', {
                        'operation': 'create',
                        'ids': [i]
                    })
Example #14
    def convert_user(self, entry):
        entry = wrap(dict(entry['attributes']))
        group = None

        if 'gidNumber.0' in entry:
            group = self.search_one(self.group_dn, '(gidNumber={0})'.format(entry['gidNumber.0']))
            group = wrap(dict(group['attributes']))

        return {
            'id': entry['ipaUniqueID.0'],
            'uid': int(entry['uidNumber.0']),
            'builtin': False,
            'username': entry['uid.0'],
            'full_name': entry.get('gecos.0', entry.get('displayName.0', '<unknown>')),
            'shell': entry.get('loginShell.0', '/bin/sh'),
            'home': entry.get('homeDirectory.0', '/nonexistent'),
            'group': group['ipaUniqueID.0'] if group else None,
            'groups': [],
            'sudo': False
        }
Example #15
    def query(self, filter=None, params=None):
        def extend(disk):
            if disk.get('delete_at'):
                disk['online'] = False
            else:
                disk['online'] = self.is_online(disk['path'])
                disk['status'] = diskinfo_cache.get(disk['id'])

            return disk

        return wrap(self.datastore.query('disks', callback=extend)).query(*(filter or []), **(params or {}))
Example #16
    def query(self, filter=None, params=None):
        def extend(i):
            state, pid = get_status(self.dispatcher, i)
            entry = {
                'id': i['id'],
                'name': i['name'],
                'state': state,
            }

            if pid is not None:
                entry['pid'] = pid

            entry['builtin'] = i['builtin']
            return entry

        # Running extend sequentially might take too long due to the number of services
        # and `service ${name} onestatus`. To work around that, run it in parallel using gevent
        result = self.datastore.query('service_definitions')
        if result is None:
            return result
        jobs = {
            gevent.spawn(extend, entry): entry
            for entry in result
        }
        gevent.joinall(list(jobs.keys()), timeout=15)
        group = gevent.pool.Group()

        def result(greenlet):
            if greenlet.value is None:
                entry = jobs.get(greenlet)
                return {
                    'name': entry['name'],
                    'state': 'UNKNOWN',
                    'builtin': entry['builtin'],
                }
            else:
                return greenlet.value

        result = group.map(result, jobs)
        result = list(map(lambda s: extend_dict(s, {'config': wrap(self.get_service_config(s['id']))}), result))
        return wrap(result).query(*(filter or []), **(params or {}))
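The comment above explains why extend() is fanned out with gevent instead of being run sequentially. A minimal, self-contained sketch of that spawn/joinall/fallback pattern follows (requires gevent; slow_status is a hypothetical stand-in for the per-service status probe):

import gevent

def slow_status(name):
    # Stand-in for the slow `service ${name} onestatus` call.
    gevent.sleep(0.01)
    return {'name': name, 'state': 'RUNNING'}

# One greenlet per entry, keyed back to its input so stragglers can be reported.
jobs = {gevent.spawn(slow_status, name): name for name in ('sshd', 'smbd', 'nfsd')}
gevent.joinall(list(jobs.keys()), timeout=15)

# Greenlets that did not finish within the timeout still have value None;
# substitute a placeholder entry, mirroring the 'UNKNOWN' fallback above.
results = [
    g.value if g.value is not None else {'name': jobs[g], 'state': 'UNKNOWN'}
    for g in jobs
]
print(results)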
Example #17
    def query(self, filter=None, params=None):
        stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'interface')])

        for stat in stats:
            split_name = stat['name'].split('.', 3)
            stat['short_name'] = dash_to_underscore(
                split_name[1] + '-' + split_name[3] + '-' + split_name[2].split('_', 2)[1]
            )

            normalize_values(stat)

        return wrap(stats).query(*(filter or []), **(params or {}))
Example #18
    def generate_serial(self):
        nic = wrap(self.dispatcher.call_sync("network.interfaces.query", [("type", "=", "ETHER")], {"single": True}))
        laddr = nic["status.link_address"].replace(":", "")
        idx = 0

        while True:
            serial = "{0}{1:02}".format(laddr, idx)
            if not self.datastore.exists("shares", ("properties.serial", "=", serial)):
                return serial

            idx += 1

        raise RpcException(errno.EBUSY, "No free serial numbers found")
Example #19
    def query(self, filter=None, params=None):
        templates_dir = self.dispatcher.call_sync('system_dataset.request_directory', 'vm-templates')
        templates = []
        for root, dirs, files in os.walk(templates_dir):
            if 'template.json' in files:
                with open(os.path.join(root, 'template.json'), encoding='utf-8') as template:
                    try:
                        templates.append(json.loads(template.read()))
                        templates[-1]['template']['path'] = root
                    except ValueError:
                        pass

        return wrap(templates).query(*(filter or []), **(params or {}))
Example #20
    def query(self, filter=None, params=None):
        stats = self.dispatcher.call_sync('stat.query', [('name', '~', 'cpu')])

        for stat in stats:
            type = stat['name'].split('.', 3)[2]
            if 'aggregation' in stat['name']:
                stat['short_name'] = dash_to_underscore('aggregated-' + type)
            else:
                stat['short_name'] = dash_to_underscore('cpu-' + re.search(r'\d+', stat['name']).group() + '-' + type)

            normalize_values(stat)

        return wrap(stats).query(*(filter or []), **(params or {}))
Example #21
 def extend_dataset(ds):
     ds = wrap(ds)
     return {
         'name': ds['name'],
         'type': ds['type'],
         'mountpoint': ds['mountpoint'],
         'volsize': ds.get('properties.volsize.rawvalue'),
         'properties': include(
             ds['properties'],
             'used', 'available', 'compression', 'atime', 'dedup',
             'quota', 'refquota', 'reservation', 'refreservation',
             'casesensitivity', 'volsize', 'volblocksize',
         ),
         'permissions_type':  ds.get('properties.org\\.freenas:permissions_type.value'),
     }
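The org\\.freenas:permissions_type lookup above escapes the dot inside a ZFS property name so that it is not treated as a path separator. Below is a sketch of how such a path could be split into components; split_path is written for illustration, and the real wrap() resolver may handle the escape differently.

import re

def split_path(path):
    # Split on unescaped dots only, then drop the escape character.
    return [p.replace('\\.', '.') for p in re.split(r'(?<!\\)\.', path)]

print(split_path('properties.org\\.freenas:permissions_type.value'))
# ['properties', 'org.freenas:permissions_type', 'value']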
Example #22
def generate_disk_cache(dispatcher, path):
    diskinfo_cache_lock.acquire()
    geom.scan()
    name = os.path.basename(path)
    gdisk = geom.geom_by_name('DISK', name)
    multipath_info = None

    disk_info = info_from_device(gdisk.name)
    serial = disk_info['serial']
    identifier = device_to_identifier(name, serial)
    ds_disk = dispatcher.datastore.get_by_id('disks', identifier)

    # Path represents a disk device (not a multipath device) and has an NAA ID attached
    lunid = gdisk.provider.config.get('lunid')
    if lunid:
        # Check if device could be part of multipath configuration
        d = get_disk_by_lunid(lunid)
        if (d and d['path'] != path) or (ds_disk and ds_disk['is_multipath']):
            multipath_info = attach_to_multipath(dispatcher, d, ds_disk, path)

    provider = gdisk.provider
    try:
        camdev = CamDevice(gdisk.name)
    except RuntimeError:
        camdev = None

    disk = wrap({
        'path': path,
        'is_multipath': False,
        'description': provider.config['descr'],
        'serial': serial,
        'lunid': provider.config.get('lunid'),
        'model': disk_info['model'],
        'interface': disk_info['interface'],
        'is_ssd': disk_info['is_ssd'],
        'id': identifier,
        'controller': camdev.__getstate__() if camdev else None,
    })

    if multipath_info:
        disk.update(multipath_info)

    diskinfo_cache.put(identifier, disk)
    update_disk_cache(dispatcher, path)
    dispatcher.call_sync('disks.configure_disk', identifier)

    logger.info('Added <%s> (%s) to disk cache', identifier, disk['description'])
    diskinfo_cache_lock.release()
Example #23
        def make_snapshot_entry(action):
            snapname = '{0}@{1}'.format(action['localfs'], action['snapshot'])
            filename = hashlib.md5(snapname.encode('utf-8')).hexdigest()
            snap = wrap(self.dispatcher.call_sync(
                'volume.snapshot.query',
                [('id', '=', snapname)],
                {'single': True}
            ))

            return {
                'name': snapname,
                'anchor': action.get('anchor'),
                'incremental': action['incremental'],
                'created_at': datetime.fromtimestamp(int(snap['properties.creation.rawvalue'])),
                'uuid': snap.get('properties.org\\.freenas:uuid.value'),
                'filename': filename
            }
Example #24
    def generate_serial(self):
        nic = wrap(self.dispatcher.call_sync('network.interface.query', [('type', '=', 'ETHER')], {'single': True}))
        laddr = nic['status.link_address'].replace(':', '')
        idx = 0

        while True:
            serial = '{0}{1:02}'.format(laddr, idx)
            if self.datastore.exists('shares', ('properties.serial', '=', serial)):
                idx += 1
                continue

            if self.datastore.exists('simulator.disks', ('serial', '=', serial)):
                idx += 1
                continue

            return serial

        raise RpcException(errno.EBUSY, 'No free serial numbers found')
Example #25
    def query(self, filter=None, params=None):
        def serialize(job):
            last_task = None
            current_task = None
            current_progress = None
            schedule = {f.name: f for f in job.trigger.fields}
            schedule['coalesce'] = job.coalesce
            schedule['timezone'] = job.trigger.timezone

            last_run = self.context.datastore.query(
                'schedulerd.runs',
                ('job_id', '=', job.id),
                sort='created_at',
                single=True
            )

            if last_run:
                last_task = self.context.datastore.get_by_id('tasks', last_run['task_id'])

            if job.id in self.context.active_tasks:
                current_task_id = self.context.active_tasks[job.id]
                current_task = self.context.client.call_sync('task.status', current_task_id)
                if 'progress' in current_task:
                    current_progress = current_task['progress']

            return {
                'id': job.id,
                'description': job.name,
                'name': job.args[0],
                'args': job.args[1:],
                'enabled': job.next_run_time is not None,
                'hidden': job.kwargs['hidden'],
                'protected': job.kwargs['protected'],
                'status': {
                    'next_run_time': job.next_run_time,
                    'last_run_time': last_run['created_at'] if last_run else None,
                    'last_run_status': last_task['state'] if last_task else None,
                    'current_run_status': current_task['state'] if current_task else None,
                    'current_run_progress': current_progress
                },
                'schedule': schedule
            }

        return wrap(list(map(serialize, self.context.scheduler.get_jobs()))).query(*(filter or []), **(params or {}))
Example #26
    def query(self, filter=None, params=None):
        if not self.is_ipmi_loaded():
            raise RpcException(errno.ENXIO, 'The IPMI device could not be found')

        result = []
        for channel in self.channels():
            try:
                out, err = system('/usr/local/bin/ipmitool', 'lan', 'print', str(channel))
            except SubprocessException as e:
                raise RpcException(errno.EFAULT, 'Cannot receive IPMI configuration: {0}'.format(e.err.strip()))

            raw = {k.strip(): v.strip() for k, v in RE_ATTRS.findall(out)}
            ret = {IPMI_ATTR_MAP[k]: v for k, v in list(raw.items()) if k in IPMI_ATTR_MAP}
            ret['id'] = channel
            ret['vlan_id'] = None if ret['vlan_id'] == 'Disabled' else ret['vlan_id']
            ret['dhcp'] = True if ret['dhcp'] == 'DHCP Address' else False
            result.append(ret)

        return wrap(result).query(*(filter or []), **(params or {}))
Example #27
    def run(self, pool_name, path, updated_params):
        ds = wrap(self.dispatcher.call_sync('zfs.dataset.query', [('name', '=', path)], {'single': True}))

        if 'name' in updated_params:
            self.join_subtasks(self.run_subtask('zfs.rename', ds['name'], updated_params['name']))
            ds['name'] = updated_params['name']

        if 'properties' in updated_params:
            props = exclude(updated_params['properties'], 'used', 'available', 'dedup', 'casesensitivity')
            self.join_subtasks(self.run_subtask('zfs.configure', pool_name, ds['name'], props))

        if 'permissions_type' in updated_params:
            oldtyp = ds['properties.org\\.freenas:permissions_type.value']
            typ = updated_params['permissions_type']

            if oldtyp != 'ACL' and typ == 'ACL':
                self.switch_to_acl(pool_name, ds['name'])

            if oldtyp != 'PERMS' and typ == 'PERMS':
                self.switch_to_chmod(pool_name, ds['name'])
Example #28
    def query(self, filter=None, params=None):
        templates_dir = self.dispatcher.call_sync('system_dataset.request_directory', 'container_templates')
        cache_dir = self.dispatcher.call_sync('system_dataset.request_directory', 'container_image_cache')
        templates = []
        for root, dirs, files in os.walk(templates_dir):
            if 'template.json' in files:
                with open(os.path.join(root, 'template.json'), encoding='utf-8') as template_file:
                    try:
                        template = json.loads(template_file.read())
                        readme = get_readme(root)
                        if readme:
                            with open(readme, 'r') as readme_file:
                                template['template']['readme'] = readme_file.read()
                        template['template']['path'] = root
                        template['template']['cached'] = False
                        if os.path.isdir(os.path.join(cache_dir, template['template']['name'])):
                            template['template']['cached'] = True

                        templates.append(template)
                    except ValueError:
                        pass

        return wrap(templates).query(*(filter or []), **(params or {}))
Example #29
    def query(self, filter=None, params=None):
        stats = self.dispatcher.call_sync(
            'stat.query',
            [
                ['or', [('name', '~', 'load'), ('name', '~', 'processes'), ('name', '~', 'memory'), ('name', '~', 'df')]],
                ['nor', [('name', '~', 'zfs')]]
            ]
        )

        for stat in stats:
            split_name = stat['name'].split('.', 3)
            if 'df' in stat['name']:
                stat['short_name'] = dash_to_underscore(
                    split_name[1].split('-', 1)[1] + '-' + split_name[2].split('-', 1)[1]
                )
            elif 'load' in stat['name']:
                stat['short_name'] = dash_to_underscore(split_name[1] + '-' + split_name[3])
            else:
                stat['short_name'] = dash_to_underscore(split_name[2])

            normalize_values(stat)

        return wrap(stats).query(*(filter or []), **(params or {}))
Example #30
        def extend(vol):
            config = wrap(self.get_config(vol['name']))
            if not config:
                vol['status'] = 'UNKNOWN'
            else:
                topology = config['groups']
                for vdev, _ in iterate_vdevs(topology):
                    try:
                        vdev['path'] = self.dispatcher.call_sync(
                            'disks.partition_to_disk',
                            vdev['path']
                        )
                    except RpcException as err:
                        if err.code == errno.ENOENT:
                            pass

                vol.update({
                    'description': None,
                    'mountpoint': None,
                    'datasets': None,
                    'upgraded': None,
                    'topology': topology,
                    'root_vdev': config['root_vdev'],
                    'status': config['status'],
                    'scan': config['scan'],
                    'properties': config['properties']
                })

                if config['status'] != 'UNAVAIL':
                    vol.update({
                        'description': config.get('root_dataset.properties.org\\.freenas:description.value'),
                        'mountpoint': config['root_dataset.properties.mountpoint.value'],
                        'datasets': list(map(extend_dataset, flatten_datasets(config['root_dataset']))),
                        'upgraded': is_upgraded(config),
                    })

            return vol
Example #31
    def run(self, context, args, kwargs, opargs):
        ns = SingleItemNamespace(None, self.parent)
        ns.orig_entity = wrap(copy.deepcopy(self.parent.skeleton_entity))
        ns.entity = wrap(copy.deepcopy(self.parent.skeleton_entity))
        kwargs = collections.OrderedDict(kwargs)

        if len(args) > 0:
            # Do not allow the user to specify the name as both an implicit and an explicit parameter, as this suggests a mistake
            if 'name' in kwargs:
                raise CommandException(
                    _("Both implicit and explicit 'name' parameters are specified."
                      ))
            else:
                prop = self.parent.primary_key
                kwargs[prop.name] = args.pop(0)
                kwargs.move_to_end(prop.name, False)

        for k, v in list(kwargs.items()):
            if not self.parent.has_property(k):
                output_msg('Property {0} not found'.format(k))
                return
            mapping = self.parent.get_mapping(k)
            if mapping.set is None or not mapping.createsetable:
                output_msg('Property {0} is not writable'.format(k))
                return
            if mapping.regex is not None and not re.match(
                    mapping.regex, str(v)):
                output_msg('Invalid input {0} for property {1}.'.format(v, k))
                return

        if self.parent.required_props:
            missing_args = []
            for prop in self.parent.required_props:
                if isinstance(prop, list):
                    has_arg = False
                    for p in prop:
                        if p in kwargs.keys():
                            has_arg = True
                    if not has_arg:
                        missing_args.append("{0}".format(' or '.join(prop)))
                else:
                    if prop not in kwargs.keys():
                        missing_args.append(prop)
            if self.parent.extra_required_props:
                for prop_set in self.parent.extra_required_props:
                    found_one = False
                    missing = False
                    for prop in prop_set:
                        if prop in kwargs.keys():
                            found_one = True
                        else:
                            if found_one:
                                missing = True
                    if found_one and missing:
                        missing_args.append(' and '.join(prop_set))
            if hasattr(self.parent, 'conditional_required_props'):
                for prop in self.parent.conditional_required_props(kwargs):
                    if prop not in kwargs.keys():
                        missing_args.append(prop)
            if len(missing_args) > 0:
                output_msg(
                    _('Required properties not provided: {0}'.format(
                        ', '.join(missing_args))))
                return
        else:
            if not args and not kwargs:
                return

        mappings = map(lambda i: (self.parent.get_mapping(i[0]), i[1]),
                       kwargs.items())
        for prop, v in sorted(mappings, key=lambda i: i[0].index):
            prop.do_set(ns.entity, v)

        self.parent.save(ns, new=True)
Example #32
    def run(self, context, args, kwargs, opargs):
        if not args and not kwargs:
            raise CommandException(_("create requires more arguments, see 'help create' for more information"))
        if len(args) > 1:
            raise CommandException(_("Wrong syntax for create, see 'help create' for more information"))

        # This magic below makes either `create foo` or `create name=foo` work
        if len(args) == 1:
            # However, do not allow the user to specify the name as both an implicit and an explicit parameter, as this suggests a mistake
            if 'name' in kwargs:
                raise CommandException(_("Both implicit and explicit 'name' parameters are specified."))
            else:
                kwargs[self.parent.primary_key.name] = args.pop(0)

        if 'name' not in kwargs:
            raise CommandException(_('Please specify a name for your pool'))
        else:
            name = kwargs.pop('name')

        volume_type = kwargs.pop('type', 'auto')
        if volume_type not in VDEV_TYPES:
            raise CommandException(_(
                "Invalid volume type {0}.  Should be one of: {1}".format(volume_type, VDEV_TYPES)
            ))

        if 'disks' not in kwargs:
            raise CommandException(_("Please specify one or more disks using the disks property"))
        else:
            disks = kwargs.pop('disks')
            if isinstance(disks, six.string_types):
                disks = [disks]

        if read_value(kwargs.pop('encryption', False), ValueType.BOOLEAN) is True:
            encryption = True
            password = kwargs.get('password', None)
        else:
            encryption = False
            password = None

        cache_disks = kwargs.pop('cache', [])
        log_disks = kwargs.pop('log', [])
        if cache_disks is None:
            cache_disks = []
        if log_disks is None:
            log_disks = []
        if isinstance(cache_disks, six.string_types):
            cache_disks = [cache_disks]
        if isinstance(log_disks, six.string_types):
            log_disks = [log_disks]

        ns = SingleItemNamespace(None, self.parent)
        ns.orig_entity = query.wrap(copy.deepcopy(self.parent.skeleton_entity))
        ns.entity = query.wrap(copy.deepcopy(self.parent.skeleton_entity))

        disks, cache_disks, log_disks = check_disks(context, disks, cache_disks, log_disks)

        if disks != 'auto':
            if len(disks) < DISKS_PER_TYPE[volume_type]:
                raise CommandException(_("Volume type {0} requires at least {1} disks".format(
                    volume_type,
                    DISKS_PER_TYPE[volume_type]
                )))
            if len(disks) > 1 and volume_type == 'disk':
                raise CommandException(_("Cannot create a volume of type disk with multiple disks"))

        if volume_type == 'auto':
            layout = kwargs.pop('layout', 'auto')
            if layout not in VOLUME_LAYOUTS:
                raise CommandException(_(
                    "Invalid layout {0}.  Should be one of: {1}".format(layout, list(VOLUME_LAYOUTS.keys()))
                ))
            else:
                if disks != 'auto' and len(disks) < DISKS_PER_TYPE[VOLUME_LAYOUTS[layout]]:
                    raise CommandException(_("Volume layout {0} requires at least {1} disks".format(layout, DISKS_PER_TYPE[VOLUME_LAYOUTS[layout]])))

            context.submit_task('volume.create_auto', name, 'zfs', layout, disks, cache_disks, log_disks, encryption, password)
        else:
            ns.entity['id'] = name
            ns.entity['topology'] = {}
            ns.entity['topology']['data'] = []
            if volume_type == 'disk':
                ns.entity['topology']['data'].append(
                    {'type': 'disk', 'path': correct_disk_path(disks[0])})
            else:
                ns.entity['topology']['data'].append({
                    'type': volume_type,
                    'children': [{'type': 'disk', 'path': correct_disk_path(disk)} for disk in disks]
                })
            ns.entity['encrypted'] = encryption
            if len(cache_disks) > 0:
                if 'cache' not in ns.entity:
                    ns.entity['topology']['cache'] = []

                for disk in cache_disks:
                    ns.entity['topology']['cache'].append({
                        'type': 'disk',
                        'path': correct_disk_path(disk)
                    })

            if len(log_disks) > 0:
                if 'log' not in ns.entity:
                    ns.entity['topology']['log'] = []

                if len(log_disks) > 1:
                    ns.entity['topology']['log'].append({
                        'type': 'mirror',
                        'children': [{'type': 'disk', 'path': correct_disk_path(disk)} for disk in log_disks]
                    })
                else:
                    ns.entity['topology']['log'].append({
                        'type': 'disk',
                        'path': correct_disk_path(log_disks[0])
                    })

            context.submit_task(
                self.parent.create_task,
                ns.entity,
                password,
                callback=lambda s, t: post_save(ns, s, t))