示例#1
0
def apply(obj, ds):
    """Migration hook: rename legacy 'ZVOL' disk target types to 'BLOCK'."""
    disks = (d for d in obj['devices'] if d.get('type') == 'DISK')
    for disk in disks:
        if q.get(disk, 'properties.target_type') == 'ZVOL':
            q.set(disk, 'properties.target_type', 'BLOCK')

    return obj
示例#2
0
文件: namespace.py 项目: zoot/cli
    def do_set(self, obj, value, check_entity=None):
        """
        Validate and set this property on ``obj``.

        :param obj: entity to mutate
        :param value: raw (unparsed) value; converted via read_value()
        :param check_entity: optional entity used for the can_set() check
            instead of ``obj``
        :raises ValueError: when the property is not settable, or (strict
            mode) when the value is not among the allowed choices
        """
        if not self.can_set(check_entity if check_entity else obj):
            raise ValueError(
                _("Property '{0}' is not settable for this entity".format(
                    self.name)))

        value = read_value(value, self.type)

        if self.strict and (self.enum or (self.complete and self.context)):
            # NOTE(review): by operator precedence this evaluates as
            # (self.enum() if callable(self.enum) else self.enum) or
            # self.complete.choices(...) -- kept as-is to preserve behavior.
            enum_val = self.enum() if callable(
                self.enum) else self.enum or self.complete.choices(
                    self.context, None)
            if self.type == ValueType.SET:
                for e in value:
                    if e not in enum_val:
                        raise ValueError(
                            "Invalid value for property '{0}'. Should be one of: {1}"
                            .format(
                                self.get_name,
                                '; '.join(format_value(i) for i in enum_val)))
            elif value not in enum_val:
                raise ValueError(
                    "Invalid value for property '{0}'. Should be one of: {1}".
                    format(self.get_name,
                           ', '.join(format_value(i) for i in enum_val)))

        # BUG FIX: collections.Callable was removed in Python 3.10; the
        # callable() builtin performs the same check.
        if callable(self.set):
            self.set(obj, value)
            return

        q.set(obj, self.set, value)
    def run(self, directory):
        """
        Create a new directory service entry.

        Normalizes the supplied parameters through dscached, persists the
        entry, configures dscached and appends the directory to the lookup
        search order.

        :param directory: dict describing the directory service
        :raises TaskException: on RPC failure or duplicate name
        :returns: datastore id of the new directory entry
        """
        try:
            params = self.dispatcher.call_sync(
                "dscached.management.normalize_parameters", directory["type"], directory.get("parameters", {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        if self.datastore.exists("directories", ("name", "=", directory["name"])):
            raise TaskException(errno.EEXIST, "Directory {0} already exists".format(directory["name"]))

        normalize(
            directory, {"enabled": False, "enumerate": True, "immutable": False, "uid_range": None, "gid_range": None}
        )

        # Replace passed in params with normalized ones
        directory["parameters"] = params

        if directory["type"] == "winbind":
            # winbind needs dedicated uid/gid ranges and a running SMB service
            normalize(directory, {"uid_range": [100000, 999999], "gid_range": [100000, 999999]})

            smb = self.dispatcher.call_sync("service.query", [("name", "=", "smb")], {"single": True})
            if not q.get(smb, "config.enable"):
                # Enable SMB automatically; winbind cannot work without it.
                q.set(smb, "config.enable", True)
                self.join_subtasks(self.run_subtask("service.update", smb["id"], smb))

        self.id = self.datastore.insert("directories", directory)
        self.dispatcher.call_sync("dscached.management.configure_directory", self.id)
        self.dispatcher.dispatch_event("directory.changed", {"operation": "create", "ids": [self.id]})

        # Append the new directory to the resolution search order and make
        # dscached pick up the updated configuration.
        node = ConfigNode("directory", self.configstore)
        node["search_order"] = node["search_order"].value + [directory["name"]]
        self.dispatcher.call_sync("dscached.management.reload_config")
        return self.id
示例#4
0
文件: namespace.py 项目: erinix/cli
    def do_set(self, obj, value, check_entity=None):
        """
        Validate and set this property on ``obj``.

        :param obj: entity to mutate
        :param value: raw value, converted via read_value()
        :param check_entity: optional entity used for the can_set() check
        :raises ValueError: when not settable, or (strict mode) when the
            value is not one of the enum choices
        """
        if not self.can_set(check_entity if check_entity else obj):
            raise ValueError(_("Property '{0}' is not settable for this entity".format(self.name)))

        value = read_value(value, self.type)

        if self.strict and self.enum:
            enum_val = self.enum(obj) if callable(self.enum) else self.enum
            if self.type == ValueType.SET:
                for e in value:
                    if e not in enum_val:
                        raise ValueError("Invalid value for property '{0}'. Should be one of: {1}".format(
                            self.get_name,
                            '; '.join(format_value(i) for i in enum_val))
                        )
            elif value not in enum_val:
                raise ValueError("Invalid value for property '{0}'. Should be one of: {1}".format(
                    self.get_name,
                    ', '.join(format_value(i) for i in enum_val))
                )

        # BUG FIX: collections.Callable was removed in Python 3.10; the
        # callable() builtin performs the same check.
        if callable(self.set):
            self.set(obj, value)
            return

        q.set(obj, self.set, value)
示例#5
0
def apply(obj, ds):
    """Rewrite the legacy 'ZVOL' target_type of DISK devices to 'BLOCK'."""
    for device in obj['devices']:
        if device.get('type') != 'DISK':
            continue
        if q.get(device, 'properties.target_type') == 'ZVOL':
            q.set(device, 'properties.target_type', 'BLOCK')

    return obj
示例#6
0
文件: utils.py 项目: mactanxin/cli
def get_item_stub(context, parent, name):
    """Build a detached SingleItemNamespace pre-filled from the parent's skeleton entity."""
    from freenas.cli.namespace import SingleItemNamespace

    stub = SingleItemNamespace(name, parent, context)
    skeleton = parent.skeleton_entity
    stub.orig_entity = copy.deepcopy(skeleton)
    stub.entity = copy.deepcopy(skeleton)
    set(stub.entity, parent.primary_key_name, name)
    return stub
示例#7
0
文件: utils.py 项目: mactanxin/cli
def set_related(context, name, obj, field, value):
    """Resolve ``value`` to an entity id in subscriber ``name`` and store it on ``obj``."""
    match = context.entity_subscribers[name].query(
        ('name', '=', value), single=True)
    if not match:
        from freenas.cli.namespace import CommandException
        raise CommandException('{0} not found'.format(value))

    set(obj, field, match['id'])
示例#8
0
        def extend_query():
            # Generator over stored VMware peers: hide raw passwords behind
            # a Password wrapper and attach a lazily computed status field.
            for peer in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
                secret = q.get(peer, 'credentials.password')
                if secret:
                    q.set(peer, 'credentials.password', Password(secret))

                peer['status'] = lazy(self.get_status, peer['id'])

                yield peer
示例#9
0
        def extend_query():
            # Generator over VMware peers from the datastore, decorating
            # each entry before it is yielded to the caller.
            for i in self.datastore.query_stream('peers', ('type', '=', 'vmware')):
                password = q.get(i, 'credentials.password')
                if password:
                    # Wrap the plaintext password so it is not exposed raw.
                    q.set(i, 'credentials.password', Password(password))

                # Status is computed on demand; defer it until accessed.
                i['status'] = lazy(self.get_status, i['id'])

                yield i
示例#10
0
文件: namespace.py 项目: erinix/cli
    def on_update(self, old_entity, new_entity):
        """Keep open item namespaces in sync after an entity update,
        following primary key renames and leaving vanished entities."""
        key = self.primary_key_name
        for cwd in self.context.ml.path:
            if not (isinstance(cwd, SingleItemNamespace) and cwd.parent == self):
                continue

            if q.get(old_entity, key) == q.get(cwd.entity, key):
                q.set(cwd.entity, key, q.get(new_entity, key))
                cwd.load()

            if not cwd.entity:
                self.context.ml.cd_up()
示例#11
0
文件: namespace.py 项目: zoot/cli
    def delete(self, this, kwargs):
        """Remove ``this`` entity from the parent's nested collection and persist."""
        key = self.primary_key_name
        victim = this.entity[key]
        remaining = [
            item for item in q.get(self.parent.entity, self.parent_path)
            if item[key] != victim
        ]
        q.set(self.parent.entity, self.parent_path, remaining)

        return self.parent.save()
    def run(self, directory):
        """
        Create a directory service entry.

        Normalizes parameters through dscached, unwraps any password
        parameter, stores the entry, configures dscached and appends the
        directory name to the lookup search order.

        :param directory: dict describing the directory service
        :raises TaskException: on RPC failure or duplicate name
        :returns: datastore id of the created directory entry
        """
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters', directory['type'],
                directory.get('parameters', {}))
        except RpcException as err:
            raise TaskException(err.code, err.message)

        if self.datastore.exists('directories',
                                 ('name', '=', directory['name'])):
            raise TaskException(
                errno.EEXIST,
                'Directory {0} already exists'.format(directory['name']))

        normalize(
            directory, {
                'enabled': False,
                'enumerate': True,
                'immutable': False,
                'uid_range': None,
                'gid_range': None
            })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        for k, v in directory['parameters'].items():
            if k == 'password':
                # Store the raw secret, not the Password wrapper object.
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            # winbind needs dedicated id ranges and a running SMB service.
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query',
                                            [('name', '=', 'smb')],
                                            {"single": True})
            if not q.get(smb, 'config.enable'):
                # Enable SMB automatically; winbind cannot work without it.
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory',
                                  self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        # Append to the resolution search order and reload dscached config.
        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
示例#13
0
    def update_one(self, key, **kwargs):
        """Apply ``kwargs`` as path/value updates to the item stored at ``key``.

        Returns True when the item existed and was re-stored, else False.
        """
        with self.lock:
            item = self.get(key)
            if not item:
                return False

            for path, new_value in kwargs.items():
                set(item, path, new_value)

            self.put(key, item)
            return True
示例#14
0
    def update_one(self, key, **kwargs):
        """Update selected fields of one cached item under the cache lock.

        :param key: cache key of the item to update
        :param kwargs: mapping of property path -> new value, applied via
            the query ``set`` helper
        :returns: True on success, False when the item does not exist
        """
        with self.lock:
            item = self.get(key)
            if not item:
                return False

            for k, v in kwargs.items():
                set(item, k, v)

            # Write the mutated item back so observers see the change.
            self.put(key, item)
            return True
示例#15
0
文件: namespace.py 项目: zoot/cli
    def on_update(self, old_entity, new_entity):
        """Entity-changed hook: refresh any open SingleItemNamespace that
        points at the updated entity, following primary key renames.

        If the namespace's entity disappeared, navigate up one level.
        """
        for cwd in self.context.ml.path:
            if isinstance(cwd, SingleItemNamespace) and cwd.parent == self:
                if q.get(old_entity, self.primary_key_name) == q.get(
                        cwd.entity, self.primary_key_name):
                    # Propagate a possible primary key change, then reload.
                    q.set(cwd.entity, self.primary_key_name,
                          q.get(new_entity, self.primary_key_name))
                    cwd.load()

                if not cwd.entity:
                    # Entity vanished (e.g. deleted) -- leave its namespace.
                    self.context.ml.cd_up()
示例#16
0
文件: namespace.py 项目: erinix/cli
    def delete(self, this, kwargs):
        """Drop ``this`` from the parent's nested collection, then save the parent."""
        pk = self.primary_key_name
        current = q.get(self.parent.entity, self.parent_path)
        kept = [entry for entry in current if entry[pk] != this.entity[pk]]
        q.set(self.parent.entity, self.parent_path, kept)

        return self.parent.save()
示例#17
0
    def run(self, peer, initial_credentials):
        """Validate and insert a new peer entry; returns its datastore id."""
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        if self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        secret = q.get(peer, 'credentials.password')
        if secret:
            # unpassword() presumably unwraps the Password object -- confirm.
            q.set(peer, 'credentials.password', unpassword(secret))

        return self.datastore.insert('peers', peer)
示例#18
0
    def run(self, peer, initial_credentials):
        """
        Create a peer entry in the datastore.

        :param peer: dict describing the peer; must contain 'name'
        :param initial_credentials: not used in this body -- presumably part
            of the task interface (NOTE(review): confirm against callers)
        :raises TaskException: EINVAL on missing or duplicate name
        :returns: datastore id of the inserted peer
        """
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        if self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        password = q.get(peer, 'credentials.password')
        if password:
            # unpassword() presumably unwraps the Password object -- confirm.
            q.set(peer, 'credentials.password', unpassword(password))

        return self.datastore.insert('peers', peer)
示例#19
0
文件: namespace.py 项目: zoot/cli
    def save(self, this, new=False):
        """
        Persist ``this`` into the parent's nested collection.

        :param this: SingleItemNamespace holding the entity to save
        :param new: True to append a new entry, False to update in place
        :returns: result of the parent namespace save
        """
        if new:
            if not q.contains(self.parent.entity, self.parent_path):
                q.set(self.parent.entity, self.parent_path, [])

            q.get(self.parent.entity, self.parent_path).append(this.entity)
        else:
            # BUG FIX: look up the existing entry by the configured primary
            # key instead of the hard-coded 'name' field, matching the other
            # save() implementation in this codebase.
            entity = first_or_default(
                lambda a: a[self.primary_key_name] == this.entity[
                    self.primary_key_name],
                q.get(self.parent.entity, self.parent_path))
            entity.update(this.entity)

        return self.parent.save()
示例#20
0
    def run(self, id, updated_fields):
        """
        Update an existing peer entry.

        :param id: datastore id of the peer
        :param updated_fields: partial dict of fields merged into the peer
        :raises TaskException: ENOENT when the peer is missing, EINVAL when
            renaming to an already existing name
        """
        peer = self.datastore.get_by_id('peers', id)
        if not peer:
            raise TaskException(errno.ENOENT, 'Peer {0} does not exist'.format(id))

        password = q.get(updated_fields, 'credentials.password')
        if password:
            # unpassword() presumably unwraps the Password object -- confirm.
            q.set(updated_fields, 'credentials.password', unpassword(password))

        peer.update(updated_fields)
        # NOTE(review): the duplicate-name check runs after the merge, so it
        # queries the new name -- confirm it cannot be tripped by this
        # peer's own (pre-update) record.
        if 'name' in updated_fields and self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        self.datastore.update('peers', id, peer)
示例#21
0
    def run(self, id, updated_fields):
        """Merge ``updated_fields`` into peer ``id`` and persist the result."""
        peer = self.datastore.get_by_id('peers', id)
        if not peer:
            raise TaskException(errno.ENOENT, 'Peer {0} does not exist'.format(id))

        secret = q.get(updated_fields, 'credentials.password')
        if secret:
            q.set(updated_fields, 'credentials.password', unpassword(secret))

        peer.update(updated_fields)
        renamed = 'name' in updated_fields
        if renamed and self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        self.datastore.update('peers', id, peer)
示例#22
0
文件: namespace.py 项目: erinix/cli
    def save(self, this, new=False):
        """Append (new=True) or merge (new=False) ``this.entity`` into the
        parent's nested collection, then save the parent namespace."""
        parent_entity = self.parent.entity
        path = self.parent_path

        if new:
            if not q.contains(parent_entity, path):
                q.set(parent_entity, path, [])
            q.get(parent_entity, path).append(this.entity)
            return self.parent.save()

        pk = self.primary_key_name
        existing = first_or_default(
            lambda candidate: candidate[pk] == this.entity[pk],
            q.get(parent_entity, path))
        existing.update(this.entity)
        return self.parent.save()
示例#23
0
    def run(self, directory):
        """
        Create a directory service entry.

        Normalizes parameters through dscached, unwraps any password
        parameter, stores the entry, configures dscached and appends the
        directory name to the lookup search order.

        :param directory: dict describing the directory service
        :raises TaskException: on RPC failure or duplicate name
        :returns: datastore id of the created directory
        """
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters',
                directory['type'],
                directory.get('parameters', {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        if self.datastore.exists('directories', ('name', '=', directory['name'])):
            raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(directory['name']))

        normalize(directory, {
            'enabled': False,
            'enumerate': True,
            'immutable': False,
            'uid_range': None,
            'gid_range': None
        })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        for k, v in directory['parameters'].items():
            if k == 'password':
                # Store the raw secret, not the Password wrapper object.
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            # winbind needs dedicated id ranges and a running SMB service.
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query', [('name', '=', 'smb')], {"single": True})
            if not q.get(smb, 'config.enable'):
                # Enable SMB automatically; winbind cannot operate without it.
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        # Add to the resolution search order and reload dscached config.
        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
示例#24
0
    def __init__(self, name, context):
        """Namespace exposing the global Docker configuration properties."""
        super(DockerConfigNamespace, self).__init__(name, context)
        self.config_call = "docker.config.get_config"
        self.update_task = 'docker.config.update'

        self.add_property(
            descr='Default Docker host',
            name='default_host',
            get=lambda o: self.get_host({'host': o['default_host']}),
            set=lambda o, v: q.set(o, 'default_host', self.set_host({}, v)),
            complete=EntitySubscriberComplete('default_host=', 'docker.host', lambda d: d['name']),
            usage=_('''\
            Name of a Docker host selected by default for any
            container or container image operations
            when there is no `host` parameter set explicitly in a command.''')
        )

        self.add_property(
            descr='Forward Docker remote API to host',
            name='api_forwarding',
            # BUG FIX: this property previously read and wrote 'default_host'
            # (copy/paste from the property above); it must operate on the
            # 'api_forwarding' config field it describes.
            get=lambda o: self.get_host({'host': o['api_forwarding']}),
            set=lambda o, v: q.set(o, 'api_forwarding', self.set_host({}, v)),
            complete=EntitySubscriberComplete('api_forwarding=', 'docker.host', lambda d: d['name']),
            usage=_('''\
            Defines which (if any) Docker host - Virtual Machine hosting
            a Docker service - should expose their standard remote HTTP API
            at FreeNAS's default network interface.''')
        )

        self.add_property(
            descr='Docker remote API forwarding',
            name='api_forwarding_enable',
            get='api_forwarding_enable',
            set='api_forwarding_enable',
            type=ValueType.BOOLEAN,
            usage=_('''\
            Used for enabling/disabling Docker HTTP API forwarding
            to FreeNAS's default network interface.''')
        )

        self.add_property(
            descr='Default DockerHub collection',
            name='default_collection',
            get='default_collection',
            set='default_collection',
            usage=_('''\
            Used for setting a default DockerHub container images collection,
            which later is being used in tab completion in other 'docker' namespaces.
            Collection equals to DockerHub username''')
        )
示例#25
0
文件: namespace.py 项目: zoot/cli
    def do_append(self, obj, value):
        """
        Append parsed ``value`` items to this set/array property of ``obj``.

        :raises ValueError: when the property is not a set or array
        """
        if self.type not in (ValueType.SET, ValueType.ARRAY):
            raise ValueError('Property is not a set or array')

        value = read_value(value, self.type)
        oldvalues = q.get(obj, self.set)
        if oldvalues is not None:
            # BUG FIX: coerce to list first -- read_value() may return a set
            # for ValueType.SET, and list + set raises TypeError. Matches
            # the sibling do_append implementation in this codebase.
            newvalues = oldvalues + list(value)
        else:
            newvalues = value

        # BUG FIX: collections.Callable was removed in Python 3.10; the
        # callable() builtin performs the same check.
        if callable(self.set):
            self.set(obj, newvalues)
            return

        q.set(obj, self.set, newvalues)
示例#26
0
文件: namespace.py 项目: erinix/cli
    def do_append(self, obj, value):
        """
        Append parsed ``value`` items to this set/array property of ``obj``.

        :raises ValueError: when the property is not a set or array
        """
        if self.type not in (ValueType.SET, ValueType.ARRAY):
            raise ValueError('Property is not a set or array')

        value = read_value(value, self.type)
        oldvalues = q.get(obj, self.set)
        if oldvalues is not None:
            newvalues = oldvalues + list(value)
        else:
            newvalues = value

        # BUG FIX: collections.Callable was removed in Python 3.10; the
        # callable() builtin performs the same check.
        if callable(self.set):
            self.set(obj, newvalues)
            return

        q.set(obj, self.set, newvalues)
示例#27
0
文件: namespace.py 项目: erinix/cli
    def do_remove(self, obj, value):
        """
        Remove parsed ``value`` items from this set/array property of ``obj``.

        :raises ValueError: when the property is not a set or array
        :raises CommandException: when an item is not present in the property
        """
        if self.type not in (ValueType.SET, ValueType.ARRAY):
            raise ValueError('Property is not a set or array')

        value = read_value(value, self.type)
        oldvalues = q.get(obj, self.set)
        # NOTE: newvalues aliases oldvalues, so removal mutates in place.
        newvalues = oldvalues
        for v in value:
            if v in newvalues:
                newvalues.remove(v)
            else:
                raise CommandException(_('{0} is not a value in {1}'.format(v, self.set)))

        # BUG FIX: collections.Callable was removed in Python 3.10; the
        # callable() builtin performs the same check.
        if callable(self.set):
            self.set(obj, newvalues)
            return

        q.set(obj, self.set, newvalues)
示例#28
0
文件: namespace.py 项目: zoot/cli
    def do_remove(self, obj, value):
        """Remove parsed ``value`` items from this set/array property,
        raising CommandException for any item that is not present."""
        if self.type not in (ValueType.SET, ValueType.ARRAY):
            raise ValueError('Property is not a set or array')

        items = read_value(value, self.type)
        current = q.get(obj, self.set)
        for item in items:
            if item not in current:
                raise CommandException(
                    _('{0} is not a value in {1}'.format(item, self.set)))
            current.remove(item)

        if isinstance(self.set, collections.Callable):
            self.set(obj, current)
            return

        q.set(obj, self.set, current)
示例#29
0
    def attr_query(self):
        """Exercise q.set/get/contains/delete/query on both an
        attribute-backed object and a plain dict; True when all behave."""
        class Holder(object):
            pass

        attr_obj = Holder()
        dict_obj = {}
        q.set(attr_obj, 'f', True)
        q.set(dict_obj, 'f2', Holder())
        q.set(dict_obj, 'f2.nested', True)

        ok = (q.get(attr_obj, 'f')
              and q.get(dict_obj, 'f2.nested')
              and isinstance(q.get(dict_obj, 'f2'), Holder))
        if ok and q.contains(attr_obj, 'f'):
            q.delete(attr_obj, 'f')
            collection = [dict_obj, attr_obj]
            return bool(q.query(collection, ('f2.nested', '=', True), count=True))

        return False
示例#30
0
    def attr_query(self):
        """Smoke-test the query helpers (q.set/get/contains/delete/query)
        against both an attribute-backed object and a plain dict.

        :returns: True when every operation behaves as expected
        """
        class Test(object):
            pass

        c = Test()
        d = {}
        # q.set handles plain attributes and nested dotted paths alike.
        q.set(c, 'f', True)
        q.set(d, 'f2', Test())
        q.set(d, 'f2.nested', True)

        if q.get(c, 'f') and q.get(d, 'f2.nested') and isinstance(
                q.get(d, 'f2'), Test):
            l = [d, c]
            if q.contains(c, 'f'):
                q.delete(c, 'f')

                # Only d still matches after deleting c's attribute.
                return bool(q.query(l, ('f2.nested', '=', True), count=True))

        return False
示例#31
0
 def set_memsize(o, v):
     """Store memory size on the config, converting bytes to MiB (truncated)."""
     megabytes = int(v / 1024 / 1024)
     set(o, 'config.memsize', megabytes)
示例#32
0
    def __init__(self, name, context):
        super(ReplicationNamespace, self).__init__(name, context)

        class PeerComplete(MultipleSourceComplete):
            def __init__(self, name):
                super(PeerComplete, self).__init__(
                    name,
                    (
                        EntitySubscriberComplete(name, 'peer', lambda o: o['name'] if o['type'] == 'freenas' else None),
                        RpcComplete(name, 'system.general.get_config', lambda o: o['hostname'])
                    )
                )

        self.primary_key_name = 'name'
        self.entity_subscriber_name = 'replication'
        self.create_task = 'replication.create'
        self.update_task = 'replication.update'
        self.delete_task = 'replication.delete'
        self.required_props = ['datasets', 'master', 'slave']

        self.localdoc['CreateEntityCommand'] = ("""\
            Usage: create <name> master=<master> slave=<slave> recursive=<recursive>
                    bidirectional=<bidirectional> auto_recover=<auto_recover>
                    replicate_services=<replicate_services> encrypt=<encrypt>
                    compress=<fast/default/best> throttle=<throttle>
                    snapshot_lifetime=<snapshot_lifetime> follow_delete=<follow_delete>

            Example: create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool/dataset
                     create my_replication master=freenas-1.local slave=freenas-2.local
                                           datasets=source:target,source2/data:target2
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3 datasets=mypool
                                           bidirectional=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                                           auto_recover=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool compress=best
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128 compress=best
                                           throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool snapshot_lifetime=1:10:10
                                           followdelete=yes

            Creates a replication link entry. Link contains configuration data
            used in later replication process.

            All ZFS pools referenced in 'datasets' property must exist on both
            slave and master at creation time. Datasets can be defined as a simple list
            of datasets available on master (source) eg. mypool/mydataset,mypool2/mydataset2,
            or a list of {source}:{target} eg. mypool/ds:targetpool/ds2,otherpool:targetpool2.
            First example could be expanded to:
            mypool/mydataset:mypool/mydataset,mypool2/mydataset2:mypool2mydataset2
            It would have the same meaning.

            Bidirectional replication is accepting only identical master and slave
            (source and target) datasets trees eg mypool:mypool,mypool2:mypool2.

            Created replication is implicitly: unidirectional, non-recursive,
            does not recover automatically and does not replicate services
            along with datasets.

            One of: master, slave parameters must represent one of current machine's
            IP addresses. Both these parameters must be defined,
            because unidirectional replication link can be promoted
            to become bi-directional link.

            Recursive parameter set to 'yes' informs that every child dataset
            of datasets defined in 'datasets' parameter will be replicated
            along with provided parents.

            Only in bi-directional replication service replication
            and automatic recovery are available.

            When automatic recovery is selected it is not possible to switch
            hosts roles manually. It's being done automatically each time
            'master' goes down or up again.
            Creates a replication task. For a list of properties, see 'help properties'.""")
        self.entity_localdoc['SetEntityCommand'] = ("""\
            Usage: set <property>=<value> ...

            Examples: set bidirectional=yes
                      set throttle=1M
                      set encrypt=AES256
                      set datasets=mypool1,mypool2/dataset1

            Sets a replication property. For a list of properties, see 'help properties'.""")

        self.localdoc['ListCommand'] = ("""\
            Usage: show

            Lists all replications. Optionally, filter or sort by property.
            Use 'help properties' to list available properties.

            Examples:
                show
                show | search name == foo""")

        self.entity_localdoc['DeleteEntityCommand'] = ("""\
            Usage: delete scrub=<scrub>

            Examples: delete
                      delete scrub=yes

            Delete current entity. Scrub allows to delete related datasets at slave side.""")

        self.skeleton_entity = {
            'bidirectional': False,
            'recursive': False,
            'replicate_services': False,
            'transport_options': []
        }

        def get_transport_option(obj, name):
            options = obj['transport_options']
            for o in options:
                if name in o['%type'].lower():
                    return o

            return None

        def get_compress(obj):
            compress = get_transport_option(obj, 'compress')
            if compress:
                return compress['level']
            else:
                return None

        def get_throttle(obj):
            throttle = get_transport_option(obj, 'throttle')
            if throttle:
                return throttle['buffer_size']
            else:
                return None

        def get_encrypt(obj):
            encrypt = get_transport_option(obj, 'encrypt')
            if encrypt:
                return encrypt['type']
            else:
                return None

        def set_transport_option(obj, oldval, val):
            if oldval:
                obj['transport_options'].remove(oldval)
            if val:
                obj['transport_options'].append(val)

        def set_compress(obj, val):
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'CompressReplicationTransportOption',
                    'level': val
                }
            set_transport_option(obj, get_transport_option(obj, 'compress'), opt)

        def set_throttle(obj, val):
            opt = {
                '%type': 'ThrottleReplicationTransportOption',
                'buffer_size': val
            }
            set_transport_option(obj, get_transport_option(obj, 'throttle'), opt)

        def set_encrypt(obj, val):
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'EncryptReplicationTransportOption',
                    'type': val
                }
            set_transport_option(obj, get_transport_option(obj, 'encrypt'), opt)

        def get_peer(obj, role):
            if obj[role] == self.context.call_sync('system.info.host_uuid'):
                return self.context.call_sync('system.general.get_config')['hostname']
            else:
                return self.context.entity_subscribers['peer'].query(
                    ('id', '=', obj[role]),
                    single=True,
                    select='name'
                )

        def set_peer(obj, val, role):
            if val == self.context.call_sync('system.general.get_config')['hostname']:
                obj[role] = self.context.call_sync('system.info.host_uuid')
            else:
                peer_id = self.context.entity_subscribers['peer'].query(
                    ('name', '=', val),
                    ('type', '=', 'freenas'),
                    single=True,
                    select='id'
                )
                obj[role] = peer_id

        def get_datasets(obj):
            return ['{0}:{1}'.format(i['master'], i['slave']) for i in obj['datasets']]

        def set_datasets(obj, value):
            # Each entry is 'master[:slave]'; a missing or empty slave part
            # means the dataset replicates onto the same path on the slave.
            parsed = []
            for entry in value:
                master, sep, slave = entry.partition(':')
                parsed.append({
                    'master': master,
                    'slave': slave if sep and slave else master
                })

            obj['datasets'] = parsed

        def get_initial_master(obj):
            # Map the stored initial-master id onto whichever role holds it;
            # None when it matches neither current master nor slave.
            for role in ('master', 'slave'):
                if obj['initial_master'] == obj[role]:
                    return get_peer(obj, role)
            return None

        self.add_property(
            descr='Name',
            name='name',
            get='name',
            usersetable=False,
            list=True,
            usage=_('Name of a replication task')
        )

        self.add_property(
            descr='Master',
            name='master',
            get=lambda o: get_peer(o, 'master'),
            set=lambda o, v: set_peer(o, v, 'master'),
            usersetable=False,
            list=True,
            complete=PeerComplete('master='),
            usage=_('Name of FreeNAS machine (peer) acting as a sending side.')
        )

        self.add_property(
            descr='Slave',
            name='slave',
            get=lambda o: get_peer(o, 'slave'),
            set=lambda o, v: set_peer(o, v, 'slave'),
            usersetable=False,
            list=True,
            complete=PeerComplete('slave='),
            usage=_('Name of FreeNAS machine (peer) acting as a receiving side.')
        )

        self.add_property(
            descr='Datasets',
            name='datasets',
            get=get_datasets,
            set=set_datasets,
            list=False,
            strict=False,
            type=ValueType.SET,
            complete=EntitySubscriberComplete('datasets=', 'volume.dataset', lambda o: o['name'] + ':'),
            usage=_('List of datasets to be replicated.')
        )

        self.add_property(
            descr='Bi-directional',
            name='bidirectional',
            get='bidirectional',
            set='bidirectional',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines if a replication task does support inverting master/slave roles.')
        )

        self.add_property(
            descr='Automatic recovery',
            name='auto_recover',
            get='auto_recover',
            set='auto_recover',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Enables automatic replication stream invert when initial master
            becomes down/unreachable. Once initial master goes back online
            replication streams are being inverted again
            to match initial direction.''')
        )

        self.add_property(
            descr='Initial master side',
            name='initial_master',
            get=get_initial_master,
            usersetable=False,
            createsetable=False,
            list=False,
            usage=_('Informs which host was initially selected a replication master.')
        )

        self.add_property(
            descr='One time',
            name='one_time',
            get='one_time',
            usersetable=False,
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('One time replications are automatically deleted after first successful run.')
        )

        self.add_property(
            descr='Recursive',
            name='recursive',
            get='recursive',
            set='recursive',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines if selected datasets should be replicated recursively.')
        )

        self.add_property(
            descr='Services replication',
            name='replicate_services',
            get='replicate_services',
            set='replicate_services',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            When set, in bidirectional replication case,
            enables FreeNAS machines to attempt to recreate services
            (such as shares) on new master after role swap.''')
        )

        self.add_property(
            descr='Transfer encryption',
            name='encryption',
            get=get_encrypt,
            set=set_encrypt,
            enum=['no', 'AES128', 'AES192', 'AES256'],
            list=False,
            usage=_('''\
            Encryption algorithm used during replication stream send operation.
            Can be one of: 'no', 'AES128', 'AES192', 'AES256'.''')
        )

        self.add_property(
            descr='Transfer throttle',
            name='throttle',
            get=get_throttle,
            set=set_throttle,
            list=False,
            type=ValueType.SIZE,
            usage=_('Maximum transfer speed during replication. Value in B/s.')
        )

        self.add_property(
            descr='Transfer compression',
            name='compression',
            get=get_compress,
            set=set_compress,
            enum=['no', 'FAST', 'DEFAULT', 'BEST'],
            list=False,
            usage=_('''\
            Compression algorithm used during replication stream send operation.
            Can be one of: 'no', 'FAST', 'DEFAULT', 'BEST'.''')
        )

        self.add_property(
            descr='Snapshot lifetime',
            name='snapshot_lifetime',
            get='snapshot_lifetime',
            set=lambda o, v: q.set(o, 'snapshot_lifetime', parse_timedelta(str(v)).seconds),
            list=False,
            type=ValueType.NUMBER,
            usage=_('Lifetime of snapshots created for replication purposes.')
        )

        self.add_property(
            descr='Follow delete',
            name='followdelete',
            get='followdelete',
            set='followdelete',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Defines if replication should automatically remove
            stale snapshots at slave side.''')
        )

        self.add_property(
            descr='Current status',
            name='status',
            get='current_state.status',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            usage=_('Current status of replication.')
        )

        self.add_property(
            descr='Current progress',
            name='progress',
            get=lambda o: '{0:.2f}'.format(round(q.get(o, 'current_state.progress'), 2)) + '%',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Current progress of replication.')
        )

        self.add_property(
            descr='Last speed',
            name='speed',
            get='current_state.speed',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Transfer speed of current replication run.')
        )

        self.primary_key = self.get_mapping('name')

        self.entity_commands = self.get_entity_commands
示例#33
0
    def __init__(self, name, context):
        """Entity namespace for replication links in the CLI.

        Wires up the entity subscriber and create/update/delete tasks,
        inline help text, a skeleton entity, and property mappings that
        translate between CLI values and the replication entity's stored
        form (peers as names, datasets as 'master:slave' strings, and
        transport options flattened into individual properties).
        """
        super(ReplicationNamespace, self).__init__(name, context)

        # Completer accepting either a known 'freenas'-type peer name or
        # this machine's own hostname (fetched over RPC).
        class PeerComplete(MultipleSourceComplete):
            def __init__(self, name):
                super(PeerComplete, self).__init__(
                    name,
                    (
                        EntitySubscriberComplete(name, 'peer', lambda o: o['name'] if o['type'] == 'freenas' else None),
                        RpcComplete(name, 'system.general.get_config', lambda o: o['hostname'])
                    )
                )

        self.primary_key_name = 'name'
        self.entity_subscriber_name = 'replication'
        self.create_task = 'replication.create'
        self.update_task = 'replication.update'
        self.delete_task = 'replication.delete'
        self.required_props = ['datasets', 'master', 'slave']

        self.localdoc['CreateEntityCommand'] = ("""\
            Usage: create <name> master=<master> slave=<slave> recursive=<recursive>
                    bidirectional=<bidirectional> auto_recover=<auto_recover>
                    replicate_services=<replicate_services> encrypt=<encrypt>
                    compress=<fast/default/best> throttle=<throttle>
                    snapshot_lifetime=<snapshot_lifetime> follow_delete=<follow_delete>

            Example: create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool/dataset
                     create my_replication master=freenas-1.local slave=freenas-2.local
                                           datasets=source:target,source2/data:target2
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3 datasets=mypool
                                           bidirectional=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                                           auto_recover=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool compress=best
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128 compress=best
                                           throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool snapshot_lifetime=1:10:10
                                           followdelete=yes

            Creates a replication link entry. Link contains configuration data
            used in later replication process.

            All ZFS pools referenced in 'datasets' property must exist on both
            slave and master at creation time. Datasets can be defined as a simple list
            of datasets available on master (source) eg. mypool/mydataset,mypool2/mydataset2,
            or a list of {source}:{target} eg. mypool/ds:targetpool/ds2,otherpool:targetpool2.
            First example could be expanded to:
            mypool/mydataset:mypool/mydataset,mypool2/mydataset2:mypool2mydataset2
            It would have the same meaning.

            Bidirectional replication is accepting only identical master and slave
            (source and target) datasets trees eg mypool:mypool,mypool2:mypool2.

            Created replication is implicitly: unidirectional, non-recursive,
            does not recover automatically and does not replicate services
            along with datasets.

            One of: master, slave parameters must represent one of current machine's
            IP addresses. Both these parameters must be defined,
            because unidirectional replication link can be promoted
            to become bi-directional link.

            Recursive parameter set to 'yes' informs that every child dataset
            of datasets defined in 'datasets' parameter will be replicated
            along with provided parents.

            Only in bi-directional replication service replication
            and automatic recovery are available.

            When automatic recovery is selected it is not possible to switch
            hosts roles manually. It's being done automatically each time
            'master' goes down or up again.
            Creates a replication task. For a list of properties, see 'help properties'.""")
        self.entity_localdoc['SetEntityCommand'] = ("""\
            Usage: set <property>=<value> ...

            Examples: set bidirectional=yes
                      set throttle=1M
                      set encrypt=AES256
                      set datasets=mypool1,mypool2/dataset1

            Sets a replication property. For a list of properties, see 'help properties'.""")

        self.localdoc['ListCommand'] = ("""\
            Usage: show

            Lists all replications. Optionally, filter or sort by property.
            Use 'help properties' to list available properties.

            Examples:
                show
                show | search name == foo""")

        self.entity_localdoc['DeleteEntityCommand'] = ("""\
            Usage: delete scrub=<scrub>

            Examples: delete
                      delete scrub=yes

            Delete current entity. Scrub allows to delete related datasets at slave side.""")

        # Defaults applied to a freshly created replication entity.
        self.skeleton_entity = {
            'bidirectional': False,
            'recursive': False,
            'replicate_services': False,
            'transport_options': []
        }

        def get_transport_option(obj, name):
            # Find the transport option whose '%type' contains `name`, e.g.
            # 'compress' matches 'CompressReplicationTransportOption'.
            options = obj['transport_options']
            for o in options:
                if name in o['%type'].lower():
                    return o

            return None

        def get_compress(obj):
            # Compression level of the compress option, or None when unset.
            compress = get_transport_option(obj, 'compress')
            if compress:
                return compress['level']
            else:
                return None

        def get_throttle(obj):
            # Throttle buffer size in B/s, or None when unset.
            throttle = get_transport_option(obj, 'throttle')
            if throttle:
                return throttle['buffer_size']
            else:
                return None

        def get_encrypt(obj):
            # Encryption cipher name, or None when unset.
            encrypt = get_transport_option(obj, 'encrypt')
            if encrypt:
                return encrypt['type']
            else:
                return None

        def set_transport_option(obj, oldval, val):
            # Swap `oldval` for `val` in the entity's transport option list;
            # either side may be None (pure add or pure remove).
            if oldval:
                obj['transport_options'].remove(oldval)
            if val:
                obj['transport_options'].append(val)

        def set_compress(obj, val):
            # 'no' clears compression; other values set the level.
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'CompressReplicationTransportOption',
                    'level': val
                }
            set_transport_option(obj, get_transport_option(obj, 'compress'), opt)

        def set_throttle(obj, val):
            opt = {
                '%type': 'ThrottleReplicationTransportOption',
                'buffer_size': val
            }
            set_transport_option(obj, get_transport_option(obj, 'throttle'), opt)

        def set_encrypt(obj, val):
            # 'no' clears encryption; other values select the cipher.
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'EncryptReplicationTransportOption',
                    'type': val
                }
            set_transport_option(obj, get_transport_option(obj, 'encrypt'), opt)

        def get_peer(obj, role):
            # Resolve the peer id held under `role` ('master'/'slave') to a
            # name; our own host UUID maps to the local hostname.
            if obj[role] == self.context.call_sync('system.info.host_uuid'):
                return self.context.call_sync('system.general.get_config')['hostname']
            else:
                return self.context.entity_subscribers['peer'].query(
                    ('id', '=', obj[role]),
                    single=True,
                    select='name'
                )

        def set_peer(obj, val, role):
            # Inverse of get_peer: store the id matching hostname `val`.
            if val == self.context.call_sync('system.general.get_config')['hostname']:
                obj[role] = self.context.call_sync('system.info.host_uuid')
            else:
                peer_id = self.context.entity_subscribers['peer'].query(
                    ('name', '=', val),
                    ('type', '=', 'freenas'),
                    single=True,
                    select='id'
                )
                obj[role] = peer_id

        def get_datasets(obj):
            # Render each dataset pair as 'master:slave'.
            return ['{0}:{1}'.format(i['master'], i['slave']) for i in obj['datasets']]

        def set_datasets(obj, value):
            datasets = []
            for ds in value:
                sp_dataset = ds.split(':', 1)
                datasets.append({
                    'master': sp_dataset[0],
                    # Index is 1 only when a non-empty slave path followed the
                    # colon; otherwise the master path doubles as the slave.
                    'slave': sp_dataset[int(bool(len(sp_dataset) == 2 and sp_dataset[1]))]
                })

            obj['datasets'] = datasets

        def get_initial_master(obj):
            # Map the stored initial-master id to whichever role matches it.
            if obj['initial_master'] == obj['master']:
                return get_peer(obj, 'master')
            elif obj['initial_master'] == obj['slave']:
                return get_peer(obj, 'slave')
            else:
                return

        # Property mappings between CLI fields and the replication entity.
        self.add_property(
            descr='Name',
            name='name',
            get='name',
            usersetable=False,
            list=True,
            usage=_('Name of a replication task')
        )

        self.add_property(
            descr='Master',
            name='master',
            get=lambda o: get_peer(o, 'master'),
            set=lambda o, v: set_peer(o, v, 'master'),
            usersetable=False,
            list=True,
            complete=PeerComplete('master='),
            usage=_('Name of FreeNAS machine (peer) acting as a sending side.')
        )

        self.add_property(
            descr='Slave',
            name='slave',
            get=lambda o: get_peer(o, 'slave'),
            set=lambda o, v: set_peer(o, v, 'slave'),
            usersetable=False,
            list=True,
            complete=PeerComplete('slave='),
            usage=_('Name of FreeNAS machine (peer) acting as a receiving side.')
        )

        self.add_property(
            descr='Datasets',
            name='datasets',
            get=get_datasets,
            set=set_datasets,
            list=False,
            strict=False,
            type=ValueType.SET,
            complete=EntitySubscriberComplete('datasets=', 'volume.dataset', lambda o: o['name'] + ':'),
            usage=_('List of datasets to be replicated.')
        )

        self.add_property(
            descr='Bi-directional',
            name='bidirectional',
            get='bidirectional',
            set='bidirectional',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines if a replication task does support inverting master/slave roles.')
        )

        self.add_property(
            descr='Automatic recovery',
            name='auto_recover',
            get='auto_recover',
            set='auto_recover',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Enables automatic replication stream invert when initial master
            becomes down/unreachable. Once initial master goes back online
            replication streams are being inverted again
            to match initial direction.''')
        )

        self.add_property(
            descr='Initial master side',
            name='initial_master',
            get=get_initial_master,
            usersetable=False,
            createsetable=False,
            list=False,
            usage=_('Informs which host was initially selected a replication master.')
        )

        self.add_property(
            descr='Recursive',
            name='recursive',
            get='recursive',
            set='recursive',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines if selected datasets should be replicated recursively.')
        )

        self.add_property(
            descr='Services replication',
            name='replicate_services',
            get='replicate_services',
            set='replicate_services',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            When set, in bidirectional replication case,
            enables FreeNAS machines to attempt to recreate services
            (such as shares) on new master after role swap.''')
        )

        self.add_property(
            descr='Transfer encryption',
            name='encryption',
            get=get_encrypt,
            set=set_encrypt,
            enum=['no', 'AES128', 'AES192', 'AES256'],
            list=False,
            usage=_('''\
            Encryption algorithm used during replication stream send operation.
            Can be one of: 'no', 'AES128', 'AES192', 'AES256'.''')
        )

        self.add_property(
            descr='Transfer throttle',
            name='throttle',
            get=get_throttle,
            set=set_throttle,
            list=False,
            type=ValueType.SIZE,
            usage=_('Maximum transfer speed during replication. Value in B/s.')
        )

        self.add_property(
            descr='Transfer compression',
            name='compression',
            get=get_compress,
            set=set_compress,
            enum=['no', 'FAST', 'DEFAULT', 'BEST'],
            list=False,
            usage=_('''\
            Compression algorithm used during replication stream send operation.
            Can be one of: 'no', 'FAST', 'DEFAULT', 'BEST'.''')
        )

        self.add_property(
            descr='Snapshot lifetime',
            name='snapshot_lifetime',
            get='snapshot_lifetime',
            set=lambda o, v: q.set(o, 'snapshot_lifetime', parse_timedelta(str(v)).seconds),
            list=False,
            type=ValueType.NUMBER,
            usage=_('Lifetime of snapshots created for replication purposes.')
        )

        self.add_property(
            descr='Follow delete',
            name='followdelete',
            get='followdelete',
            set='followdelete',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Defines if replication should automatically remove
            stale snapshots at slave side.''')
        )

        self.add_property(
            descr='Current status',
            name='status',
            get='current_state.status',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            usage=_('Current status of replication.')
        )

        self.add_property(
            descr='Current progress',
            name='progress',
            get=lambda o: '{0:.2f}'.format(round(q.get(o, 'current_state.progress'), 2)) + '%',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Current progress of replication.')
        )

        self.add_property(
            descr='Last speed',
            name='speed',
            get='current_state.speed',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Transfer speed of current replication run.')
        )

        self.primary_key = self.get_mapping('name')

        self.entity_commands = self.get_entity_commands
示例#34
0
文件: peer.py 项目: zoot/cli
 def set_type(o, v):
     # Keep the credentials '%type' tag in sync with the chosen peer type,
     # e.g. 'ssh' -> 'ssh-credentials'.
     cred_type = '{0}-credentials'.format(v)
     q.set(o, 'type', v)
     q.set(o, 'credentials.%type', cred_type)
示例#35
0
 def set_type(o, v):
     # Derive the credentials class name from the type, e.g.
     # 'amazon-s3' -> 'AmazonS3Credentials', 'ssh' -> 'SshCredentials'.
     cred_class = '{0}Credentials'.format(v.title().replace('-', ''))
     q.set(o, 'type', v)
     q.set(o, 'credentials.%type', cred_class)
示例#36
0
文件: calendar.py 项目: zoot/cli
    def __init__(self, name, context):
        """Calendar namespace for scheduled 'replication.replicate_dataset' tasks.

        Maps CLI properties onto the task's positional args:
        args[0] = dataset name, args[1] = options dict,
        args[2] = list of transport option dicts.
        """
        super(ReplicationNamespace, self).__init__(name, context)
        self.extra_query_params = [('task', '=', 'replication.replicate_dataset')]
        self.required_props.extend([])
        self.task_args_helper = ['dataset', 'options', 'transport_options']
        self.skeleton_entity['task'] = 'replication.replicate_dataset'
        self.skeleton_entity['args'] = [
            None,
            {
                'remote_dataset': None,
                'peer': None,
                'recursive': False,
                'followdelete': False
            },
            []
        ]

        def get_peer_name(id):
            # Resolve a peer id to its name; None if the peer no longer exists.
            peer = self.context.entity_subscribers['peer'].query(('id', '=', id), single=True)
            return peer['name'] if peer else None

        def set_peer_id(name):
            # Resolve a peer name to its id, failing loudly on unknown peers.
            peer = self.context.entity_subscribers['peer'].query(('name', '=', name), single=True)
            if not peer:
                raise CommandException('Peer {0} not found'.format(name))

            return peer['id']

        def get_transport_option(obj, type, property):
            # Fetch `property` from the transport option named `type`, if any.
            opt = first_or_default(lambda i: i['name'] == type, obj['args'][2])
            return opt[property] if opt else None

        def set_transport_option(obj, type, property, value):
            # Create, update or remove the transport option named `type`.
            opt = first_or_default(lambda i: i['name'] == type, obj['args'][2])

            if value:
                if opt:
                    opt[property] = value
                else:
                    obj['args'][2].append({
                        'name': type,
                        property: value
                    })
            elif opt:
                # Bug fix: only remove an existing option. The previous
                # unconditional remove raised ValueError when clearing an
                # option that was never set (list.remove(None)).
                obj['args'][2].remove(opt)

            # Rebind args so the entity layer notices the in-place mutation.
            obj['args'] = copy.copy(obj['args'])

        self.add_property(
            descr='Local dataset',
            name='dataset',
            get=lambda obj: self.get_task_args(obj, 'dataset'),
            list=True,
            set=lambda obj, val: self.set_task_args(obj, val, 'dataset'),
        )

        self.add_property(
            descr='Remote dataset',
            name='remote_dataset',
            get=lambda obj: q.get(self.get_task_args(obj, 'options'), 'remote_dataset'),
            list=True,
            set=lambda obj, val: q.set(self.get_task_args(obj, 'options'), 'remote_dataset', val),
        )

        self.add_property(
            descr='Peer name',
            name='peer',
            get=lambda obj: get_peer_name(q.get(self.get_task_args(obj, 'options'), 'peer')),
            list=True,
            set=lambda obj, val: q.set(self.get_task_args(obj, 'options'), 'peer', set_peer_id(val)),
        )

        self.add_property(
            descr='Recursive',
            name='recursive',
            get=lambda obj: q.get(self.get_task_args(obj, 'options'), 'recursive'),
            list=True,
            set=lambda obj, val: q.set(self.get_task_args(obj, 'options'), 'recursive', val),
            type=ValueType.BOOLEAN
        )

        self.add_property(
            descr='Follow delete',
            name='followdelete',
            get=lambda obj: q.get(self.get_task_args(obj, 'options'), 'followdelete'),
            list=False,
            set=lambda obj, val: q.set(self.get_task_args(obj, 'options'), 'followdelete', val),
            type=ValueType.BOOLEAN
        )

        self.add_property(
            descr='Compression',
            name='compression',
            get=lambda obj: get_transport_option(obj, 'compress', 'level'),
            list=False,
            set=lambda obj, val: set_transport_option(obj, 'compress', 'level', val),
            enum=['FAST', 'DEFAULT', 'BEST', None]
        )

        self.add_property(
            descr='Encryption',
            name='encryption',
            get=lambda obj: get_transport_option(obj, 'encryption', 'type'),
            list=False,
            set=lambda obj, val: set_transport_option(obj, 'encryption', 'type', val),
            enum=['AES128', 'AES192', 'AES256', None]
        )

        self.add_property(
            descr='Throttle',
            name='throttle',
            get=lambda obj: get_transport_option(obj, 'throttle', 'buffer_size'),
            list=False,
            set=lambda obj, val: set_transport_option(obj, 'throttle', 'buffer_size', val),
            type=ValueType.SIZE
        )
示例#37
0
文件: calendar.py 项目: zoot/cli
 def set_rsync_args(self, entity, name, value):
     """Write `value` under `name` into the rsync task's first positional argument."""
     rsync_options = entity['args'][0]
     q.set(rsync_options, name, value)
示例#38
0
文件: calendar.py 项目: erinix/cli
 def set_rsync_args(self, entity, name, value):
     """Store `value` at key path `name` inside the task's leading argument dict."""
     target = entity['args'][0]
     q.set(target, name, value)