Example #1
 def run(self, context, args, kwargs, opargs):
     delay = kwargs.get('delay', None)
     if delay:
         delay = parse_timedelta(delay).seconds
     context.submit_task('system.reboot', delay)
     return _("The system will now reboot...")
Example #2
 def serialize_filter(self, context, args, kwargs, opargs):
     return {"filter": [
         ('timestamp', '!=', None),
         ('timestamp', '>=', datetime.now() - parse_timedelta(args[0]))
     ]}
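
The serializer above builds a query filter that keeps only entries stamped within the last args[0] (for example '1:00:00' for one hour). Below is a hypothetical evaluator for such (field, operator, value) tuples, sketched only to show what the returned structure means; the real query backend is not part of this example:

    from datetime import datetime, timedelta

    # Assumed, simplified semantics for the two operators used above.
    OPS = {
        '!=': lambda a, b: a != b,
        '>=': lambda a, b: a is not None and a >= b,
    }

    def matches(entry, rules):
        return all(OPS[op](entry.get(field), value) for field, op, value in rules)

    cutoff = datetime.now() - timedelta(hours=1)
    rules = [('timestamp', '!=', None), ('timestamp', '>=', cutoff)]

    print(matches({'timestamp': datetime.now()}, rules))                       # True
    print(matches({'timestamp': datetime.now() - timedelta(hours=2)}, rules))  # False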
Example #3
    def __init__(self, name, context):
        super(ReplicationNamespace, self).__init__(name, context)

        class PeerComplete(MultipleSourceComplete):
            def __init__(self, name):
                super(PeerComplete, self).__init__(
                    name,
                    (
                        EntitySubscriberComplete(name, 'peer', lambda o: o['name'] if o['type'] == 'freenas' else None),
                        RpcComplete(name, 'system.general.get_config', lambda o: o['hostname'])
                    )
                )

        self.primary_key_name = 'name'
        self.entity_subscriber_name = 'replication'
        self.create_task = 'replication.create'
        self.update_task = 'replication.update'
        self.delete_task = 'replication.delete'
        self.required_props = ['datasets', 'master', 'slave']

        self.localdoc['CreateEntityCommand'] = ("""\
            Usage: create <name> master=<master> slave=<slave> datasets=<datasets>
                    recursive=<recursive> bidirectional=<bidirectional>
                    auto_recover=<auto_recover> replicate_services=<replicate_services>
                    encrypt=<encrypt> compress=<fast/default/best> throttle=<throttle>
                    snapshot_lifetime=<snapshot_lifetime> followdelete=<followdelete>

            Example: create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool/dataset
                     create my_replication master=freenas-1.local slave=freenas-2.local
                                           datasets=source:target,source2/data:target2
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3 datasets=mypool
                                           bidirectional=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool,mypool2 bidirectional=yes
                                           recursive=yes replicate_services=yes
                                           auto_recover=yes
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool compress=best
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool encrypt=AES128 compress=best
                                           throttle=10MiB
                     create my_replication master=10.0.0.2 slave=10.0.0.3
                                           datasets=mypool snapshot_lifetime=1:10:10
                                           followdelete=yes

            Creates a replication link entry. The link contains the configuration
            data used by the subsequent replication process.

            All ZFS pools referenced in the 'datasets' property must exist on both
            slave and master at creation time. Datasets can be defined as a simple
            list of datasets available on the master (source), e.g.
            mypool/mydataset,mypool2/mydataset2, or as a list of {source}:{target}
            pairs, e.g. mypool/ds:targetpool/ds2,otherpool:targetpool2.
            The first form is shorthand for replicating each dataset to a dataset
            of the same name, so the first example is equivalent to:
            mypool/mydataset:mypool/mydataset,mypool2/mydataset2:mypool2/mydataset2

            Bidirectional replication accepts only identical master and slave
            (source and target) dataset trees, e.g. mypool:mypool,mypool2:mypool2.

            By default a created replication is unidirectional, non-recursive,
            does not recover automatically, and does not replicate services
            along with datasets.

            One of the 'master' and 'slave' parameters must represent one of the
            current machine's IP addresses. Both parameters must be defined,
            because a unidirectional replication link can later be promoted to a
            bidirectional link.

            Setting 'recursive' to 'yes' causes every child dataset of the datasets
            listed in the 'datasets' parameter to be replicated along with its
            parent.

            Service replication and automatic recovery are available only for
            bidirectional replication.

            When automatic recovery is selected, host roles cannot be switched
            manually; the switch happens automatically each time the 'master'
            goes down or comes back up.

            For a list of properties, see 'help properties'.""")
        self.entity_localdoc['SetEntityCommand'] = ("""\
            Usage: set <property>=<value> ...

            Examples: set bidirectional=yes
                      set throttle=1M
                      set encrypt=AES256
                      set datasets=mypool1,mypool2/dataset1

            Sets a replication property. For a list of properties, see 'help properties'.""")

        self.localdoc['ListCommand'] = ("""\
            Usage: show

            Lists all replications. Optionally, filter or sort by property.
            Use 'help properties' to list available properties.

            Examples:
                show
                show | search name == foo""")

        self.entity_localdoc['DeleteEntityCommand'] = ("""\
            Usage: delete scrub=<scrub>

            Examples: delete
                      delete scrub=yes

            Deletes the current entity. Setting scrub to 'yes' also deletes the related datasets on the slave side.""")

        self.skeleton_entity = {
            'bidirectional': False,
            'recursive': False,
            'replicate_services': False,
            'transport_options': []
        }

        def get_transport_option(obj, name):
            options = obj['transport_options']
            for o in options:
                if name in o['%type'].lower():
                    return o

            return None

        def get_compress(obj):
            compress = get_transport_option(obj, 'compress')
            if compress:
                return compress['level']
            else:
                return None

        def get_throttle(obj):
            throttle = get_transport_option(obj, 'throttle')
            if throttle:
                return throttle['buffer_size']
            else:
                return None

        def get_encrypt(obj):
            encrypt = get_transport_option(obj, 'encrypt')
            if encrypt:
                return encrypt['type']
            else:
                return None

        def set_transport_option(obj, oldval, val):
            if oldval:
                obj['transport_options'].remove(oldval)
            if val:
                obj['transport_options'].append(val)

        def set_compress(obj, val):
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'CompressReplicationTransportOption',
                    'level': val
                }
            set_transport_option(obj, get_transport_option(obj, 'compress'), opt)

        def set_throttle(obj, val):
            opt = {
                '%type': 'ThrottleReplicationTransportOption',
                'buffer_size': val
            }
            set_transport_option(obj, get_transport_option(obj, 'throttle'), opt)

        def set_encrypt(obj, val):
            opt = None
            if val != 'no':
                opt = {
                    '%type': 'EncryptReplicationTransportOption',
                    'type': val
                }
            set_transport_option(obj, get_transport_option(obj, 'encrypt'), opt)

        def get_peer(obj, role):
            if obj[role] == self.context.call_sync('system.info.host_uuid'):
                return self.context.call_sync('system.general.get_config')['hostname']
            else:
                return self.context.entity_subscribers['peer'].query(
                    ('id', '=', obj[role]),
                    single=True,
                    select='name'
                )

        def set_peer(obj, val, role):
            if val == self.context.call_sync('system.general.get_config')['hostname']:
                obj[role] = self.context.call_sync('system.info.host_uuid')
            else:
                peer_id = self.context.entity_subscribers['peer'].query(
                    ('name', '=', val),
                    ('type', '=', 'freenas'),
                    single=True,
                    select='id'
                )
                obj[role] = peer_id

        def get_datasets(obj):
            return ['{0}:{1}'.format(i['master'], i['slave']) for i in obj['datasets']]

        def set_datasets(obj, value):
            datasets = []
            for ds in value:
                # Entries are '{source}:{target}' pairs; a bare '{source}' (or an
                # empty target) replicates to a dataset of the same name.
                source, _sep, target = ds.partition(':')
                datasets.append({
                    'master': source,
                    'slave': target if target else source
                })

            obj['datasets'] = datasets

        def get_initial_master(obj):
            if obj['initial_master'] == obj['master']:
                return get_peer(obj, 'master')
            elif obj['initial_master'] == obj['slave']:
                return get_peer(obj, 'slave')
            else:
                return

        self.add_property(
            descr='Name',
            name='name',
            get='name',
            usersetable=False,
            list=True,
            usage=_('Name of a replication task')
        )

        self.add_property(
            descr='Master',
            name='master',
            get=lambda o: get_peer(o, 'master'),
            set=lambda o, v: set_peer(o, v, 'master'),
            usersetable=False,
            list=True,
            complete=PeerComplete('master='),
            usage=_('Name of the FreeNAS machine (peer) acting as the sending side.')
        )

        self.add_property(
            descr='Slave',
            name='slave',
            get=lambda o: get_peer(o, 'slave'),
            set=lambda o, v: set_peer(o, v, 'slave'),
            usersetable=False,
            list=True,
            complete=PeerComplete('slave='),
            usage=_('Name of the FreeNAS machine (peer) acting as the receiving side.')
        )

        self.add_property(
            descr='Datasets',
            name='datasets',
            get=get_datasets,
            set=set_datasets,
            list=False,
            strict=False,
            type=ValueType.SET,
            complete=EntitySubscriberComplete('datasets=', 'volume.dataset', lambda o: o['name'] + ':'),
            usage=_('List of datasets to be replicated.')
        )

        self.add_property(
            descr='Bi-directional',
            name='bidirectional',
            get='bidirectional',
            set='bidirectional',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines whether a replication task supports inverting the master/slave roles.')
        )

        self.add_property(
            descr='Automatic recovery',
            name='auto_recover',
            get='auto_recover',
            set='auto_recover',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Enables automatic inversion of the replication stream when the initial
            master goes down or becomes unreachable. Once the initial master is
            back online, the replication streams are inverted again to match the
            initial direction.''')
        )

        self.add_property(
            descr='Initial master side',
            name='initial_master',
            get=get_initial_master,
            usersetable=False,
            createsetable=False,
            list=False,
            usage=_('Indicates which host was initially selected as the replication master.')
        )

        self.add_property(
            descr='One time',
            name='one_time',
            get='one_time',
            usersetable=False,
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('One-time replications are automatically deleted after the first successful run.')
        )

        self.add_property(
            descr='Recursive',
            name='recursive',
            get='recursive',
            set='recursive',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('Defines whether the selected datasets should be replicated recursively.')
        )

        self.add_property(
            descr='Services replication',
            name='replicate_services',
            get='replicate_services',
            set='replicate_services',
            condition=lambda o: o.get('bidirectional'),
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            When set in the bidirectional replication case, allows the FreeNAS
            machines to attempt to recreate services (such as shares) on the
            new master after a role swap.''')
        )

        self.add_property(
            descr='Transfer encryption',
            name='encryption',
            get=get_encrypt,
            set=set_encrypt,
            enum=['no', 'AES128', 'AES192', 'AES256'],
            list=False,
            usage=_('''\
            Encryption algorithm used during replication stream send operation.
            Can be one of: 'no', 'AES128', 'AES192', 'AES256'.''')
        )

        self.add_property(
            descr='Transfer throttle',
            name='throttle',
            get=get_throttle,
            set=set_throttle,
            list=False,
            type=ValueType.SIZE,
            usage=_('Maximum transfer speed during replication. Value in B/s.')
        )

        self.add_property(
            descr='Transfer compression',
            name='compression',
            get=get_compress,
            set=set_compress,
            enum=['no', 'FAST', 'DEFAULT', 'BEST'],
            list=False,
            usage=_('''\
            Compression algorithm used during replication stream send operation.
            Can be one of: 'no', 'FAST', 'DEFAULT', 'BEST'.''')
        )

        self.add_property(
            descr='Snapshot lifetime',
            name='snapshot_lifetime',
            get='snapshot_lifetime',
            set=lambda o, v: q.set(o, 'snapshot_lifetime', parse_timedelta(str(v)).seconds),
            list=False,
            type=ValueType.NUMBER,
            usage=_('Lifetime of snapshots created for replication purposes.')
        )

        self.add_property(
            descr='Follow delete',
            name='followdelete',
            get='followdelete',
            set='followdelete',
            list=False,
            type=ValueType.BOOLEAN,
            usage=_('''\
            Defines whether replication should automatically remove
            stale snapshots on the slave side.''')
        )

        self.add_property(
            descr='Current status',
            name='status',
            get='current_state.status',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            usage=_('Current status of replication.')
        )

        self.add_property(
            descr='Current progress',
            name='progress',
            get=lambda o: '{0:.2f}'.format(round(q.get(o, 'current_state.progress'), 2)) + '%',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Current progress of replication.')
        )

        self.add_property(
            descr='Last speed',
            name='speed',
            get='current_state.speed',
            usersetable=False,
            createsetable=False,
            list=False,
            type=ValueType.STRING,
            condition=lambda o: q.get(o, 'current_state.status') == 'RUNNING',
            usage=_('Transfer speed of current replication run.')
        )

        self.primary_key = self.get_mapping('name')

        self.entity_commands = self.get_entity_commands
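
The create help text above describes the '{source}:{target}' dataset shorthand, and the get_datasets/set_datasets helpers implement exactly that expansion. Pulled out of the namespace as a standalone round trip (a sketch for illustration, not part of the original module):

    def set_datasets(obj, value):
        # '{source}:{target}' pairs; a bare '{source}' targets a dataset of the same name.
        obj['datasets'] = []
        for ds in value:
            source, _sep, target = ds.partition(':')
            obj['datasets'].append({'master': source, 'slave': target or source})

    def get_datasets(obj):
        return ['{0}:{1}'.format(i['master'], i['slave']) for i in obj['datasets']]

    entity = {}
    set_datasets(entity, ['mypool/mydataset', 'mypool2/ds:targetpool/ds2'])
    print(get_datasets(entity))
    # ['mypool/mydataset:mypool/mydataset', 'mypool2/ds:targetpool/ds2']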