Code example #1
    def run(self, id):
        peer = self.datastore.get_by_id('peers', id)
        if not peer:
            raise TaskException(errno.ENOENT,
                                'Peer entry {0} does not exist'.format(id))

        remote = q.get(peer, 'credentials.address')
        remote_client = None
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        try:
            try:
                remote_client = get_freenas_peer_client(self, remote)

                call_task_and_check_state(remote_client,
                                          'peer.freenas.delete_local', hostid,
                                          False)
            except RpcException as e:
                self.add_warning(
                    TaskWarning(
                        e.code,
                        'Remote {0} is unreachable. Delete operation is performed on the local side only.'
                        .format(remote)))
            except ValueError as e:
                self.add_warning(TaskWarning(errno.EINVAL, str(e)))

            self.join_subtasks(
                self.run_subtask('peer.freenas.delete_local', id, True))

        finally:
            if remote_client:
                remote_client.disconnect()
Code example #2
File: TestPlugin.py Project: jceel/middleware
 def run(self):
     self.add_warning(TaskWarning(errno.EBUSY, 'Warning 1'))
     self.add_warning(TaskWarning(errno.ENXIO, 'Warning 2'))
     self.add_warning(
         TaskWarning(errno.EINVAL,
                     'Warning 3 with extra payload',
                     extra={'hello': 'world'}))
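
Note: taken together, the calls above imply the constructor shape TaskWarning(code, message, extra=None). A minimal stand-in with that shape (an illustrative sketch, not the middleware's actual class) could look like:

class TaskWarning:
    def __init__(self, code, message, extra=None):
        self.code = code        # errno-style integer, e.g. errno.EINVAL
        self.message = message  # human-readable description
        self.extra = extra      # optional structured payload, e.g. {'hello': 'world'}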
Code example #3
File: UserPlugin.py Project: surajrav/middleware
    def run(self, id, delete_params=None):
        subtasks = []
        try:
            user = self.datastore.get_by_id('users', id)
            if user is None:
                raise TaskException(
                    errno.ENOENT,
                    'User with UID {0} does not exist'.format(id))

            if (delete_params and delete_params.get('delete_home_directory')
                    and user['home'] not in (None, '/nonexistent')
                    and os.path.exists(user['home'])):
                homedir_dataset = self.dispatcher.call_sync(
                    'volume.dataset.query',
                    [('mountpoint', '=', user['home'])], {'single': True})
                if homedir_dataset:
                    subtasks.append(
                        self.run_subtask('volume.dataset.delete',
                                         homedir_dataset['id']))
            elif user['home'] not in (None, '/nonexistent') and os.path.exists(
                    user['home']):
                self.add_warning(
                    TaskWarning(
                        errno.EBUSY,
                        'Home directory {} left behind, you need to delete it separately'
                        .format(user['home'])))

            group = self.datastore.get_by_id('groups', user['group'])
            if group and user['uid'] == group['gid']:
                if delete_params and delete_params.get('delete_own_group'):
                    subtasks.append(
                        self.run_subtask('group.delete', user['group']))

                else:
                    self.add_warning(
                        TaskWarning(
                            errno.EBUSY,
                            'Group {0} ({1}) left behind, you need to delete it separately'
                            .format(group['name'], group['gid'])))

            self.join_subtasks(*subtasks)

            if user.get('smbhash'):
                try:
                    system('/usr/local/bin/pdbedit', '-x', user['username'])
                except SubprocessException:
                    # Ignore pdbedit failures; the user entry is removed regardless
                    pass

            self.datastore.delete('users', id)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'accounts')
        except DatastoreException as e:
            raise TaskException(errno.EBADMSG,
                                'Cannot delete user: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('user.changed', {
            'operation': 'delete',
            'ids': [id]
        })
Code example #4
File: NetworkPlugin.py Project: surajrav/middleware
    def run(self, name, updated_fields):
        if not self.datastore.exists('network.routes', ('id', '=', name)):
            raise TaskException(errno.ENOENT,
                                'Route {0} does not exist'.format(name))

        route = self.datastore.get_one('network.routes', ('id', '=', name))
        net = updated_fields.get('network', route['network'])
        netmask = updated_fields.get('netmask', route['netmask'])
        type = updated_fields.get('type', route['type'])
        gateway = updated_fields.get('gateway', route['gateway'])

        if type == 'INET':
            max_cidr = 32
        else:
            max_cidr = 128
        if not (0 <= netmask <= max_cidr):
            raise TaskException(
                errno.EINVAL,
                'Netmask value {0} is not valid. Allowed values are 0-{1} (CIDR).'
                .format(netmask, max_cidr))

        try:
            network = ipaddress.ip_network(os.path.join(net, str(netmask)))
        except ValueError:
            raise TaskException(
                errno.EINVAL,
                '{0} would have host bits set. Change network or netmask to represent a valid network'
                .format(os.path.join(net, str(netmask))))
        if ipaddress.ip_address(gateway) in network:
            self.add_warning(
                TaskWarning(
                    errno.EINVAL,
                    'Gateway {0} is in the destination network {1}.'.format(
                        gateway, network.exploded)))

        route = self.datastore.get_one('network.routes', ('id', '=', name))
        route.update(updated_fields)
        self.datastore.update('network.routes', name, route)
        try:
            for code, message in self.dispatcher.call_sync(
                    'networkd.configuration.configure_routes'):
                self.add_warning(TaskWarning(code, message))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure routes: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('network.route.changed', {
            'operation': 'update',
            'ids': [route['id']]
        })
Code example #5
File: NetworkPlugin.py Project: surajrav/middleware
    def run(self, route):
        if self.datastore.exists('network.routes', ('id', '=', route['id'])):
            raise TaskException(errno.EEXIST,
                                'Route {0} exists'.format(route['id']))

        for r in self.dispatcher.call_sync('network.route.query'):
            if (r['network'] == route['network']) and \
               (r['netmask'] == route['netmask']) and \
               (r['gateway'] == route['gateway']):
                raise TaskException(
                    errno.EINVAL,
                    'Cannot create two identical routes differing only in name.'
                )

        if route['type'] == 'INET':
            max_cidr = 32
        else:
            max_cidr = 128
        if not (0 <= route['netmask'] <= max_cidr):
            raise TaskException(
                errno.EINVAL,
                'Netmask value {0} is not valid. Allowed values are 0-{1} (CIDR).'
                .format(route['netmask'], max_cidr))

        try:
            network = ipaddress.ip_network(
                os.path.join(route['network'], str(route['netmask'])))
        except ValueError:
            raise TaskException(
                errno.EINVAL,
                '{0} would have host bits set. Change network or netmask to represent a valid network'
                .format(os.path.join(route['network'], str(route['netmask']))))
        if ipaddress.ip_address(route['gateway']) in network:
            self.add_warning(
                TaskWarning(
                    errno.EINVAL,
                    'Gateway {0} is in the destination network {1}.'.format(
                        route['gateway'], network.exploded)))

        self.datastore.insert('network.routes', route)
        try:
            for code, message in self.dispatcher.call_sync(
                    'networkd.configuration.configure_routes'):
                self.add_warning(TaskWarning(code, message))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure routes: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('network.route.changed', {
            'operation': 'create',
            'ids': [route['id']]
        })
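
Note: examples #4 and #5 validate a route the same way. A standalone sketch of that check (names are illustrative; the originals build the CIDR string with os.path.join):

import ipaddress

def check_route(net, netmask, gateway, af='INET'):
    # Allowed CIDR prefix length depends on the address family
    max_cidr = 32 if af == 'INET' else 128
    if not 0 <= netmask <= max_cidr:
        raise ValueError('Netmask must be within 0-{0}'.format(max_cidr))
    # Raises ValueError when the network address has host bits set
    network = ipaddress.ip_network('{0}/{1}'.format(net, netmask))
    # True means the gateway lies inside the destination network (warning-worthy)
    return ipaddress.ip_address(gateway) in network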
Code example #6
File: MigrationPlugin.py Project: lukegb/middleware
    def run(self):
        users = get_table('select * from account_bsdusers')
        groups = get_table('select * from account_bsdgroups')
        grp_membership = get_table('select * from account_bsdgroupmembership')

        # First let's create all the non-builtin groups in this system
        for g in filter(lambda x: x['bsdgrp_builtin'] == 0, groups.values()):
            try:
                self.run_subtask_sync(
                    'group.create',
                    {
                        'name': g['bsdgrp_group'],
                        'gid': g['bsdgrp_gid'],
                        'sudo': g['bsdgrp_sudo']
                    }
                )
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Could not create group: {0} due to error: {1}'.format(g['bsdgrp_group'], err)
                ))

        # Now add the root user's properties (password, etc.)
        fn10_groups = list(self.dispatcher.call_sync('group.query'))
        root_user = users.pop(1)
        fn10_root_user = self.dispatcher.call_sync(
            'user.query', [('uid', '=', 0)], {"single": True}
        )
        fn10_root_user = populate_user_obj(fn10_root_user, fn10_groups, root_user, groups, grp_membership)
        del fn10_root_user['builtin']
        del fn10_root_user['home']
        del fn10_root_user['uid']
        del fn10_root_user['username']
        del fn10_root_user['locked']
        self.run_subtask_sync('user.update', fn10_root_user['id'], fn10_root_user)

        # Now loop over the rest of the (non-builtin) users
        for u in filter(lambda x: x['bsdusr_builtin'] == 0, users.values()):
            try:
                self.run_subtask_sync(
                    'user.create',
                    populate_user_obj(None, fn10_groups, u, groups, grp_membership)
                )
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Could not create user: {0} due to error: {1}'.format(u['bsdusr_username'], err)
                ))
Code example #7
    def run(self, fd):
        try:
            with os.fdopen(fd.fd, 'wb') as f:
                with tarfile.open(fileobj=f, mode='w:gz', dereference=True) as tar:
                    plugins = self.dispatcher.call_sync('management.get_plugin_names')
                    total = len(plugins)
                    done = 0

                    # Iterate over plugins
                    for plugin in plugins:
                        self.set_progress(done / total * 100, 'Collecting debug info for {0}'.format(plugin))
                        try:
                            hooks = self.dispatcher.call_sync('management.collect_debug', plugin, timeout=600)
                        except RpcException as err:
                            self.add_warning(
                                TaskWarning(err.code, 'Cannot collect debug data for {0}: {1}'.format(plugin, err.message))
                            )
                            continue

                        for hook in hooks:
                            self.process_hook(hook, plugin, tar)

                        done += 1

        except BrokenPipeError as err:
            raise TaskException(errno.EPIPE, 'The download timed out') from err
Code example #8
File: NetworkPlugin.py Project: surajrav/middleware
    def run(self, settings):
        node = ConfigNode('network', self.configstore)
        node.update(settings)

        if node['dhcp.assign_gateway']:
            # Clear out gateway settings
            node['gateway.ipv4'] = None

        if node['dhcp.assign_dns']:
            # Clear out DNS settings
            node['dns.addresses'] = []
            node['dns.search'] = []

        configure_proxy(self.dispatcher, node['http_proxy'].value)

        try:
            for code, message in self.dispatcher.call_sync(
                    'networkd.configuration.configure_network', timeout=60):
                self.add_warning(TaskWarning(code, message))

            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'network')
        except RpcException as e:
            raise TaskException(
                errno.ENXIO,
                'Cannot reconfigure interface: {0}'.format(str(e)))
Code example #9
    def run(self, share):
        normalize(
            share['properties'], {
                'alldirs': False,
                'read_only': False,
                'maproot_user': None,
                'maproot_group': None,
                'mapall_user': None,
                'mapall_group': None,
                'hosts': [],
                'security': []
            })

        nfs_config = self.dispatcher.call_sync(
            'service.query', [('name', '=', 'nfs')], {'single': True})['config']
        if share['properties']['security'] and not nfs_config['v4']:
            self.add_warning(
                TaskWarning(
                    errno.ENXIO,
                    "NFS security option requires NFSv4 support to be enabled in NFS service settings."
                ))

        id = self.datastore.insert('shares', share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
        self.dispatcher.call_sync('service.reload', 'nfs', timeout=60)
        return id
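
Note: normalize() fills defaults into the share's properties before validation. Judging from how all of these examples use it, a minimal equivalent would be (illustrative sketch):

def normalize(d, defaults):
    # Apply each default only where the key is not already present
    for key, value in defaults.items():
        d.setdefault(key, value)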
Code example #10
    def run(self, id, updated_fields):
        share = self.datastore.get_by_id('shares', id)
        share.update(updated_fields)

        if share['target_type'] != 'DATASET' and q.get(share,
                                                       'properties.alldirs'):
            raise TaskException(
                errno.EINVAL, 'alldirs can be only used with dataset shares')

        nfs_config = self.dispatcher.call_sync(
            'service.query', [('name', '=', 'nfs')], {'single': True})['config']
        if share['properties']['security'] and not nfs_config['v4']:
            self.add_warning(
                TaskWarning(
                    errno.ENXIO,
                    "NFS security option requires NFSv4 support to be enabled in NFS service settings."
                ))

        self.datastore.update('shares', id, share)
        self.dispatcher.call_sync('etcd.generation.generate_group', 'nfs')
        self.dispatcher.call_sync('service.reload', 'nfs', timeout=60)
        self.dispatcher.dispatch_event('share.nfs.changed', {
            'operation': 'update',
            'ids': [id]
        })
Code example #11
File: MigrationPlugin.py Project: lukegb/middleware
    def run(self):
        # dict containing service enable flags for all services
        fn9_services = {
            srv['srv_service']: srv for srv in get_table('select * from services_services').values()
        }

        fn10_services = list(self.dispatcher.call_sync('service.query'))

        # Migrating AFP service
        fn9_afp = get_table('select * from services_afp', dictionary=False)[0]
        try:
            self.run_subtask_sync(
                'service.update',
                q.query(fn10_services, ("name", "=", "afp"), single=True)['id'],
                {'config': {
                    'enable': bool(fn9_services['afp']['srv_enable']),
                    'guest_enable': bool(fn9_afp['afp_srv_guest']),
                    'guest_user': fn9_afp['afp_srv_guest_user'],
                    'bind_addresses': [
                        i.strip() for i in fn9_afp['afp_srv_bindip'].split(',')
                        if (i and not i.isspace())
                    ] or None,
                    'connections_limit': fn9_afp['afp_srv_connections_limit'],
                    'homedir_enable': fn9_afp['afp_srv_homedir_enable'] in ('True', 1),
                    'homedir_path': fn9_afp['afp_srv_homedir'] or None,
                    'homedir_name': fn9_afp['afp_srv_homename'] or None,
                    'dbpath': fn9_afp['afp_srv_dbpath'] or None,
                    'auxiliary': fn9_afp['afp_srv_global_aux'] or None
                }}
            )
        except RpcException as err:
            self.add_warning(TaskWarning(
                errno.EINVAL, 'Could not update AFP service settings due to error: {0}'.format(err)
            ))
Code example #12
    def run(self, props):
        if 'hostname' in props:
            netif.set_hostname(props['hostname'])

        if 'description' in props:
            self.configstore.set('system.description', props['description'])

        if 'tags' in props:
            self.configstore.set('system.tags', props['tags'])

        if 'language' in props:
            self.configstore.set('system.language', props['language'])

        if 'timezone' in props:
            new = props['timezone']
            old = self.configstore.get('system.timezone')
            if new != old:
                count = self.run_subtask_sync('calendar_task.change_timezone',
                                              new)
                self.add_warning(
                    TaskWarning(
                        errno.ENXIO,
                        "{0} calendar tasks rescheduled from timezone '{1}' to '{2}'"
                        .format(count, old, new)))
            self.configstore.set('system.timezone', new)
            os.environ['TZ'] = new

        if 'console_keymap' in props:
            new = props['console_keymap']
            old = self.configstore.get('system.console.keymap')
            if new != old:
                with open('/dev/console') as fd:
                    system('/usr/sbin/kbdcontrol',
                           '-l',
                           props['console_keymap'],
                           file_obj_stdin=fd)
                self.configstore.set('system.console.keymap', new)

        syslog_changed = False
        if 'syslog_server' in props:
            self.configstore.set('system.syslog_server',
                                 props['syslog_server'])
            syslog_changed = True

        try:
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'localtime')
            if syslog_changed:
                self.dispatcher.call_sync('serviced.job.send_signal',
                                          'org.freenas.logd', signal.SIGHUP)
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure system: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('system.general.changed', {
            'operation': 'update',
        })
Code example #13
File: ShareAFPPlugin.py Project: jceel/middleware
def kill_connections(dispatcher, predicate):
    for c in dispatcher.call_sync('share.afp.get_connected_clients'):
        if predicate(c):
            pid = c['extra']['pid']
            cnid_dbd_pid = c['extra']['cnid_dbd_pid']
            try:
                os.kill(pid, signal.SIGTERM)
                if cnid_dbd_pid:
                    os.kill(cnid_dbd_pid, signal.SIGTERM)
            except OSError as err:
                yield TaskWarning(err.errno, 'Cannot kill PID {0}: {1}'.format(pid, str(err)))
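
Note: unlike the other examples, kill_connections() yields its TaskWarning objects instead of calling add_warning() itself, so registration is left to the caller. A call site inside a task would presumably look like this (hypothetical predicate):

# Forward every warning the generator yields to the current task
for warning in kill_connections(self.dispatcher, lambda c: True):
    self.add_warning(warning)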
Code example #14
File: DebugPlugin.py Project: surajrav/middleware
    def run(self, fd, logs=True, cores=False):
        try:
            with os.fdopen(fd.fd, 'wb') as f:
                with tarfile.open(fileobj=f, mode='w:gz', dereference=True) as tar:
                    plugins = self.dispatcher.call_sync('management.get_plugin_names')
                    total = len(plugins)
                    done = 0

                    # Iterate over plugins
                    for plugin in plugins:
                        self.set_progress(done / total * 80, 'Collecting debug info for {0}'.format(plugin))
                        try:
                            hooks = self.dispatcher.call_sync('management.collect_debug', plugin, timeout=600)
                        except RpcException as err:
                            self.add_warning(
                                TaskWarning(err.code, 'Cannot collect debug data for {0}: {1}'.format(plugin, err.message))
                            )
                            continue

                        for hook in hooks:
                            self.process_hook(hook, plugin, tar)

                        done += 1

                    if logs:
                        hook = {
                            'type': 'AttachCommandOutput',
                            'name': 'system-log',
                            'command': ['/usr/local/sbin/logctl', '--last', '3d', '--dump'],
                            'shell': False,
                            'decode': False
                        }

                        self.set_progress(90, 'Collecting logs')
                        self.process_hook(hook, 'Logs', tar)

                    if cores:
                        hook = {
                            'type': 'AttachDirectory',
                            'name': 'cores',
                            'path': '/var/db/system/cores',
                            'recursive': True
                        }

                        self.set_progress(95, 'Collecting core files')
                        self.process_hook(hook, 'UserCores', tar)

        except BrokenPipeError as err:
            raise TaskException(errno.EPIPE, 'The download timed out') from err
Code example #15
    def run(self, id):
        if not self.datastore.exists('network.routes', ('id', '=', id)):
            raise TaskException(errno.ENOENT, 'route {0} does not exist'.format(id))

        self.datastore.delete('network.routes', id)
        try:
            for code, message in self.dispatcher.call_sync('networkd.configuration.configure_routes'):
                self.add_warning(TaskWarning(code, message))
        except RpcException as e:
            raise TaskException(errno.ENXIO, 'Cannot reconfigure routes: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('network.route.changed', {
            'operation': 'delete',
            'ids': [id]
        })
Code example #16
    def run(self, fd, include_cores=False):
        try:
            with os.fdopen(fd.fd, 'wb') as f:
                with tarfile.open(fileobj=f, mode='w:gz',
                                  dereference=True) as tar:
                    plugins = self.dispatcher.call_sync(
                        'management.get_plugin_names')
                    total = len(plugins)
                    done = 0

                    # Iterate over plugins
                    for plugin in plugins:
                        self.set_progress(
                            done / total * 100,
                            'Collecting debug info for {0}'.format(plugin))
                        try:
                            hooks = self.dispatcher.call_sync(
                                'management.collect_debug',
                                plugin,
                                timeout=600)
                        except RpcException as err:
                            self.add_warning(
                                TaskWarning(
                                    err.code,
                                    'Cannot collect debug data for {0}: {1}'.
                                    format(plugin, err.message)))
                            continue

                        for hook in hooks:
                            self.process_hook(hook, plugin, tar)

                        done += 1

                    # If include_cores was set, attach stuff from /var/db/system/cores
                    if include_cores:
                        hook = {
                            'type': 'AttachDirectory',
                            'name': 'cores',
                            'path': '/var/db/system/cores',
                            'recursive': True
                        }

                        self.process_hook(hook, 'UserCores', tar)

        except BrokenPipeError as err:
            raise TaskException(errno.EPIPE, 'The download timed out') from err
Code example #17
    def run(self, id):
        iface = self.datastore.get_by_id('network.interfaces', id)
        if not iface:
            raise TaskException(errno.ENOENT, 'Interface {0} does not exist'.format(id))

        if not iface['enabled']:
            raise TaskException(errno.ENXIO, 'Interface {0} is disabled'.format(id))

        try:
            for code, message in self.dispatcher.call_sync('networkd.configuration.up_interface', id):
                self.add_warning(TaskWarning(code, message))
        except RpcException as err:
            raise TaskException(errno.ENXIO, 'Cannot reconfigure interface: {0}'.format(str(err)))

        self.dispatcher.dispatch_event('network.interface.changed', {
            'operation': 'update',
            'ids': [id]
        })
Code example #18
    def run(self, id):
        iface = self.datastore.get_by_id('network.interfaces', id)
        if not iface:
            raise TaskException(errno.ENOENT, 'Interface {0} does not exist'.format(id))

        if iface['type'] not in ('VLAN', 'LAGG', 'BRIDGE'):
            raise TaskException(errno.EBUSY, 'Cannot delete physical interface')

        self.datastore.delete('network.interfaces', id)
        try:
            for code, message in self.dispatcher.call_sync('networkd.configuration.configure_network', timeout=60):
                self.add_warning(TaskWarning(code, message))
        except RpcException as e:
            raise TaskException(errno.ENXIO, 'Cannot reconfigure network: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('network.interface.changed', {
            'operation': 'delete',
            'ids': [id]
        })
Code example #19
    def process_hook(self, cmd, plugin, tar):
        if cmd['type'] == 'AttachData':
            # Encode first so the declared size matches the bytes actually written
            data = cmd['data'] if isinstance(cmd['data'], bytes) else cmd['data'].encode('utf-8')
            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(data)
            tar.addfile(info, io.BytesIO(data))

        if cmd['type'] == 'AttachCommandOutput':
            try:
                out, err = system(*cmd['command'], shell=cmd['shell'])
                content = out + '\n' + err + '\n'
            except SubprocessException as err:
                content = 'Exit code: {0}\nstdout:\n{1}stderr:\n{2}'.format(
                    err.returncode, err.out, err.err
                )

            # Encode first so the declared size matches the bytes actually written
            content = content if isinstance(content, bytes) else content.encode('utf-8')
            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(content)
            tar.addfile(info, io.BytesIO(content))

        if cmd['type'] in ('AttachDirectory', 'AttachFile'):
            try:
                tar.add(
                    cmd['path'],
                    arcname=os.path.join(plugin, cmd['name']),
                    recursive=cmd.get('recursive')
                )
            except OSError as err:
                self.add_warning(TaskWarning(
                    err.errno,
                    '{0}: Cannot add file {1}, Error: {2}'.format(plugin, cmd['path'], err.strerror)
                ))
                logger.error(
                    "Error occured when adding {0} to the tarfile for plugin: {1}".format(cmd['path'], plugin),
                    exc_info=True
                )
Code example #20
File: UpdatePlugin.py Project: shangteus/middleware
    def run(self, reboot_post_install=False):
        self.set_progress(0, 'Checking for new updates')
        self.run_subtask_sync(
            'update.checkfetch',
            progress_callback=lambda p, m='Checking for new updates', e=None:
                self.chunk_progress(0, 50, '', p, m, e))
        if self.dispatcher.call_sync('update.is_update_available'):
            self.set_progress(50, 'Installing downloaded updates now')
            self.run_subtask_sync(
                'update.apply', reboot_post_install,
                progress_callback=lambda p, m='Installing downloaded updates now', e=None:
                    self.chunk_progress(50, 100, '', p, m, e))
            self.set_progress(100, 'Updates Installed successfully')
            result = True
        else:
            self.add_warning(
                TaskWarning(errno.ENOENT, 'No Updates Available for Install'))
            self.set_progress(100, 'No Updates Available for Install')
            result = False

        return result
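
Note: chunk_progress() is not shown on this page; from the calls above it evidently maps a subtask's 0-100 progress into a band of the parent task. An illustrative reimplementation consistent with those calls (argument order assumed) might be:

def chunk_progress(self, start, end, prefix, percentage, message=None, extra=None):
    # Rescale the subtask's percentage into the [start, end] band of this task
    self.set_progress(start + (end - start) * percentage / 100, message)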
Code example #21
File: MigrationPlugin.py Project: lukegb/middleware
    def run(self):

        # Migrate disk settings
        disk_subtasks = []
        fn9_disks = get_table('select * from storage_disk', dictionary=False)
        fn10_disks = self.dispatcher.call_sync('disk.query')

        for fn9_disk in fn9_disks:
            dev = self.identifier_to_device(fn9_disk['disk_identifier'])
            if not dev:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Identifier to device failed for {0}, skipping'.format(
                        fn9_disk['disk_identifier']
                    )
                ))
                continue
            newident = self.device_to_identifier(dev, serial=(fn9_disk['disk_serial'] or None))
            if not newident:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Failed to convert {0} to id, skipping'.format(dev)
                ))
                continue

            fn10_disk = q.query(fn10_disks, ('id', '=', newident), single=True)

            if fn10_disk is None:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Failed to lookup id: {0} for fn9 disk id: {1}, skipping'.format(
                        newident, dev
                    )
                ))
                continue

            del fn10_disk['name']
            del fn10_disk['serial']
            del fn10_disk['path']
            del fn10_disk['mediasize']
            del fn10_disk['status']
            fn10_disk.update({
                'smart': fn9_disk['disk_togglesmart'],
                'smart_options': fn9_disk['disk_smartoptions'],
                'standby_mode': None if fn9_disk['disk_hddstandby'] == 'Always On' else int(fn9_disk['disk_hddstandby']),
                'acoustic_level': fn9_disk['disk_acousticlevel'].upper(),
                'apm_mode': None if fn9_disk['disk_advpowermgmt'] == 'Disabled' else int(fn9_disk['disk_advpowermgmt'])
            })
            disk_subtasks.append(self.run_subtask('disk.update', fn10_disk.pop('id'), fn10_disk))

        if disk_subtasks:
            self.join_subtasks(*disk_subtasks)

        # Importing fn9 volumes
        fn9_volumes = get_table('select * from storage_volume')
        for fn9_volume in fn9_volumes.values():
            try:
                self.run_subtask_sync(
                    'volume.import', fn9_volume['vol_guid'], fn9_volume['vol_name']
                )
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Cannot import volume name: {0} GUID: {1} due to error: {2}'.format(
                        fn9_volume['vol_name'], fn9_volume['vol_guid'], err
                    )
                ))
Code example #22
    def run(self, id, updated_fields, enable_service=False):
        share = self.datastore.get_by_id('shares', id)
        if not share:
            raise TaskException(errno.ENOENT, 'Share not found')

        if share['immutable']:
            raise TaskException(errno.EACCES, 'Cannot modify immutable share {0}.'.format(id))

        if 'name' in updated_fields or 'type' in updated_fields:
            share.update(updated_fields)
            if self.datastore.exists(
                'shares',
                ('id', '!=', id),
                ('type', '=', share['type']),
                ('name', '=', share['name'])
            ):
                raise TaskException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(
                    share['name'],
                    share['type']
                ))

        path_after_update = updated_fields.get('target_path', share['target_path'])
        type_after_update = updated_fields.get('target_type', share['target_type'])
        permissions = updated_fields.pop('permissions', None)
        share_path = self.dispatcher.call_sync('share.expand_path', path_after_update, type_after_update)

        if type_after_update in ('DIRECTORY', 'FILE'):
            pool_mountpoints = tuple(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))
            if not path_after_update.startswith(pool_mountpoints):
                raise TaskException(errno.EINVAL, "Provided directory or file has to reside within user defined ZFS pool")

        if not os.path.exists(share_path):
            raise TaskException(
                errno.ENOENT,
                'Selected share target {0} does not exist'.format(path_after_update)
            )

        share = self.datastore.get_by_id('shares', id)
        remove_unchanged(updated_fields, share)

        path = self.dispatcher.call_sync('share.get_directory_path', share['id'])
        try:
            delete_config(
                path,
                '{0}-{1}'.format(share['type'], share['name'])
            )
        except (OSError, ValueError):
            pass

        if 'type' in updated_fields:
            old_share_type = share['type']
            new_share_type = self.dispatcher.call_sync('share.supported_types').get(updated_fields['type'])
            if share['target_type'] == 'DATASET':
                pool, dataset = split_dataset(share['target_path'])
                self.join_subtasks(
                    self.run_subtask('volume.dataset.update', dataset, {
                        'permissions_type': new_share_type['perm_type']
                    })
                )

            share.update(updated_fields)
            self.run_subtask_sync('share.{0}.delete'.format(old_share_type), id)
            self.run_subtask_sync('share.{0}.create'.format(updated_fields['type']), share)
        else:
            self.run_subtask_sync('share.{0}.update'.format(share['type']), id, updated_fields)

        if permissions:
            path = self.dispatcher.call_sync('share.translate_path', id)
            self.run_subtask_sync('file.set_permissions', path, permissions)

        self.dispatcher.dispatch_event('share.changed', {
            'operation': 'update',
            'ids': [share['id']]
        })

        updated_share = self.datastore.get_by_id('shares', id)
        path = self.dispatcher.call_sync('share.get_directory_path', updated_share['id'])
        try:
            save_config(
                path,
                '{0}-{1}'.format(updated_share['type'], updated_share['name']),
                updated_share,
                file_perms=0o600
            )
        except OSError as err:
            self.add_warning(TaskWarning(errno.ENXIO, 'Cannot save backup config file: {0}'.format(str(err))))

        service_state = self.dispatcher.call_sync('service.query', [('name', '=', share['type'])], {'single': True})
        if service_state['state'] != 'RUNNING':
            if enable_service:
                config = service_state['config']
                config['enable'] = True
                self.run_subtask_sync('service.update', service_state['id'], {'config': config})
            else:
                self.add_warning(TaskWarning(
                    errno.ENXIO, "Share has been updated but the service {0} is not currently running "
                                 "Please enable the {0} service.".format(share['type'])
                ))
Code example #23
    def run(self, share, dataset_properties=None, enable_service=False):
        if share['target_type'] == 'ZVOL':
            parent_ds = '/'.join(share['target_path'].split('/')[:-1])
            shareable = bool(self.dispatcher.call_sync('volume.dataset.query', [('name', '=', parent_ds)]))
        else:
            share_path = self.dispatcher.call_sync('share.expand_path', share['target_path'], share['target_type'])
            if share['target_type'] != 'FILE':
                share_path = os.path.dirname(share_path)
            shareable = os.path.exists(share_path)

        if not shareable:
            raise TaskException(errno.ENOENT, 'Selected share target {0} does not exist or cannot be created'.format(
                share['target_path']
            ))

        root = self.dispatcher.call_sync('volume.get_volumes_root')
        share_type = self.dispatcher.call_sync('share.supported_types').get(share['type'])
        pool_mountpoints = tuple(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))

        assert share_type['subtype'] in ('FILE', 'BLOCK'),\
            "Unsupported Share subtype: {0}".format(share_type['subtype'])

        if self.datastore.exists(
            'shares',
            ('type', '=', share['type']),
            ('name', '=', share['name'])
        ):
            raise TaskException(errno.EEXIST, 'Share {0} of type {1} already exists'.format(
                share['name'],
                share['type']
            ))

        normalize(share, {
            'enabled': True,
            'immutable': False,
            'description': ''
        })

        if share['target_type'] in ('DATASET', 'ZVOL'):
            dataset = share['target_path']
            pool = share['target_path'].split('/')[0]
            path = os.path.join(root, dataset)

            if not self.dispatcher.call_sync('zfs.dataset.query', [('name', '=', dataset)], {'single': True}):
                if share_type['subtype'] == 'FILE':
                    self.run_subtask_sync('volume.dataset.create', {
                        'volume': pool,
                        'id': dataset,
                        'permissions_type': share_type['perm_type'],
                        'properties': dataset_properties or {}
                    })

                if share_type['subtype'] == 'BLOCK':
                    self.run_subtask_sync('volume.dataset.create', {
                        'volume': pool,
                        'id': dataset,
                        'type': 'VOLUME',
                        'volsize': share['properties']['size'],
                        'properties': dataset_properties or {}
                    })
            else:
                if share_type['subtype'] == 'FILE':
                    self.run_subtask('volume.dataset.update', dataset, {
                        'permissions_type': share_type['perm_type']
                    })

        elif share['target_type'] == 'DIRECTORY':
            if not share['target_path'].startswith(pool_mountpoints):
                raise TaskException(errno.EINVAL, "Provided directory has to reside within user defined ZFS pool")

            # Verify that target directory exists
            path = share['target_path']
            if not os.path.isdir(path):
                raise TaskException(errno.ENOENT, "Target directory {0} doesn't exist".format(path))

        elif share['target_type'] == 'FILE':
            if not share['target_path'].startswith(pool_mountpoints):
                raise TaskException(errno.EINVAL, "Provided file has to reside within user defined ZFS pool")
            # Verify that target file exists
            path = share['target_path']
            if not os.path.isfile(path):
                raise TaskException(errno.ENOENT, "Target file {0} doesn't exist".format(path))

        else:
            raise AssertionError('Invalid target type')

        if share.get('permissions') and share['target_type'] not in ('ZVOL', 'FILE'):
            self.run_subtask_sync('file.set_permissions', path, share.pop('permissions'))

        id = self.run_subtask_sync('share.{0}.create'.format(share['type']), share)
        self.dispatcher.dispatch_event('share.changed', {
            'operation': 'create',
            'ids': [id]
        })

        new_share = self.datastore.get_by_id('shares', id)
        path = self.dispatcher.call_sync('share.get_directory_path', new_share['id'])
        try:
            save_config(
                path,
                '{0}-{1}'.format(new_share['type'], new_share['name']),
                new_share,
                file_perms=0o600
            )
        except OSError as err:
            self.add_warning(TaskWarning(errno.ENXIO, 'Cannot save backup config file: {0}'.format(str(err))))

        service_state = self.dispatcher.call_sync('service.query', [('name', '=', share['type'])], {'single': True})
        if service_state['state'] != 'RUNNING':
            if enable_service:
                config = service_state['config']
                config['enable'] = True
                self.run_subtask_sync('service.update', service_state['id'], {'config': config})
            else:
                self.add_warning(TaskWarning(
                    errno.ENXIO, "Share has been created but the service {0} is not currently running "
                                 "Please enable the {0} service.".format(share['type'])
                ))

        return id
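
Note: examples #22 and #23 end with the same service check. Factored into a helper (an illustrative refactor, not actual middleware code) it would read:

import errno

def ensure_service_running(task, service_name, enable_service):
    # Warn, or enable the service, when a share's backing service is not running
    state = task.dispatcher.call_sync(
        'service.query', [('name', '=', service_name)], {'single': True})
    if state['state'] == 'RUNNING':
        return
    if enable_service:
        config = state['config']
        config['enable'] = True
        task.run_subtask_sync('service.update', state['id'], {'config': config})
    else:
        task.add_warning(TaskWarning(
            errno.ENXIO,
            'The {0} service is not currently running. Please enable it.'.format(service_name)))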
Code example #24
File: NetworkPlugin.py Project: surajrav/middleware
    def run(self, id, updated_fields):
        if not self.datastore.exists('network.interfaces', ('id', '=', id)):
            raise TaskException(errno.ENOENT,
                                'Interface {0} does not exist'.format(id))

        entity = self.datastore.get_by_id('network.interfaces', id)

        if updated_fields.get('mtu') and entity['type'] == 'LAGG':
            raise TaskException(
                errno.EINVAL,
                'MTU cannot be configured for lagg interfaces - MTU of first member port is used'
            )

        if updated_fields.get('dhcp'):
            # Check for DHCP inconsistencies
            # 1. Check whether DHCP is enabled on other interfaces
            # 2. Check whether DHCP configures default route and/or DNS server addresses
            dhcp_used = self.datastore.exists('network.interfaces',
                                              ('dhcp', '=', True),
                                              ('id', '!=', id))
            dhcp_gateway = self.configstore.get('network.dhcp.assign_gateway')
            dhcp_dns = self.configstore.get('network.dhcp.assign_dns')

            if dhcp_used and (dhcp_gateway or dhcp_dns):
                raise TaskException(
                    errno.ENXIO,
                    'DHCP gateway or DNS assignment is already enabled on another interface'
                )

            if dhcp_gateway:
                self.configstore.set('network.gateway.ipv4', None)

            if dhcp_dns:
                self.configstore.set('network.dns.search', [])
                self.configstore.set('network.dns.addresses', [])

            # Clear all aliases
            entity['aliases'] = []

        if updated_fields.get('aliases'):
            # Forbid setting any aliases on interface with DHCP
            if (updated_fields.get('dhcp') or entity['dhcp']) and \
                    updated_fields.get('dhcp') is not False and \
                    len(updated_fields['aliases']) > 0:
                raise TaskException(errno.EINVAL,
                                    'Cannot set aliases when using DHCP')

            # Check for aliases inconsistencies
            ips = [x['address'] for x in updated_fields['aliases']]
            if any(ips.count(x) > 1 for x in ips):
                raise TaskException(errno.ENXIO, 'Duplicated IP alias')

            # Add missing broadcast addresses and address family
            for i in updated_fields['aliases']:
                normalize(i, {'type': 'INET'})

                if not i.get('broadcast') and i['type'] == 'INET':
                    i['broadcast'] = str(
                        calculate_broadcast(i['address'], i['netmask']))

        if updated_fields.get('vlan'):
            vlan = updated_fields['vlan']
            if (not vlan['parent'] and vlan['tag']) or \
                    (vlan['parent'] and not vlan['tag']):
                raise TaskException(
                    errno.EINVAL,
                    'Can only set VLAN parent interface and tag at the same time'
                )

        if updated_fields.get('lagg'):
            lagg = updated_fields['lagg']
            for i in lagg['ports']:
                member = self.datastore.get_by_id('network.interfaces', i)
                if not member:
                    raise TaskException(
                        errno.EINVAL,
                        'Lagg member interface {0} doesn\'t exist'.format(i))

                if member['type'] in ('LAGG', 'VLAN'):
                    raise TaskException(
                        errno.EINVAL,
                        'VLAN and LAGG interfaces cannot be members of a LAGG')

        entity.update(updated_fields)
        self.datastore.update('network.interfaces', id, entity)

        try:
            for code, message in self.dispatcher.call_sync(
                    'networkd.configuration.configure_network'):
                self.add_warning(TaskWarning(code, message))
        except RpcException as err:
            raise TaskException(
                err.code,
                'Cannot reconfigure interface: {0}'.format(err.message))

        self.dispatcher.dispatch_event('network.interface.changed', {
            'operation': 'update',
            'ids': [id]
        })
Code example #25
File: NetworkPlugin.py Project: surajrav/middleware
    def run(self, iface):
        type = iface['type']
        name = self.dispatcher.call_sync(
            'networkd.configuration.get_next_name', type)
        normalize(
            iface, {
                'id': name,
                'name': None,
                'type': type,
                'cloned': True,
                'enabled': True,
                'dhcp': False,
                'rtadv': False,
                'noipv6': False,
                'mtu': None,
                'media': None,
                'mediaopts': [],
                'aliases': [],
                'capabilities': {
                    'add': [],
                    'del': []
                }
            })

        if type == 'VLAN':
            iface.setdefault('vlan', {})
            normalize(iface['vlan'], {'parent': None, 'tag': None})

        if type == 'LAGG':
            iface.setdefault('lagg', {})
            normalize(iface['lagg'], {'protocol': 'FAILOVER', 'ports': []})

        if type == 'BRIDGE':
            iface.setdefault('bridge', {})
            normalize(iface['bridge'], {'members': []})

        if iface['mtu'] and iface['type'] == 'LAGG':
            raise TaskException(
                errno.EINVAL,
                'MTU cannot be configured for lagg interfaces - MTU of first member port is used'
            )

        if iface['dhcp']:
            # Check for DHCP inconsistencies
            # 1. Check whether DHCP is enabled on other interfaces
            # 2. Check whether DHCP configures default route and/or DNS server addresses
            dhcp_used = self.datastore.exists('network.interfaces',
                                              ('dhcp', '=', True),
                                              ('id', '!=', iface['id']))
            dhcp_gateway = self.configstore.get('network.dhcp.assign_gateway')
            dhcp_dns = self.configstore.get('network.dhcp.assign_dns')

            if dhcp_used and (dhcp_gateway or dhcp_dns):
                raise TaskException(
                    errno.ENXIO,
                    'DHCP gateway or DNS assignment is already enabled on another interface'
                )

            if dhcp_gateway:
                self.configstore.set('network.gateway.ipv4', None)

            if dhcp_dns:
                self.configstore.set('network.dns.search', [])
                self.configstore.set('network.dns.addresses', [])

        if iface['aliases']:
            # Forbid setting any aliases on interface with DHCP
            if iface['dhcp'] and len(iface['aliases']) > 0:
                raise TaskException(errno.EINVAL,
                                    'Cannot set aliases when using DHCP')

            # Check for aliases inconsistencies
            ips = [x['address'] for x in iface['aliases']]
            if any(ips.count(x) > 1 for x in ips):
                raise TaskException(errno.ENXIO, 'Duplicated IP alias')

            # Add missing broadcast addresses and address family
            for i in iface['aliases']:
                normalize(i, {'type': 'INET'})

                if not i.get('broadcast') and i['type'] == 'INET':
                    i['broadcast'] = str(
                        calculate_broadcast(i['address'], i['netmask']))

        if iface.get('vlan'):
            vlan = iface['vlan']
            if (not vlan['parent'] and vlan['tag']) or \
                    (vlan['parent'] and not vlan['tag']):
                raise TaskException(
                    errno.EINVAL,
                    'Can only set VLAN parent interface and tag at the same time'
                )

        if iface.get('lagg'):
            lagg = iface['lagg']
            for i in lagg['ports']:
                member = self.datastore.get_by_id('network.interfaces', i)
                if not member:
                    raise TaskException(
                        errno.EINVAL,
                        'Lagg member interface {0} doesn\'t exist'.format(i))

                if member['type'] in ('LAGG', 'VLAN'):
                    raise TaskException(
                        errno.EINVAL,
                        'VLAN and LAGG interfaces cannot be members of a LAGG')

        self.datastore.insert('network.interfaces', iface)

        try:
            for code, message in self.dispatcher.call_sync(
                    'networkd.configuration.configure_network', timeout=60):
                self.add_warning(TaskWarning(code, message))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure network: {0}'.format(str(e)))

        self.dispatcher.dispatch_event('network.interface.changed', {
            'operation': 'create',
            'ids': [name]
        })

        return name
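
Note: calculate_broadcast() is used by both interface tasks to fill in missing INET broadcast addresses. The standard library can express the same thing directly (illustrative equivalent):

import ipaddress

def calculate_broadcast(address, netmask):
    # Broadcast address of the IPv4 network containing `address`
    return ipaddress.ip_interface('{0}/{1}'.format(address, netmask)).network.broadcast_address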
Code example #26
    def run(self, path, permissions, recursive=False):
        if not os.path.exists(path):
            raise TaskException(errno.ENOENT,
                                'Path {0} does not exist'.format(path))

        if recursive and not os.path.isdir(path):
            raise TaskException(
                errno.EINVAL,
                'Recursive specified, but {0} is not a directory'.format(path))

        if permissions.get('user') or permissions.get('group'):
            user = permissions.get('user')
            group = permissions.get('group')
            uid = gid = -1

            if user:
                try:
                    user = self.dispatcher.call_sync(
                        'dscached.account.getpwnam', user)
                    uid = user['uid']
                except RpcException:
                    raise TaskException(errno.ENOENT,
                                        'User {0} not found'.format(user))

            if group:
                try:
                    group = self.dispatcher.call_sync(
                        'dscached.group.getgrnam', group)
                    gid = group['gid']
                except RpcException:
                    raise TaskException(errno.ENOENT,
                                        'Group {0} not found'.format(group))

            bsd.lchown(path, uid, gid, recursive)

        ds = None
        chmod_safe = True

        try:
            poolname, dsname, rest = self.dispatcher.call_sync(
                'volume.decode_path', path)
            ds = self.dispatcher.call_sync('volume.dataset.query',
                                           [('id', '=', dsname)],
                                           {'single': True})
            chmod_safe = ds['permissions_type'] == 'PERM'
        except RpcException:
            pass

        if permissions.get('modes'):
            modes = permissions['modes']
            if modes.get('value'):
                modes = int(modes['value'])
            else:
                modes = modes_to_oct(modes)

            try:
                bsd.lchmod(path, modes, recursive)
            except OSError as err:
                if err.errno == errno.EPERM:
                    if chmod_safe:
                        self.add_warning(
                            TaskWarning(
                                err.errno,
                                'chmod() failed: {0}'.format(err.strerror)))
                else:
                    raise TaskException(
                        err.errno, 'chmod() failed: {0}'.format(err.strerror))

        if permissions.get('acl'):
            a = acl.ACL()
            a.__setstate__(permissions['acl'])
            a.apply(path)
            if not recursive:
                return

            # Build second ACL, but with inherits removed. It will be applied on files
            b = acl.ACL()
            b.__setstate__(permissions['acl'])
            for i in b.entries:
                i.flags[acl.NFS4Flag.DIRECTORY_INHERIT] = False
                i.flags[acl.NFS4Flag.FILE_INHERIT] = False

            for root, dirs, files in os.walk(path):
                for n in files:
                    try:
                        b.apply(file=os.path.join(root, n))
                    except OSError:
                        pass

                for n in dirs:
                    try:
                        a.apply(file=os.path.join(root, n))
                    except OSError:
                        pass

        if ds:
            self.dispatcher.dispatch_event('zfs.dataset.changed', {
                'operation': 'update',
                'ids': [ds['id']]
            })

        self.dispatcher.dispatch_event('file.permissions.changed', {
            'path': path,
            'recursive': recursive,
            'permissions': permissions
        })
Code example #27
File: MigrationPlugin.py Project: lukegb/middleware
    def run(self):
        # Lets first get all the fn9 network config data we need
        fn9_globalconf = get_table('select * from network_globalconfiguration', dictionary=False)[0]
        fn9_interfaces = get_table('select * from network_interfaces')
        fn9_aliases = get_table('select * from network_alias')
        fn9_lagg_interfaces = get_table('select * from network_lagginterface')
        fn9_lagg_membership = get_table('select * from network_lagginterfacemembers')
        fn9_static_routes = get_table('select * from network_staticroute')
        fn9_vlan = get_table('select * from network_vlan')

        # Now get the fn10 data on network config and interfaces (needed to update interfaces, etc.)
        fn10_interfaces = list(self.dispatcher.call_sync('network.interface.query'))

        # Now start with the conversion logic

        # Migrating regular network interfaces
        for fn9_iface in fn9_interfaces.values():
            fn10_iface = q.query(
                fn10_interfaces, ('id', '=', fn9_iface['int_interface']), single=True
            )
            if fn10_iface:
                del fn10_iface['status']
                del fn10_iface['type']
            else:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Skipping FreeNAS 9.x network interface {0} as it was not found'.format(
                        fn9_iface['int_interface']
                    )
                ))
                continue
            aliases = []
            if fn9_iface['int_ipv4address']:
                aliases.append({
                    'type': 'INET',
                    'address': fn9_iface['int_ipv4address'],
                    'netmask': int(fn9_iface['int_v4netmaskbit'])
                })
            if fn9_iface['int_ipv6address']:
                aliases.append({
                    'type': 'INET6',
                    'address': fn9_iface['int_ipv6address'],
                    'netmask': int(fn9_iface['int_v6netmaskbit'])
                })
            # # TODO: fix the code below so that fn9 IP aliases are migrated too
            # for alias in fn9_aliases.values():
            #     if alias['alias_v4address']:
            #         aliases.append({
            #             'type': 'INET',
            #             'address': alias['alias_v4address'],
            #             'netmask': int(alias['alias_v4netmaskbit'])
            #         })

            #     if alias['alias_v6address']:
            #         aliases.append({
            #             'type': 'INET6',
            #             'address': alias['alias_v6address'],
            #             'netmask': int(alias['alias_v6netmaskbit'])
            #         })

            fn10_iface.update({
                'name': fn9_iface['int_name'],
                'dhcp': fn9_iface['int_dhcp'],
                'aliases': aliases
            })
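            # FreeNAS 9 kept extra ifconfig options in one free-form string;
            # recover the mtu, media and mediaopt settings with regexes.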
            m = re.search(r'mtu (\d+)', fn9_iface['int_options'])
            if m:
                fn10_iface['mtu'] = int(m.group(1))

            m = re.search(r'media (\w+)', fn9_iface['int_options'])
            if m:
                fn10_iface['media'] = m.group(1)

            m = re.search(r'mediaopt (\w+)', fn9_iface['int_options'])
            if m:
                opt = m.group(1)
                if opt in MEDIAOPT_MAP:
                    fn10_iface['mediaopts'] = [MEDIAOPT_MAP[opt]]

            # Try to read capabilities
            for k, v in CAPABILITY_MAP.items():
                if '-{0}'.format(k) in fn9_iface['int_options']:
                    caps = fn10_iface.setdefault('capabilities', {}).setdefault('del', [])
                    caps += v
                elif k in fn9_iface['int_options']:
                    caps = fn10_iface.setdefault('capabilities', {}).setdefault('add', [])
                    caps += v
            network_id = fn10_iface.pop('id')
            try:
                self.run_subtask_sync('network.interface.update', network_id, fn10_iface)
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Could not configure network interface: {0} due to error: {1}'.format(
                        network_id, err
                    )
                ))

        # TODO: Migrate LAGG interfaces
        # for key, value in fn9_lagg_interfaces

        # TODO: Migrate VLANs
        # for key, value in fn9_vlan:
        #     pass

        # Migrating hosts database
        for line in fn9_globalconf['gc_hosts'].split('\n'):
            line = line.strip()
            if line:
                ip, *names = line.split()  # split on any whitespace run
                for name in names:
                    try:
                        self.run_subtask_sync(
                            'network.host.create', {'id': name, 'addresses': [ip]}
                        )
                    except RpcException as err:
                        self.add_warning(TaskWarning(
                            errno.EINVAL,
                            'Could not add host: {0}, ip: {1} due to error: {2}'.format(
                                name, ip, err
                            )
                        ))

        # Migrating static routes
        for route in fn9_static_routes.values():
            try:
                net = ipaddress.ip_network(route['sr_destination'])
            except ValueError as e:
                logger.debug("Invalid network {0}: {1}".format(route['sr_destination'], e))
                continue

            try:
                self.run_subtask_sync('network.route.create', {
                    'id': route['sr_description'],
                    'type': 'INET',
                    'network': str(net.network_address),
                    'netmask': net.prefixlen,
                    'gateway': route['sr_gateway'],
                })
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Could not add network route: {0} due to error: {1}'.format(
                        route['sr_description'], err
                    )
                ))

        # Set the system hostname
        self.run_subtask_sync(
            'system.general.update',
            {'hostname': fn9_globalconf['gc_hostname'] + '.' + fn9_globalconf['gc_domain']}
        )

        # Finally migrate the global network config
        gc_nameservers = [
            fn9_globalconf['gc_nameserver' + x] for x in ['1', '2', '3']
            if fn9_globalconf['gc_nameserver' + x]
        ]
        self.run_subtask_sync(
            'network.config.update',
            {
                'autoconfigure': bool(fn9_interfaces),
                'http_proxy': fn9_globalconf['gc_httpproxy'] or None,
                'gateway': {
                    'ipv4': fn9_globalconf['gc_ipv4gateway'] or None,
                    'ipv6': fn9_globalconf['gc_ipv6gateway'] or None
                },
                'dns': {'addresses': gc_nameservers},
                'dhcp': {
                    'assign_gateway': not bool(
                        fn9_globalconf['gc_ipv4gateway'] or fn9_globalconf['gc_ipv6gateway']
                    ),
                    'assign_dns': not bool(gc_nameservers)
                },
                'netwait': {
                    'enabled': bool(fn9_globalconf['gc_netwait_enabled']),
                    'addresses': list(filter(None, fn9_globalconf['gc_netwait_ip'].split(' ')))
                }
            }
        )
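MEDIAOPT_MAP and CAPABILITY_MAP are module-level tables defined elsewhere in MigrationPlugin.py. A hedged sketch of their probable shape (the concrete keys and values below are assumptions, not the actual definitions):

# Assumed shapes only; the real tables in MigrationPlugin.py may differ.
MEDIAOPT_MAP = {
    'full-duplex': 'FDX',
    'half-duplex': 'HDX',
}

CAPABILITY_MAP = {
    'rxcsum': ('RXCSUM',),
    'txcsum': ('TXCSUM',),
    'tso': ('TSO4', 'TSO6'),  # one fn9 option may map to several capabilities
}

Each value is a tuple so the capability loop can extend the interface's 'add'/'del' lists with several entries at once.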
Code example #28
File: MigrationPlugin.py Project: lukegb/middleware
    def run(self):
        # Generic stuff needed for all share migration
        fn10_datasets = list(self.dispatcher.call_sync('volume.dataset.query'))

        # Let's start with the AFP shares
        fn9_afp_shares = get_table('select * from sharing_afp_share')
        for fn9_afp_share in fn9_afp_shares.values():
            (ro_users, ro_groups, rw_users, rw_groups, users_allow,
             groups_allow, users_deny, groups_deny) = [[] for _ in range(8)]
            for allow_item in fn9_afp_share['afp_allow'].split(','):
                allow_item = allow_item.strip(' ')
                if allow_item.startswith('@'):
                    groups_allow.append(allow_item[1:])
                elif allow_item:
                    users_allow.append(allow_item)
            for ro_item in fn9_afp_share['afp_ro'].split(','):
                ro_item = ro_item.strip(' ')
                if ro_item.startswith('@'):
                    ro_groups.append(ro_item[1:])
                elif ro_item:
                    ro_users.append(ro_item)
            for rw_item in fn9_afp_share['afp_rw'].split(','):
                rw_item = rw_item.strip(' ')
                if rw_item.startswith('@'):
                    rw_groups.append(rw_item[1:])
                elif rw_item:
                    rw_users.append(rw_item)
            for deny_item in fn9_afp_share['afp_deny'].split(','):
                deny_item = deny_item.strip(' ')
                if deny_item.startswith('@'):
                    groups_deny.append(deny_item[1:])
                elif deny_item:
                    users_deny.append(deny_item)
            hosts_deny = list(filter(None, fn9_afp_share['afp_hostsdeny'].split(' ')))
            hosts_allow = list(filter(None, fn9_afp_share['afp_hostsallow'].split(' ')))

            try:
                self.run_subtask_sync(
                    'share.create',
                    {
                        'name': fn9_afp_share['afp_name'],
                        'description': fn9_afp_share['afp_comment'],
                        'enabled': True,
                        'immutable': False,
                        'type': 'afp',
                        'target_path': fn9_afp_share['afp_path'][5:],  # to remove leading /mnt
                        'target_type': 'DATASET' if q.query(
                            fn10_datasets,
                            ('mountpoint', '=', fn9_afp_share['afp_path']),
                            single=True
                        ) else 'DIRECTORY',
                        'properties': {
                            'read_only': False,
                            'time_machine': bool(fn9_afp_share['afp_timemachine']),
                            'zero_dev_numbers': bool(fn9_afp_share['afp_nodev']),
                            'no_stat': bool(fn9_afp_share['afp_nostat']),
                            'afp3_privileges': bool(fn9_afp_share['afp_upriv']),
                            'default_file_perms': {'value': int(fn9_afp_share['afp_fperm'])},
                            'default_directory_perms': {'value': int(fn9_afp_share['afp_dperm'])},
                            'ro_users': ro_users or None,
                            'ro_groups': ro_groups or None,
                            'rw_users': rw_users or None,
                            'rw_groups': rw_groups or None,
                            'users_allow': users_allow or None,
                            'users_deny': users_deny or None,
                            'groups_allow': groups_allow or None,
                            'groups_deny': groups_deny or None,
                            'hosts_allow': hosts_allow or None,
                            'hosts_deny': hosts_deny or None
                        }
                    }
                )
            except RpcException as err:
                self.add_warning(TaskWarning(
                    errno.EINVAL,
                    'Cannot create AFP share: {0} due to error: {1}'.format(
                        fn9_afp_share['afp_name'], err
                    )
                ))
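The four loops above repeat the same tokenizing of a comma-separated 'user or @group' column. A hypothetical helper, split_principals() (not part of the middleware API), would collapse them:

def split_principals(csv):
    # Split a fn9 'user1,@group1,user2' column into (users, groups) lists.
    users, groups = [], []
    for item in (token.strip(' ') for token in csv.split(',')):
        if item.startswith('@'):
            groups.append(item[1:])
        elif item:
            users.append(item)
    return users, groups

# e.g. users_allow, groups_allow = split_principals(fn9_afp_share['afp_allow'])
#      ro_users, ro_groups = split_principals(fn9_afp_share['afp_ro'])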
Code example #29
File: DebugPlugin.py Project: surajrav/middleware
    def process_hook(self, cmd, plugin, tar):
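        # Each debug hook command becomes one member of the debug tarball,
        # stored under <plugin>/<name>.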
        if cmd['type'] == 'AttachData':
            # Encode str payloads first so info.size matches the real byte
            # length (len() of a str undercounts multi-byte characters).
            data = cmd['data'] if isinstance(cmd['data'], bytes) else cmd['data'].encode('utf-8')
            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(data)
            tar.addfile(info, io.BytesIO(data))

        if cmd['type'] == 'AttachRPC':
            try:
                result = self.dispatcher.call_sync(cmd['rpc'], *cmd['args'])
                if hasattr(result, '__next__'):
                    result = list(result)
            except RpcException as err:
                self.add_warning(TaskWarning(
                    err.code,
                    f'{plugin}: Cannot add output of {cmd["rpc"]} call, error: {err.message}'
                ))
            else:
                data = dumps(result, debug=True, indent=4)
                if not isinstance(data, bytes):
                    data = data.encode('utf-8')
                info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
                info.size = len(data)
                tar.addfile(info, io.BytesIO(data))

        if cmd['type'] == 'AttachCommandOutput':
            try:
                out, _ = system(*cmd['command'], shell=cmd['shell'], decode=cmd['decode'], merge_stderr=True)
            except SubprocessException as err:
                out = 'Exit code: {0}\n'.format(err.returncode)
                if cmd['decode']:
                    out += 'Output:\n{0}'.format(err.out)

            if not isinstance(out, bytes):
                out = out.encode('utf-8')
            info = tarfile.TarInfo(os.path.join(plugin, cmd['name']))
            info.size = len(out)
            tar.addfile(info, io.BytesIO(out))

        if cmd['type'] in ('AttachDirectory', 'AttachFile'):
            try:
                tar.add(
                    cmd['path'],
                    arcname=os.path.join(plugin, cmd['name']),
                    recursive=cmd.get('recursive')
                )
            except OSError as err:
                self.add_warning(TaskWarning(
                    err.errno,
                    '{0}: Cannot add file {1}, error: {2}'.format(plugin, cmd['path'], err.strerror)
                ))

                logger.error(
                    "Error occured when adding {0} to the tarfile for plugin: {1}".format(cmd['path'], plugin),
                    exc_info=True
                )
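Three of the four hook types above end with the same TarInfo/BytesIO sequence. A sketch of a helper factoring out that shared pattern; attach_bytes() is hypothetical, not middleware API:

import io
import os
import tarfile

def attach_bytes(tar, plugin, name, payload):
    # Add an in-memory blob to the debug tarball under <plugin>/<name>,
    # encoding str payloads so the declared size matches the byte count.
    data = payload if isinstance(payload, bytes) else payload.encode('utf-8')
    info = tarfile.TarInfo(os.path.join(plugin, name))
    info.size = len(data)
    tar.addfile(info, io.BytesIO(data))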