Example #1
    def run(self, ups):
        node = ConfigNode('service.ups', self.configstore).__getstate__()
        if 'monitor_password' in ups:
            ups['monitor_password'] = unpassword(ups['monitor_password'])

        node.update(ups)

        if node['mode'] == 'MASTER' and (not node['driver_port'] or not node['driver']):
            raise TaskException(errno.EINVAL, 'Please provide a valid port and driver for monitored UPS device')

        if node['mode'] == 'SLAVE' and not node['remote_host']:
            raise TaskException(errno.EINVAL, 'remote_host field is required in SLAVE mode')

        if not re.search(r'^[a-z0-9\.\-_]+$', node['identifier'], re.I):
            raise TaskException(errno.EINVAL, 'Use alphanumeric characters, ".", "-" and "_"')

        for i in ('monitor_user', 'monitor_password'):
            if re.search(r'[ #]', node[i], re.I):
                raise TaskException(errno.EINVAL, 'Spaces or number signs are not allowed')

        try:
            node = ConfigNode('service.ups', self.configstore)
            node.update(ups)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
            self.dispatcher.call_sync('etcd.generation.generate_group', 'ups')
            self.dispatcher.dispatch_event('service.ups.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure UPS: {0}'.format(str(e))
            )

        return 'RESTART'
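
Every example in this listing unwraps secrets with unpassword() before using or persisting them, but the helper itself is never shown. Below is a minimal sketch of what such a helper might look like, assuming a Password wrapper type that keeps the secret out of repr() and logs; both the class and the exact behavior are assumptions, not the middleware's confirmed API.

class Password:
    """Hypothetical opaque wrapper that keeps a secret out of repr() and logs."""
    def __init__(self, secret):
        self.secret = secret

    def __repr__(self):
        return '<password>'


def unpassword(value):
    """Return the plain-text secret, accepting either a wrapper or an already-plain string."""
    return value.secret if isinstance(value, Password) else value
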
Example #2
    def categories(self, user, password):
        version = self.dispatcher.call_sync('system.info.version')
        sw_name = version.split('-')[0].lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/categories' % (PROXY_ADDRESS, sw_name),
                data=json.dumps({
                    'user': user,
                    'password': unpassword(password),
                    'project': REDMINE_PROJECT_NAME,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            logger.debug('Failed to decode categories response: %s', r.text)
            raise RpcException(errno.EINVAL, 'Failed to decode ticket response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data
Example #3
    def run(self, id, updated_params):
        directory = self.datastore.get_by_id('directories', id)
        old_name = None

        if directory['immutable']:
            raise TaskException(errno.EPERM, 'Directory {0} is immutable'.format(directory['name']))

        if 'name' in updated_params:
            old_name = directory['name']
            if self.datastore.exists('directories', ('name', '=', updated_params['name']), ('id', '!=', id)):
                raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(updated_params['name']))

        if 'parameters' in updated_params:
            for k, v in updated_params['parameters'].items():
                if k == 'password':
                    updated_params['parameters'][k] = unpassword(v)

        directory.update(updated_params)
        self.datastore.update('directories', id, directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'update',
            'ids': [id]
        })

        if old_name:
            node = ConfigNode('directory', self.configstore)
            search_order = node['search_order'].value
            if old_name in search_order:
                search_order.remove(old_name)
                search_order.append(directory['name'])
                node['search_order'] = search_order

            self.dispatcher.call_sync('dscached.management.reload_config')
Example #4
    def run(self, id, updated_params):
        directory = self.datastore.get_by_id('directories', id)
        old_name = None

        if directory['immutable']:
            raise TaskException(errno.EPERM, 'Directory {0} is immutable'.format(directory['name']))

        if 'name' in updated_params:
            old_name = directory['name']
            if self.datastore.exists('directories', ('name', '=', updated_params['name'])):
                raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(directory['name']))

        if 'parameters' in updated_params:
            for k, v in updated_params['parameters'].items():
                if k == 'password':
                    updated_params['parameters'][k] = unpassword(v)

        directory.update(updated_params)
        self.datastore.update('directories', id, directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'update',
            'ids': [id]
        })

        if old_name:
            node = ConfigNode('directory', self.configstore)
            search_order = node['search_order'].value
            if old_name in search_order:
                search_order.remove(old_name)
                search_order.append(directory['name'])
                node['search_order'] = search_order

            self.dispatcher.call_sync('dscached.management.reload_config')
Example #5
def open_ssh_connection(dispatcher, backup):
    peer = dispatcher.call_sync('peer.query', [('id', '=', backup['peer'])], {'single': True})
    if not peer:
        raise TaskException(errno.ENOENT, 'Cannot find peer {0}'.format(backup['peer']))

    if peer['type'] != 'ssh':
        raise TaskException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

    creds = peer['credentials']
    try:
        session = transport.Transport(creds['address'], creds.get('port', 22))
        session.window_size = 1024 * 1024 * 1024
        session.packetizer.REKEY_BYTES = pow(2, 48)
        session.packetizer.REKEY_PACKETS = pow(2, 48)
        session.start_client()

        if creds.get('privkey'):
            if try_key_auth(session, creds):
                return session
            else:
                raise Exception('Cannot authenticate using keys')

        session.auth_password(creds['username'], unpassword(creds['password']))
        return session

    except socket.gaierror as err:
        raise Exception('Connection error: {0}'.format(err.strerror))
    except ssh_exception.BadAuthenticationType as err:
        raise Exception('Cannot authenticate: {0}'.format(str(err)))
Example #6
    def categories(self, user, password):
        version = self.dispatcher.call_sync('system.info.version')
        sw_name = version.split('-')[0].lower()
        project_name = '-'.join(version.split('-')[:2]).lower()
        try:
            r = requests.post(
                'https://%s/%s/api/v1.0/categories' % (PROXY_ADDRESS, sw_name),
                data=json.dumps({
                    'user': user,
                    'password': unpassword(password),
                    'project': project_name,
                }),
                headers={'Content-Type': 'application/json'},
                timeout=10,
            )
            data = r.json()
        except simplejson.JSONDecodeError as e:
            logger.debug('Failed to decode categories response: %s', r.text)
            raise RpcException(errno.EINVAL, 'Failed to decode ticket response')
        except requests.ConnectionError as e:
            raise RpcException(errno.ENOTCONN, 'Connection failed: {0}'.format(str(e)))
        except requests.Timeout as e:
            raise RpcException(errno.ETIMEDOUT, 'Connection timed out: {0}'.format(str(e)))

        if 'error' in data:
            raise RpcException(errno.EINVAL, data['message'])

        return data
Example #7
def open_ssh_connection(dispatcher, backup):
    peer = dispatcher.call_sync('peer.query', [('id', '=', backup['peer'])],
                                {'single': True})
    if not peer:
        raise TaskException(errno.ENOENT,
                            'Cannot find peer {0}'.format(backup['peer']))

    if peer['type'] != 'ssh':
        raise TaskException(errno.EINVAL,
                            'Invalid peer type: {0}'.format(peer['type']))

    creds = peer['credentials']
    try:
        session = transport.Transport(creds['address'], creds.get('port', 22))
        session.window_size = 1024 * 1024 * 1024
        session.packetizer.REKEY_BYTES = pow(2, 48)
        session.packetizer.REKEY_PACKETS = pow(2, 48)
        session.start_client()

        if creds.get('privkey'):
            if try_key_auth(session, creds):
                return session
            else:
                raise Exception('Cannot authenticate using keys')

        session.auth_password(creds['username'], unpassword(creds['password']))
        return session

    except socket.gaierror as err:
        raise Exception('Connection error: {0}'.format(err.strerror))
    except ssh_exception.BadAuthenticationType as err:
        raise Exception('Cannot authenticate: {0}'.format(str(err)))
Example #8
    def run(self, id, updated_params):
        if not self.dispatcher.call_sync('ipmi.is_ipmi_loaded'):
            raise TaskException(errno.ENXIO, 'No IPMI module loaded')

        if id not in self.dispatcher.call_sync('ipmi.channels'):
            raise TaskException(errno.ENXIO, 'Invalid channel')

        config = self.dispatcher.call_sync('ipmi.query', [('id', '=', id)],
                                           {'single': True})
        config.update(updated_params)
        channel = str(id)

        if updated_params.get('gateway') is None:
            config['gateway'] = '0.0.0.0'

        try:
            if config['dhcp']:
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel,
                       'ipsrc', 'dhcp')
            else:
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel,
                       'ipsrc', 'static')
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel,
                       'ipaddr', config['address'])
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel,
                       'netmask', cidr_to_netmask(config['netmask']))
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel,
                       'defgw', 'ipaddr', config['gateway'])

            vlan_id = config['vlan_id'] if config.get('vlan_id') else 'off'
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'vlan',
                   'id', str(vlan_id))
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'access',
                   'on')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth',
                   'USER', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth',
                   'OPERATOR', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth',
                   'ADMIN', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth',
                   'CALLBACK', 'MD2,MD5')

            with contextlib.suppress(SubprocessException):
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'arp',
                       'respond', 'on')

            with contextlib.suppress(SubprocessException):
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'arp',
                       'generate', 'on')

            if 'password' in updated_params:
                system('/usr/local/bin/ipmitool', 'user', 'set', 'password',
                       '2', unpassword(updated_params['password']))
                system('/usr/local/bin/ipmitool', 'user', 'enable', '2')

        except SubprocessException as err:
            raise TaskException(
                errno.EFAULT, 'Cannot configure IPMI channel {0}: {1}'.format(
                    channel, err.err))
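
Example #8 converts the stored netmask with cidr_to_netmask() before passing it to ipmitool. A minimal sketch of that conversion, assuming the stored value is an integer prefix length (the real helper's signature is not shown in these examples):

import ipaddress


def cidr_to_netmask(prefix_len):
    """Convert a prefix length such as 24 into a dotted netmask such as '255.255.255.0'."""
    return str(ipaddress.IPv4Network((0, int(prefix_len))).netmask)
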
Example #9
    def run(self, ups):
        node = ConfigNode('service.ups', self.configstore).__getstate__()
        if 'monitor_password' in ups:
            ups['monitor_password'] = unpassword(ups['monitor_password'])

        node.update(ups)

        if node['mode'] == 'MASTER' and (not node['driver_port'] or not node['driver']):
            raise TaskException(errno.EINVAL, 'Please provide a valid port and driver for monitored UPS device')

        if node['mode'] == 'SLAVE' and not node['remote_host']:
            raise TaskException(errno.EINVAL, 'remote_host field is required in SLAVE mode')

        if not re.search(r'^[a-z0-9\.\-_]+$', node['identifier'], re.I):
            raise TaskException(errno.EINVAL, 'Use alphanumeric characters, ".", "-" and "_"')

        for i in ('monitor_user', 'monitor_password'):
            if re.search(r'[ #]', node[i], re.I):
                raise TaskException(errno.EINVAL, 'Spaces or number signs are not allowed')

        try:
            node = ConfigNode('service.ups', self.configstore)
            node.update(ups)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
            self.dispatcher.call_sync('etcd.generation.generate_group', 'ups')
            self.dispatcher.dispatch_event('service.ups.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure UPS: {0}'.format(str(e))
            )

        return 'RESTART'
Example #10
    def run(self, directory):
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters', directory['type'],
                directory.get('parameters', {}))
        except RpcException as err:
            raise TaskException(err.code, err.message)

        if self.datastore.exists('directories',
                                 ('name', '=', directory['name'])):
            raise TaskException(
                errno.EEXIST,
                'Directory {0} already exists'.format(directory['name']))

        normalize(
            directory, {
                'enabled': False,
                'enumerate': True,
                'immutable': False,
                'uid_range': None,
                'gid_range': None
            })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        for k, v in directory['parameters'].items():
            if k == 'password':
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query',
                                            [('name', '=', 'smb')],
                                            {"single": True})
            if not q.get(smb, 'config.enable'):
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory',
                                  self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
Example #11
    def run(self, ticket):
        version = self.dispatcher.call_sync('system.info.version')
        project_name = '-'.join(version.split('-')[:2]).lower()
        attachments = []
        debug_file_name = os.path.join(
            DEFAULT_DEBUG_DUMP_DIR,
            version + '_' + time.strftime('%Y%m%d%H%M%S') + '.tar.gz')

        try:
            rm_connection = redmine.Redmine(BUGTRACKER_ADDRESS,
                                            username=ticket['username'],
                                            password=unpassword(
                                                ticket['password']))
            rm_connection.auth()

            for attachment in ticket.get('attachments', []):
                attachment = os.path.normpath(attachment)
                attachments.append({
                    'path': attachment,
                    'filename': os.path.split(attachment)[-1]
                })
                if not os.path.exists(attachment):
                    raise TaskException(
                        errno.ENOENT,
                        'File {} does not exist.'.format(attachment))

            if ticket.get('debug'):
                self.run_subtask_sync('debug.save_to_file', debug_file_name)
                attachments.append({
                    'path':
                    debug_file_name,
                    'filename':
                    os.path.split(debug_file_name)[-1]
                })

            redmine_response = rm_connection.issue.create(
                project_id=project_name,
                subject=ticket['subject'],
                description=ticket['description'],
                category_id=ticket['category'],
                custom_fields=[{
                    'id': 2,
                    'value': VERSION_CODES['BETA2']
                }],
                is_private=ticket.get('debug', False),
                tracker_id=1 if ticket['type'] == 'bug' else 2,
                uploads=attachments)
        except redmine.exceptions.AuthError:
            raise TaskException(errno.EINVAL, 'Invalid username or password')

        finally:
            if ticket.get('debug') and os.path.exists(debug_file_name):
                os.remove(debug_file_name)

        return redmine_response.id
Example #12
    def run(self, peer, initial_credentials):
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        if self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        password = q.get(peer, 'credentials.password')
        if password:
            q.set(peer, 'credentials.password', unpassword(password))

        return self.datastore.insert('peers', peer)
Example #13
    def run(self, peer, initial_credentials):
        if 'name' not in peer:
            raise TaskException(errno.EINVAL, 'Name has to be specified')

        if self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        password = q.get(peer, 'credentials.password')
        if password:
            q.set(peer, 'credentials.password', unpassword(password))

        return self.datastore.insert('peers', peer)
Example #14
    def run(self, id, updated_fields):
        peer = self.datastore.get_by_id('peers', id)
        if not peer:
            raise TaskException(errno.ENOENT, 'Peer {0} does not exist'.format(id))

        password = q.get(updated_fields, 'credentials.password')
        if password:
            q.set(updated_fields, 'credentials.password', unpassword(password))

        peer.update(updated_fields)
        if 'name' in updated_fields and self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        self.datastore.update('peers', id, peer)
Example #15
    def run(self, id, updated_fields):
        peer = self.datastore.get_by_id('peers', id)
        if not peer:
            raise TaskException(errno.ENOENT, 'Peer {0} does not exist'.format(id))

        password = q.get(updated_fields, 'credentials.password')
        if password:
            q.set(updated_fields, 'credentials.password', unpassword(password))

        peer.update(updated_fields)
        if 'name' in updated_fields and self.datastore.exists('peers', ('name', '=', peer['name'])):
            raise TaskException(errno.EINVAL, 'Peer entry {0} already exists'.format(peer['name']))

        self.datastore.update('peers', id, peer)
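
Examples #12 through #15 read and write nested credential fields through q.get and q.set, a dotted-path accessor pair. A minimal sketch of how such accessors could behave, using hypothetical names and semantics rather than the middleware's confirmed implementation:

class _Query:
    @staticmethod
    def get(obj, path, default=None):
        """Walk a dotted path such as 'credentials.password' through nested dicts."""
        for key in path.split('.'):
            if not isinstance(obj, dict) or key not in obj:
                return default
            obj = obj[key]
        return obj

    @staticmethod
    def set(obj, path, value):
        """Create intermediate dicts as needed, then set the leaf value."""
        *parents, leaf = path.split('.')
        for key in parents:
            obj = obj.setdefault(key, {})
        obj[leaf] = value


q = _Query()
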
Example #16
    def run(self, directory):
        try:
            params = self.dispatcher.call_sync(
                'dscached.management.normalize_parameters',
                directory['type'],
                directory.get('parameters', {})
            )
        except RpcException as err:
            raise TaskException(err.code, err.message)

        if self.datastore.exists('directories', ('name', '=', directory['name'])):
            raise TaskException(errno.EEXIST, 'Directory {0} already exists'.format(directory['name']))

        normalize(directory, {
            'enabled': False,
            'enumerate': True,
            'immutable': False,
            'uid_range': None,
            'gid_range': None
        })

        # Replace passed in params with normalized ones
        directory['parameters'] = params

        for k, v in directory['parameters'].items():
            if k == 'password':
                directory['parameters'][k] = unpassword(v)

        if directory['type'] == 'winbind':
            normalize(directory, {
                'uid_range': [100000, 999999],
                'gid_range': [100000, 999999]
            })

            smb = self.dispatcher.call_sync('service.query', [('name', '=', 'smb')], {"single": True})
            if not q.get(smb, 'config.enable'):
                q.set(smb, 'config.enable', True)
                self.run_subtask_sync('service.update', smb['id'], smb)

        self.id = self.datastore.insert('directories', directory)
        self.dispatcher.call_sync('dscached.management.configure_directory', self.id)
        self.dispatcher.dispatch_event('directory.changed', {
            'operation': 'create',
            'ids': [self.id]
        })

        node = ConfigNode('directory', self.configstore)
        node['search_order'] = node['search_order'].value + [directory['name']]
        self.dispatcher.call_sync('dscached.management.reload_config')
        return self.id
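
Several directory and user examples call normalize(obj, defaults) to backfill configuration defaults. A minimal sketch under the assumption that it only fills keys the caller omitted and leaves supplied values untouched:

def normalize(obj, defaults):
    """Set default values for keys the caller did not supply; existing keys are left as-is."""
    for key, value in defaults.items():
        obj.setdefault(key, value)
    return obj
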
Example #17
    def run(self, snmp):
        node = ConfigNode('service.snmp', self.configstore).__getstate__()
        if 'v3_password' in snmp:
            snmp['v3_password'] = unpassword(snmp['v3_password'])

        node.update(snmp)

        if node['contact']:
            if '@' in node['contact']:
                if not jsonschema._format.is_email(node['contact']):
                    raise TaskException(errno.EINVAL, 'Invalid e-mail address')
            elif not re.match(r'^[-_a-zA-Z0-9\s]+$', node['contact']):
                raise TaskException(
                    errno.EINVAL,
                    'Must contain only alphanumeric characters, _, - or a valid e-mail address'
                )

        if not node['community']:
            if not node['v3']:
                raise TaskException(errno.ENOENT, 'This field is required')
        elif not re.match(r'^[-_a-zA-Z0-9\s]+$', node['community']):
            raise TaskException(
                errno.EINVAL,
                'The community must contain only alphanumeric characters, _ or -'
            )

        if node['v3_password'] and len(node['v3_password']) < 8:
            raise TaskException(errno.EINVAL,
                                'Password must contain at least 8 characters')

        if node['v3_privacy_passphrase'] and len(
                node['v3_privacy_passphrase']) < 8:
            raise TaskException(
                errno.EINVAL, 'Passphrase must contain at least 8 characters')

        try:
            node = ConfigNode('service.snmp', self.configstore)
            node.update(snmp)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'snmpd')
            self.dispatcher.dispatch_event('service.snmp.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(errno.ENXIO,
                                'Cannot reconfigure SNMP: {0}'.format(str(e)))

        return 'RESTART'
Example #18
    def run(self, dyndns):
        try:
            node = ConfigNode('service.dyndns', self.configstore)
            if 'password' in dyndns:
                dyndns['password'] = unpassword(dyndns['password'])

            node.update(dyndns)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'dyndns')
            self.dispatcher.dispatch_event('service.dyndns.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure DynamicDNS: {0}'.format(str(e))
            )

        return 'RELOAD'
Example #19
    def run(self, dyndns):
        try:
            node = ConfigNode('service.dyndns', self.configstore)
            if 'password' in dyndns:
                dyndns['password'] = unpassword(dyndns['password'])

            node.update(dyndns)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'dyndns')
            self.dispatcher.dispatch_event('service.dyndns.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure DynamicDNS: {0}'.format(str(e))
            )

        return 'RELOAD'
Example #20
    def run(self, ticket):
        try:
            version = self.dispatcher.call_sync('system.info.version')
            project_name = '-'.join(version.split('-')[:2]).lower()
            attachments = []

            rm_connection = redmine.Redmine(
                BUGTRACKER_ADDRESS,
                username=ticket['username'],
                password=unpassword(ticket['password'])
            )
            rm_connection.auth()

            for attachment in ticket.get('attachments', []):
                attachment = os.path.normpath(attachment)
                attachments.append({'path': attachment, 'filename': os.path.split(attachment)[-1]})
                if not os.path.exists(attachment):
                    raise TaskException(errno.ENOENT, 'File {} does not exist.'.format(attachment))

            if ticket.get('debug'):
                debug_file_name = os.path.join(
                    DEFAULT_DEBUG_DUMP_DIR, version + '_' + time.strftime('%Y%m%d%H%M%S') + '.tar.gz'
                )
                self.run_subtask_sync('debug.save_to_file', debug_file_name)
                attachments.append({'path': debug_file_name, 'filename': os.path.split(debug_file_name)[-1]})

            redmine_response = rm_connection.issue.create(
                project_id=project_name,
                subject=ticket['subject'],
                description=ticket['description'],
                category_id=ticket['category'],
                custom_fields=[{'id': 2, 'value': VERSION_CODES['BETA2']}],
                is_private=ticket.get('debug', False),
                tracker_id=1 if ticket['type'] == 'bug' else 2,
                uploads=attachments
            )
        except redmine.exceptions.AuthError:
            raise TaskException(errno.EINVAL, 'Invalid username or password')

        finally:
            if ticket.get('debug') and os.path.exists(debug_file_name):
                os.remove(debug_file_name)

        return redmine_response.id
Example #21
    def run(self, webdav):
        node = ConfigNode('service.webdav', self.configstore).__getstate__()
        if 'password' in webdav:
            webdav['password'] = unpassword(webdav['password'])

        for p in ('http_port', 'https_port'):
            port = webdav.get(p)
            if port and port != node[p] and is_port_open(port):
                raise TaskException(
                    errno.EBUSY,
                    'Port {0} is already in use'.format(port))

        node.update(webdav)

        if node['http_port'] == node['https_port']:
            raise TaskException(errno.EINVAL,
                                'HTTP and HTTPS ports cannot be the same')

        if 'HTTPS' in node['protocol'] and not node['certificate']:
            raise TaskException(
                errno.EINVAL,
                'SSL protocol specified without choosing a certificate')

        if node['certificate'] and not self.dispatcher.call_sync(
                'crypto.certificate.query', [('id', '=', node['certificate'])],
            {'single': True}):
            raise TaskException(errno.ENOENT, 'SSL Certificate not found.')

        try:
            node = ConfigNode('service.webdav', self.configstore)
            node.update(webdav)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'services')
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'webdav')
            self.dispatcher.dispatch_event('service.webdav.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure WebDAV: {0}'.format(str(e)))

        return 'RESTART'
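
Example #21 rejects a port change when is_port_open() reports the port as busy. A minimal sketch of such a check, assuming it simply probes for an existing TCP listener on localhost; the real helper is not shown here.

import socket


def is_port_open(port, host='127.0.0.1'):
    """Return True if something already accepts TCP connections on the given port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
        return sock.connect_ex((host, int(port))) == 0
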
Example #22
    def run(self, id, updated_params):
        if not self.dispatcher.call_sync('ipmi.is_ipmi_loaded'):
            raise TaskException(errno.ENXIO, 'No IPMI module loaded')

        if id not in self.dispatcher.call_sync('ipmi.channels'):
            raise TaskException(errno.ENXIO, 'Invalid channel')

        config = self.dispatcher.call_sync('ipmi.query', [('id', '=', id)], {'single': True})
        config.update(updated_params)
        channel = str(id)

        if updated_params.get('gateway') is None:
            config['gateway'] = '0.0.0.0'

        try:
            if config['dhcp']:
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'ipsrc', 'dhcp')
            else:
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'ipsrc', 'static')
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'ipaddr', config['address'])
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'netmask', cidr_to_netmask(config['netmask']))
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'defgw', 'ipaddr', config['gateway'])

            vlan_id = config['vlan_id'] if config.get('vlan_id') else 'off'
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'vlan', 'id', str(vlan_id))
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'access', 'on')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth', 'USER', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth', 'OPERATOR', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth', 'ADMIN', 'MD2,MD5')
            system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'auth', 'CALLBACK', 'MD2,MD5')

            with contextlib.suppress(SubprocessException):
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'arp', 'respond', 'on')

            with contextlib.suppress(SubprocessException):
                system('/usr/local/bin/ipmitool', 'lan', 'set', channel, 'arp', 'generate', 'on')

            if 'password' in updated_params:
                system('/usr/local/bin/ipmitool', 'user', 'set', 'password', '2', unpassword(updated_params['password']))
                system('/usr/local/bin/ipmitool', 'user', 'enable', '2')

        except SubprocessException as err:
            raise TaskException(errno.EFAULT, 'Cannot configure IPMI channel {0}: {1}'.format(channel, err.err))
Example #23
    def run(self, webdav):
        node = ConfigNode('service.webdav', self.configstore).__getstate__()
        if 'password' in webdav:
            webdav['password'] = unpassword(webdav['password'])

        for p in ('http_port', 'https_port'):
            port = webdav.get(p)
            if port and port != node[p] and is_port_open(port):
                raise TaskException(errno.EBUSY, 'Port {0} is already in use'.format(port))

        node.update(webdav)

        if node['http_port'] == node['https_port']:
            raise TaskException(errno.EINVAL, 'HTTP and HTTPS ports cannot be the same')

        if 'HTTPS' in node['protocol'] and not node['certificate']:
            raise TaskException(errno.EINVAL, 'SSL protocol specified without choosing a certificate')

        if node['certificate'] and not self.dispatcher.call_sync(
            'crypto.certificate.query', [('id', '=', node['certificate'])], {'single': True}
        ):
            raise TaskException(errno.ENOENT, 'SSL Certificate not found.')

        try:
            node = ConfigNode('service.webdav', self.configstore)
            node.update(webdav)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'services')
            self.dispatcher.call_sync('etcd.generation.generate_group', 'webdav')
            self.dispatcher.dispatch_event('service.webdav.changed', {
                'operation': 'updated',
                'ids': None,
            })
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot reconfigure WebDAV: {0}'.format(str(e))
            )

        return 'RESTART'
Example #24
    def get_status(self, id):
        si = None
        peer = self.datastore.get_by_id('peers', id)
        if peer['type'] != 'vmware':
            raise RpcException(errno.EINVAL, 'Invalid peer type: {0}'.format(peer['type']))

        try:
            start_time = datetime.now()
            ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            ssl_context.verify_mode = ssl.CERT_NONE
            si = connect.SmartConnect(
                host=q.get(peer, 'credentials.address'),
                user=q.get(peer, 'credentials.username'),
                pwd=unpassword(q.get(peer, 'credentials.password')),
                sslContext=ssl_context
            )
            delta = datetime.now() - start_time
        except:
            return {'state': 'OFFLINE', 'rtt': None}
        finally:
            if si:
                connect.Disconnect(si)

        return {'state': 'ONLINE', 'rtt': delta.total_seconds()}
Example #25
    def run(self, id, updated_fields):
        def update_all_signed_certs_and_get_ids(old_signing_name,
                                                new_signing_name):
            certs = self.datastore.query(
                'crypto.certificates',
                ('signing_ca_name', '=', old_signing_name))
            for c in certs:
                c['signing_ca_name'] = new_signing_name
                self.datastore.update('crypto.certificates', c['id'], c)
            return [c['id'] for c in certs]

        ids = [id]
        if not self.datastore.exists('crypto.certificates', ('id', '=', id)):
            raise TaskException(errno.ENOENT,
                                'Certificate ID {0} does not exist'.format(id))

        cert = self.datastore.get_by_id('crypto.certificates', id)
        if cert['type'] in ('CA_EXISTING', 'CERT_EXISTING'):
            if 'certificate' in updated_fields:
                try:
                    crypto.load_certificate(crypto.FILETYPE_PEM,
                                            updated_fields['certificate'])
                except Exception:
                    raise TaskException(errno.EINVAL, 'Invalid certificate')
            if 'privatekey' in updated_fields:
                updated_fields['privatekey'] = unpassword(
                    updated_fields['privatekey'])
                try:
                    crypto.load_privatekey(crypto.FILETYPE_PEM,
                                           updated_fields['privatekey'])
                except Exception:
                    raise TaskException(errno.EINVAL, 'Invalid privatekey')
            if 'name' in updated_fields:
                if self.datastore.exists(
                        'crypto.certificates',
                    ('name', '=', updated_fields['name'])):
                    raise TaskException(
                        errno.EEXIST,
                        'Certificate name : "{0}" already in use'.format(
                            updated_fields['name']))
        else:
            if len(updated_fields) > 1 or 'name' not in updated_fields:
                raise TaskException(
                    errno.EINVAL,
                    'Only the "name" field of certificate {0} can be modified'.format(id))

            if self.datastore.exists('crypto.certificates',
                                     ('name', '=', updated_fields['name'])):
                raise TaskException(
                    errno.EEXIST,
                    'Certificate name : "{0}" already in use'.format(
                        updated_fields['name']))

        try:
            if 'certificate' in updated_fields:
                cert['certificate'] = updated_fields['certificate']
                cert.update(get_cert_info(cert['certificate']))
            if 'privatekey' in updated_fields:
                cert['privatekey'] = updated_fields['privatekey']
            if 'name' in updated_fields:
                old_name = cert['name']
                cert['name'] = updated_fields['name']
                ids.extend(
                    update_all_signed_certs_and_get_ids(
                        old_name, cert['name']))
            pkey = self.datastore.update('crypto.certificates', id, cert)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'crypto')
            self.dispatcher.dispatch_event('crypto.certificate.changed', {
                'operation': 'update',
                'ids': ids
            })
        except DatastoreException as e:
            raise TaskException(
                errno.EBADMSG, 'Cannot update certificate: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot generate certificate: {0}'.format(str(e)))

        return pkey
Example #26
    def run(self, certificate):
        if self.datastore.exists('crypto.certificates',
                                 ('name', '=', certificate['name'])):
            raise TaskException(
                errno.EEXIST, 'Certificate named "{0}" already exists'.format(
                    certificate['name']))

        new_cert_db_entry = {}
        new_cert_db_entry['name'] = certificate['name']
        new_cert_db_entry['type'] = certificate['type']

        if certificate.get('signing_ca_name'):
            signing_cert_db_entry = self.datastore.get_one(
                'crypto.certificates',
                ('name', '=', certificate['signing_ca_name']))
            if not signing_cert_db_entry:
                raise TaskException(
                    errno.ENOENT, 'Signing CA "{0}" not found'.format(
                        certificate['signing_ca_name']))
            new_cert_db_entry['signing_ca_id'] = signing_cert_db_entry['id']

        if certificate.get('certificate_path'):
            imported_cert = crypto.load_certificate(
                crypto.FILETYPE_PEM,
                get_file_contents(
                    certificate['certificate_path']).encode('utf-8'))
        elif certificate.get('certificate'):
            imported_cert = crypto.load_certificate(
                crypto.FILETYPE_PEM,
                certificate['certificate'].encode('utf-8'))
        else:
            imported_cert = False

        if imported_cert:
            new_cert_db_entry['certificate'] = crypto.dump_certificate(
                crypto.FILETYPE_PEM, imported_cert).decode('utf-8')
            new_cert_db_entry.update(get_cert_info(imported_cert))
            new_cert_db_entry['serial'] = str(
                imported_cert.get_serial_number())
            #certificate['selfsigned'] = False
            new_cert_db_entry[
                'not_before'] = get_utc_string_from_asn1generalizedtime(
                    imported_cert.get_notBefore().decode('utf-8'))
            new_cert_db_entry[
                'not_after'] = get_utc_string_from_asn1generalizedtime(
                    imported_cert.get_notAfter().decode('utf-8'))
            new_cert_db_entry['lifetime'] = 3650
        else:
            new_cert_db_entry['certificate'] = ""

        if certificate.get('privatekey_path'):
            imported_privkey = crypto.load_privatekey(
                crypto.FILETYPE_PEM,
                get_file_contents(certificate['privatekey_path']))
        elif certificate.get('privatekey'):
            certificate['privatekey'] = unpassword(certificate['privatekey'])
            imported_privkey = crypto.load_privatekey(
                crypto.FILETYPE_PEM, certificate['privatekey'])
        else:
            imported_privkey = False

        if imported_privkey:
            new_cert_db_entry['privatekey'] = crypto.dump_privatekey(
                crypto.FILETYPE_PEM, imported_privkey).decode('utf-8')
            new_cert_db_entry['key_length'] = imported_privkey.bits()
        else:
            new_cert_db_entry['privatekey'] = ""

        try:
            pkey = self.datastore.insert('crypto.certificates',
                                         new_cert_db_entry)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'crypto')
            self.dispatcher.dispatch_event('crypto.certificate.changed', {
                'operation': 'create',
                'ids': [pkey]
            })
        except DatastoreException as e:
            raise TaskException(
                errno.EBADMSG, 'Cannot import certificate: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO, 'Cannot generate certificate: {0}'.format(str(e)))

        return pkey
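
Example #26 converts pyOpenSSL's get_notBefore()/get_notAfter() values with get_utc_string_from_asn1generalizedtime(). A minimal sketch of that conversion, assuming the input uses the common 'YYYYMMDDHHMMSSZ' GeneralizedTime form and that an ISO-8601 UTC string is an acceptable result:

from datetime import datetime, timezone


def get_utc_string_from_asn1generalizedtime(value):
    """Parse '20240101120000Z' into an ISO-8601 UTC timestamp string."""
    parsed = datetime.strptime(value.rstrip('Z'), '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
    return parsed.isoformat()
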
Example #27
    def verify(self, certificate):
        if '"' in certificate['name']:
            raise VerifyException(errno.EINVAL,
                                  'Provide a certificate name without the `"` character')

        if certificate['type'] not in ('CERT_EXISTING', 'CA_EXISTING'):
            raise VerifyException(
                errno.EINVAL,
                'Invalid certificate type: {0}. Should be "CERT_EXISTING" or "CA_EXISTING"'
                .format(certificate['type']))

        if all(k in certificate for k in ('certificate', 'certificate_path')):
            raise VerifyException(
                errno.EINVAL,
                'Both "certificate" and "certificate_path" arguments specified at the same time.'
            )

        if all(k in certificate for k in ('privatekey', 'privatekey_path')):
            raise VerifyException(
                errno.EINVAL,
                'Both "privatekey" and "privatekey_path" arguments specified at the same time.'
            )

        if certificate.get('certificate_path') and not Path(
                certificate['certificate_path']).is_file():
            raise VerifyException(
                errno.ENOENT, 'Certificate file {0} does not exist'.format(
                    certificate['certificate_path']))

        if certificate.get('privatekey_path') and not Path(
                certificate['privatekey_path']).is_file():
            raise VerifyException(
                errno.ENOENT,
                "Certificate's privatekey file {0} does not exist".format(
                    certificate['privatekey_path']))

        if certificate.get('certificate_path'):
            try:
                crypto.load_certificate(
                    crypto.FILETYPE_PEM,
                    get_file_contents(
                        certificate['certificate_path']).encode('utf-8'))
            except Exception:
                raise VerifyException(
                    errno.EINVAL,
                    "Invalid certificate file contents: '{0}'".format(
                        certificate['certificate_path']))

        if certificate.get('certificate'):
            try:
                crypto.load_certificate(
                    crypto.FILETYPE_PEM,
                    certificate['certificate'].encode('utf-8'))
            except Exception:
                raise VerifyException(errno.EINVAL,
                                      "Invalid certificate provided")

        if certificate.get('privatekey_path'):
            try:
                crypto.load_privatekey(
                    crypto.FILETYPE_PEM,
                    get_file_contents(certificate['privatekey_path']))
            except Exception:
                raise VerifyException(
                    errno.EINVAL,
                    "Invalid privatekey file contents: '{0}'".format(
                        certificate['privatekey_path']))

        if certificate.get('privatekey'):
            certificate['privatekey'] = unpassword(certificate['privatekey'])
            try:
                crypto.load_privatekey(crypto.FILETYPE_PEM,
                                       certificate['privatekey'])
            except Exception:
                raise VerifyException(errno.EINVAL,
                                      "Invalid privatekey provided")

        return ['system']
Example #28
    def run(self, mail):
        node = ConfigNode('mail', self.configstore)
        if mail.get('password'):
            mail['password'] = unpassword(mail['password'])

        node.update(mail)
Example #29
    def send(self, mailmessage, mail=None):
        if mail is None:
            mail = ConfigNode('mail', self.configstore).__getstate__()
        elif mail.get('password'):
            mail['password'] = unpassword(mail['password'])

        if not mail.get('server') or not mail.get('port'):
            raise RpcException(
                errno.EINVAL,
                'You must provide an outgoing server and port when sending mail',
            )

        to = mailmessage.get('to')
        attachments = mailmessage.get('attachments')
        subject = mailmessage.get('subject')
        extra_headers = mailmessage.get('extra_headers')

        if not to:
            to = self.dispatcher.call_sync('user.query', [('username', '=', 'root')], {'single': True})
            if to and to.get('email'):
                to = [to['email']]

        if attachments:
            msg = MIMEMultipart()
            msg.preamble = mailmessage['message']
            list(map(lambda attachment: msg.attach(attachment), attachments))
        else:
            msg = MIMEText(mailmessage['message'], _charset='utf-8')
        if subject:
            msg['Subject'] = subject

        msg['From'] = mailmessage.get('from_address', mail['from_address'])
        msg['To'] = ', '.join(to)
        msg['Date'] = formatdate()

        local_hostname = socket.gethostname()
        version = self.dispatcher.call_sync('system.info.version').split('-')[0].lower()

        msg['Message-ID'] = "<{0}-{1}.{2}@{3}>".format(
            version,
            datetime.utcnow().strftime("%Y%m%d.%H%M%S.%f"),
            base64.urlsafe_b64encode(os.urandom(3)),
            local_hostname)

        if not extra_headers:
            extra_headers = {}
        for key, val in list(extra_headers.items()):
            if key in msg:
                msg.replace_header(key, val)
            else:
                msg[key] = val
        msg = msg.as_string()

        try:
            if mail['encryption'] == 'SSL':
                klass = smtplib.SMTP_SSL
            else:
                klass = smtplib.SMTP
            server = klass(mail['server'], mail['port'], timeout=300, local_hostname=local_hostname)
            if mail['encryption'] == 'TLS':
                server.starttls()

            if mail['auth']:
                server.login(mail['user'], mail['password'])
            server.sendmail(mail['from_address'], to, msg)
            server.quit()
        except smtplib.SMTPAuthenticationError as e:
            raise RpcException(errno.EACCES, 'Authentication error: {0} {1}'.format(
                e.smtp_code, e.smtp_error))
        except Exception as e:
            logger.error('Failed to send email: {0}'.format(str(e)), exc_info=True)
            raise RpcException(errno.EFAULT, 'Email send error: {0}'.format(str(e)))
        except:
            raise RpcException(errno.EFAULT, 'Unexpected error')
Example #30
    def run(self, user):
        if self.datastore.exists('users', ('username', '=', user['username'])):
            raise TaskException(errno.EEXIST,
                                'User with given name already exists')

        if 'uid' not in user:
            # Need to get next free UID
            uid = self.dispatcher.call_sync('user.next_uid',
                                            user.get('group') is None)
        else:
            uid = user.pop('uid')

        normalize_name(user, 'username')
        normalize(
            user, {
                'builtin': False,
                'unixhash': None,
                'nthash': None,
                'password_changed_at': None,
                'full_name': 'User &',
                'shell': '/bin/sh',
                'home': '/nonexistent',
                'groups': [],
                'uid': uid,
                'sudo': False,
                'attributes': {}
            })

        if user['home'] is None:
            user['home'] = '/nonexistent'

        if user['home'] != '/nonexistent':
            user['home'] = os.path.normpath(user['home'])
            zfs_dataset_mountpoints = list(
                self.dispatcher.call_sync('volume.dataset.query', [],
                                          {'select': 'mountpoint'}))
            zfs_pool_mountpoints = list(
                self.dispatcher.call_sync('volume.query', [],
                                          {'select': 'mountpoint'}))
            homedir_occurrence = self.dispatcher.call_sync(
                'user.query', [('home', '=', user['home'])], {'single': True})

            if user['home'] in zfs_pool_mountpoints:
                raise TaskException(
                    errno.ENXIO,
                    'ZFS pool mountpoint cannot be set as the home directory.')

            homedir_mount_path = os.path.join(
                '/', *(user['home'].split(os.path.sep)[:-1]))
            if not any(homedir_mount_path == dataset_mountpoint
                       and os.path.ismount(dataset_mountpoint)
                       for dataset_mountpoint in zfs_dataset_mountpoints):
                raise TaskException(
                    errno.ENXIO,
                    'Home directory has to reside in a ZFS pool or dataset.' +
                    ' Provide a path that starts with a valid, mounted ZFS pool or dataset location.'
                )

            if homedir_occurrence:
                raise TaskException(
                    errno.ENXIO,
                    '{} is already assigned to another user.'.format(
                        user['home']) +
                    ' Multiple users cannot share the same home directory.')

        password = user.pop('password', None)
        if password:
            user.update({
                'unixhash': crypted_password(unpassword(password)),
                'nthash': nt_password(unpassword(password)),
                'password_changed_at': datetime.utcnow()
            })

        if user.get('group') is None:
            try:
                result = self.run_subtask_sync('group.create', {
                    'gid': uid,
                    'name': user['username']
                })
            except RpcException as err:
                raise err

            user['group'] = result
            self.created_group = result

        try:
            id = self.datastore.insert('users', user)
            self.id = id
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'accounts')
        except DuplicateKeyException as e:
            raise TaskException(errno.EBADMSG,
                                'Cannot add user: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(errno.ENXIO,
                                'Cannot regenerate users file: {0}'.format(e))

        if user['home'] != '/nonexistent':
            group = self.dispatcher.call_sync('group.query',
                                              [('id', '=', user['group'])],
                                              {'single': True})

            if not group:
                raise TaskException(
                    errno.ENOENT, 'Group {0} not found'.format(user['group']))
            user_gid = group['gid']

            if not os.path.exists(user['home']):
                parent_dataset = self.dispatcher.call_sync(
                    'volume.dataset.query',
                    [('mountpoint', '=', homedir_mount_path)],
                    {'single': True})

                homedir_dataset_id = os.path.join(
                    parent_dataset['id'], user['home'].split(os.path.sep)[-1])
                self.run_subtask_sync(
                    'volume.dataset.create', {
                        'id': homedir_dataset_id,
                        'type': 'FILESYSTEM',
                        'volume': parent_dataset['volume']
                    })
                os.chmod(user['home'], 0o755)

            else:
                os.chmod(user['home'], 0o755)

            for file in os.listdir(SKEL_PATH):
                if file.startswith('dot'):
                    dest_file = os.path.join(user['home'], file[3:])
                    if not os.path.exists(dest_file):
                        shutil.copyfile(os.path.join(SKEL_PATH, file),
                                        dest_file)
                        os.chown(dest_file, uid, user_gid)

                else:
                    dest_file = os.path.join(user['home'], file)
                    if not os.path.exists(dest_file):
                        shutil.copyfile(os.path.join(SKEL_PATH, file),
                                        dest_file)
                        os.chown(dest_file, uid, user_gid)

            os.chown(user['home'], uid, user_gid)
            if not self.configstore.get('system.home_directory_root'):
                self.configstore.set('system.home_directory_root',
                                     os.path.dirname(user['home']))

        self.dispatcher.dispatch_event('user.changed', {
            'operation': 'create',
            'ids': [id]
        })

        return id
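
Example #30 derives login hashes through crypted_password() and nt_password(). A minimal sketch of what such helpers conventionally compute, assuming a SHA-512 crypt(3) hash and an NT hash (MD4 over UTF-16LE); the crypt module is Unix-only and MD4 availability depends on the local OpenSSL build.

import crypt
import hashlib


def crypted_password(cleartext):
    """SHA-512 crypt(3) hash with a freshly generated salt (Unix-only)."""
    return crypt.crypt(cleartext, crypt.mksalt(crypt.METHOD_SHA512))


def nt_password(cleartext):
    """NT hash: MD4 digest of the UTF-16LE encoded password, uppercased hex."""
    return hashlib.new('md4', cleartext.encode('utf-16le')).hexdigest().upper()
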
Example #31
    def run(self, id, updated_fields):
        normalize_name(updated_fields, 'username')

        user = self.datastore.get_by_id('users', id)
        self.original_user = copy.deepcopy(user)
        if user is None:
            raise TaskException(errno.ENOENT,
                                "User with id: {0} does not exist".format(id))

        if user.get('builtin'):
            if 'home' in updated_fields:
                raise TaskException(
                    errno.EPERM, "Cannot change builtin user's home directory")

            # Similarly ignore uid changes for builtin users
            if 'uid' in updated_fields:
                raise TaskException(errno.EPERM,
                                    "Cannot change builtin user's UID")

            if 'username' in updated_fields:
                raise TaskException(errno.EPERM,
                                    "Cannot change builtin user's username")

            if 'locked' in updated_fields:
                raise TaskException(
                    errno.EPERM, "Cannot change builtin user's locked flag")

        if not user:
            raise TaskException(errno.ENOENT, 'User {0} not found'.format(id))

        if 'home' in updated_fields and updated_fields['home'] is None:
            updated_fields['home'] = '/nonexistent'

        if 'home' in updated_fields and updated_fields[
                'home'] != '/nonexistent':
            updated_fields['home'] = os.path.normpath(updated_fields['home'])
            zfs_dataset_mountpoints = list(
                self.dispatcher.call_sync('volume.dataset.query', [],
                                          {'select': 'mountpoint'}))
            zfs_pool_mountpoints = list(
                self.dispatcher.call_sync('volume.query', [],
                                          {'select': 'mountpoint'}))
            homedir_occurrence = self.dispatcher.call_sync(
                'user.query', [('home', '=', updated_fields['home'])],
                {'single': True})
            homedir_mount_path = os.path.join(
                '/', *(updated_fields['home'].split(os.path.sep)[:-1]))

            user_gid = self.datastore.get_by_id('groups', user['group'])
            user_gid = user_gid['gid'] if user_gid else 0

            if user['home'] != updated_fields['home']:

                if updated_fields['home'] in zfs_pool_mountpoints:
                    raise TaskException(
                        errno.ENXIO,
                        'Volume mountpoint cannot be set as the home directory.'
                    )

                if not any(homedir_mount_path == dataset_mountpoint
                           and os.path.ismount(dataset_mountpoint)
                           for dataset_mountpoint in zfs_dataset_mountpoints):
                    raise TaskException(
                        errno.ENXIO,
                        'Home directory has to reside in zfs pool or dataset.' +
                        ' Provide a path which starts with valid, mounted zfs pool or dataset location.'
                    )

                if homedir_occurrence:
                    raise TaskException(
                        errno.ENXIO,
                        '{} is already assigned to another user.'.format(
                            updated_fields['home']) +
                        ' Multiple users cannot share the same home directory.'
                    )

                if not os.path.exists(updated_fields['home']):
                    parent_dataset = self.dispatcher.call_sync(
                        'volume.dataset.query',
                        [('mountpoint', '=', homedir_mount_path)],
                        {'single': True})

                    homedir_dataset_id = os.path.join(
                        parent_dataset['id'],
                        updated_fields['home'].split(os.path.sep)[-1])

                    self.run_subtask_sync(
                        'volume.dataset.create', {
                            'id': homedir_dataset_id,
                            'type': 'FILESYSTEM',
                            'volume': parent_dataset['volume']
                        })
                    os.chmod(updated_fields['home'], 0o755)

                else:
                    os.chmod(updated_fields['home'], 0o755)

                for file in os.listdir(SKEL_PATH):
                    if file.startswith('dot'):
                        dest_file = os.path.join(updated_fields['home'],
                                                 file[3:])
                        if not os.path.exists(dest_file):
                            shutil.copyfile(os.path.join(SKEL_PATH, file),
                                            dest_file)
                            os.chown(dest_file, user['uid'], user_gid)

                    else:
                        dest_file = os.path.join(updated_fields['home'], file)
                        if not os.path.exists(dest_file):
                            shutil.copyfile(os.path.join(SKEL_PATH, file),
                                            dest_file)
                            os.chown(dest_file, user['uid'], user_gid)

                os.chown(updated_fields['home'], user['uid'], user_gid)

        user.update(updated_fields)

        try:
            password = user.pop('password', None)
            if password:
                user.update({
                    'unixhash': crypted_password(unpassword(password)),
                    'nthash': nt_password(unpassword(password)),
                    'password_changed_at': datetime.utcnow()
                })

            self.datastore.update('users', user['id'], user)
            self.dispatcher.call_sync('etcd.generation.generate_group',
                                      'accounts')
        except SubprocessException as e:
            raise TaskException(
                errno.ENXIO,
                'Could not generate samba password. stdout: {0}\nstderr: {1}'.format(e.out, e.err))
        except DatastoreException as e:
            raise TaskException(errno.EBADMSG,
                                'Cannot update user: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(
                e.code, 'Cannot regenerate users file: {0}'.format(e.message))

        self.dispatcher.dispatch_event('user.changed', {
            'operation': 'update',
            'ids': [user['id']]
        })
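
The update path above feeds the cleartext returned by unpassword() into crypted_password() and nt_password() before storing unixhash and nthash. Those two helpers are defined elsewhere in the middleware; the sketch below is only a hypothetical stand-in showing the usual shape of such hashes (SHA-512 crypt(3) for the Unix field, MD4 over UTF-16LE for the NT field), not the project's actual implementation.

    import binascii
    import crypt
    import hashlib

    def crypted_password_sketch(cleartext):
        # Hypothetical sketch: SHA-512 crypt(3) hash for a unixhash-style field.
        return crypt.crypt(cleartext, crypt.mksalt(crypt.METHOD_SHA512))

    def nt_password_sketch(cleartext):
        # Hypothetical sketch: the NT hash is MD4 over the UTF-16LE password,
        # upper-case hex. Requires an OpenSSL build that still exposes md4.
        return binascii.hexlify(
            hashlib.new('md4', cleartext.encode('utf-16le')).digest()
        ).decode().upper()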
Example #32
    def run(self, snapshot, recursive=False):
        # Find the matching datastore mappings
        dataset = snapshot.get('dataset') or snapshot.get('id').split('@')[0]
        vm_snapname = 'FreeNAS-{0}'.format(str(uuid.uuid4()))
        vm_snapdescr = '{0} (Created by FreeNAS)'.format(datetime.utcnow())
        failed_snapshots = []

        # Save the snapshot name in the parent task environment so the delete counterpart can find it
        self.dispatcher.task_setenv(self.environment['parent'],
                                    'vmware_snapshot_name', vm_snapname)

        for mapping in self.datastore.query_stream('vmware.datasets'):
            if recursive:
                if not re.search('^{0}(/|$)'.format(mapping['dataset']), dataset) and \
                   not re.search('^{0}(/|$)'.format(dataset), mapping['dataset']):
                    continue
            else:
                if mapping['dataset'] != dataset:
                    continue

            peer = self.dispatcher.call_sync('peer.query',
                                             [('id', '=', mapping['peer'])],
                                             {'single': True})
            if not peer:
                failed_snapshots.append({
                    'when': 'connect',
                    'host': '<mapping {0}>'.format(mapping['name']),
                    'datastore': mapping['datastore'],
                    'error': 'Cannot find peer entry for mapping {0}'.format(mapping['name'])
                })
                continue

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(
                    host=q.get(peer, 'credentials.address'),
                    user=q.get(peer, 'credentials.username'),
                    pwd=unpassword(q.get(peer, 'credentials.password')),
                    sslContext=ssl_context)
                content = si.RetrieveContent()
                vm_view = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
            except BaseException as err:
                logger.warning(
                    'Connecting to VMware instance at {0} failed: {1}'.format(
                        q.get(peer, 'credentials.address'), str(err)))

                failed_snapshots.append({
                    'when': 'connect',
                    'host': q.get(peer, 'credentials.address'),
                    'datastore': mapping['datastore'],
                    'error': getattr(err, 'msg', str(err))
                })

                continue

            for vm in vm_view.view:
                if mapping['vm_filter_op'] == 'INCLUDE' and \
                        vm.summary.config.name not in mapping['vm_filter_entries']:
                    continue

                if mapping['vm_filter_op'] == 'EXCLUDE' and \
                        vm.summary.config.name in mapping['vm_filter_entries']:
                    continue

                if not any(i.info.name == mapping['datastore']
                           for i in vm.datastore):
                    continue

                if vm.snapshot and find_snapshot(vm.snapshot.rootSnapshotList,
                                                 vm_snapname):
                    continue

                logger.info(
                    'Creating snapshot of VM {0} (datastore {1})'.format(
                        vm.summary.config.name, mapping['datastore']))

                try:
                    task.WaitForTask(
                        vm.CreateSnapshot_Task(name=vm_snapname,
                                               description=vm_snapdescr,
                                               memory=False,
                                               quiesce=False))
                except vmodl.MethodFault as err:
                    logger.warning(
                        'Creating snapshot of {0} failed: {1}'.format(
                            vm.summary.config.name, err.msg))
                    failed_snapshots.append({
                        'when': 'create',
                        'vm': vm.summary.config.name,
                        'datastore': mapping['datastore'],
                        'error': err.msg
                    })

            connect.Disconnect(si)

        self.dispatcher.task_setenv(self.environment['parent'],
                                    'vmware_failed_snapshots',
                                    failed_snapshots)
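
Both this create task and the delete counterpart in Example #36 call a find_snapshot() helper that is not part of these listings. A minimal sketch, assuming it simply walks pyVmomi's snapshot tree by name and returns the matching vim.vm.Snapshot object (or None):

    def find_snapshot(snapshot_list, name):
        # Hypothetical sketch; the real helper lives elsewhere in the module.
        # Depth-first search over VirtualMachineSnapshotTree entries.
        for item in snapshot_list:
            if item.name == name:
                return item.snapshot
            found = find_snapshot(item.childSnapshotList, name)
            if found:
                return found
        return None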
Example #33
    def run(self, peer, initial_credentials):
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        hostname = self.dispatcher.call_sync('system.general.get_config')['hostname']
        remote_peer_name = hostname
        credentials = peer['credentials']
        remote = credentials.get('address')
        port = credentials.get('port', 22)
        username = initial_credentials.get('username')
        password = initial_credentials.get('password')
        auth_code = initial_credentials.get('auth_code')
        key_auth = initial_credentials.get('key_auth')

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if self.datastore.exists('peers', ('credentials.address', '=', remote), ('type', '=', 'freenas')):
            raise TaskException(
                errno.EEXIST,
                'FreeNAS peer entry for {0} already exists'.format(remote)
            )

        remote_client = Client()

        try:
            if auth_code:
                try:
                    remote_client.connect('ws://{0}'.format(wrap_address(remote)))
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

                try:
                    remote_host_uuid, pubkey = remote_client.call_sync(
                        'peer.freenas.auth_with_code',
                        auth_code,
                        hostname,
                        local_ssh_config['port']
                    )
                except RpcException as err:
                    raise TaskException(err.code, err.message)

                try:
                    self.dispatcher.call_sync('peer.freenas.put_temp_pubkey', pubkey)
                    if not self.dispatcher.test_or_wait_for_event(
                        'peer.changed',
                        lambda ar: ar['operation'] == 'create' and remote_host_uuid in ar['ids'],
                        lambda: self.datastore.exists('peers', ('id', '=', remote_host_uuid)),
                        timeout=30
                    ):
                        raise TaskException(
                            errno.EAUTH,
                            'FreeNAS peer creation failed. Check connection to host {0}.'.format(remote)
                        )
                finally:
                    self.dispatcher.call_sync('peer.freenas.remove_temp_pubkey', pubkey)

            else:
                try:
                    if key_auth:
                        with io.StringIO() as f:
                            f.write(self.configstore.get('peer.freenas.key.private'))
                            f.seek(0)
                            pkey = RSAKey.from_private_key(f)

                        max_tries = 50
                        while True:
                            try:
                                remote_client.connect('ws+ssh://freenas@{0}'.format(
                                    wrap_address(remote)), pkey=pkey, port=port
                                )
                                break
                            except AuthenticationException:
                                if max_tries:
                                    max_tries -= 1
                                    time.sleep(1)
                                else:
                                    raise
                    else:
                        remote_client.connect(
                            'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                            port=port,
                            password=unpassword(password)
                        )

                    remote_client.login_service('replicator')
                except (AuthenticationException, OSError, ConnectionRefusedError):
                    raise TaskException(errno.ECONNABORTED, 'Cannot connect to {0}:{1}'.format(remote, port))

                local_host_key, local_pub_key = self.dispatcher.call_sync('peer.freenas.get_ssh_keys')
                remote_host_key, remote_pub_key = remote_client.call_sync('peer.freenas.get_ssh_keys')
                ip_at_remote_side = remote_client.local_address[0]

                remote_hostname = remote_client.call_sync('system.general.get_config')['hostname']

                remote_host_key = remote_host_key.rsplit(' ', 1)[0]
                local_host_key = local_host_key.rsplit(' ', 1)[0]

                if remote_client.call_sync('peer.query', [('id', '=', hostid)]):
                    raise TaskException(errno.EEXIST, 'Peer entry of {0} already exists at {1}'.format(hostname, remote))

                peer['credentials'] = {
                    '%type': 'FreenasCredentials',
                    'pubkey': remote_pub_key,
                    'hostkey': remote_host_key,
                    'port': port,
                    'address': remote_hostname
                }

                local_id = remote_client.call_sync('system.info.host_uuid')
                peer['id'] = local_id
                peer['name'] = remote_hostname
                ip = socket.gethostbyname(remote)

                created_id = self.run_subtask_sync(
                    'peer.freenas.create_local',
                    peer,
                    ip,
                    True
                )

                peer['id'] = hostid
                peer['name'] = remote_peer_name
                peer['credentials'] = {
                    '%type': 'FreenasCredentials',
                    'pubkey': local_pub_key,
                    'hostkey': local_host_key,
                    'port': local_ssh_config['port'],
                    'address': hostname
                }

                try:
                    call_task_and_check_state(
                        remote_client,
                        'peer.freenas.create_local',
                        peer,
                        ip_at_remote_side
                    )
                except TaskException:
                    self.datastore.delete('peers', local_id)
                    self.dispatcher.dispatch_event('peer.changed', {
                        'operation': 'delete',
                        'ids': [local_id]
                    })
                    raise

                return created_id
        finally:
            remote_client.disconnect()
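
wrap_address() is used throughout the peer tasks but defined elsewhere. A plausible minimal sketch, assuming its only job is to bracket IPv6 literals so they can be embedded in the ws:// and ws+ssh:// URLs built above:

    def wrap_address(address):
        # Hypothetical sketch: IPv6 literals contain ':' and must be bracketed
        # inside a URL authority; hostnames and IPv4 addresses pass through.
        return '[{0}]'.format(address) if ':' in address else address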
Example #34
    def run(self, id, updated_fields):
        normalize_name(updated_fields, 'username')

        user = self.datastore.get_by_id('users', id)
        self.original_user = copy.deepcopy(user)
        if user is None:
            raise TaskException(
                errno.ENOENT, "User with id: {0} does not exist".format(id)
            )

        if user.get('builtin'):
            if 'home' in updated_fields:
                raise TaskException(
                    errno.EPERM, "Cannot change builtin user's home directory"
                )

            # Likewise, reject UID changes for builtin users
            if 'uid' in updated_fields:
                raise TaskException(errno.EPERM, "Cannot change builtin user's UID")

            if 'username' in updated_fields:
                raise TaskException(
                    errno.EPERM, "Cannot change builtin user's username"
                )

            if 'locked' in updated_fields:
                raise TaskException(
                    errno.EPERM, "Cannot change builtin user's locked flag"
                )

        if not user:
            raise TaskException(errno.ENOENT, 'User {0} not found'.format(id))

        if 'home' in updated_fields and updated_fields['home'] is None:
            updated_fields['home'] = '/nonexistent'

        if 'home' in updated_fields and updated_fields['home'] != '/nonexistent':
            updated_fields['home'] = os.path.normpath(updated_fields['home'])
            zfs_dataset_mountpoints = list(self.dispatcher.call_sync('volume.dataset.query', [], {'select': 'mountpoint'}))
            zfs_pool_mountpoints = list(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))
            homedir_occurrence = self.dispatcher.call_sync(
                'user.query',
                [('home', '=', updated_fields['home'])],
                {'single': True}
            )
            homedir_mount_path = os.path.join('/', *(updated_fields['home'].split(os.path.sep)[:-1]))

            user_gid = self.datastore.get_by_id('groups', user['group'])
            user_gid = user_gid['gid'] if user_gid else 0

            if user['home'] != updated_fields['home']:

                if updated_fields['home'] in zfs_pool_mountpoints:
                    raise TaskException(
                        errno.ENXIO,
                        'Volume mountpoint cannot be set as the home directory.'
                    )

                if not any(homedir_mount_path == dataset_mountpoint
                           and os.path.ismount(dataset_mountpoint) for dataset_mountpoint in zfs_dataset_mountpoints):
                    raise TaskException(
                        errno.ENXIO,
                        'Home directory has to reside in zfs pool or dataset.' +
                        ' Provide a path which starts with valid, mounted zfs pool or dataset location.'
                    )

                if homedir_occurrence:
                    raise TaskException(
                        errno.ENXIO,
                        '{} is already assigned to another user.'.format(updated_fields['home']) +
                        ' Multiple users cannot share the same home directory.'
                    )

                if not os.path.exists(updated_fields['home']):
                    parent_dataset = self.dispatcher.call_sync(
                        'volume.dataset.query',
                        [('mountpoint', '=', homedir_mount_path)],
                        {'single': True}
                    )

                    homedir_dataset_id = os.path.join(
                        parent_dataset['id'],
                        updated_fields['home'].split(os.path.sep)[-1]
                    )

                    self.run_subtask_sync(
                        'volume.dataset.create',
                        {'id': homedir_dataset_id, 'type': 'FILESYSTEM', 'volume': parent_dataset['volume']}
                    )
                    os.chmod(updated_fields['home'], 0o755)

                else:
                    os.chmod(updated_fields['home'], 0o755)

                for file in os.listdir(SKEL_PATH):
                    if file.startswith('dot'):
                        dest_file = os.path.join(updated_fields['home'], file[3:])
                        if not os.path.exists(dest_file):
                            shutil.copyfile(os.path.join(SKEL_PATH, file), dest_file)
                            os.chown(dest_file, user['uid'], user_gid)

                    else:
                        dest_file = os.path.join(updated_fields['home'], file)
                        if not os.path.exists(dest_file):
                            shutil.copyfile(os.path.join(SKEL_PATH, file), dest_file)
                            os.chown(dest_file, user['uid'], user_gid)

                os.chown(updated_fields['home'], user['uid'], user_gid)

        user.update(updated_fields)

        try:
            password = user.pop('password', None)
            if password:
                user.update({
                    'unixhash': crypted_password(unpassword(password)),
                    'nthash': nt_password(unpassword(password)),
                    'password_changed_at': datetime.utcnow()
                })

            self.datastore.update('users', user['id'], user)
            self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
        except SubprocessException as e:
            raise TaskException(
                errno.ENXIO,
                'Could not generate samba password. stdout: {0}\nstderr: {1}'.format(e.out, e.err))
        except DatastoreException as e:
            raise TaskException(errno.EBADMSG, 'Cannot update user: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(e.code, 'Cannot regenerate users file: {0}'.format(e.message))

        self.dispatcher.dispatch_event('user.changed', {
            'operation': 'update',
            'ids': [user['id']]
        })
Example #35
    def run(self, peer, initial_credentials):
        hostid = self.dispatcher.call_sync('system.info.host_uuid')
        hostname = self.dispatcher.call_sync(
            'system.general.get_config')['hostname']
        remote_peer_name = hostname
        credentials = peer['credentials']
        remote = credentials.get('address')
        port = credentials.get('port', 22)
        username = initial_credentials.get('username')
        password = initial_credentials.get('password')
        auth_code = initial_credentials.get('auth_code')
        key_auth = initial_credentials.get('key_auth')

        local_ssh_config = self.dispatcher.call_sync('service.sshd.get_config')

        if self.datastore.exists('peers', ('credentials.address', '=', remote),
                                 ('type', '=', 'freenas')):
            raise TaskException(
                errno.EEXIST,
                'FreeNAS peer entry for {0} already exists'.format(remote))

        remote_client = Client()

        try:
            if auth_code:
                try:
                    remote_client.connect('ws://{0}'.format(
                        wrap_address(remote)))
                except (AuthenticationException, OSError,
                        ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

                try:
                    remote_host_uuid, pubkey = remote_client.call_sync(
                        'peer.freenas.auth_with_code', auth_code, hostname,
                        local_ssh_config['port'])
                except RpcException as err:
                    raise TaskException(err.code, err.message)

                try:
                    self.dispatcher.call_sync('peer.freenas.put_temp_pubkey',
                                              pubkey)
                    if not self.dispatcher.test_or_wait_for_event(
                            'peer.changed',
                            lambda ar: ar['operation'] == 'create' and
                            remote_host_uuid in ar['ids'],
                            lambda: self.datastore.exists(
                                'peers', ('id', '=', remote_host_uuid)),
                            timeout=30):
                        raise TaskException(
                            errno.EAUTH,
                            'FreeNAS peer creation failed. Check connection to host {0}.'
                            .format(remote))
                finally:
                    self.dispatcher.call_sync(
                        'peer.freenas.remove_temp_pubkey', pubkey)

            else:
                try:
                    if key_auth:
                        with io.StringIO() as f:
                            f.write(self.configstore.get('peer.freenas.key.private'))
                            f.seek(0)
                            pkey = RSAKey.from_private_key(f)

                        max_tries = 50
                        while True:
                            try:
                                remote_client.connect(
                                    'ws+ssh://freenas@{0}'.format(
                                        wrap_address(remote)),
                                    pkey=pkey,
                                    port=port)
                                break
                            except AuthenticationException:
                                if max_tries:
                                    max_tries -= 1
                                    time.sleep(1)
                                else:
                                    raise
                    else:
                        remote_client.connect(
                            'ws+ssh://{0}@{1}'.format(username, wrap_address(remote)),
                            port=port,
                            password=unpassword(password)
                        )

                    remote_client.login_service('replicator')
                except (AuthenticationException, OSError,
                        ConnectionRefusedError):
                    raise TaskException(
                        errno.ECONNABORTED,
                        'Cannot connect to {0}:{1}'.format(remote, port))

                local_host_key, local_pub_key = self.dispatcher.call_sync(
                    'peer.freenas.get_ssh_keys')
                remote_host_key, remote_pub_key = remote_client.call_sync(
                    'peer.freenas.get_ssh_keys')
                ip_at_remote_side = remote_client.local_address[0]

                remote_hostname = remote_client.call_sync(
                    'system.general.get_config')['hostname']

                remote_host_key = remote_host_key.rsplit(' ', 1)[0]
                local_host_key = local_host_key.rsplit(' ', 1)[0]

                if remote_client.call_sync('peer.query',
                                           [('id', '=', hostid)]):
                    raise TaskException(
                        errno.EEXIST,
                        'Peer entry of {0} already exists at {1}'.format(
                            hostname, remote))

                peer['credentials'] = {
                    '%type': 'FreenasCredentials',
                    'pubkey': remote_pub_key,
                    'hostkey': remote_host_key,
                    'port': port,
                    'address': remote_hostname
                }

                local_id = remote_client.call_sync('system.info.host_uuid')
                peer['id'] = local_id
                peer['name'] = remote_hostname
                ip = socket.gethostbyname(remote)

                created_id = self.run_subtask_sync('peer.freenas.create_local',
                                                   peer, ip, True)

                peer['id'] = hostid
                peer['name'] = remote_peer_name
                peer['credentials'] = {
                    '%type': 'FreenasCredentials',
                    'pubkey': local_pub_key,
                    'hostkey': local_host_key,
                    'port': local_ssh_config['port'],
                    'address': hostname
                }

                try:
                    call_task_and_check_state(remote_client,
                                              'peer.freenas.create_local',
                                              peer, ip_at_remote_side)
                except TaskException:
                    self.datastore.delete('peers', local_id)
                    self.dispatcher.dispatch_event('peer.changed', {
                        'operation': 'delete',
                        'ids': [local_id]
                    })
                    raise

                return created_id
        finally:
            remote_client.disconnect()
Example #36
    def run(self, snapshot, recursive=False):
        dataset = snapshot.get('dataset') or snapshot.get('id').split('@')[0]
        id = snapshot.get('id') or '{0}@{1}'.format(dataset,
                                                    snapshot.get('name'))
        vm_snapname = self.environment.get('vmware_snapshot_name')
        failed_snapshots = self.environment.get('vmware_failed_snapshots', [])

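        # The create-side task (Example #32) stored the snapshot name in the
        # parent task environment; if it is absent there is nothing to clean
        # up on the VMware side.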
        if not vm_snapname:
            return

        logger.info('VM snapshot name is: {0}'.format(vm_snapname))

        for mapping in self.datastore.query_stream('vmware.datasets'):
            if recursive:
                if not re.search('^{0}(/|$)'.format(mapping['dataset']), dataset) and \
                   not re.search('^{0}(/|$)'.format(dataset), mapping['dataset']):
                    continue
            else:
                if mapping['dataset'] != dataset:
                    continue

            peer = self.dispatcher.call_sync('peer.query',
                                             [('id', '=', mapping['peer'])],
                                             {'single': True})
            if not peer:
                failed_snapshots.append({
                    'when': 'connect',
                    'host': '<mapping {0}>'.format(mapping['name']),
                    'datastore': mapping['datastore'],
                    'error': 'Cannot find peer entry for mapping {0}'.format(mapping['name'])
                })
                continue

            if any(
                    i.get('host') == q.get(peer, 'credentials.address')
                    for i in failed_snapshots):
                continue

            try:
                ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                ssl_context.verify_mode = ssl.CERT_NONE
                si = connect.SmartConnect(
                    host=q.get(peer, 'credentials.address'),
                    user=q.get(peer, 'credentials.username'),
                    pwd=unpassword(q.get(peer, 'credentials.password')),
                    sslContext=ssl_context)
                content = si.RetrieveContent()
                vm_view = content.viewManager.CreateContainerView(
                    content.rootFolder, [vim.VirtualMachine], True)
            except BaseException as err:
                logger.warning(
                    'Connecting to VMware instance at {0} failed: {1}'.format(
                        q.get(peer, 'credentials.address'), str(err)))

                failed_snapshots.append({
                    'when': 'connect',
                    'host': q.get(peer, 'credentials.address'),
                    'datastore': mapping['datastore'],
                    'error': getattr(err, 'msg', str(err))
                })

                continue

            for vm in vm_view.view:
                if not any(i.info.name == mapping['datastore']
                           for i in vm.datastore):
                    continue

                if not vm.snapshot:
                    continue

                snapshot = find_snapshot(vm.snapshot.rootSnapshotList,
                                         vm_snapname)
                if not snapshot:
                    continue

                logger.info(
                    'Removing snapshot of VM {0} (datastore {1})'.format(
                        vm.summary.config.name, mapping['datastore']))

                try:
                    task.WaitForTask(snapshot.RemoveSnapshot_Task(True))
                except vmodl.MethodFault as err:
                    logger.warning(
                        'Deleting snapshot of {0} failed: {1}'.format(
                            vm.summary.config.name, err.msg))
                    failed_snapshots.append({
                        'when': 'delete',
                        'vm': vm.summary.config.name,
                        'datastore': mapping['datastore'],
                        'error': err.msg
                    })

            connect.Disconnect(si)

        if failed_snapshots:
            descr = Template(ALERT_TEMPLATE).render(
                id=id, failed_snapshots=failed_snapshots)
            self.dispatcher.call_sync('alert.emit', {
                'clazz': 'VMwareSnapshotFailed',
                'target': dataset,
                'title': 'Failed to create or remove snapshot of one or more VMware virtual machines',
                'description': descr
            })
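
q.get() in the VMware examples is a dotted-path accessor into nested documents, e.g. q.get(peer, 'credentials.address'). It comes from the middleware's query utilities and is not shown here; a hypothetical minimal equivalent for plain dicts:

    def q_get(obj, path, default=None):
        # Hypothetical sketch: walk a nested dict along a dot-separated path,
        # returning `default` as soon as a segment is missing.
        for key in path.split('.'):
            if not isinstance(obj, dict) or key not in obj:
                return default
            obj = obj[key]
        return obj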
Example #37
    def run(self, user):
        if self.datastore.exists('users', ('username', '=', user['username'])):
            raise TaskException(errno.EEXIST, 'User with given name already exists')

        if 'uid' not in user:
            # Need to get next free UID
            uid = self.dispatcher.call_sync('user.next_uid', user.get('group') is None)
        else:
            uid = user.pop('uid')

        normalize_name(user, 'username')
        normalize(user, {
            'builtin': False,
            'unixhash': None,
            'nthash': None,
            'password_changed_at': None,
            'full_name': 'User &',
            'shell': '/bin/sh',
            'home': '/nonexistent',
            'groups': [],
            'uid': uid,
            'sudo': False,
            'attributes': {}
        })

        if user['home'] is None:
            user['home'] = '/nonexistent'

        if user['home'] != '/nonexistent':
            user['home'] = os.path.normpath(user['home'])
            zfs_dataset_mountpoints = list(self.dispatcher.call_sync('volume.dataset.query', [], {'select': 'mountpoint'}))
            zfs_pool_mountpoints = list(self.dispatcher.call_sync('volume.query', [], {'select': 'mountpoint'}))
            homedir_occurrence = self.dispatcher.call_sync(
                'user.query',
                [('home', '=', user['home'])],
                {'single': True}
            )

            if user['home'] in zfs_pool_mountpoints:
                raise TaskException(
                    errno.ENXIO,
                    'ZFS pool mountpoint cannot be set as the home directory.'
                )

            homedir_mount_path = os.path.join('/', *(user['home'].split(os.path.sep)[:-1]))
            if not any(homedir_mount_path == dataset_mountpoint
                       and os.path.ismount(dataset_mountpoint) for dataset_mountpoint in zfs_dataset_mountpoints):
                raise TaskException(
                    errno.ENXIO,
                    'Home directory has to reside in zfs pool or dataset.' +
                    ' Provide a path which starts with valid, mounted zfs pool or dataset location.'
                )

            if homedir_occurrence:
                raise TaskException(
                    errno.ENXIO,
                    '{} is already assigned to another user.'.format(user['home']) +
                    ' Multiple users cannot share the same home directory.'
                )

        password = user.pop('password', None)
        if password:
            user.update({
                'unixhash': crypted_password(unpassword(password)),
                'nthash': nt_password(unpassword(password)),
                'password_changed_at': datetime.utcnow()
            })

        if user.get('group') is None:
            try:
                result = self.run_subtask_sync('group.create', {
                    'gid': uid,
                    'name': user['username']
                })
            except RpcException as err:
                raise err

            user['group'] = result
            self.created_group = result

        try:
            id = self.datastore.insert('users', user)
            self.id = id
            self.dispatcher.call_sync('etcd.generation.generate_group', 'accounts')
        except SubprocessException as e:
            raise TaskException(
                errno.ENXIO,
                'Could not generate samba password. stdout: {0}\nstderr: {1}'.format(e.out, e.err)
            )
        except DuplicateKeyException as e:
            raise TaskException(errno.EBADMSG, 'Cannot add user: {0}'.format(str(e)))
        except RpcException as e:
            raise TaskException(
                errno.ENXIO,
                'Cannot regenerate users file: {0}'.format(e)
            )

        if user['home'] != '/nonexistent':
            group = self.dispatcher.call_sync('group.query', [('id', '=', user['group'])], {'single': True})

            if not group:
                raise TaskException(errno.ENOENT, 'Group {0} not found'.format(user['group']))
            user_gid = group['gid']

            if not os.path.exists(user['home']):
                parent_dataset = self.dispatcher.call_sync(
                    'volume.dataset.query',
                    [('mountpoint', '=', homedir_mount_path)],
                    {'single': True}
                )

                homedir_dataset_id = os.path.join(parent_dataset['id'], user['home'].split(os.path.sep)[-1])
                self.run_subtask_sync(
                    'volume.dataset.create',
                    {'id': homedir_dataset_id, 'type': 'FILESYSTEM', 'volume': parent_dataset['volume']}
                )
                os.chmod(user['home'], 0o755)

            else:
                os.chmod(user['home'], 0o755)

            for file in os.listdir(SKEL_PATH):
                if file.startswith('dot'):
                    dest_file = os.path.join(user['home'], file[3:])
                    if not os.path.exists(dest_file):
                        shutil.copyfile(os.path.join(SKEL_PATH, file), dest_file)
                        os.chown(dest_file, uid, user_gid)

                else:
                    dest_file = os.path.join(user['home'], file)
                    if not os.path.exists(dest_file):
                        shutil.copyfile(os.path.join(SKEL_PATH, file), dest_file)
                        os.chown(dest_file, uid, user_gid)

            os.chown(user['home'], uid, user_gid)

        self.dispatcher.dispatch_event('user.changed', {
            'operation': 'create',
            'ids': [id]
        })

        return id
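
normalize() and normalize_name() used above come from the surrounding middleware utilities and are not part of these listings. Judging from how it is called, normalize() appears to simply backfill defaults for keys the caller did not supply; a rough, hypothetical sketch:

    def normalize(obj, defaults):
        # Hypothetical sketch: set every default the caller did not supply,
        # leaving explicitly provided values untouched.
        for key, value in defaults.items():
            obj.setdefault(key, value)
        return obj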