Example #1
0
def get_initiator():
    """Get the initiator connector dict.

    Collects this host's connector properties (IP, initiator name, etc.)
    via os-brick, running privileged commands through ``sudo``.

    :returns: dict of connector properties for this host.
    """
    # Get the initiator side connector properties for this host's IPv4.
    my_ip = netutils.get_my_ipv4()
    # root_helper='sudo'; multipath and enforce_multipath are both disabled.
    initiator = connector.get_connector_properties('sudo', my_ip, False, False)
    LOG.debug("initiator = %s", initiator)
    return initiator
Example #2
0
 def _test_brick_get_connector_properties(self,
                                          multipath,
                                          enforce_multipath,
                                          multipath_result,
                                          mock_wwnns,
                                          mock_wwpns,
                                          mock_initiator,
                                          mock_sysuuid,
                                          host='fakehost'):
     """Check the connector properties reported for the mocked s390x host."""
     actual = connector.get_connector_properties(
         'sudo', MY_IP, multipath, enforce_multipath, host=host)
     expected = {'initiator': 'fakeinitiator',
                 'host': host,
                 'ip': MY_IP,
                 'multipath': multipath_result,
                 'os_type': 'linux2',
                 'platform': 's390x',
                 'do_local_attach': False}
     self.assertEqual(expected, actual)
Example #3
0
    def attach(self, client, volume_id, hostname):
        """Attach the given volume to this host and return the device info.

        Initializes a connection through the Cinder API, connects the
        volume locally via the matching os-brick connector and finally
        marks the volume as attached in Cinder.

        :param client: cinderclient client instance.
        :param volume_id: ID of the volume to attach.
        :param hostname: host name reported to Cinder for the attachment.
        :returns: device info dict returned by the os-brick connector.
        """
        # TODO(e0ne): use oslo.rootwrap
        # TODO(e0ne): multipath support
        root_helper = 'sudo'
        conn_prop = connector.get_connector_properties(root_helper,
                                                       _get_my_ip(),
                                                       multipath=False,
                                                       enforce_multipath=False)
        connection = client.volumes.initialize_connection(volume_id, conn_prop)

        protocol = connection['driver_volume_type']
        protocol = protocol.upper()
        # mount_point_base is only present for NFS-style backends.
        nfs_mount_point_base = connection.get('mount_point_base')
        brick_connector = self._brick_get_connector(
            protocol, nfs_mount_point_base=nfs_mount_point_base)

        device_info = brick_connector.connect_volume(connection['data'])
        if protocol == 'RBD':
            # TODO(e0ne): move to attach_rbd_volume() function
            # TODO(e0ne): use oslo.rootwrap
            # TODO(e0ne): multipath support
            # RBD volume names look like '<pool>/<volume>'.
            pool, volume = connection['data']['name'].split('/')
            cmd = ['rbd', 'map', volume, '--pool', pool]
            processutils.execute(*cmd, root_helper='sudo', run_as_root=True)
        client.volumes.attach(volume_id, None, None, host_name=hostname)
        return device_info
Example #4
0
    def detach(self, client, volume_id):
        """Detach the given volume from this host.

        Disconnects the volume via os-brick, performs protocol-specific
        local cleanup (rbd unmap / NFS umount), then terminates the
        connection and marks the volume detached in Cinder.

        :param client: cinderclient client instance.
        :param volume_id: ID of the volume to detach.
        """
        # TODO(e0ne): multipath support
        conn_prop = connector.get_connector_properties(utils.get_root_helper(),
                                                       utils.get_my_ip(),
                                                       multipath=False,
                                                       enforce_multipath=False)
        connection = client.volumes.initialize_connection(volume_id, conn_prop)
        nfs_mount_point_base = connection.get('mount_point_base')
        brick_connector = self._brick_get_connector(
            connection['driver_volume_type'],
            nfs_mount_point_base=nfs_mount_point_base)

        # TODO(e0ne): use real device info from params
        device_info = {}
        brick_connector.disconnect_volume(connection['data'], device_info)
        protocol = connection['driver_volume_type']
        protocol = protocol.upper()
        if protocol == 'RBD':
            # TODO(e0ne): move to detach_rbd_volume() function
            # TODO(e0ne): multipath support
            # RBD volume names look like '<pool>/<volume>'.
            pool, volume = connection['data']['name'].split('/')
            dev_name = '/dev/rbd/{pool}/{volume}'.format(pool=pool,
                                                         volume=volume)
            cmd = ['rbd', 'unmap', dev_name]
            processutils.execute(*cmd,
                                 root_helper=utils.get_root_helper(),
                                 run_as_root=True)
        elif protocol == 'NFS':
            nfs_share = connection['data']['export']
            cmd = ['umount', nfs_share]
            processutils.execute(*cmd, root_helper=utils.get_root_helper(),
                                 run_as_root=True)
        client.volumes.terminate_connection(volume_id, conn_prop)
        client.volumes.detach(volume_id)
Example #5
0
 def get_connector(self):
     """Return this host's os-brick connector properties dict."""
     # TODO(e0ne): multipath support
     root_helper = utils.get_root_helper()
     my_ip = utils.get_my_ip()
     return connector.get_connector_properties(root_helper,
                                               my_ip,
                                               multipath=False,
                                               enforce_multipath=False)
Example #6
0
 def get_connector(self, multipath=False, enforce_multipath=False):
     """Build and return the os-brick connector properties dict."""
     root_helper = brick_utils.get_root_helper()
     my_ip = brick_utils.get_my_ip()
     return connector.get_connector_properties(
         root_helper, my_ip,
         multipath=multipath,
         enforce_multipath=enforce_multipath,
         execute=processutils.execute)
Example #7
0
 def set(cls, node_id, storage_nw_ip):
     """Persist this node's connector info and return a NodeInfo."""
     # For now just set multipathing and not enforcing it
     props = brick_connector.get_connector_properties(
         'sudo', storage_nw_ip, CONF.REQUEST_MULTIPATH, False)
     serialized = json.dumps(props, separators=(',', ':'))
     cinderlib.Backend.persistence.set_key_value(
         cinderlib.KeyValue(node_id, serialized))
     return NodeInfo(node_id, props)
Example #8
0
 def get_connector(self, multipath=False, enforce_multipath=False):
     """Return os-brick connector properties for this host."""
     helper = brick_utils.get_root_helper()
     ip_addr = brick_utils.get_my_ip()
     props = connector.get_connector_properties(
         helper,
         ip_addr,
         multipath=multipath,
         enforce_multipath=enforce_multipath,
         execute=processutils.execute)
     return props
Example #9
0
    def _open_cinder_volume(self, client, volume, mode):
        """Attach *volume*, yield an open file object for it, then detach.

        Generator used as a context manager (decorator outside this view --
        presumably ``contextlib.contextmanager``; TODO confirm).  Mode 'wb'
        attaches read/write, anything else read-only.  Cleanup always runs
        in the ``finally`` block and is best-effort: failures are logged,
        not re-raised.

        :param client: cinderclient client instance.
        :param volume: cinder volume object to open.
        :param mode: file mode used to open the attached device.
        """
        attach_mode = 'rw' if mode == 'wb' else 'ro'
        device = None
        root_helper = get_root_helper()
        host = socket.gethostname()
        properties = connector.get_connector_properties(root_helper, host,
                                                        False, False)

        try:
            volume.reserve(volume)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
                   % {'volume_id': volume.id, 'error': e})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        try:
            connection_info = volume.initialize_connection(volume, properties)
            conn = connector.InitiatorConnector.factory(
                connection_info['driver_volume_type'], root_helper,
                conn=connection_info)
            device = conn.connect_volume(connection_info['data'])
            volume.attach(None, None, attach_mode, host_name=host)
            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
            LOG.debug('Opening host device "%s"', device['path'])
            with temporary_chown(device['path']), \
                    open(device['path'], mode) as f:
                yield f
        except Exception:
            LOG.exception(_LE('Exception while accessing to cinder volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})
            raise
        finally:
            # Best-effort teardown: each step logs and continues on failure.
            if volume.status == 'in-use':
                volume.begin_detaching(volume)
            elif volume.status == 'attaching':
                volume.unreserve(volume)

            if device:
                try:
                    conn.disconnect_volume(connection_info['data'], device)
                except Exception:
                    LOG.exception(_LE('Failed to disconnect volume '
                                      '%(volume_id)s.'),
                                  {'volume_id': volume.id})

            try:
                volume.terminate_connection(volume, properties)
            except Exception:
                LOG.exception(_LE('Failed to terminate connection of volume '
                                  '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                client.volumes.detach(volume)
            except Exception:
                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                              {'volume_id': volume.id})
 def _get_brick_connector_info(self):
     """Return os-brick connector properties for this initiator host."""
     # No root helper: privileged commands are not wrapped here.
     return brick_connector.get_connector_properties(
         root_helper=None,
         my_ip=self.initiator_ip,
         multipath=self.use_multipath,
         # If multipath is requested, make sure it is honored.
         enforce_multipath=True,
         host=self.hostname)
 def _get_brick_connector_info(self):
     """Collect os-brick connector properties for this host."""
     conn_info = brick_connector.get_connector_properties(
         root_helper=None,
         my_ip=self.initiator_ip,
         multipath=self.use_multipath,
         enforce_multipath=True,  # honor multipath whenever it is requested
         host=self.hostname)
     return conn_info
Example #12
0
    def _open_cinder_volume(self, client, volume, mode):
        """Attach *volume*, yield an open file object for it, then detach.

        Generator used as a context manager (decorator outside this view --
        presumably ``contextlib.contextmanager``; TODO confirm).  Mode 'wb'
        attaches read/write, anything else read-only.  Cleanup always runs
        in the ``finally`` block and is best-effort: failures are logged,
        not re-raised.

        :param client: cinderclient client instance.
        :param volume: cinder volume object to open.
        :param mode: file mode used to open the attached device.
        """
        attach_mode = 'rw' if mode == 'wb' else 'ro'
        device = None
        root_helper = get_root_helper()
        host = socket.gethostname()
        properties = connector.get_connector_properties(root_helper, host,
                                                        False, False)

        try:
            volume.reserve(volume)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
                   % {'volume_id': volume.id, 'error': e})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        try:
            connection_info = volume.initialize_connection(volume, properties)
            conn = connector.InitiatorConnector.factory(
                connection_info['driver_volume_type'], root_helper,
                conn=connection_info)
            device = conn.connect_volume(connection_info['data'])
            volume.attach(None, None, attach_mode, host_name=host)
            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
            LOG.debug('Opening host device "%s"', device['path'])
            with temporary_chown(device['path']), \
                    open(device['path'], mode) as f:
                yield f
        except Exception:
            LOG.exception(_LE('Exception while accessing to cinder volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})
            raise
        finally:
            # Best-effort teardown: each step logs and continues on failure.
            if volume.status == 'in-use':
                volume.begin_detaching(volume)
            elif volume.status == 'attaching':
                volume.unreserve(volume)

            if device:
                try:
                    conn.disconnect_volume(connection_info['data'], device)
                except Exception:
                    LOG.exception(_LE('Failed to disconnect volume '
                                      '%(volume_id)s.'),
                                  {'volume_id': volume.id})

            try:
                volume.terminate_connection(volume, properties)
            except Exception:
                LOG.exception(_LE('Failed to terminate connection of volume '
                                  '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                client.volumes.detach(volume)
            except Exception:
                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                              {'volume_id': volume.id})
Example #13
0
 def get_connector(self):
     """Return connector properties for this host (multipath disabled)."""
     # TODO(e0ne): use oslo.rootwrap
     # TODO(e0ne): multipath support
     return connector.get_connector_properties('sudo',
                                               _get_my_ip(),
                                               multipath=False,
                                               enforce_multipath=False)
Example #14
0
 def get_volume_connector(self):
     """Return this host's volume connector properties."""
     # NOTE(lpetrut): the Windows os-brick connectors
     # do not use a root helper.
     multipath = CONF.hyperv.use_multipath_io
     return connector.get_connector_properties(
         root_helper=None,
         my_ip=CONF.my_block_storage_ip,
         multipath=multipath,
         enforce_multipath=True,
         host=CONF.host)
Example #15
0
 def get_volume_connector(self):
     """Collect the volume connector properties for this host."""
     # NOTE(lpetrut): the Windows os-brick connectors
     # do not use a root helper.
     conn_props = connector.get_connector_properties(
         root_helper=None,
         my_ip=CONF.my_block_storage_ip,
         multipath=CONF.hyperv.use_multipath_io,
         enforce_multipath=True,
         host=CONF.host)
     return conn_props
Example #16
0
    def set(self, node_id, storage_nw_ip):
        """Store connector properties for *node_id* and return a NodeInfo."""
        # Default to this host's resolved address when no IP was given.
        if not storage_nw_ip:
            storage_nw_ip = socket.gethostbyname(socket.gethostname())

        # For now just set multipathing and not enforcing it
        props = brick_connector.get_connector_properties(
            'sudo', storage_nw_ip, True, False)
        kv = cinderlib.KeyValue(node_id, json.dumps(props))
        cinderlib.Backend.persistence.set_key_value(kv)
        return NodeInfo(node_id, props)
Example #17
0
    def test_brick_get_connector_properties_connectors_called(self):
        """Make sure every connector is called."""

        mock_list = []
        # Make sure every connector is called
        for item in connector._get_connector_list():
            patched = mock.MagicMock()
            # Mirror the running platform/OS so each connector matches.
            patched.platform = platform.machine()
            patched.os_type = sys.platform
            patched.__name__ = item
            patched.get_connector_properties.return_value = {}
            patcher = mock.patch(item, new=patched)
            patcher.start()
            self.addCleanup(patcher.stop)
            mock_list.append(patched)

        connector.get_connector_properties('sudo', MY_IP, True, True)

        for item in mock_list:
            # Each patched connector must have been asked for its properties.
            assert item.get_connector_properties.called
 def get_connector(self,
                   multipath=False,
                   enforce_multipath=False,
                   nic=None):
     """Return connector properties, optionally bound to a specific NIC."""
     helper = brick_utils.get_root_helper()
     ip_addr = brick_utils.get_ip(nic)
     return connector.get_connector_properties(
         helper, ip_addr,
         multipath=multipath,
         enforce_multipath=enforce_multipath,
         execute=None)
    def _get_initiator_info():
        """Return connector info with normalized 'wwpns'/'initiator' keys.

        Ensures ``wwpns`` is present (empty list by default) and that
        ``initiator`` is always a list, wrapping a scalar value if needed.

        :returns: connector properties dict from os-brick, normalized.
        """
        info = connector.get_connector_properties(None, None, True, True)
        # PEP 8 (E713): use "x not in y" rather than "not x in y".
        if 'wwpns' not in info:
            info['wwpns'] = []

        if 'initiator' not in info:
            info['initiator'] = []
        elif not isinstance(info['initiator'], list):
            info['initiator'] = [info['initiator']]

        return info
Example #20
0
 def attach(self):
     """Attach the volume and return the active connection object.

     Builds connector properties from the backend configuration, opens
     the connection and attaches it; the connection is torn down again
     if the attach step fails.

     :returns: the connected and attached connection object.
     :raises: re-raises whatever ``conn.attach()`` raised after cleanup.
     """
     connector_dict = brick_connector.get_connector_properties(
         self.backend_class.root_helper, cfg.CONF.my_ip,
         self.backend.configuration.use_multipath_for_image_xfer,
         self.backend.configuration.enforce_multipath_for_image_xfer)
     conn = self.connect(connector_dict)
     try:
         conn.attach()
     except Exception:
         # Don't leak a half-open connection when attach fails.
         self.disconnect(conn)
         raise
     return conn
Example #21
0
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Wrapper to automatically set root_helper in brick calls.

    :param multipath: A boolean indicating whether the connector can
                      support multipath.
    :param enforce_multipath: If True, it raises exception when multipath=True
                              is specified but multipathd is not running.
                              If False, it falls back to multipath=False
                              when multipathd is not running.
    """

    root_helper = utils.get_root_helper()
    return connector.get_connector_properties(root_helper, CONF.my_ip,
                                              multipath, enforce_multipath)
Example #22
0
    def test_brick_get_connector_properties_connectors_called(self):
        """Make sure every connector is called."""

        mock_list = []
        # Make sure every connector is called
        for item in connector.connector_list:
            patched = mock.MagicMock()
            # Mirror the running platform/OS so each connector matches.
            patched.platform = platform.machine()
            patched.os_type = sys.platform
            patched.__name__ = item
            patched.get_connector_properties.return_value = {}
            patcher = mock.patch(item, new=patched)
            patcher.start()
            self.addCleanup(patcher.stop)
            mock_list.append(patched)

        connector.get_connector_properties('sudo',
                                           MY_IP,
                                           True, True)

        for item in mock_list:
            # Each patched connector must have been asked for its properties.
            assert item.get_connector_properties.called
Example #23
0
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
    """Return os-brick connector properties with root_helper pre-set.

    :param multipath: whether the connector may use multipath.
    :param enforce_multipath: when True, raise if multipath was requested
                              but multipathd is not running; when False,
                              fall back to multipath=False in that case.
    """
    helper = get_root_helper()
    return connector.get_connector_properties(
        helper, CONF.my_ip, multipath, enforce_multipath)
def node(module):
    """Ansible entry point: gather connector properties for this node.

    Extends the module argument spec with ``ips``, ``multipath`` and
    ``enforce_multipath`` options, re-parses the arguments, then asks
    os-brick for the connector dict using the first configured IP.

    :param module: Ansible module object providing ``argument_spec``.
    :returns: dict mapping the storage-data key to the connector dict.
    """
    specs = module.argument_spec
    specs.update(ips={'type': 'list', 'required': True},
                 multipath={'type': 'bool', 'default': True},
                 enforce_multipath={'type': 'bool', 'default': True})
    # NOTE(review): rebinds ``module`` to a fresh AnsibleModule built from
    # the extended spec; check_invalid_arguments is deprecated in newer
    # Ansible releases -- TODO confirm the targeted Ansible version.
    module = basic.AnsibleModule(module.argument_spec,
                                 check_invalid_arguments=True)

    # Only the first configured IP is used as the initiator address.
    connector_dict = connector.get_connector_properties(
        root_helper='sudo',
        my_ip=module.params['ips'][0],
        multipath=module.params['multipath'],
        enforce_multipath=module.params['enforce_multipath'])
    return {common.STORAGE_DATA: {common.CONNECTOR_DICT: connector_dict}}
Example #25
0
def get_volume_connector_properties():
    """Return os-brick connector properties for this host.

    Takes no arguments: multipath use comes from
    ``CONF.volume.use_multipath`` and is enforced (os-brick raises when
    multipath is requested but multipathd is not running).  No root
    helper is used.

    :returns: dict of connector properties for this host.
    """

    return brick_connector.get_connector_properties(None,
                                                    CONF.my_block_storage_ip,
                                                    CONF.volume.use_multipath,
                                                    enforce_multipath=True,
                                                    host=CONF.host)
Example #26
0
def get_volume_connector_properties():
    """Return os-brick connector properties for this host.

    Takes no arguments: multipath use comes from
    ``CONF.volume.use_multipath`` and is enforced (os-brick raises when
    multipath is requested but multipathd is not running).  No root
    helper is used.

    :returns: dict of connector properties for this host.
    """

    return brick_connector.get_connector_properties(
        None,
        CONF.my_block_storage_ip,
        CONF.volume.use_multipath,
        enforce_multipath=True,
        host=CONF.host)
Example #27
0
 def _test_brick_get_connector_properties(self, multipath,
                                          enforce_multipath,
                                          multipath_result,
                                          mock_wwnns, mock_wwpns,
                                          mock_initiator,
                                          host='fakehost'):
     """Verify connector properties reported for the fake s390x host."""
     expected = {
         'initiator': 'fakeinitiator',
         'host': host,
         'ip': MY_IP,
         'multipath': multipath_result,
         'os_type': 'linux2',
         'platform': 's390x',
         'do_local_attach': False,
     }
     actual = connector.get_connector_properties(
         'sudo', MY_IP, multipath, enforce_multipath, host=host)
     self.assertEqual(expected, actual)
Example #28
0
    def _open_cinder_volume(self, client, volume, mode):
        """Attach *volume*, yield a handle for reading/writing it, detach.

        Generator used as a context manager (decorator outside this view --
        presumably ``contextlib.contextmanager``; TODO confirm).  Mode 'wb'
        attaches read/write, anything else read-only.  NFS volumes are
        mounted directly; rbd volumes without local attach yield the raw
        device path instead of an open file object.  Cleanup always runs
        in the ``finally`` block and is best-effort (failures are logged).

        :param client: cinderclient client instance.
        :param volume: cinder volume object to open.
        :param mode: file mode used to open the attached device.
        """
        attach_mode = 'rw' if mode == 'wb' else 'ro'
        device = None
        root_helper = self.get_root_helper()
        priv_context.init(root_helper=shlex.split(root_helper))
        host = socket.gethostname()
        use_multipath = self.store_conf.cinder_use_multipath
        enforce_multipath = self.store_conf.cinder_enforce_multipath
        mount_point_base = self.store_conf.cinder_mount_point_base

        properties = connector.get_connector_properties(
            root_helper, host, use_multipath, enforce_multipath)

        try:
            volume.reserve(volume)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s') % {
                'volume_id': volume.id,
                'error': e
            })
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        try:
            connection_info = volume.initialize_connection(volume, properties)
            conn = connector.InitiatorConnector.factory(
                connection_info['driver_volume_type'],
                root_helper,
                conn=connection_info)
            if connection_info['driver_volume_type'] == 'nfs':
                # Encrypted volumes are not supported over NFS: undo the
                # reservation, drop the volume and abort.
                if volume.encrypted:
                    volume.unreserve(volume)
                    volume.delete()
                    msg = (_('Encrypted volume creation for cinder nfs is not '
                             'supported from glance_store. Failed to create '
                             'volume %(volume_id)s') % {
                                 'volume_id': volume.id
                             })
                    LOG.error(msg)
                    raise exceptions.BackendException(msg)

                # Serialize mounts of the same export across callers.
                @utils.synchronized(connection_info['data']['export'])
                def connect_volume_nfs():
                    data = connection_info['data']
                    export = data['export']
                    vol_name = data['name']
                    mountpoint = self._get_mount_path(
                        export, os.path.join(mount_point_base, 'nfs'))
                    options = data['options']
                    self.mount.mount('nfs', export, vol_name, mountpoint, host,
                                     root_helper, options)
                    return {'path': os.path.join(mountpoint, vol_name)}

                device = connect_volume_nfs()
            else:
                device = conn.connect_volume(connection_info['data'])
            volume.attach(None, 'glance_store', attach_mode, host_name=host)
            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
            if (connection_info['driver_volume_type'] == 'rbd'
                    and not conn.do_local_attach):
                yield device['path']
            else:
                with self.temporary_chown(device['path']), open(
                        device['path'], mode) as f:
                    yield f
        except Exception:
            LOG.exception(
                _LE('Exception while accessing to cinder volume '
                    '%(volume_id)s.'), {'volume_id': volume.id})
            raise
        finally:
            # Best-effort teardown: each step logs and continues on failure.
            if volume.status == 'in-use':
                volume.begin_detaching(volume)
            elif volume.status == 'attaching':
                volume.unreserve(volume)

            if device:
                try:
                    if connection_info['driver_volume_type'] == 'nfs':

                        @utils.synchronized(connection_info['data']['export'])
                        def disconnect_volume_nfs():
                            path, vol_name = device['path'].rsplit('/', 1)
                            self.mount.umount(vol_name, path, host,
                                              root_helper)

                        disconnect_volume_nfs()
                    else:
                        conn.disconnect_volume(connection_info['data'], device)
                except Exception:
                    LOG.exception(
                        _LE('Failed to disconnect volume '
                            '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                volume.terminate_connection(volume, properties)
            except Exception:
                LOG.exception(
                    _LE('Failed to terminate connection of volume '
                        '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                client.volumes.detach(volume)
            except Exception:
                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                              {'volume_id': volume.id})
    def volumedriver_mount(self, name):
        """
        Mount the volume

        NOTE: If for any reason the mount request fails, Docker
        will automatically call uMount. So, just make sure uMount
        can handle partially completed Mount requests.

        :param unicode name: The name of the volume.

        :return: Result that includes the mountpoint.
        """
        LOG.debug('In volumedriver_mount')

        # TODO: use persistent storage to lookup volume for deletion
        contents = {}
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            msg = (_LE('Volume mount name not found %s'), volname)
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        vol_mount = DEFAULT_MOUNT_VOLUME
        if ('Opts' in contents and contents['Opts']
                and 'mount-volume' in contents['Opts']):
            vol_mount = str(contents['Opts']['mount-volume'])

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'

        connector_info = connector.get_connector_properties(
            root_helper,
            self._my_ip,
            multipath=self.use_multipath,
            enforce_multipath=self.enforce_multipath)

        try:
            # Call driver to initialize the connection
            self.hpeplugin_driver.create_export(vol, connector_info)
            connection_info = \
                self.hpeplugin_driver.initialize_connection(
                    vol, connector_info)
            LOG.debug(
                'connection_info: %(connection_info)s, '
                'was successfully retrieved',
                {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_('connection info retrieval failed, error is: %s'),
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        # Call OS Brick to connect volume
        try:
            device_info = self.connector.\
                connect_volume(connection_info['data'])
        except Exception as ex:
            msg = (_('OS Brick connect volume failed, error is: %s'),
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        # Make sure the path exists
        path = FilePath(device_info['path']).realpath()
        # BUG FIX: was ``path.exists is False`` -- identity comparison
        # against the False singleton.  NOTE(review): if FilePath.exists
        # is a method (as on twisted's FilePath) this check never fires;
        # it may need to be ``not path.exists()`` -- TODO confirm API.
        if not path.exists:
            msg = (_('path: %s,  does not exist'), path)
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        LOG.debug(
            'path for volume: %(name)s, was successfully created: '
            '%(device)s realpath is: %(realpath)s', {
                'name': volname,
                'device': device_info['path'],
                'realpath': path.path
            })

        # Create filesystem on the new device
        if fileutil.has_filesystem(path.path) is False:
            fileutil.create_filesystem(path.path)
            LOG.debug('filesystem successfully created on : %(path)s',
                      {'path': path.path})

        # Determine if we need to mount the volume
        # BUG FIX: was ``vol_mount is DEFAULT_MOUNT_VOLUME`` -- identity
        # comparison on a string built via str() is unreliable; compare
        # by value instead.
        if vol_mount == DEFAULT_MOUNT_VOLUME:
            # mkdir for mounting the filesystem
            mount_dir = fileutil.mkdir_for_mounting(device_info['path'])
            LOG.debug(
                'Directory: %(mount_dir)s, '
                'successfully created to mount: '
                '%(mount)s', {
                    'mount_dir': mount_dir,
                    'mount': device_info['path']
                })

            # mount the directory
            fileutil.mount_dir(path.path, mount_dir)
            # BUG FIX: placeholder was '%(path)' (missing 's' conversion),
            # which breaks %-formatting when the record is rendered.
            LOG.debug('Device: %(path)s successfully mounted on %(mount)s', {
                'path': path.path,
                'mount': mount_dir
            })

            # TODO: find out how to invoke mkfs so that it creates the
            # filesystem without the lost+found directory
            # KLUDGE!!!!!
            lostfound = mount_dir + '/lost+found'
            lfdir = FilePath(lostfound)
            # NOTE(review): if ``lfdir.exists`` is a bound method it is
            # always truthy here; possibly meant ``lfdir.exists()`` --
            # TODO confirm FilePath API before changing behavior.
            if lfdir.exists and fileutil.remove_dir(lostfound):
                LOG.debug(
                    'Successfully removed : '
                    '%(lost)s from mount: %(mount)s', {
                        'lost': lostfound,
                        'mount': mount_dir
                    })
        else:
            mount_dir = ''

        path_info = {}
        path_info['name'] = volname
        path_info['path'] = path.path
        path_info['device_info'] = device_info
        path_info['connection_info'] = connection_info
        path_info['mount_dir'] = mount_dir

        self._etcd.update_vol(volid, 'path_info', json.dumps(path_info))

        response = json.dumps({
            u"Err": '',
            u"Name": volname,
            u"Mountpoint": mount_dir,
            u"Devicename": path.path
        })
        return response
    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: request object whose body holds the volume name.
        :return: JSON string result indicating success.
        :raises exception.HPEPluginUMountException: if the volume or its
            path info cannot be found in etcd.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            # Interpolate now: a (fmt, arg) tuple would log the tuple repr
            # instead of the formatted message.
            msg = _LE('Volume unmount name not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        vol_mount = DEFAULT_MOUNT_VOLUME
        if ('Opts' in contents and contents['Opts']
                and 'mount-volume' in contents['Opts']):
            vol_mount = str(contents['Opts']['mount-volume'])

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = _LE('Volume unmount path info not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'

        connector_info = connector.get_connector_properties(
            root_helper,
            self._my_ip,
            multipath=self.use_multipath,
            enforce_multipath=self.enforce_multipath)

        # Determine if we need to unmount a previously mounted volume.
        # Use equality, not identity: vol_mount may be a fresh str() object
        # equal to DEFAULT_MOUNT_VOLUME, for which 'is' would be False and
        # the unmount would be wrongly skipped.
        if vol_mount == DEFAULT_MOUNT_VOLUME:
            # unmount directory
            fileutil.umount_dir(mount_dir)
            # remove directory
            fileutil.remove_dir(mount_dir)

        # We're deferring the execution of the disconnect_volume as it can
        # take substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(
                _LI('connection_info: %(connection_info)s, '
                    'was successfully terminated'),
                {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s') %
                   six.text_type(ex))
            LOG.error(msg)
            # Not much we can do here, so just continue on with unmount
            # We need to ensure we update etcd path_info so the stale
            # path does not stay around
            # raise exception.HPEPluginUMountException(reason=msg)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(
            _LI('path for volume: %(name)s, was successfully removed: '
                '%(path_name)s'), {
                    'name': volname,
                    'path_name': path_name
                })

        response = json.dumps({u"Err": ''})
        return response
Example #31
0
    def _open_cinder_volume(self, client, volume, mode):
        """Attach a cinder volume and yield a usable handle to its data.

        Generator context manager: reserves, connects and attaches the
        volume, yields either the raw device path (local rbd case) or an
        open file object, then tears everything down in the finally block
        regardless of how far the attach got.

        :param client: cinder client used to detach the volume at the end.
        :param volume: cinder volume object to attach.
        :param mode: file open mode; 'wb' implies a read-write attach,
            anything else attaches read-only.
        :raises exceptions.BackendException: if the volume cannot be
            reserved.
        """
        attach_mode = 'rw' if mode == 'wb' else 'ro'
        # device stays None until connect_volume succeeds; the finally
        # block uses that to decide whether disconnect is needed.
        device = None
        root_helper = get_root_helper(backend=self.backend_group)
        priv_context.init(root_helper=shlex.split(root_helper))
        host = socket.gethostname()
        # Multipath settings come from the backend-specific config group
        # when one is configured, otherwise from the default glance_store
        # group.
        if self.backend_group:
            use_multipath = getattr(
                self.conf, self.backend_group).cinder_use_multipath
            enforce_multipath = getattr(
                self.conf, self.backend_group).cinder_enforce_multipath
        else:
            use_multipath = self.conf.glance_store.cinder_use_multipath
            enforce_multipath = self.conf.glance_store.cinder_enforce_multipath

        properties = connector.get_connector_properties(
            root_helper, host, use_multipath, enforce_multipath)

        # Reserve first so no other consumer can grab the volume while we
        # are attaching; failure here aborts before any connection exists.
        try:
            volume.reserve(volume)
        except cinder_exception.ClientException as e:
            msg = (_('Failed to reserve volume %(volume_id)s: %(error)s')
                   % {'volume_id': volume.id, 'error': e})
            LOG.error(msg)
            raise exceptions.BackendException(msg)

        try:
            connection_info = volume.initialize_connection(volume, properties)
            conn = connector.InitiatorConnector.factory(
                connection_info['driver_volume_type'], root_helper,
                conn=connection_info)
            device = conn.connect_volume(connection_info['data'])
            volume.attach(None, 'glance_store', attach_mode, host_name=host)
            volume = self._wait_volume_status(volume, 'attaching', 'in-use')
            # rbd without local attach has no block device to open — hand
            # the caller the path and let it talk to the cluster directly.
            if (connection_info['driver_volume_type'] == 'rbd' and
               not conn.do_local_attach):
                yield device['path']
            else:
                with temporary_chown(device['path'],
                                     backend=self.backend_group), \
                        open(device['path'], mode) as f:
                    yield f
        except Exception:
            LOG.exception(_LE('Exception while accessing to cinder volume '
                              '%(volume_id)s.'), {'volume_id': volume.id})
            raise
        finally:
            # Cleanup mirrors how far the attach progressed: an attached
            # volume must begin detaching, a merely-reserved one is
            # unreserved.
            if volume.status == 'in-use':
                volume.begin_detaching(volume)
            elif volume.status == 'attaching':
                volume.unreserve(volume)

            # Each teardown step is best-effort so one failure does not
            # prevent the remaining steps from running.
            if device:
                try:
                    conn.disconnect_volume(connection_info['data'], device)
                except Exception:
                    LOG.exception(_LE('Failed to disconnect volume '
                                      '%(volume_id)s.'),
                                  {'volume_id': volume.id})

            try:
                volume.terminate_connection(volume, properties)
            except Exception:
                LOG.exception(_LE('Failed to terminate connection of volume '
                                  '%(volume_id)s.'), {'volume_id': volume.id})

            try:
                client.volumes.detach(volume)
            except Exception:
                LOG.exception(_LE('Failed to detach volume %(volume_id)s.'),
                              {'volume_id': volume.id})
import nova.netconf
import nova.virt.libvirt.driver
from nova import objects

# import shlex
# from oslo_privsep import priv_context
# priv_context.init(root_helper=shlex.split(utils.get_root_helper()))


# Ad-hoc script: bootstrap nova config/logging, then probe os-brick
# connector properties directly.
CONF = nova.conf.CONF
argv = []
default_config_files = ['/etc/nova/nova.conf']
config.parse_args(argv, default_config_files=default_config_files)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
logging.setup(CONF, 'nova')
LOG = logging.getLogger('nova.compute')
utils.monkey_patch()
objects.register_all()

# NOTE(review): the hard-coded privsep-helper command line immediately
# overwrites the helper returned by utils.get_root_helper() — the first
# assignment is dead. Confirm which helper is actually intended.
root_helper = utils.get_root_helper()
root_helper = 'sudo privsep-helper --config-file /etc/nova/nova.conf --config-dir /etc/nova'
connector.get_connector_properties(root_helper, CONF.my_block_storage_ip,CONF.libvirt.iscsi_use_multipath,enforce_multipath=True,host=CONF.host)


# NOTE(review): this rebinds the module-level name 'connector' to the
# FibreChannelConnector *class*; get_connector_properties is then called
# as an unbound attribute. Verify this shadowing is intentional.
connector = importutils.import_class('os_brick.initiator.connector.FibreChannelConnector')
connector.get_connector_properties(root_helper,host='i620d7-app',multipath=False,enforce_multipath=True,execute=None)

fc = linuxfc.LinuxFibreChannel(root_helper,execute=None)

    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: request object whose body holds the volume name.
        :return: JSON string result indicating success.
        :raises exception.HPEPluginUMountException: if the volume or its
            path info cannot be found in etcd.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            # Interpolate now: a (fmt, arg) tuple would log the tuple repr
            # instead of the formatted message.
            msg = _LE('Volume unmount name not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        vol_mount = DEFAULT_MOUNT_VOLUME
        if ('Opts' in contents and contents['Opts'] and
                'mount-volume' in contents['Opts']):
            vol_mount = str(contents['Opts']['mount-volume'])

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = _LE('Volume unmount path info not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'

        connector_info = connector.get_connector_properties(
            root_helper, self._my_ip, multipath=self.use_multipath,
            enforce_multipath=self.enforce_multipath)

        # Determine if we need to unmount a previously mounted volume.
        # Use equality, not identity: vol_mount may be a fresh str() object
        # equal to DEFAULT_MOUNT_VOLUME, for which 'is' would be False and
        # the unmount would be wrongly skipped.
        if vol_mount == DEFAULT_MOUNT_VOLUME:
            # unmount directory
            fileutil.umount_dir(mount_dir)
            # remove directory
            fileutil.remove_dir(mount_dir)

        # We're deferring the execution of the disconnect_volume as it can
        # take substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(_LI('connection_info: %(connection_info)s, '
                         'was successfully terminated'),
                     {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s') %
                   six.text_type(ex))
            LOG.error(msg)
            # Not much we can do here, so just continue on with unmount
            # We need to ensure we update etcd path_info so the stale
            # path does not stay around
            # raise exception.HPEPluginUMountException(reason=msg)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(_LI('path for volume: %(name)s, was successfully removed: '
                     '%(path_name)s'), {'name': volname,
                                        'path_name': path_name})

        response = json.dumps({u"Err": ''})
        return response
    def volumedriver_mount(self, name):
        """
        Mount the volume.

        NOTE: If for any reason the mount request fails, Docker
        will automatically call uMount. So, just make sure uMount
        can handle partially completed Mount requests.

        :param unicode name: request object whose body holds the volume name.
        :return: JSON string result that includes the mountpoint.
        :raises exception.HPEPluginMountException: if the volume cannot be
            found, connected, or its device path does not exist.
        """
        LOG.debug('In volumedriver_mount')

        # TODO: use persistent storage to lookup volume for deletion
        contents = {}
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            # Interpolate now: a (fmt, arg) tuple would log the tuple repr
            # instead of the formatted message.
            msg = _LE('Volume mount name not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'
        use_multipath = False

        connector_info = connector.get_connector_properties(
            root_helper, self._my_ip, use_multipath, enforce_multipath=False)

        try:
            # Call driver to initialize the connection
            self.hpeplugin_driver.create_export(vol, connector_info)
            connection_info = \
                self.hpeplugin_driver.initialize_connection(
                    vol, connector_info)
            LOG.debug('connection_info: %(connection_info)s, '
                      'was successfully retrieved',
                      {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_('connection info retrieval failed, error is: %s') %
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        # Call OS Brick to connect volume
        try:
            device_info = self.connector.\
                connect_volume(connection_info['data'])
        except Exception as ex:
            msg = (_('OS Brick connect volume failed, error is: %s') %
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        # Make sure the path exists. FilePath.exists is a method: the
        # original compared the bound method object to False, which could
        # never be true, so a missing device was silently accepted.
        path = FilePath(device_info['path']).realpath()
        if not path.exists():
            msg = _('path: %s,  does not exist') % path
            LOG.error(msg)
            raise exception.HPEPluginMountException(reason=msg)

        LOG.debug('path for volume: %(name)s, was successfully created: '
                  '%(device)s realpath is: %(realpath)s',
                  {'name': volname, 'device': device_info['path'],
                   'realpath': path.path})

        # mkdir for mounting the filesystem
        mount_dir = fileutil.mkdir_for_mounting(device_info['path'])
        LOG.debug('Directory: %(mount_dir)s, successfully created to mount: '
                  '%(mount)s',
                  {'mount_dir': mount_dir, 'mount': device_info['path']})

        # Create filesystem on the new device
        if fileutil.has_filesystem(path.path) is False:
            fileutil.create_filesystem(path.path)
            LOG.debug('filesystem successfully created on : %(path)s',
                      {'path': path.path})

        # mount the directory
        fileutil.mount_dir(path.path, mount_dir)
        # '%(path)s' — the original omitted the 's' conversion, which is a
        # malformed format and makes the log call fail to render.
        LOG.debug('Device: %(path)s successfully mounted on %(mount)s',
                  {'path': path.path, 'mount': mount_dir})

        # TODO: find out how to invoke mkfs so that it creates the filesystem
        # without the lost+found directory
        # KLUDGE!!!!!
        lostfound = mount_dir + '/lost+found'
        lfdir = FilePath(lostfound)
        # Call exists(): the bare method object is always truthy, so the
        # original attempted the removal unconditionally.
        if lfdir.exists() and fileutil.remove_dir(lostfound):
            LOG.debug('Successfully removed : %(lost)s from mount: %(mount)s',
                      {'lost': lostfound, 'mount': mount_dir})

        path_info = {}
        path_info['name'] = volname
        path_info['path'] = path.path
        path_info['device_info'] = device_info
        path_info['connection_info'] = connection_info
        path_info['mount_dir'] = mount_dir

        self._etcd.update_vol(volid, 'path_info', json.dumps(path_info))

        response = json.dumps({u"Err": '', u"Mountpoint": mount_dir})
        return response
    def volumedriver_unmount(self, name):
        """
        The Docker container is no longer using the given volume,
        so unmount it.
        NOTE: Since Docker will automatically call Unmount if the Mount
        fails, make sure we properly handle partially completed Mounts.

        :param unicode name: request object whose body holds the volume name.
        :return: JSON string result indicating success.
        :raises exception.HPEPluginUMountException: if the volume or its
            path info cannot be found, or connection termination fails.
        """
        LOG.info(_LI('In volumedriver_unmount'))
        contents = json.loads(name.content.getvalue())
        volname = contents['Name']
        vol = self._etcd.get_vol_byname(volname)
        if vol is not None:
            volid = vol['id']
        else:
            # Interpolate now: a (fmt, arg) tuple would log the tuple repr
            # instead of the formatted message.
            msg = _LE('Volume unmount name not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        path_info = self._etcd.get_vol_path_info(volname)
        if path_info:
            path_name = path_info['path']
            connection_info = path_info['connection_info']
            mount_dir = path_info['mount_dir']
        else:
            msg = _LE('Volume unmount path info not found %s') % volname
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # Get connector info from OS Brick
        # TODO: retrieve use_multipath and enforce_multipath from config file
        root_helper = 'sudo'
        use_multipath = False

        connector_info = connector.get_connector_properties(
            root_helper, self._my_ip, use_multipath, enforce_multipath=False)
        # unmount directory
        fileutil.umount_dir(mount_dir)
        # remove directory
        fileutil.remove_dir(mount_dir)

        try:
            # Call driver to terminate the connection
            self.hpeplugin_driver.terminate_connection(vol, connector_info)
            LOG.info(_LI('connection_info: %(connection_info)s, '
                         'was successfully terminated'),
                     {'connection_info': json.dumps(connection_info)})
        except Exception as ex:
            msg = (_LE('connection info termination failed %s') %
                   six.text_type(ex))
            LOG.error(msg)
            raise exception.HPEPluginUMountException(reason=msg)

        # We're deferring the execution of the disconnect_volume as it can
        # take substantial time (over 2 minutes) to cleanup the iscsi files
        if connection_info:
            LOG.info(_LI('call os brick to disconnect volume'))
            d = threads.deferToThread(self.connector.disconnect_volume,
                                      connection_info['data'], None)
            d.addCallbacks(self.disconnect_volume_callback,
                           self.disconnect_volume_error_callback)

        # TODO(leeantho) Without this sleep the volume is sometimes not
        # removed after the unmount. There must be a different way to fix
        # the issue?
        time.sleep(1)

        # TODO: Create path_info list as we can mount the volume to multiple
        # hosts at the same time.
        self._etcd.update_vol(volid, 'path_info', None)

        LOG.info(_LI('path for volume: %(name)s, was successfully removed: '
                     '%(path_name)s'), {'name': volname,
                                        'path_name': path_name})

        response = json.dumps({u"Err": ''})
        return response