def _remove_vpool(self):
     """
     Clean up
     This is not actually a test of "Remove Vpool from OVS",
     so any failure here will be reported as a tearDown error and no cleanup will occur
     """
     self._debug('Removing vpool')
     vpool = VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME)
     if vpool is None:
         self._debug('already removed')
         return
     for storagedriver_guid in vpool.storagedrivers_guids:
         self._debug(
             'removing storagedriver {0}'.format(storagedriver_guid))
         StorageRouterController.remove_storagedriver(storagedriver_guid)
     attempt = 0
     while attempt < 10:
         vpool = VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME)
         if vpool is None:
             self._debug('vpool {0} deleted'.format(
                 OVSPluginTestCase.VPOOL_NAME))
             return
         attempt += 1
         time.sleep(2)
     raise RuntimeError('Vpool {0} was not removed correctly.'.format(
         OVSPluginTestCase.VPOOL_NAME))
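The ten-attempt polling loop above recurs in several examples on this page. A minimal sketch of the same pattern as a reusable helper (hypothetical; not part of the OVS API):

import time

def wait_until(predicate, attempts=10, delay=2):
    # Poll `predicate` up to `attempts` times, pausing `delay` seconds
    # between tries; True as soon as it holds, False if it never does.
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False

# The removal loop above then collapses to:
# if not wait_until(lambda: VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME) is None):
#     raise RuntimeError(...)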
Example #2
def _getVPoolByUrl(url, vpoolname=None):
    if vpoolname is None and url.port:
        vpool = getVPoolByIPandPort(url.hostname, url.port)
    elif vpoolname is None:
        vpool = VPoolList.get_vpool_by_name(VPOOLNAME)
    else:
        vpool = VPoolList.get_vpool_by_name(vpoolname)
    return vpool
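For context, a hedged usage sketch of the resolver above; the ovs:// address is an illustrative assumption, and the helper relies on the module-level getVPoolByIPandPort and VPOOLNAME:

from urlparse import urlparse  # Python 2; use urllib.parse on Python 3

url = urlparse('ovs://203.0.113.10:12345')
vpool = _getVPoolByUrl(url)              # resolved via IP and port
vpool = _getVPoolByUrl(url, 'myvpool')   # resolved via explicit name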
Example #3
    def get_vdisk_by_name(vdisk_name, vpool_name):
        """
        Fetch disk partitions by disk guid

        :param vdisk_name: location of a vdisk on a vpool
                           (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw )
        :type vdisk_name: str
        :param vpool_name: name of a existing vpool
        :type vpool_name: str
        :return: a vdisk object
        :rtype: ovs.dal.hybrids.vdisk
        """

        vpool = VPoolList.get_vpool_by_name(vpool_name)
        if vpool:
            if not vdisk_name.startswith("/"):
                vdisk_name = "/{0}".format(vdisk_name)
            if not vdisk_name.endswith('.raw'):
                vdisk_name = '{0}.raw'.format(vdisk_name)
            vdisk = VDiskList.get_by_devicename_and_vpool(vdisk_name, vpool)
            if vdisk:
                return vdisk
            else:
                raise VDiskNotFoundError(
                    "VDisk with name `{0}` not found on vPool `{1}`!".format(
                        vdisk_name, vpool_name))
        else:
            raise VPoolNotFoundError(
                "vPool with name `{0}` cannot be found!".format(vpool_name))
Example #4
    def update_vdisk_name(volume_id, old_name, new_name):
        """
        Update a vDisk name using Management Center: set new name
        """
        vdisk = None
        for mgmt_center in MgmtCenterList.get_mgmtcenters():
            mgmt = Factory.get_mgmtcenter(mgmt_center=mgmt_center)
            try:
                disk_info = mgmt.get_vdisk_device_info(volume_id)
                device_path = disk_info['device_path']
                vpool_name = disk_info['vpool_name']
                vp = VPoolList.get_vpool_by_name(vpool_name)
                file_name = os.path.basename(device_path)
                vdisk = VDiskList.get_by_devicename_and_vpool(file_name, vp)
                if vdisk:
                    break
            except Exception as ex:
                logger.info(
                    'Trying to get mgmt center failed for disk {0} with volume_id {1}. {2}'
                    .format(old_name, volume_id, ex))
        if not vdisk:
            logger.error('No vdisk found for name {0}'.format(old_name))
            return

        vpool = vdisk.vpool
        mutex = VolatileMutex('{}_{}'.format(
            old_name, vpool.guid if vpool is not None else 'none'))
        try:
            mutex.acquire(wait=5)
            vdisk.name = new_name
            vdisk.save()
        finally:
            mutex.release()
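The acquire/try/finally/release pattern above is easy to get wrong; a small context-manager wrapper (hypothetical, not part of OVS) would let callers use a with block instead:

from contextlib import contextmanager

@contextmanager
def volatile_lock(name, wait=5):
    # Hypothetical convenience wrapper around VolatileMutex.
    mutex = VolatileMutex(name)
    mutex.acquire(wait=wait)
    try:
        yield mutex
    finally:
        mutex.release()

# with volatile_lock('{0}_{1}'.format(old_name, vpool.guid)):
#     vdisk.name = new_name
#     vdisk.save()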
Example #5

 def get_vpool_by_name(vpool_name):
     """
     Retrieve the vPool object by its name
     :param vpool_name: Name of the vPool
     :return: vPool DAL object
     """
     return VPoolList.get_vpool_by_name(vpool_name)
Example #6
    def update_vdisk_name(volume_id, old_name, new_name):
        """
        Update a vDisk name using Management Center: set new name
        """
        vdisk = None
        for mgmt_center in MgmtCenterList.get_mgmtcenters():
            mgmt = Factory.get_mgmtcenter(mgmt_center = mgmt_center)
            try:
                disk_info = mgmt.get_vdisk_device_info(volume_id)
                device_path = disk_info['device_path']
                vpool_name = disk_info['vpool_name']
                vp = VPoolList.get_vpool_by_name(vpool_name)
                file_name = os.path.basename(device_path)
                vdisk = VDiskList.get_by_devicename_and_vpool(file_name, vp)
                if vdisk:
                    break
            except Exception as ex:
                logger.info('Trying to get mgmt center failed for disk {0} with volume_id {1}. {2}'.format(old_name, volume_id, ex))
        if not vdisk:
            logger.error('No vdisk found for name {0}'.format(old_name))
            return

        vpool = vdisk.vpool
        mutex = VolatileMutex('{}_{}'.format(old_name, vpool.guid if vpool is not None else 'none'))
        try:
            mutex.acquire(wait=5)
            vdisk.name = new_name
            vdisk.save()
        finally:
            mutex.release()
Example #7
    def __init__(self, name):
        """
        Initialize a StorageDriverInstaller class instance containing information about:
            - vPool information on which a new StorageDriver is going to be deployed, eg: global vPool configurations, vPool name, ...
            - Information about caching behavior
            - Information about which ALBA Backends to use as main Backend, fragment cache Backend, block cache Backend
            - Connection information about how to reach the ALBA Backends via the API
            - StorageDriver configuration settings
            - The storage IP address
        """
        if not re.match(pattern=ExtensionsToolbox.regex_vpool, string=name):
            raise ValueError('Incorrect vPool name provided')

        self.name = name
        self.vpool = VPoolList.get_vpool_by_name(vpool_name=name)
        self.is_new = self.vpool is None
        self.mds_tlogs = None
        self.mds_safety = None
        self.mds_maxload = None
        self.mds_services = []
        self.sd_installer = None
        self.sr_installer = None
        self.storagedriver_amount = 0 if self.vpool is None else len(
            self.vpool.storagedrivers)
        # Backend information retrieved via the API, stored here because it
        # is used in several places
        self.complete_backend_info = {}
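A hedged instantiation sketch for the installer above; 'myvpool' is a placeholder and must match ExtensionsToolbox.regex_vpool:

installer = StorageDriverInstaller(name='myvpool')
if installer.is_new:
    print('vPool does not exist yet; it will be created')
else:
    print('Extending a vPool that already has {0} StorageDriver(s)'.format(
        installer.storagedriver_amount))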
Example #8

    def _create_vpool(self):
        """
        Needed to actually run tests on
        This is not actually a test of "Add Vpool to OVS",
        so any failure here will be reported as a setUp error and no tests will run
        """
        pmachine = System.get_my_storagerouter().pmachine
        mgmt_center = MgmtCenter(
            data={
                'name': 'Openstack',
                'description': 'test',
                'username': OVSPluginTestCase.CINDER_USER,
                'password': OVSPluginTestCase.CINDER_PASS,
                'ip': OVSPluginTestCase.CINDER_CONTROLLER,
                'port': 80,
                'type': 'OPENSTACK',
                'metadata': {
                    'integratemgmt': True
                }
            })
        mgmt_center.save()
        pmachine.mgmtcenter = mgmt_center
        pmachine.save()
        self._debug('Creating vpool')

        parameters = {
            'storagerouter_ip': OVSPluginTestCase.ip,
            'vpool_name': OVSPluginTestCase.VPOOL_NAME,
            'type': 'local',
            'storage_ip': '127.0.0.1',  # KVM
            'vrouter_port': OVSPluginTestCase.VPOOL_PORT,
            'integrate_vpool': True,
            'connection_host': OVSPluginTestCase.ip,
            'connection_port': OVSPluginTestCase.VPOOL_PORT,
            'connection_username': '',
            'connection_password': '',
            'connection_backend': {},
            'readcache_size': 50,
            'writecache_size': 50
        }
        StorageRouterController.add_vpool(parameters)
        attempt = 0
        while attempt < 10:
            vpool = VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME)
            if vpool is not None:
                self._debug('vpool {0} created'.format(
                    OVSPluginTestCase.VPOOL_NAME))
                try:
                    os.listdir(OVSPluginTestCase.VPOOL_MOUNTPOINT)
                    return vpool
                except Exception as ex:
                    # either it doesn't exist, or we don't have permission
                    self._debug('vpool not ready yet {0}'.format(str(ex)))
                    pass
            attempt += 1
            time.sleep(2)
        raise RuntimeError(
            'Vpool {0} was not modeled correctly or did not start.'.format(
                OVSPluginTestCase.VPOOL_NAME))
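In the test suite these helpers would typically be wired into the unittest fixture methods, so that a setUp failure skips the tests and a tearDown failure skips the cleanup (a sketch, assuming the helper methods above are defined on the class):

import unittest

class OVSPluginTestCase(unittest.TestCase):
    def setUp(self):
        self.vpool = self._create_vpool()

    def tearDown(self):
        self._remove_vpool()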
Example #9
    def _create_vpool(self):
        """
        Needed to actually run tests on
        This is not actually a test of "Add Vpool to OVS",
        so any failure here will be reported as a setUp error and no tests will run
        """
        pmachine = System.get_my_storagerouter().pmachine
        mgmt_center = MgmtCenter(data={'name':'Openstack',
                                       'description':'test',
                                       'username':CINDER_USER,
                                       'password':CINDER_PASS,
                                       'ip':CINDER_CONTROLLER,
                                       'port':80,
                                       'type':'OPENSTACK',
                                       'metadata':{'integratemgmt':True}})
        mgmt_center.save()
        pmachine.mgmtcenter = mgmt_center
        pmachine.save()
        self._debug('Creating vpool')
        backend_type = 'local'
        fields = ['storage_ip', 'vrouter_port']

        parameters = {'storagerouter_ip': IP,
                      'vpool_name': VPOOL_NAME,
                      'type': 'local',
                      'mountpoint_bfs': VPOOL_BFS,
                      'mountpoint_temp': VPOOL_TEMP,
                      'mountpoint_md': VPOOL_MD,
                      'mountpoint_readcaches': [VPOOL_READCACHE],
                      'mountpoint_writecaches': [VPOOL_WRITECACHE],
                      'mountpoint_foc': VPOOL_FOC,
                      'storage_ip': '127.0.0.1', #KVM
                      'vrouter_port': VPOOL_PORT,
                      'integrate_vpool': True,
                      'connection_host': IP,
                      'connection_port': VPOOL_PORT,
                      'connection_username': '',
                      'connection_password': '',
                      'connection_backend': {},
                      }
        StorageRouterController.add_vpool(parameters)
        attempt = 0
        while attempt < 10:
            vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
            if vpool is not None:
                self._debug('vpool %s created' % VPOOL_NAME)
                try:
                    os.listdir(VPOOL_MOUNTPOINT)
                    return vpool
                except Exception as ex:
                    #either it doesn't exist, or we don't have permission
                    self._debug('vpool not ready yet %s' % (str(ex)))
                    pass
            attempt += 1
            time.sleep(2)
        raise RuntimeError('Vpool %s was not modeled correctly or did not start.' % VPOOL_NAME)
Example #10
    def _create_vpool(self):
        """
        Needed to actually run tests on
        This is not actually a test of "Add Vpool to OVS",
        so any failure here will be reported as a setUp error and no tests will run
        """
        pmachine = System.get_my_storagerouter().pmachine
        mgmt_center = MgmtCenter(
            data={
                "name": "Openstack",
                "description": "test",
                "username": CINDER_USER,
                "password": CINDER_PASS,
                "ip": CINDER_CONTROLLER,
                "port": 80,
                "type": "OPENSTACK",
                "metadata": {"integratemgmt": True},
            }
        )
        mgmt_center.save()
        pmachine.mgmtcenter = mgmt_center
        pmachine.save()
        self._debug("Creating vpool")

        parameters = {
            "storagerouter_ip": IP,
            "vpool_name": VPOOL_NAME,
            "type": "local",
            "storage_ip": "127.0.0.1",  # KVM
            "vrouter_port": VPOOL_PORT,
            "integrate_vpool": True,
            "connection_host": IP,
            "connection_port": VPOOL_PORT,
            "connection_username": "",
            "connection_password": "",
            "connection_backend": {},
            "readcache_size": 50,
            "writecache_size": 50,
        }
        StorageRouterController.add_vpool(parameters)
        attempt = 0
        while attempt < 10:
            vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
            if vpool is not None:
                self._debug("vpool {0} created".format(VPOOL_NAME))
                try:
                    os.listdir(VPOOL_MOUNTPOINT)
                    return vpool
                except Exception as ex:
                    # either it doesn't exist, or we don't have permission
                    self._debug("vpool not ready yet {0}".format(str(ex)))
                    pass
            attempt += 1
            time.sleep(2)
        raise RuntimeError("Vpool {0} was not modeled correctly or did not start.".format(VPOOL_NAME))
Example #11

 def __init__(self, *args, **kwargs):
     """Init: args, kwargs pass through;
     Options come from CONF
     """
     super(OVSVolumeDriver, self).__init__(*args, **kwargs)
     LOG.info('INIT %s %s %s ' % (CONF.vpool_name, str(args), str(kwargs)))
     self.configuration.append_config_values(OPTS)
     self._vpool_name = self.configuration.vpool_name
     self._vp = VPoolList.get_vpool_by_name(self._vpool_name)
     self._context = None
     self._db = kwargs.get('db', None)
     self._api = api.API()
Example #13
    def _create_vpool(self):
        """
        Needed to actually run tests on
        This is not actually a test of "Add Vpool to OVS",
        so any failure here will be reported as a setUp error and no tests will run
        """
        pmachine = System.get_my_storagerouter().pmachine
        mgmt_center = MgmtCenter(data={'name': 'Openstack',
                                       'description': 'test',
                                       'username': OVSPluginTestCase.CINDER_USER,
                                       'password': OVSPluginTestCase.CINDER_PASS,
                                       'ip': OVSPluginTestCase.CINDER_CONTROLLER,
                                       'port': 80,
                                       'type': 'OPENSTACK',
                                       'metadata': {'integratemgmt': True}})
        mgmt_center.save()
        pmachine.mgmtcenter = mgmt_center
        pmachine.save()
        self._debug('Creating vpool')

        parameters = {'storagerouter_ip': OVSPluginTestCase.ip,
                      'vpool_name': OVSPluginTestCase.VPOOL_NAME,
                      'type': 'local',
                      'storage_ip': '127.0.0.1',  # KVM
                      'vrouter_port': OVSPluginTestCase.VPOOL_PORT,
                      'integrate_vpool': True,
                      'connection_host': OVSPluginTestCase.ip,
                      'connection_port': OVSPluginTestCase.VPOOL_PORT,
                      'connection_username': '',
                      'connection_password': '',
                      'connection_backend': {},
                      'readcache_size': 50,
                      'writecache_size': 50
                      }
        StorageRouterController.add_vpool(parameters)
        attempt = 0
        while attempt < 10:
            vpool = VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME)
            if vpool is not None:
                self._debug('vpool {0} created'.format(OVSPluginTestCase.VPOOL_NAME))
                try:
                    os.listdir(OVSPluginTestCase.VPOOL_MOUNTPOINT)
                    return vpool
                except Exception as ex:
                    # either it doesn't exist, or we don't have permission
                    self._debug('vpool not ready yet {0}'.format(str(ex)))
                    pass
            attempt += 1
            time.sleep(2)
        raise RuntimeError('Vpool {0} was not modeled correctly or did not start.'.format(OVSPluginTestCase.VPOOL_NAME))
Example #14
 def _remove_vpool(self):
     """
     Clean up
     This is not actually a test of "Remove Vpool from OVS",
     so any failure here will be reported as a tearDown error and no cleanup will occur
     """
     self._debug('Removing vpool')
     vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
     if vpool is None:
         self._debug('already removed')
         return
     for storagedriver_guid in vpool.storagedrivers_guids:
         self._debug('removing storagedriver {0}'.format(storagedriver_guid))
         StorageRouterController.remove_storagedriver(storagedriver_guid)
     attempt = 0
     while attempt < 10:
         vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
         if vpool is None:
             self._debug('vpool {0} deleted'.format(VPOOL_NAME))
             return
         attempt += 1
         time.sleep(2)
     raise RuntimeError('Vpool {0} was not removed correctly.'.format(VPOOL_NAME))
Example #15

 def _create_vpool(self):
     """
     Needed to actually run tests on
     This is not actually a test of "Add Vpool to OVS",
     so any failure here will be reported as a setUp error and no tests will run
     """
     self._debug('Creating vpool')
     backend_type = 'local'
     fields = ['storage_ip', 'vrouter_port']
     parameters = {
         'storagerouter_ip': IP,
         'vpool_name': VPOOL_NAME,
         'type': 'LOCAL',
         'mountpoint_bfs': VPOOL_BFS,
         'mountpoint_temp': VPOOL_TEMP,
         'mountpoint_md': VPOOL_MD,
         'mountpoint_readcache1': VPOOL_READCACHE1,
         'mountpoint_readcache2': VPOOL_READCACHE2,
         'mountpoint_writecache': VPOOL_WRITECACHE,
         'mountpoint_foc': VPOOL_FOC,
         'storage_ip': '127.0.0.1',  #KVM
         'vrouter_port': VPOOL_PORT
     }
     StorageRouterController.add_vpool(parameters)
     attempt = 0
     while attempt < 10:
         vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
         if vpool is not None:
             self._debug('vpool %s created' % VPOOL_NAME)
             try:
                 self._get_shell_client()
                 self.shell_client('chown %s %s' %
                                   (self.current_user_id, VPOOL_MOUNTPOINT))
                 os.listdir(VPOOL_MOUNTPOINT)
                 return vpool
             except Exception as ex:
                 #either it doesn't exist, or we don't have permission
                 self._debug('vpool not ready yet %s' % (str(ex)))
                 pass
         attempt += 1
         time.sleep(1)
     raise RuntimeError(
         'Vpool %s was not modeled correctly or did not start.' %
         VPOOL_NAME)
Example #16
    def update_vmachine_name(instance_id, old_name, new_name):
        """
        Update a vMachine name: find vmachine by management center instance id, set new name
        :param instance_id: ID for the virtual machine known by management center
        :param old_name: Old name of the virtual machine
        :param new_name: New name for the virtual machine
        """
        vmachine = None
        for mgmt_center in MgmtCenterList.get_mgmtcenters():
            mgmt = Factory.get_mgmtcenter(mgmt_center=mgmt_center)
            try:
                machine_info = mgmt.get_vmachine_device_info(instance_id)
                file_name = machine_info['file_name']
                host_name = machine_info['host_name']
                vpool_name = machine_info['vpool_name']
                storage_router = StorageRouterList.get_by_name(host_name)
                machine_id = storage_router.machine_id
                device_name = '{0}/{1}'.format(machine_id, file_name)
                vp = VPoolList.get_vpool_by_name(vpool_name)
                vmachine = VMachineList.get_by_devicename_and_vpool(
                    device_name, vp)
                if vmachine:
                    break
                vmachine = VMachineList.get_by_devicename_and_vpool(
                    device_name, None)
                if vmachine:
                    break
            except Exception as ex:
                logger.info(
                    'Trying to get mgmt center failed for vmachine {0}. {1}'.
                    format(old_name, ex))
        if not vmachine:
            logger.error('No vmachine found for name {0}'.format(old_name))
            return

        vpool = vmachine.vpool
        mutex = VolatileMutex('{0}_{1}'.format(
            old_name, vpool.guid if vpool is not None else 'none'))
        try:
            mutex.acquire(wait=5)
            vmachine.name = new_name
            vmachine.save()
        finally:
            mutex.release()
Example #17

 def get_vdisk_by_name(vdisk_name, vpool_name):
     """
     Fetch a vDisk by its name on a given vPool
     :param vdisk_name: location of a vdisk on a vpool
                        (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw )
     :type vdisk_name: str
     :param vpool_name: name of an existing vpool
     :type vpool_name: str
     :return: a vdisk object
     :rtype: ovs.dal.hybrids.vdisk.VDisk
     """
     vpool = VPoolList.get_vpool_by_name(vpool_name)
     if vpool:
         vdisk = VDiskList.get_by_devicename_and_vpool('/{0}'.format(vdisk_name), vpool)
         if vdisk:
             return vdisk
         else:
             raise VDiskNotFoundError("VDisk with name `{0}` not found!".format(vdisk_name))
     else:
         raise VPoolNotFoundError("vPool with name `{0}` cannot be found!".format(vpool_name))
Example #18

 def _create_vpool(self):
     """
     Needed to actually run tests on
     This is not actually a test of "Add Vpool to OVS",
     so any failure here will be reported as a setUp error and no tests will run
     """
     self._debug('Creating vpool')
     backend_type = 'local'
     fields = ['storage_ip', 'vrouter_port']
     parameters = {'storagerouter_ip': IP,
                   'vpool_name': VPOOL_NAME,
                   'type': 'local',
                   'mountpoint_bfs': VPOOL_BFS,
                   'mountpoint_temp': VPOOL_TEMP,
                   'mountpoint_md': VPOOL_MD,
                   'mountpoint_readcache1': VPOOL_READCACHE1,
                   'mountpoint_readcache2': VPOOL_READCACHE2,
                   'mountpoint_writecache': VPOOL_WRITECACHE,
                   'mountpoint_foc': VPOOL_FOC,
                   'storage_ip': '127.0.0.1', #KVM
                   'vrouter_port': VPOOL_PORT
                   }
     StorageRouterController.add_vpool(parameters)
     attempt = 0
     while attempt < 10:
         vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
         if vpool is not None:
             self._debug('vpool %s created' % VPOOL_NAME)
             try:
                 self._get_shell_client()
                 self.shell_client('sudo chown %s %s' % (self.current_user_id, VPOOL_MOUNTPOINT))
                 self.shell_client('sudo chmod 775 %s' % (VPOOL_MOUNTPOINT))
                 os.listdir(VPOOL_MOUNTPOINT)
                 return vpool
             except Exception as ex:
                 #either it doesn't exist, or we don't have permission
                 self._debug('vpool not ready yet %s' % (str(ex)))
                 pass
         attempt += 1
         time.sleep(2)
     raise RuntimeError('Vpool %s was not modeled correctly or did not start.' % VPOOL_NAME)
Example #19
    def update_vmachine_name(instance_id, old_name, new_name):
        """
        Update a vMachine name: find vmachine by management center instance id, set new name
        :param instance_id: ID for the virtual machine known by management center
        :param old_name: Old name of the virtual machine
        :param new_name: New name for the virtual machine
        """
        vmachine = None
        for mgmt_center in MgmtCenterList.get_mgmtcenters():
            mgmt = Factory.get_mgmtcenter(mgmt_center = mgmt_center)
            try:
                machine_info = mgmt.get_vmachine_device_info(instance_id)
                file_name = machine_info['file_name']
                host_name = machine_info['host_name']
                vpool_name = machine_info['vpool_name']
                storage_router = StorageRouterList.get_by_name(host_name)
                machine_id = storage_router.machine_id
                device_name = '{0}/{1}'.format(machine_id, file_name)
                vp = VPoolList.get_vpool_by_name(vpool_name)
                vmachine = VMachineList.get_by_devicename_and_vpool(device_name, vp)
                if vmachine:
                    break
                vmachine = VMachineList.get_by_devicename_and_vpool(device_name, None)
                if vmachine:
                    break
            except Exception as ex:
                VMachineController._logger.info('Trying to get mgmt center failed for vmachine {0}. {1}'.format(old_name, ex))
        if not vmachine:
            VMachineController._logger.error('No vmachine found for name {0}'.format(old_name))
            return

        vpool = vmachine.vpool
        mutex = volatile_mutex('{0}_{1}'.format(old_name, vpool.guid if vpool is not None else 'none'))
        try:
            mutex.acquire(wait=5)
            vmachine.name = new_name
            vmachine.save()
        finally:
            mutex.release()
Example #20
    def _create_vpool(self):
        """
        Needed to actually run tests on
        This is not actually a test of "Add Vpool to OVS",
        so any failure here will be reported as a setUp error and no tests will run
        """
        pmachine = System.get_my_storagerouter().pmachine
        mgmt_center = MgmtCenter(
            data={
                'name': 'Openstack',
                'description': 'test',
                'username': CINDER_USER,
                'password': CINDER_PASS,
                'ip': CINDER_CONTROLLER,
                'port': 80,
                'type': 'OPENSTACK',
                'metadata': {
                    'integratemgmt': True
                }
            })
        mgmt_center.save()
        pmachine.mgmtcenter = mgmt_center
        pmachine.save()
        self._debug('Creating vpool')
        backend_type = 'local'
        fields = ['storage_ip', 'vrouter_port']

        parameters = {
            'storagerouter_ip': IP,
            'vpool_name': VPOOL_NAME,
            'type': 'local',
            'mountpoint_bfs': VPOOL_BFS,
            'mountpoint_temp': VPOOL_TEMP,
            'mountpoint_md': VPOOL_MD,
            'mountpoint_readcaches': [VPOOL_READCACHE],
            'mountpoint_writecaches': [VPOOL_WRITECACHE],
            'mountpoint_foc': VPOOL_FOC,
            'storage_ip': '127.0.0.1',  #KVM
            'vrouter_port': VPOOL_PORT,
            'integrate_vpool': True,
            'connection_host': IP,
            'connection_port': VPOOL_PORT,
            'connection_username': '',
            'connection_password': '',
            'connection_backend': {},
        }
        StorageRouterController.add_vpool(parameters)
        attempt = 0
        while attempt < 10:
            vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
            if vpool is not None:
                self._debug('vpool %s created' % VPOOL_NAME)
                try:
                    os.listdir(VPOOL_MOUNTPOINT)
                    return vpool
                except Exception as ex:
                    #either it doesn't exist, or we don't have permission
                    self._debug('vpool not ready yet %s' % (str(ex)))
                    pass
            attempt += 1
            time.sleep(2)
        raise RuntimeError(
            'Vpool %s was not modeled correctly or did not start.' %
            VPOOL_NAME)
Example #21
 def _vpool_exists(self):
     return VPoolList.get_vpool_by_name(VPOOL_NAME) is not None
Example #22
 def _get_vpool(self):
     self.vpool = VPoolList.get_vpool_by_name(VPOOL_NAME)
Example #23

 def _get_vpool(self):
     self.vpool = VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME)
Example #24

 def _vpool_exists(self):
     _ = self
     return VPoolList.get_vpool_by_name(
         OVSPluginTestCase.VPOOL_NAME) is not None
Example #26
 def _vpool_exists(self):
     _ = self
     return VPoolList.get_vpool_by_name(OVSPluginTestCase.VPOOL_NAME) is not None
Example #29

    def add_vpool(parameters):
        """
        Add a vPool to the machine this task is running on
        """

        parameters = {} if parameters is None else parameters
        ip = parameters['storagerouter_ip']
        vpool_name = parameters['vpool_name']

        if StorageRouterController._validate_ip(ip) is False:
            raise ValueError('The entered ip address is invalid')

        if not re.match(r'^[0-9a-z]+(-+[0-9a-z]+)*$', vpool_name):
            raise ValueError('Invalid vpool_name given')

        client = SSHClient.load(ip)  # Make sure to ALWAYS reload the client, as Fabric seems to be singleton-ish
        unique_id = System.get_my_machine_id(client)

        storagerouter = None
        for current_storagerouter in StorageRouterList.get_storagerouters():
            if current_storagerouter.ip == ip and current_storagerouter.machine_id == unique_id:
                storagerouter = current_storagerouter
                break
        if storagerouter is None:
            raise RuntimeError('Could not find Storage Router with given ip address')

        vpool = VPoolList.get_vpool_by_name(vpool_name)
        storagedriver = None
        if vpool is not None:
            if vpool.backend_type.code == 'local':
                # Might be an issue; investigating whether it's on the same node or not
                if len(vpool.storagedrivers) == 1 and vpool.storagedrivers[0].storagerouter.machine_id != unique_id:
                    raise RuntimeError('A local vPool with name {0} already exists'.format(vpool_name))
            for vpool_storagedriver in vpool.storagedrivers:
                if vpool_storagedriver.storagerouter_guid == storagerouter.guid:
                    storagedriver = vpool_storagedriver  # The vPool is already added to this Storage Router and this might be a cleanup/recovery

            # Check whether there are running machines on this vPool
            machine_guids = []
            for vdisk in vpool.vdisks:
                if vdisk.vmachine_guid not in machine_guids:
                    machine_guids.append(vdisk.vmachine_guid)
                    if vdisk.vmachine.hypervisor_status in ['RUNNING', 'PAUSED']:
                        raise RuntimeError(
                            'At least one vMachine using this vPool is still running or paused. Make sure there are no active vMachines'
                        )

        nodes = {ip}  # Set comprehension
        if vpool is not None:
            for vpool_storagedriver in vpool.storagedrivers:
                nodes.add(vpool_storagedriver.storagerouter.ip)
        nodes = list(nodes)

        services = ['volumedriver_{0}'.format(vpool_name),
                    'failovercache_{0}'.format(vpool_name)]

        # Stop services
        for node in nodes:
            node_client = SSHClient.load(node)
            for service in services:
                System.exec_remote_python(node_client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.disable_service('{0}')
""".format(service))
                System.exec_remote_python(node_client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.stop_service('{0}')
""".format(service))

        # Keep in mind that if the Storage Driver exists, the vPool does as well
        client = SSHClient.load(ip)
        mountpoint_bfs = ''
        directories_to_create = []

        if vpool is None:
            vpool = VPool()
            supported_backends = System.read_remote_config(client, 'volumedriver.supported.backends').split(',')
            if 'rest' in supported_backends:
                supported_backends.remove('rest')  # REST is not supported for now
            backend_type = BackendTypeList.get_backend_type_by_code(parameters['type'])
            vpool.backend_type = backend_type
            connection_host = connection_port = connection_username = connection_password = None
            if vpool.backend_type.code in ['local', 'distributed']:
                vpool.metadata = {'backend_type': 'LOCAL'}
                mountpoint_bfs = parameters['mountpoint_bfs']
                directories_to_create.append(mountpoint_bfs)
                vpool.metadata['local_connection_path'] = mountpoint_bfs
            if vpool.backend_type.code == 'rest':
                connection_host = parameters['connection_host']
                connection_port = parameters['connection_port']
                rest_connection_timeout_secs = parameters['connection_timeout']
                vpool.metadata = {'rest_connection_host': connection_host,
                                  'rest_connection_port': connection_port,
                                  'buchla_connection_log_level': "0",
                                  'rest_connection_verbose_logging': rest_connection_timeout_secs,
                                  'rest_connection_metadata_format': "JSON",
                                  'backend_type': 'REST'}
            elif vpool.backend_type.code in ('ceph_s3', 'amazon_s3', 'swift_s3'):
                connection_host = parameters['connection_host']
                connection_port = parameters['connection_port']
                connection_username = parameters['connection_username']
                connection_password = parameters['connection_password']
                if vpool.backend_type.code in ['swift_s3']:
                    strict_consistency = 'false'
                    s3_connection_flavour = 'SWIFT'
                else:
                    strict_consistency = 'true'
                    s3_connection_flavour = 'S3'

                vpool.metadata = {'s3_connection_host': connection_host,
                                  's3_connection_port': connection_port,
                                  's3_connection_username': connection_username,
                                  's3_connection_password': connection_password,
                                  's3_connection_flavour': s3_connection_flavour,
                                  's3_connection_strict_consistency': strict_consistency,
                                  's3_connection_verbose_logging': 1,
                                  'backend_type': 'S3'}

            vpool.name = vpool_name
            vpool.description = "{} {}".format(vpool.backend_type.code, vpool_name)
            vpool.login = connection_username
            vpool.password = connection_password
            if not connection_host:
                vpool.connection = None
            else:
                vpool.connection = '{}:{}'.format(connection_host, connection_port)
            vpool.save()

        # Connection information is Storage Driver related information
        new_storagedriver = False
        if storagedriver is None:
            storagedriver = StorageDriver()
            new_storagedriver = True

        mountpoint_temp = parameters['mountpoint_temp']
        mountpoint_md = parameters['mountpoint_md']
        mountpoint_readcache1 = parameters['mountpoint_readcache1']
        mountpoint_readcache2 = parameters.get('mountpoint_readcache2', '')
        mountpoint_writecache = parameters['mountpoint_writecache']
        mountpoint_foc = parameters['mountpoint_foc']

        directories_to_create.append(mountpoint_temp)
        directories_to_create.append(mountpoint_md)
        directories_to_create.append(mountpoint_readcache1)
        if mountpoint_readcache2:
            directories_to_create.append(mountpoint_readcache2)
        directories_to_create.append(mountpoint_writecache)
        directories_to_create.append(mountpoint_foc)

        client = SSHClient.load(ip)
        dir_create_script = """
import os
for directory in {0}:
    if not os.path.exists(directory):
        os.makedirs(directory)
""".format(directories_to_create)
        System.exec_remote_python(client, dir_create_script)

        read_cache1_fs = os.statvfs(mountpoint_readcache1)
        read_cache2_fs = None
        if mountpoint_readcache2:
            read_cache2_fs = os.statvfs(mountpoint_readcache2)
        write_cache_fs = os.statvfs(mountpoint_writecache)
        fdcache = '{}/fd_{}'.format(mountpoint_writecache, vpool_name)
        scocache = '{}/sco_{}'.format(mountpoint_writecache, vpool_name)
        readcache1 = '{}/read1_{}'.format(mountpoint_readcache1, vpool_name)
        files2create = [readcache1]
        if mountpoint_readcache2 and mountpoint_readcache1 != mountpoint_readcache2:
            readcache2 = '{}/read2_{}'.format(mountpoint_readcache2, vpool_name)
            files2create.append(readcache2)
        else:
            readcache2 = ''
        failovercache = '{}/foc_{}'.format(mountpoint_foc, vpool_name)
        metadatapath = '{}/metadata_{}'.format(mountpoint_md, vpool_name)
        tlogpath = '{}/tlogs_{}'.format(mountpoint_md, vpool_name)
        rsppath = '/var/rsp/{}'.format(vpool_name)
        dirs2create = [scocache, failovercache, metadatapath, tlogpath, rsppath,
                       System.read_remote_config(client, 'volumedriver.readcache.serialization.path')]

        cmd = "cat /etc/mtab | grep ^/dev/ | cut -d ' ' -f 2"
        mountpoints = [device.strip() for device in client.run(cmd).strip().split('\n')]
        mountpoints.remove('/')

        def is_partition(directory):
            return directory in mountpoints
        # Cache sizes
        # 20% = scocache
        # 20% = failovercache (@TODO: check if this can possibly consume more than 20%)
        # 60% = readcache

        # safety values:
        readcache1_factor = 0.2
        readcache2_factor = 0.2
        writecache_factor = 0.1
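
        # The `delta` sets below collect the distinct partitions backing the
        # cache mountpoints ('/dummy' stands in for any plain directory on
        # the root partition): the fewer distinct partitions there are, the
        # smaller the factors above must stay, so that caches sharing a
        # partition do not oversubscribe it.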

        if (mountpoint_readcache1 == mountpoint_readcache2) or not mountpoint_readcache2:
            delta = set()
            delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy')
            delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy')
            delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy')
            if len(delta) == 1:
                readcache1_factor = 0.49
                writecache_factor = 0.2
            elif len(delta) == 2:
                if mountpoint_writecache == mountpoint_foc:
                    readcache1_factor = 0.98
                    writecache_factor = 0.49
                else:
                    readcache1_factor = 0.49
                    if mountpoint_readcache1 == mountpoint_writecache:
                        writecache_factor = 0.49
                    else:
                        writecache_factor = 0.98
            elif len(delta) == 3:
                readcache1_factor = 0.98
                writecache_factor = 0.98
        else:
            delta = set()
            delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy')
            delta.add(mountpoint_readcache2 if is_partition(mountpoint_readcache2) else '/dummy')
            delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy')
            delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy')
            if len(delta) == 1:
                # consider them all to be directories
                readcache1_factor = 0.24
                readcache2_factor = 0.24
                writecache_factor = 0.24
            elif len(delta) == 2:
                if mountpoint_writecache == mountpoint_foc:
                    writecache_factor = 0.24
                    if mountpoint_readcache1 == mountpoint_writecache:
                        readcache1_factor = 0.49
                        readcache2_factor = 0.98
                    else:
                        readcache1_factor = 0.98
                        readcache2_factor = 0.49
                else:
                    readcache1_factor = readcache2_factor = 0.49
                    writecache_factor = 0.49
            elif len(delta) == 3:
                if mountpoint_writecache == mountpoint_foc:
                    readcache1_factor = 0.98
                    readcache2_factor = 0.98
                    writecache_factor = 0.49
                elif mountpoint_readcache1 == mountpoint_writecache:
                    readcache1_factor = 0.49
                    readcache2_factor = 0.98
                    writecache_factor = 0.49
                elif mountpoint_readcache1 == mountpoint_foc:
                    readcache1_factor = 0.49
                    readcache2_factor = 0.98
                    writecache_factor = 0.98
                elif mountpoint_readcache2 == mountpoint_writecache:
                    readcache1_factor = 0.98
                    readcache2_factor = 0.49
                    writecache_factor = 0.49
                elif mountpoint_readcache2 == mountpoint_foc:
                    readcache1_factor = 0.98
                    readcache2_factor = 0.49
                    writecache_factor = 0.98
            elif len(delta) == 4:
                readcache1_factor = 0.98
                readcache2_factor = 0.98
                writecache_factor = 0.98

        # summarize caching on root partition (directory only)
        root_assigned = dict()
        if not is_partition(mountpoint_readcache1):
            root_assigned['readcache1_factor'] = readcache1_factor
        if not is_partition(mountpoint_readcache2):
            root_assigned['readcache2_factor'] = readcache2_factor
        if not is_partition(mountpoint_writecache):
            root_assigned['writecache_factor'] = writecache_factor
        if not is_partition(mountpoint_foc):
            root_assigned['foc_factor'] = min(readcache1_factor, readcache2_factor, writecache_factor)

        # always leave at least 20% of free space
        division_factor = 1.0
        total_size = sum(root_assigned.values()) + .02 * len(root_assigned)
        if 0.8 < total_size < 1.6:
            division_factor = 2.0
        elif 1.6 <= total_size < 3.2:
            division_factor = 4.0
        elif total_size >= 3.2:
            division_factor = 8.0

        if 'readcache1_factor' in root_assigned.keys():
            readcache1_factor /= division_factor
        if 'readcache2_factor' in root_assigned.keys():
            readcache2_factor /= division_factor
        if 'writecache_factor' in root_assigned.keys():
            writecache_factor /= division_factor

        scocache_size = '{0}KiB'.format((int(write_cache_fs.f_bavail * writecache_factor / 4096) * 4096) * 4)
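        # Size arithmetic used above and below (an interpretation; the
        # source does not document it): f_bavail is the number of free
        # blocks, dividing and re-multiplying by 4096 rounds the factored
        # share down to a multiple of 4096 blocks, and the trailing '* 4'
        # converts 4096-byte blocks into the KiB figure being formatted.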
        if (mountpoint_readcache1 and not mountpoint_readcache2) or (mountpoint_readcache1 == mountpoint_readcache2):
            mountpoint_readcache2 = ''
            readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4)
            readcache2 = ''
            readcache2_size = '0KiB'
        else:
            readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4)
            readcache2_size = '{0}KiB'.format((int(read_cache2_fs.f_bavail * readcache2_factor / 4096) * 4096) * 4)
        if new_storagedriver:
            ports_in_use = System.ports_in_use(client)
            ports_reserved = []
            ports_in_use_model = {}
            for port_storagedriver in StorageDriverList.get_storagedrivers():
                if port_storagedriver.vpool_guid not in ports_in_use_model:
                    ports_in_use_model[port_storagedriver.vpool_guid] = port_storagedriver.ports
                    ports_reserved += port_storagedriver.ports
            if vpool.guid in ports_in_use_model:  # The vPool is extended to another StorageRouter. We need to use these ports.
                ports = ports_in_use_model[vpool.guid]
                if any(port in ports_in_use for port in ports):
                    raise RuntimeError('The required ports are in use')
            else:  # First StorageDriver for this vPool, so generating new ports
                ports = []
                for port_range in System.read_remote_config(client, 'volumedriver.filesystem.ports').split(','):
                    port_range = port_range.strip()
                    if '-' in port_range:
                        current_range = (int(port_range.split('-')[0]), int(port_range.split('-')[1]))
                    else:
                        current_range = (int(port_range), 65536)
                    current_port = current_range[0]
                    while len(ports) < 3:
                        if current_port not in ports_in_use and current_port not in ports_reserved:
                            ports.append(current_port)
                        current_port += 1
                        if current_port > current_range[1]:
                            break
                if len(ports) != 3:
                    raise RuntimeError('Could not find enough free ports')
        else:
            ports = storagedriver.ports
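        # Whichever branch ran, `ports` now holds the three volumedriver
        # ports: reused from the model when the vPool is being extended,
        # otherwise freshly picked from the configured port ranges.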

        ip_path = Configuration.get('ovs.core.ip.path')
        if ip_path is None:
            ip_path = "`which ip`"
        cmd = "{0} a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1".format(ip_path)
        ipaddresses = client.run(cmd).strip().split('\n')
        ipaddresses = [ipaddr.strip() for ipaddr in ipaddresses]
        grid_ip = System.read_remote_config(client, 'ovs.grid.ip')
        if grid_ip in ipaddresses:
            ipaddresses.remove(grid_ip)
        if not ipaddresses:
            raise RuntimeError('No available ip addresses found suitable for Storage Router storage ip')
        if storagerouter.pmachine.hvtype == 'KVM':
            volumedriver_storageip = '127.0.0.1'
        else:
            volumedriver_storageip = parameters['storage_ip']
        vrouter_id = '{0}{1}'.format(vpool_name, unique_id)

        vrouter_config = {'vrouter_id': vrouter_id,
                          'vrouter_redirect_timeout_ms': '5000',
                          'vrouter_routing_retries': 10,
                          'vrouter_volume_read_threshold': 1024,
                          'vrouter_volume_write_threshold': 1024,
                          'vrouter_file_read_threshold': 1024,
                          'vrouter_file_write_threshold': 1024,
                          'vrouter_min_workers': 4,
                          'vrouter_max_workers': 16}
        voldrv_arakoon_cluster_id = str(System.read_remote_config(client, 'volumedriver.arakoon.clusterid'))
        voldrv_arakoon_cluster = ArakoonManagementEx().getCluster(voldrv_arakoon_cluster_id)
        voldrv_arakoon_client_config = voldrv_arakoon_cluster.getClientConfig()
        arakoon_node_configs = []
        for arakoon_node in voldrv_arakoon_client_config.keys():
            arakoon_node_configs.append(ArakoonNodeConfig(arakoon_node,
                                                          voldrv_arakoon_client_config[arakoon_node][0][0],
                                                          voldrv_arakoon_client_config[arakoon_node][1]))
        vrouter_clusterregistry = ClusterRegistry(str(vpool.guid), voldrv_arakoon_cluster_id, arakoon_node_configs)
        node_configs = []
        for existing_storagedriver in StorageDriverList.get_storagedrivers():
            if existing_storagedriver.vpool_guid == vpool.guid:
                node_configs.append(ClusterNodeConfig(str(existing_storagedriver.storagedriver_id),
                                                      str(existing_storagedriver.cluster_ip),
                                                      existing_storagedriver.ports[0],
                                                      existing_storagedriver.ports[1],
                                                      existing_storagedriver.ports[2]))
        if new_storagedriver:
            node_configs.append(ClusterNodeConfig(vrouter_id, grid_ip, ports[0], ports[1], ports[2]))
        vrouter_clusterregistry.set_node_configs(node_configs)
        readcaches = [{'path': readcache1, 'size': readcache1_size}]
        if readcache2:
            readcaches.append({'path': readcache2, 'size': readcache2_size})
        scocaches = [{'path': scocache, 'size': scocache_size}]
        filesystem_config = {'fs_backend_path': mountpoint_bfs}
        volumemanager_config = {'metadata_path': metadatapath, 'tlog_path': tlogpath}
        storagedriver_config_script = """
from ovs.plugin.provider.configuration import Configuration
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration

fd_config = {{'fd_cache_path': '{11}',
              'fd_extent_cache_capacity': '1024',
              'fd_namespace' : 'fd-{0}-{12}'}}
storagedriver_configuration = StorageDriverConfiguration('{0}')
storagedriver_configuration.configure_backend({1})
storagedriver_configuration.configure_readcache({2}, Configuration.get('volumedriver.readcache.serialization.path') + '/{0}')
storagedriver_configuration.configure_scocache({3}, '1GB', '2GB')
storagedriver_configuration.configure_failovercache('{4}')
storagedriver_configuration.configure_filesystem({5})
storagedriver_configuration.configure_volumemanager({6})
storagedriver_configuration.configure_volumerouter('{12}', {7})
storagedriver_configuration.configure_arakoon_cluster('{8}', {9})
storagedriver_configuration.configure_hypervisor('{10}')
storagedriver_configuration.configure_filedriver(fd_config)
""".format(vpool_name, vpool.metadata, readcaches, scocaches, failovercache, filesystem_config,
           volumemanager_config, vrouter_config, voldrv_arakoon_cluster_id, voldrv_arakoon_client_config,
           storagerouter.pmachine.hvtype, fdcache, vpool.guid)
        System.exec_remote_python(client, storagedriver_config_script)
        remote_script = """
import os
from configobj import ConfigObj
from ovs.plugin.provider.configuration import Configuration
protocol = Configuration.get('ovs.core.broker.protocol')
login = Configuration.get('ovs.core.broker.login')
password = Configuration.get('ovs.core.broker.password')
vpool_name = {0}
uris = []
cfg = ConfigObj('/opt/OpenvStorage/config/rabbitmqclient.cfg')
main_section = cfg.get('main')
nodes = main_section['nodes'] if type(main_section['nodes']) == list else [main_section['nodes']]
for node in nodes:
    uris.append({{'amqp_uri': '{{0}}://{{1}}:{{2}}@{{3}}'.format(protocol, login, password, cfg.get(node)['location'])}})
from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
queue_config = {{'events_amqp_routing_key': Configuration.get('ovs.core.broker.volumerouter.queue'),
                 'events_amqp_uris': uris}}
for config_file in os.listdir('/opt/OpenvStorage/config/voldrv_vpools'):
    this_vpool_name = config_file.replace('.json', '')
    if config_file.endswith('.json') and (vpool_name is None or vpool_name == this_vpool_name):
        storagedriver_configuration = StorageDriverConfiguration(this_vpool_name)
        storagedriver_configuration.configure_event_publisher(queue_config)
""".format(vpool_name if vpool_name is None else "'{0}'".format(vpool_name))
        System.exec_remote_python(client, remote_script)

        # Updating the model
        storagedriver.storagedriver_id = vrouter_id
        storagedriver.name = vrouter_id.replace('_', ' ')
        storagedriver.description = storagedriver.name
        storagedriver.storage_ip = volumedriver_storageip
        storagedriver.cluster_ip = grid_ip
        storagedriver.ports = ports
        storagedriver.mountpoint = '/mnt/{0}'.format(vpool_name)
        storagedriver.mountpoint_temp = mountpoint_temp
        storagedriver.mountpoint_readcache1 = mountpoint_readcache1
        storagedriver.mountpoint_readcache2 = mountpoint_readcache2
        storagedriver.mountpoint_writecache = mountpoint_writecache
        storagedriver.mountpoint_foc = mountpoint_foc
        storagedriver.mountpoint_bfs = mountpoint_bfs
        storagedriver.mountpoint_md = mountpoint_md
        storagedriver.storagerouter = storagerouter
        storagedriver.vpool = vpool
        storagedriver.save()

        dirs2create.append(storagedriver.mountpoint)
        dirs2create.append('{0}/fd_{1}'.format(mountpoint_writecache, vpool_name))

        file_create_script = """
import os
for directory in {0}:
    if not os.path.exists(directory):
        os.makedirs(directory)
for filename in {1}:
    if not os.path.exists(filename):
        open(filename, 'a').close()
""".format(dirs2create, files2create)
        System.exec_remote_python(client, file_create_script)

        voldrv_config_file = '{0}/voldrv_vpools/{1}.json'.format(System.read_remote_config(client, 'ovs.core.cfgdir'),
                                                                 vpool_name)
        log_file = '/var/log/ovs/volumedriver/{0}.log'.format(vpool_name)
        vd_cmd = '/usr/bin/volumedriver_fs -f --config-file={0} --mountpoint {1} --logrotation --logfile {2} -o big_writes -o sync_read -o allow_other'.format(
            voldrv_config_file, storagedriver.mountpoint, log_file)
        if storagerouter.pmachine.hvtype == 'KVM':
            vd_stopcmd = 'umount {0}'.format(storagedriver.mountpoint)
        else:
            vd_stopcmd = 'exportfs -u *:{0}; umount {0}'.format(storagedriver.mountpoint)
        vd_name = 'volumedriver_{}'.format(vpool_name)

        log_file = '/var/log/ovs/volumedriver/foc_{0}.log'.format(vpool_name)
        fc_cmd = '/usr/bin/failovercachehelper --config-file={0} --logfile={1}'.format(voldrv_config_file, log_file)
        fc_name = 'failovercache_{0}'.format(vpool_name)

        params = {'<VPOOL_MOUNTPOINT>': storagedriver.mountpoint,
                  '<HYPERVISOR_TYPE>': storagerouter.pmachine.hvtype,
                  '<VPOOL_NAME>': vpool_name,
                  '<UUID>': str(uuid.uuid4())}
        if Osdist.is_ubuntu(client):
            if client.file_exists('/opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf'):
                client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver_{0}.conf'.format(vpool_name))
                client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-failovercache.conf /opt/OpenvStorage/config/templates/upstart/ovs-failovercache_{0}.conf'.format(vpool_name))
        else:
            if client.file_exists('/opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service'):
                client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver_{0}.service'.format(vpool_name))
                client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-failovercache.service /opt/OpenvStorage/config/templates/systemd/ovs-failovercache_{0}.service'.format(vpool_name))

        service_script = """
from ovs.plugin.provider.service import Service
Service.add_service(package=('openvstorage', 'volumedriver'), name='{0}', command='{1}', stop_command='{2}', params={5})
Service.add_service(package=('openvstorage', 'failovercache'), name='{3}', command='{4}', stop_command=None, params={5})
""".format(
            vd_name, vd_cmd, vd_stopcmd,
            fc_name, fc_cmd, params
        )
        System.exec_remote_python(client, service_script)

        if storagerouter.pmachine.hvtype == 'VMWARE':
            client.run("grep -q '/tmp localhost(ro,no_subtree_check)' /etc/exports || echo '/tmp localhost(ro,no_subtree_check)' >> /etc/exports")
            if Osdist.is_ubuntu(client):
                client.run('service nfs-kernel-server start')
            else:
                client.run('service nfs start')

        if storagerouter.pmachine.hvtype == 'KVM':
            client.run('virsh pool-define-as {0} dir - - - - {1}'.format(vpool_name, storagedriver.mountpoint))
            client.run('virsh pool-build {0}'.format(vpool_name))
            client.run('virsh pool-start {0}'.format(vpool_name))
            client.run('virsh pool-autostart {0}'.format(vpool_name))

        # Start services
        for node in nodes:
            node_client = SSHClient.load(node)
            for service in services:
                System.exec_remote_python(node_client, """
from ovs.plugin.provider.service import Service
Service.enable_service('{0}')
""".format(service))
                System.exec_remote_python(node_client, """
from ovs.plugin.provider.service import Service
Service.start_service('{0}')
""".format(service))

        # Fill vPool size
        vfs_info = os.statvfs('/mnt/{0}'.format(vpool_name))
        vpool.size = vfs_info.f_blocks * vfs_info.f_bsize
        vpool.save()

        # Configure Cinder
        ovsdb = PersistentFactory.get_client()
        vpool_config_key = str('ovs_openstack_cinder_%s' % storagedriver.vpool_guid)
        if ovsdb.exists(vpool_config_key):
            # Second node gets values saved by first node
            cinder_password, cinder_user, tenant_name, controller_ip, config_cinder = ovsdb.get(vpool_config_key)
        else:
            config_cinder = parameters.get('config_cinder', False)
            cinder_password = ''
            cinder_user = ''
            tenant_name = ''
            controller_ip = ''
        if config_cinder:
            cinder_password = parameters.get('cinder_pass', cinder_password)
            cinder_user = parameters.get('cinder_user', cinder_user)
            tenant_name = parameters.get('cinder_tenant', tenant_name)
            controller_ip = parameters.get('cinder_controller', controller_ip) # Keystone host
            if cinder_password:
                osc = OpenStackCinder(cinder_password = cinder_password,
                                      cinder_user = cinder_user,
                                      tenant_name = tenant_name,
                                      controller_ip = controller_ip)

                osc.configure_vpool(vpool_name, storagedriver.mountpoint)
                # Save values for first node to use
                ovsdb.set(vpool_config_key,
                          [cinder_password, cinder_user, tenant_name, controller_ip, config_cinder])
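Pulling the required keys out of add_vpool above, a minimal parameter dict for a local vPool would look roughly like this (all values are placeholders; mountpoint_readcache2 is optional, and storage_ip is only consulted on non-KVM hypervisors):

parameters = {
    'storagerouter_ip': '10.100.1.1',
    'vpool_name': 'myvpool',
    'type': 'local',
    'mountpoint_bfs': '/mnt/bfs',
    'mountpoint_temp': '/mnt/temp',
    'mountpoint_md': '/mnt/md',
    'mountpoint_readcache1': '/mnt/readcache1',
    'mountpoint_writecache': '/mnt/writecache',
    'mountpoint_foc': '/mnt/foc',
    'storage_ip': '10.100.1.2',
}
StorageRouterController.add_vpool(parameters)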