Пример #1
0
    def pulse():
        """
        Update the heartbeats for the Current Routers
        :return: None
        """
        logger = LogHandler.get('extensions', name='heartbeat')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        for storagerouter in StorageRouterList.get_storagerouters():
            if storagerouter.machine_id == machine_id:
                # Local node: persist the 'process' heartbeat under a mutex,
                # then fire an asynchronous celery ping routed to this node.
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(storagerouter.guid)):
                    refreshed = StorageRouter(storagerouter.guid)
                    refreshed.heartbeats['process'] = current_time
                    refreshed.save()
                ping_signature = StorageRouterController.ping.s(storagerouter.guid, current_time)
                ping_signature.apply_async(routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if storagerouter.heartbeats and 'process' in storagerouter.heartbeats:
                        elapsed = current_time - storagerouter.heartbeats['process']
                        if elapsed >= HeartBeat.ARP_TIMEOUT:
                            # Quote-escape the name before embedding it in the shell command
                            escaped_name = storagerouter.name.replace(r"'", r"'\''")
                            check_output("/usr/sbin/arp -d '{0}'".format(escaped_name), shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Пример #2
0
 def set_rdma_capability(storagerouter_guid):
     """
     Check if the StorageRouter has been reconfigured to be able to support RDMA
     :param storagerouter_guid: Guid of the StorageRouter to check and set
     :type storagerouter_guid: str
     :return: None
     :rtype: NoneType
     """
     storagerouter = StorageRouter(storagerouter_guid)
     client = SSHClient(storagerouter, username='******')
     capable = False
     with remote(client.ip, [os], username='******') as rem:
         # Walk the InfiniBand sysfs tree on the remote node; any port whose
         # 'state' file reports ACTIVE marks the node as RDMA-capable.
         for root, device_dirs, _ in rem.os.walk('/sys/class/infiniband'):
             for device_dir in device_dirs:
                 ports_dir = '/'.join([root, device_dir, 'ports'])
                 if not rem.os.path.exists(ports_dir):
                     continue
                 for port_root, port_dirs, _ in rem.os.walk(ports_dir):
                     # Only inspect the direct children of the ports directory
                     if port_root != ports_dir:
                         continue
                     for port_dir in port_dirs:
                         state_file = '/'.join([port_root, port_dir, 'state'])
                         if rem.os.path.exists(state_file) and 'ACTIVE' in client.run(['cat', state_file]):
                             capable = True
     storagerouter.rdma_capable = capable
     storagerouter.save()
Пример #3
0
    def pulse():
        """
        Update the heartbeats for the Current Routers
        :return: None
        """
        logger = Logger('extensions-generic')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.machine_id == machine_id:
                # Local node: refresh its 'process' heartbeat under a mutex so
                # concurrent writers cannot clobber each other's save
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(
                        node.guid)):
                    node_save = StorageRouter(node.guid)
                    node_save.heartbeats['process'] = current_time
                    node_save.save()
                # Fire an asynchronous celery ping routed to this node
                StorageRouterController.ping.s(
                    node.guid, current_time).apply_async(
                        routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats[
                                'process'] >= HeartBeat.ARP_TIMEOUT:
                            # The replace() quote-escapes the node name before
                            # it is embedded in the shell command line
                            check_output("/usr/sbin/arp -d '{0}'".format(
                                node.name.replace(r"'", r"'\''")),
                                         shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Пример #4
0
 def list(self, vpoolguid=None, storagerouterguid=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :return: List of vDisks matching the parameters specified
     :rtype: list[ovs.dal.hybrids.vdisk.VDisk]
     """
     if vpoolguid is not None:
         # Restrict to the vDisks of a single vPool
         return VPool(vpoolguid).vdisks
     if storagerouterguid is not None:
         # Restrict to the vDisks currently hosted by the given StorageRouter
         hosted_guids = StorageRouter(storagerouterguid).vdisks_guids
         return DataList(VDisk, {'type': DataList.where_operator.AND,
                                 'items': [('guid', DataList.operator.IN, hosted_guids)]})
     # No filter: return every known vDisk
     return VDiskList.get_vdisks()
Пример #5
0
 def list(self, vpoolguid=None, storagerouterguid=None, query=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :param query: A query to be executed if required
     :type query: DataQuery
     """
     if vpoolguid is not None:
         # Restrict to the vDisks of a single vPool
         vdisks = VPool(vpoolguid).vdisks
     elif storagerouterguid is not None:
         # Restrict to the vDisks currently hosted by the given StorageRouter
         hosted_guids = StorageRouter(storagerouterguid).vdisks_guids
         vdisks = DataList(VDisk, {'type': DataList.where_operator.AND,
                                   'items': [('guid', DataList.operator.IN, hosted_guids)]})
     else:
         vdisks = VDiskList.get_vdisks()
     if query is not None:
         # Intersect the selection with the guids matched by the caller's query
         matching_guids = DataList(VDisk, query).guids
         vdisks = [vdisk for vdisk in vdisks if vdisk.guid in matching_guids]
     return vdisks
Пример #6
0
    def shrink_vpool(self, vpool, storagerouter_guid):
        """
        Remove the storagedriver linking the specified vPool and storagerouter_guid
        :param vpool: vPool to shrink (or delete if its the last storagerouter linked to it)
        :type vpool: VPool
        :param storagerouter_guid: Guid of the Storage Router
        :type storagerouter_guid: str
        """
        if len(vpool.vdisks) > 0:  # Check to prevent obsolete testing
            backend_info = vpool.metadata['backend']['backend_info']
            preset_name = backend_info['preset']
            # Check if the policy is satisfiable before shrinking - Doing it here so the issue is transparent in the GUI
            alba_backend_guid = backend_info['alba_backend_guid']
            api_url = 'alba/backends/{0}'.format(alba_backend_guid)
            connection_info = backend_info['connection_info']
            # Query the (possibly remote) backend's API for its presets
            ovs_client = OVSClient.get_instance(connection_info=connection_info, cache_store=VolatileFactory.get_client())
            _presets = ovs_client.get(api_url, params={'contents': 'presets'})['presets']
            try:
                # Python 2 filter() returns a list; [0] picks the first preset with a matching name
                _preset = filter(lambda p: p['name'] == preset_name, _presets)[0]
                if _preset['is_available'] is False:
                    raise RuntimeError('Policy is currently not satisfied: cannot shrink vPool {0} according to preset {1}'.format(vpool.name, preset_name))
            except IndexError:
                # Preset not found on the backend: deliberately skip the satisfiability check
                pass

        # Raise if not satisfied
        sr = StorageRouter(storagerouter_guid)
        # The StorageDriver to remove is the one shared between the vPool and this StorageRouter
        intersection = set(vpool.storagedrivers_guids).intersection(set(sr.storagedrivers_guids))
        if not intersection:
            raise HttpNotAcceptableException(error='impossible_request',
                                             error_description='Storage Router {0} is not a member of vPool {1}'.format(sr.name, vpool.name))
        return VPoolController.shrink_vpool.delay(VPoolController, list(intersection)[0])
Пример #7
0
    def get_logfiles(albanode_guid, local_storagerouter_guid):
        """
        Collects logs, moves them to a web-accessible location and returns log tgz's filename
        :param albanode_guid: Alba Node guid to retrieve log files on
        :type albanode_guid: str
        :param local_storagerouter_guid: Guid of the StorageRouter on which the collect logs was initiated, eg: through the GUI
        :type local_storagerouter_guid: str
        :return: Name of tgz containing the logs
        :rtype: str
        """
        web_path = '/opt/OpenvStorage/webapps/frontend/downloads'
        alba_node = AlbaNode(albanode_guid)
        logfile_name = alba_node.client.get_logs()['filename']
        # Basic-auth credentials are embedded in the URL for the wget call below
        download_url = 'https://{0}:{1}@{2}:{3}/downloads/{4}'.format(
            alba_node.username, alba_node.password, alba_node.ip,
            alba_node.port, logfile_name)

        storagerouter = StorageRouter(local_storagerouter_guid)
        client = SSHClient(endpoint=storagerouter, username='******')
        client.dir_create(web_path)
        # Fetch the archive into the web root, then make it world-readable
        wget_command = ['wget', download_url, '--directory-prefix', web_path, '--no-check-certificate']
        client.run(wget_command)
        client.run(['chmod', '666', '{0}/{1}'.format(web_path, logfile_name)])
        return logfile_name
Пример #8
0
    def get_physical_metadata(files, storagerouter_guid):
        """
        Gets physical information about the machine this task is running on
        :param files: Iterable of file paths whose existence should be reported
        :param storagerouter_guid: Guid of the StorageRouter this task runs for
        :return: Dict with 'mountpoints', 'ipaddresses', 'files' and 'allow_vpool' keys
        :rtype: dict
        """
        from ovs.lib.vpool import VPoolController

        storagerouter = StorageRouter(storagerouter_guid)
        # Parse 'mount -v' output: the mount point is the third whitespace-separated
        # field. Split each line once instead of re-splitting per condition.
        mountpoints = []
        for line in check_output('mount -v', shell=True).strip().split('\n'):
            parts = line.split(' ')
            if len(parts) <= 2:
                continue
            mountpoint = parts[2]
            # Skip pseudo filesystems and the root mount point
            if mountpoint == '/' or mountpoint.startswith(('/dev', '/proc', '/sys', '/run')):
                continue
            mountpoints.append(mountpoint)
        # The arakoon database location must not be offered as a candidate mount point
        arakoon_mountpoint = Configuration.get('ovs.core.db.arakoon.location')
        if arakoon_mountpoint in mountpoints:
            mountpoints.remove(arakoon_mountpoint)
        if storagerouter.pmachine.hvtype == 'KVM':
            ipaddresses = ['127.0.0.1']
        else:
            ip_path = Configuration.get('ovs.core.ip.path')
            if ip_path is None:
                ip_path = "`which ip`"
            ipaddresses = check_output("{0} a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1".format(ip_path), shell=True).strip().split('\n')
            ipaddresses = [ip.strip() for ip in ipaddresses]
            # Guard the removal: list.remove raises ValueError when no loopback entry exists
            if '127.0.0.1' in ipaddresses:
                ipaddresses.remove('127.0.0.1')
        allow_vpool = VPoolController.can_be_served_on(storagerouter_guid)
        # os.path.isfile already implies existence, so a separate exists() check is redundant
        file_existence = dict((check_file, os.path.isfile(check_file)) for check_file in files)
        return {'mountpoints': mountpoints,
                'ipaddresses': ipaddresses,
                'files': file_existence,
                'allow_vpool': allow_vpool}
Пример #9
0
 def create(self, name, size, vpool_guid, storagerouter_guid, pagecache_ratio=1.0, cache_quota=None):
     """
     Create a new vdisk
     :param name: Name of the new vdisk
     :type name: str
     :param size: Size of  virtual disk in bytes
     :type size: int
     :param vpool_guid: Guid of vPool to create new vdisk on
     :type vpool_guid: str
     :param storagerouter_guid: Guid of the storagerouter to assign disk to
     :type storagerouter_guid: str
     :param pagecache_ratio: Ratio (0 < x <= 1) of the pagecache size related to the size
     :type pagecache_ratio: float
     :param cache_quota: Maximum caching space(s) the new volume can consume (in Bytes) per cache type.
     :type cache_quota: dict
     :return: Asynchronous result of a CeleryTask
     :rtype: celery.result.AsyncResult
     """
     storagerouter = StorageRouter(storagerouter_guid)
     # Find the StorageDriver that serves the requested vPool on this StorageRouter
     matching_drivers = [sd for sd in storagerouter.storagedrivers if sd.vpool_guid == vpool_guid]
     if not matching_drivers:
         raise HttpNotAcceptableException(error_description='No storagedriver found for vPool: {0} and StorageRouter: {1}'.format(vpool_guid, storagerouter_guid),
                                          error='impossible_request')
     return VDiskController.create_new.delay(volume_name=name,
                                             volume_size=size,
                                             storagedriver_guid=matching_drivers[0].guid,
                                             pagecache_ratio=pagecache_ratio,
                                             cache_quota=cache_quota)
Пример #10
0
 def ping(storagerouter_guid, timestamp):
     """
     Update a StorageRouter's celery heartbeat
     :param storagerouter_guid: Guid of the StorageRouter to update
     :type storagerouter_guid: str
     :param timestamp: Timestamp to compare to
     :type timestamp: float
     :return: None
     :rtype: NoneType
     """
     mutex_name = 'storagerouter_heartbeat_{0}'.format(storagerouter_guid)
     with volatile_mutex(mutex_name):
         storagerouter = StorageRouter(storagerouter_guid)
         # Only move the heartbeat forward; stale/out-of-order pings are ignored
         last_beat = storagerouter.heartbeats.get('celery', 0)
         if timestamp > last_beat:
             storagerouter.heartbeats['celery'] = timestamp
             storagerouter.save()
Пример #11
0
    def validate_vdisk(self):
        """
        Validates if the vDisk is ready for ensuring the MDS safety
        :raises SRCObjectNotFoundException: If the vDisk is not associated with a StorageRouter
        :raises RuntimeError: if
        - Current host is in the excluded storagerouters
        - vDisk is in a different state than running
        - The MDS configuration of the vDisk could not be retrieved
        :return: None
        :rtype: NoneType
        """
        # Refresh cached dynamic properties before validating them
        self.vdisk.invalidate_dynamics(['info', 'storagerouter_guid'])

        if self.vdisk.storagerouter_guid is None:
            raise SRCObjectNotFoundException(
                'Cannot ensure MDS safety for vDisk {0} with guid {1} because vDisk is not attached to any StorageRouter'
                .format(self.vdisk.name, self.vdisk.guid))

        vdisk_storagerouter = StorageRouter(self.vdisk.storagerouter_guid)
        if vdisk_storagerouter in self.excluded_storagerouters:
            raise RuntimeError(
                'Current host ({0}) of vDisk {1} is in the list of excluded StorageRouters'
                .format(vdisk_storagerouter.ip, self.vdisk.guid))

        if self.vdisk.info['live_status'] != VDisk.STATUSES.RUNNING:
            raise RuntimeError(
                'vDisk {0} is not {1}, cannot update MDS configuration'.format(
                    self.vdisk.guid, VDisk.STATUSES.RUNNING))

        # Capture the MDS layout as it was before the safety run
        self.metadata_backend_config_start = self.vdisk.info[
            'metadata_backend_config']
        if self.vdisk.info['metadata_backend_config'] == {}:
            # Message fixed: a stray '}' previously leaked into the output text
            raise RuntimeError(
                'Configured MDS layout for vDisk {0} could not be retrieved, cannot update MDS configuration'
                .format(self.vdisk.guid))
Пример #12
0
 def list(self, storagerouterguid=None):
     """
     Overview of all disks
     """
     if storagerouterguid is None:
         # No filter: every known disk
         return DiskList.get_disks()
     # Only the disks attached to the requested StorageRouter
     return StorageRouter(storagerouterguid).disks
Пример #13
0
 def delete(self, vdisk):
     """
     Delete vdisk
     @param vdisk Guid of the vdisk to delete:
     """
     # Route the delete task to the StorageRouter currently hosting the vDisk
     host = StorageRouter(vdisk.storagerouter_guid)
     delete_signature = VDiskController.delete.s(diskguid=vdisk.guid)
     return delete_signature.apply_async(routing_key="sr.{0}".format(host.machine_id))
Пример #14
0
 def get_storagerouter_ip(storagerouter_guid):
     """
     Look up the IP address of a StorageRouter
     :param storagerouter_guid: guid of a storagerouter
     :type storagerouter_guid: str
     :return: storagerouter ip
     :rtype: str
     """
     storagerouter = StorageRouter(storagerouter_guid)
     return storagerouter.ip
Пример #15
0
 def get_storagerouter_by_guid(storagerouter_guid):
     """
     Fetch the StorageRouter hybrid object identified by the given guid
     :param storagerouter_guid: guid of a storagerouter
     :type storagerouter_guid: str
     :return: the StorageRouter object (not its guid)
     :rtype: ovs.dal.hybrids.storagerouter.StorageRouter
     """
     return StorageRouter(storagerouter_guid)
Пример #16
0
 def list(self, storagerouterguid=None):
     """
     Overview of all disks
     :param storagerouterguid: The StorageRouter to get the disks from
     :type storagerouterguid: guid
     """
     if storagerouterguid is None:
         # No filter: every known disk
         return DiskList.get_disks()
     storagerouter = StorageRouter(storagerouterguid)
     return storagerouter.disks
Пример #17
0
 def schedule_backend_sync(self, vdisk):
     """
     Schedule a backend sync on a vdisk
     :param vdisk: vdisk to schedule a backend sync to
     :return: TLogName associated with the data sent off to the backend
     """
     # Route the task to the StorageRouter currently hosting the vDisk
     host = StorageRouter(vdisk.storagerouter_guid)
     sync_signature = VDiskController.schedule_backend_sync.s(vdisk_guid=vdisk.guid)
     return sync_signature.apply_async(routing_key="sr.{0}".format(host.machine_id))
    def check_recovery_domains(result_handler):
        result_handler.info('Checking recovery domains:')
        # Names of every domain that is used as a regular domain somewhere
        regular_domain_names = [domain.name for domain in DomainList.get_domains()
                                if len(domain.storage_router_layout['regular']) >= 1]
        for domain in DomainList.get_domains():
            layout = domain.storage_router_layout
            recovery_guids = layout['recovery']
            regular_guids = layout['regular']
            # A recovery-only domain is suspicious: it should also back a regular domain somewhere
            if len(recovery_guids) >= 1 and domain.name not in regular_domain_names:
                sr_ips = ', '.join([StorageRouter(guid).ip for guid in recovery_guids])
                result_handler.warning('Domain {0} set as recovery domain on storagerouter(s) {1}, but nowhere as regular domain'.format(domain.name, sr_ips))
            else:
                result_handler.info('Domain {0} passed test, set {1} time(s) as regular domain'.format(domain.name, len(regular_guids)))

            # The same StorageRouter must not use one domain as both regular and recovery
            overlap = set(recovery_guids).intersection(regular_guids)
            if overlap:
                sr_ips = ', '.join([StorageRouter(guid).ip for guid in overlap])
                result_handler.warning('Recovery domain {0} is also found to be a regular domain in {1}.'.format(domain.name, sr_ips))
Пример #19
0
 def is_volume_synced_up_to_snapshot(self, vdisk, snapshot_id):
     """
     Verify if volume is synced to backend up to a specific snapshot
     :param vdisk: vdisk to verify
     :param snapshot_id: Snapshot to verify
     """
     # Route the verification task to the StorageRouter hosting the vDisk
     host = StorageRouter(vdisk.storagerouter_guid)
     check_signature = VDiskController.is_volume_synced_up_to_snapshot.s(vdisk_guid=vdisk.guid,
                                                                        snapshot_id=snapshot_id)
     return check_signature.apply_async(routing_key="sr.{0}".format(host.machine_id))
Пример #20
0
 def get_ip_addresses(storagerouter_guid):
     """
     Retrieves the IP addresses of a StorageRouter
     :param storagerouter_guid: Guid of the StorageRouter
     :return: List of IP addresses
     :rtype: list
     """
     storagerouter = StorageRouter(storagerouter_guid)
     client = SSHClient(endpoint=storagerouter)
     # Delegate to the OS manager, which inspects the node over the SSH client
     return StorageRouterController._os_manager.get_ip_addresses(client=client)
Пример #21
0
 def is_volume_synced_up_to_tlog(self, vdisk, tlog_name):
     """
     Verify if volume is synced to backend up to a specific tlog
     :param vdisk: vdisk to verify
     :param tlog_name: TLogName to verify
     """
     # Route the verification task to the StorageRouter hosting the vDisk
     host = StorageRouter(vdisk.storagerouter_guid)
     check_signature = VDiskController.is_volume_synced_up_to_tlog.s(vdisk_guid=vdisk.guid,
                                                                    tlog_name=tlog_name)
     return check_signature.apply_async(routing_key="sr.{0}".format(host.machine_id))
Пример #22
0
    def __init__(self, endpoint, username='******', password=None):
        """
        Initializes an SSHClient
        :param endpoint: IP address string or StorageRouter object to connect to
        :param username: User to connect as; None means the current executing user
        :param password: Password for the connection; None relies on other auth (e.g. keys)
        :raises ValueError: If endpoint is neither a valid IP string nor a StorageRouter
        :raises UnableToConnectException: If the StorageRouter's process heartbeat is stale,
                                          or no route to the host exists
        """
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError('Incorrect IP {0} specified'.format(ip))
        elif Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            endpoint = StorageRouter(endpoint.guid)
            process_heartbeat = endpoint.heartbeats.get('process')
            ip = endpoint.ip
            if process_heartbeat is not None:
                # A heartbeat older than 300 seconds marks the node as unreachable
                if time.time() - process_heartbeat > 300:
                    message = 'StorageRouter {0} process heartbeat > 300s'.format(
                        ip)
                    logger.error(message)
                    raise UnableToConnectException(message)
        else:
            raise ValueError(
                'The endpoint parameter should be either an ip address or a StorageRouter'
            )

        # Silence paramiko's verbose INFO-level logging
        logging.getLogger('paramiko').setLevel(logging.WARNING)
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        self.ip = ip
        # Collect the local node's IP addresses to decide whether the target is local
        local_ips = check_output(
            "ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1",
            shell=True).strip().splitlines()
        self.local_ips = [ip.strip() for ip in local_ips]
        self.is_local = self.ip in self.local_ips

        current_user = check_output('whoami', shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = False  # If specified user differs from current executing user, we always use the paramiko SSHClient
        self.password = password

        # Only open a real SSH connection for remote targets
        if not self.is_local:
            try:
                self._connect()
            except socket.error, ex:  # Python 2 exception syntax
                if 'No route to host' in str(ex):
                    message = 'SocketException: No route to host {0}'.format(
                        ip)
                    logger.error(message)
                    raise UnableToConnectException(message)
                raise
Пример #23
0
 def move_away(storagerouter_guid):
     """
     Moves away all vDisks from all Storage Drivers this Storage Router is serving
     """
     storagedrivers = StorageRouter(storagerouter_guid).storagedrivers
     if not storagedrivers:
         # Nothing served by this StorageRouter
         return
     # A single client, loaded for the first storagedriver's vPool, marks all drivers offline
     client = StorageDriverClient.load(storagedrivers[0].vpool)
     for storagedriver in storagedrivers:
         client.mark_node_offline(str(storagedriver.storagedriver_id))
Пример #24
0
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        # Load dummy stores
        PersistentFactory.store = DummyPersistentStore()
        VolatileFactory.store = DummyVolatileStore()
        # Replace mocked classes
        sys.modules[
            'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
        # Import required modules/classes after mocking is done
        from ovs.dal.hybrids.vdisk import VDisk
        from ovs.dal.hybrids.service import Service
        from ovs.dal.hybrids.vpool import VPool
        from ovs.dal.hybrids.storagerouter import StorageRouter
        from ovs.dal.hybrids.pmachine import PMachine
        from ovs.dal.hybrids.servicetype import ServiceType
        from ovs.dal.hybrids.storagedriver import StorageDriver
        from ovs.dal.hybrids.backendtype import BackendType
        from ovs.dal.hybrids.j_mdsservice import MDSService
        from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
        from ovs.extensions.generic.volatilemutex import VolatileMutex
        from ovs.lib.mdsservice import MDSServiceController
        # Globalize mocked classes
        # (the imports above are function-local; the global statements expose them
        # at module level so the tests can reference them directly)
        global VDisk
        global VPool
        global Service
        global StorageRouter
        global StorageDriver
        global BackendType
        global PMachine
        global MDSService
        global ServiceType
        global MDSServiceVDisk
        global VolatileMutex
        global MDSServiceController
        # NOTE(review): each class is instantiated once and the results discarded —
        # presumably to trigger model registration against the dummy stores; confirm
        _ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \
            StorageRouter(), StorageDriver(), BackendType(), PMachine(), \
            VolatileMutex('dummy'), MDSServiceController

        # Configuration
        # Patch Configuration.get to read straight from the dummy persistent store
        def _get(key):
            c = PersistentFactory.get_client()
            if c.exists(key):
                return c.get(key)
            return None

        Configuration.get = staticmethod(_get)

        # Cleaning storage
        VolatileFactory.store.clean()
        PersistentFactory.store.clean()
Пример #25
0
 def mark_offline(storagerouter_guid):
     """
     Marks all StorageDrivers on this StorageRouter offline
     :param storagerouter_guid: Guid of the Storage Router
     :type storagerouter_guid: str
     :return: None
     """
     storagerouter = StorageRouter(storagerouter_guid)
     for storagedriver in storagerouter.storagedrivers:
         vpool = storagedriver.vpool
         # Single-driver vPools are skipped: no remaining driver could take over
         if len(vpool.storagedrivers) <= 1:
             continue
         client = StorageDriverClient.load(vpool, excluded_storagedrivers=[storagedriver])
         client.mark_node_offline(str(storagedriver.storagedriver_id))
Пример #26
0
 def mountpoint_exists(name, storagerouter_guid):
     """
     Checks whether a given mount point for a vPool exists
     :param name: Name of the mount point to check
     :type name: str
     :param storagerouter_guid: Guid of the StorageRouter on which to check for mount point existence
     :type storagerouter_guid: str
     :return: True if mount point not in use else False
     :rtype: bool
     """
     storagerouter = StorageRouter(storagerouter_guid)
     client = SSHClient(storagerouter)
     mountpoint = '/mnt/{0}'.format(name)
     return client.dir_exists(directory=mountpoint)
Пример #27
0
    def update_storagedrivers(self,
                              vpool,
                              storagedriver_guid,
                              storagerouter_guids=None,
                              storagedriver_guids=None):
        """
        Update Storage Drivers for a given vPool (both adding and removing Storage Drivers)
        :param vpool: vPool whose Storage Drivers are updated
        :param storagedriver_guid: Guid of the reference StorageDriver (source of storage_ip)
        :param storagerouter_guids: Comma-separated guids of StorageRouters to add
        :param storagedriver_guids: Comma-separated guids of StorageDrivers to keep
        """
        # Resolve the StorageRouters to add, as (ip, machine_id) tuples
        storagerouters = []
        if storagerouter_guids is not None:
            if storagerouter_guids.strip() != '':
                for storagerouter_guid in storagerouter_guids.strip().split(','):
                    storagerouter = StorageRouter(storagerouter_guid)
                    storagerouters.append((storagerouter.ip, storagerouter.machine_id))

        # Validate that every StorageDriver to keep belongs to this vPool.
        # Bug fix: the loop variable used to shadow the 'storagedriver_guid'
        # parameter, so the lookup below used the LAST guid in the list instead
        # of the parameter; the loop variable is now named distinctly.
        valid_storagedriver_guids = []
        if storagedriver_guids is not None:
            if storagedriver_guids.strip() != '':
                for candidate_guid in storagedriver_guids.strip().split(','):
                    candidate = StorageDriver(candidate_guid)
                    if candidate.vpool_guid != vpool.guid:
                        raise NotAcceptable('Given Storage Driver does not belong to this vPool')
                    valid_storagedriver_guids.append(candidate.guid)

        storagedriver = StorageDriver(storagedriver_guid)
        connection = vpool.connection
        parameters = {'connection_host': None if connection is None else connection.split(':')[0],
                      'connection_port': None if connection is None else int(connection.split(':')[1]),
                      'connection_username': vpool.login,
                      'connection_password': vpool.password,
                      'storage_ip': storagedriver.storage_ip,
                      'type': vpool.backend_type.code,
                      'vpool_name': vpool.name}
        # Normalize any string-like values to plain str for serialization
        for field in parameters:
            if isinstance(parameters[field], basestring):
                parameters[field] = str(parameters[field])

        return StorageRouterController.update_storagedrivers.delay(
            valid_storagedriver_guids, storagerouters, parameters)
Пример #28
0
    def get_my_storagerouter():
        """
        Returns unique machine storagerouter id
        """

        from ovs.dal.hybrids.storagerouter import StorageRouter
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        # Lazily resolve and cache the guid of this machine's StorageRouter
        if not System.my_storagerouter_guid:
            local_machine_id = System.get_my_machine_id()
            for storagerouter in StorageRouterList.get_storagerouters():
                if storagerouter.machine_id == local_machine_id:
                    System.my_storagerouter_guid = storagerouter.guid
        return StorageRouter(System.my_storagerouter_guid)
Пример #29
0
    def test_from_single_node_to_multi_node(self):
        """
        Deploy a vDisk on a single node --> This should result in no DTL configured
        Add an additional node and verify DTL will be set
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        # With only one node, the DTL checkup must leave the config empty (None)
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add a Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        storagerouter = StorageRouter()
        storagerouter.name = '2'
        storagerouter.ip = '10.0.0.2'
        storagerouter.rdma_capable = False
        storagerouter.save()
        storagerouters[2] = storagerouter
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        # Give the new StorageRouter a StorageDriver for the same vPool so it can
        # act as a DTL target
        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storagerouter
        storagedriver.name = '2'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouter.ip
        storagedriver.storage_ip = '10.0.1.2'
        storagedriver.storagedriver_id = '2'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()
        # The checkup should now target the second node's storage_ip on the dtl port
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Пример #30
0
 def clone(self, vdisk, name, storagerouter_guid, snapshot_id=None):
     """
     Clones a vDisk
     :param vdisk: Guid of the virtual disk to clone
     :param name: Name for the clone
     :param storagerouter_guid: Guid of the storagerouter hosting the virtual disk
     :param snapshot_id: ID of the snapshot to clone from
     """
     host = StorageRouter(storagerouter_guid)
     # The clone is created detached, on the pmachine backing the given StorageRouter
     return VDiskController.clone.delay(diskguid=vdisk.guid,
                                        snapshotid=snapshot_id,
                                        devicename=name,
                                        pmachineguid=host.pmachine_guid,
                                        detached=True)
Пример #31
0
 def get_version_info(storagerouter_guid):
     """
     Collect installed package version information for a given StorageRouter.
     :param storagerouter_guid: StorageRouter guid to get version information for
     :type storagerouter_guid: str
     :return: Version information
     :rtype: dict
     """
     # Open an SSH connection to the target StorageRouter and query its package manager
     client = SSHClient(StorageRouter(storagerouter_guid))
     package_manager = PackageFactory.get_manager()
     installed = package_manager.get_installed_versions(client)
     # Stringify version objects so the result is plainly serializable
     return {'storagerouter_guid': storagerouter_guid,
             'versions': {pkg_name: str(version) for pkg_name, version in installed.iteritems()}}
Пример #32
0
    def test_from_single_node_to_multi_node(self):
        """
        Deploy a vDisk on a single node --> This should result in no DTL configured
        Add an additional node and verify DTL will be set
        """
        # Initial layout: 1 vPool, 1 vDisk, 1 StorageRouter, 1 StorageDriver
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        # Single node: no other node can act as DTL target, so no DTL config is expected
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add a Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        storagerouter = StorageRouter()
        storagerouter.name = '2'
        storagerouter.ip = '10.0.0.2'
        storagerouter.rdma_capable = False
        storagerouter.save()
        storagerouters[2] = storagerouter
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        # The new node also needs a StorageDriver for the vPool before it can serve as DTL target
        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storagerouter
        storagedriver.name = '2'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouter.ip
        storagedriver.storage_ip = '10.0.1.2'
        storagedriver.storagedriver_id = '2'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()
        # DTL should now point at the new node's storage IP on its 'dtl' port, in async mode
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Пример #33
0
    def test_happypath(self):
        """
        Validates the happy path; Hourly snapshots are taken with a few manual consistent
        every now and then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and a stand-alone additional disk
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'VMWARE'
        pmachine.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        # A disk with a SCRUB-role partition is required by the snapshot-deletion logic
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.path = '/dev/non-existent'
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.id = 'disk_partition_id'
        disk_partition.disk = disk
        disk_partition.path = '/dev/disk/non-existent'
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        # vmachine_1 owns two vDisks; vmachine_2 owns one; vdisk_3 is machine-less
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = 'vdisk_1_2'
        vdisk_1_2.volume_id = 'vdisk_1_2'
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = 'dummy'
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = 'vmachine_2'
        vmachine_2.devicename = 'dummy'
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = 'vdisk_2_1'
        vdisk_2_1.volume_id = 'vdisk_2_1'
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = 'dummy'
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = 'vdisk_3'
        vdisk_3.volume_id = 'vdisk_3'
        vdisk_3.vpool = vpool
        vdisk_3.devicename = 'dummy'
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        # Disable caching of the 'snapshots' dynamic property so each access re-evaluates it
        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        # Simulate `amount_of_days` days: delete first (00:30), validate, then take snapshots
        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            print ''
            print 'Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))

            # At the start of the day, delete snapshot policy runs at 00:30
            print '- Deleting snapshots'
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print '- Validating snapshots'
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            print '- Creating snapshots'
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_i_{0}:00'.format(str(h)),
                                                is_consistent=False,
                                                timestamp=timestamp)
                    if h in [6, 12, 18]:
                        ts = (timestamp + (minute * 30))
                        VMachineController.snapshot(machineguid=vm.guid,
                                                    label='ss_c_{0}:30'.format(str(h)),
                                                    is_consistent=True,
                                                    timestamp=ts)

                # vdisk_3 has no vMachine, so its snapshots go through VDiskController directly
                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
Пример #34
0
    def test_happypath(self):
        """
        Validates the happy path; Hourly snapshots are taken with a few manual consistent
        every now and then. The delete policy is executed every day
        """
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        # A disk with a SCRUB-role partition is required by the snapshot-deletion logic
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        vdisk_1 = VDisk()
        vdisk_1.name = 'vdisk_1'
        vdisk_1.volume_id = 'vdisk_1'
        vdisk_1.vpool = vpool
        vdisk_1.devicename = 'dummy'
        vdisk_1.size = 0
        vdisk_1.save()
        vdisk_1.reload_client('storagedriver')

        # Disable caching of the 'snapshots' dynamic property so each access re-evaluates it
        [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            self._print_message('Running in Travis, reducing output.')
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        # Simulate `amount_of_days` days: delete first (00:30), validate, then take snapshots
        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            self._print_message('')
            self._print_message('Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')))

            # At the start of the day, delete snapshot policy runs at 00:30
            self._print_message('- Deleting snapshots')
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            self._print_message('- Validating snapshots')
            self._validate(vdisk_1, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            self._print_message('- Creating snapshots')
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
Пример #35
0
    def build_service_structure(structure, previous_structure=None):
        """
        Builds an MDS service structure
        Example:
            structure = Helper.build_service_structure(
                {'vpools': [1],
                 'domains': [],
                 'storagerouters': [1],
                 'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                 'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
                 'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
            )
        A 'vdisks' key is also supported: [(<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)]
        Pass a previous return value as ``previous_structure`` to extend an existing structure;
        already-known ids are skipped, so the call is incremental.
        """
        if previous_structure is None:
            previous_structure = {}
        # Start from (or extend) the previously built structure
        vdisks = previous_structure.get("vdisks", {})
        vpools = previous_structure.get("vpools", {})
        domains = previous_structure.get("domains", {})
        services = previous_structure.get("services", {})
        mds_services = previous_structure.get("mds_services", {})
        storagerouters = previous_structure.get("storagerouters", {})
        storagedrivers = previous_structure.get("storagedrivers", {})
        storagerouter_domains = previous_structure.get("storagerouter_domains", {})

        # Re-use the MetadataServer service type if it already exists in the model
        service_type = ServiceTypeList.get_by_name("MetadataServer")
        if service_type is None:
            service_type = ServiceType()
            service_type.name = "MetadataServer"
            service_type.save()
        srclients = {}
        # NOTE(review): list vs tuple defaults below are inconsistent but harmless — both iterate empty
        for domain_id in structure.get("domains", []):
            if domain_id not in domains:
                domain = Domain()
                domain.name = "domain_{0}".format(domain_id)
                domain.save()
                domains[domain_id] = domain
        for vpool_id in structure.get("vpools", []):
            if vpool_id not in vpools:
                vpool = VPool()
                vpool.name = str(vpool_id)
                vpool.status = "RUNNING"
                vpool.save()
                vpools[vpool_id] = vpool
            else:
                vpool = vpools[vpool_id]
            # One StorageRouterClient per vPool; needed later to create actual volumes
            srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
        for sr_id in structure.get("storagerouters", []):
            if sr_id not in storagerouters:
                storagerouter = StorageRouter()
                storagerouter.name = str(sr_id)
                storagerouter.ip = "10.0.0.{0}".format(sr_id)
                storagerouter.rdma_capable = False
                storagerouter.node_type = "MASTER"
                storagerouter.machine_id = str(sr_id)
                storagerouter.save()
                storagerouters[sr_id] = storagerouter
                # Every StorageRouter gets one SSD disk with a DB/SCRUB partition
                disk = Disk()
                disk.storagerouter = storagerouter
                disk.state = "OK"
                disk.name = "/dev/uda"
                disk.size = 1 * 1024 ** 4
                disk.is_ssd = True
                disk.aliases = ["/dev/uda"]
                disk.save()
                partition = DiskPartition()
                partition.offset = 0
                partition.size = disk.size
                partition.aliases = ["/dev/uda-1"]
                partition.state = "OK"
                partition.mountpoint = "/tmp/unittest/sr_{0}/disk_1/partition_1".format(sr_id)
                partition.disk = disk
                partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
                partition.save()
        for sd_id, vpool_id, sr_id in structure.get("storagedrivers", ()):
            if sd_id not in storagedrivers:
                storagedriver = StorageDriver()
                storagedriver.vpool = vpools[vpool_id]
                storagedriver.storagerouter = storagerouters[sr_id]
                storagedriver.name = str(sd_id)
                storagedriver.mountpoint = "/"
                storagedriver.cluster_ip = storagerouters[sr_id].ip
                storagedriver.storage_ip = "10.0.1.{0}".format(sr_id)
                storagedriver.storagedriver_id = str(sd_id)
                storagedriver.ports = {"management": 1, "xmlrpc": 2, "dtl": 3, "edge": 4}
                storagedriver.save()
                storagedrivers[sd_id] = storagedriver
                Helper._set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
        for mds_id, sd_id in structure.get("mds_services", ()):
            if mds_id not in mds_services:
                sd = storagedrivers[sd_id]
                # Service name combines the hosting StorageRouter's name and the MDS id
                s_id = "{0}-{1}".format(sd.storagerouter.name, mds_id)
                service = Service()
                service.name = s_id
                service.storagerouter = sd.storagerouter
                service.ports = [mds_id]
                service.type = service_type
                service.save()
                services[s_id] = service
                mds_service = MDSService()
                mds_service.service = service
                mds_service.number = 0
                mds_service.capacity = 10
                mds_service.vpool = sd.vpool
                mds_service.save()
                mds_services[mds_id] = mds_service
                # MDS needs a DB/MDS sub-role partition on the StorageDriver
                StorageDriverController.add_storagedriverpartition(
                    sd,
                    {
                        "size": None,
                        "role": DiskPartition.ROLES.DB,
                        "sub_role": StorageDriverPartition.SUBROLE.MDS,
                        "partition": sd.storagerouter.disks[0].partitions[0],
                        "mds_service": mds_service,
                    },
                )
        for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get("vdisks", ()):
            if vdisk_id not in vdisks:
                vpool = vpools[vpool_id]
                devicename = "vdisk_{0}".format(vdisk_id)
                mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                    [] if mds_id is None else [mds_services[mds_id]]
                )
                # Create the actual volume through the StorageRouterClient first, then model it
                volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
                vdisk = VDisk()
                vdisk.name = str(vdisk_id)
                vdisk.devicename = devicename
                vdisk.volume_id = volume_id
                vdisk.vpool = vpool
                vdisk.size = 0
                vdisk.save()
                vdisk.reload_client("storagedriver")
                vdisks[vdisk_id] = vdisk
        for srd_id, sr_id, domain_id, backup in structure.get("storagerouter_domains", ()):
            if srd_id not in storagerouter_domains:
                sr_domain = StorageRouterDomain()
                sr_domain.backup = backup
                sr_domain.domain = domains[domain_id]
                sr_domain.storagerouter = storagerouters[sr_id]
                sr_domain.save()
                storagerouter_domains[srd_id] = sr_domain
        return {
            "vdisks": vdisks,
            "vpools": vpools,
            "domains": domains,
            "services": services,
            "service_type": service_type,
            "mds_services": mds_services,
            "storagerouters": storagerouters,
            "storagedrivers": storagedrivers,
            "storagerouter_domains": storagerouter_domains,
        }
Пример #36
0
    def _prepare(self):
        """
        Build a minimal data model fixture: a vPool, a template vMachine with one vDisk,
        a StorageRouter with StorageDriver, and one MDS service. Also stubs out
        MDSServiceController.ensure_safety and VDiskController.dtl_checkup so tests
        do not trigger real MDS/DTL machinery.
        :return: Tuple of the created template vDisk and its PMachine
        """
        # Setup
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'KVM'
        pmachine.save()
        # The vMachine is flagged as a template; its vDisk serves as the clone source
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.is_vtemplate = True
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storage_router
        storagedriver.name = '1'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storage_router.ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '1'
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        # Service name combines the hosting StorageRouter's name with an MDS index
        s_id = '{0}-{1}'.format(storagedriver.storagerouter.name, '1')
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        # No-op stubs: monkey-patch away MDS safety checks and DTL checkups for the tests
        def ensure_safety(vdisk):
            pass
        class Dtl_Checkup():
            @staticmethod
            def delay(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
                pass
        MDSServiceController.ensure_safety = staticmethod(ensure_safety)
        VDiskController.dtl_checkup = Dtl_Checkup
        return vdisk_1_1, pmachine
Пример #37
0
 def _build_service_structure(self, structure):
     """
     Build a fake MDS service structure (vPools, StorageRouters, StorageDrivers,
     Services and MDS services) according to the given layout description.
     :param structure: Dict with 'vpools', 'storagerouters', 'storagedrivers'
                       (<id>, <vpool_id>, <sr_id>) and 'mds_services' (<id>, <sd_id>)
     :return: Tuple of (vpools, storagerouters, storagedrivers, services,
              mds_services, service_type) keyed by their ids
     """
     vpools, storagerouters, storagedrivers = {}, {}, {}
     services, mds_services = {}, {}
     # Shared model objects every created entity hangs off of
     service_type = ServiceType()
     service_type.name = 'MetadataServer'
     service_type.save()
     backend_type = BackendType()
     backend_type.name = 'BackendType'
     backend_type.code = 'BT'
     backend_type.save()
     pmachine = PMachine()
     pmachine.name = 'PMachine'
     pmachine.username = '******'
     pmachine.ip = '127.0.0.1'
     pmachine.hvtype = 'VMWARE'
     pmachine.save()
     for vpool_id in structure['vpools']:
         pool = VPool()
         pool.name = str(vpool_id)
         pool.backend_type = backend_type
         pool.save()
         vpools[vpool_id] = pool
     for sr_id in structure['storagerouters']:
         router = StorageRouter()
         router.name = str(sr_id)
         router.ip = '10.0.0.{0}'.format(sr_id)
         router.pmachine = pmachine
         router.save()
         storagerouters[sr_id] = router
     # Unpack (<id>, <vpool_id>, <sr_id>) directly in the loop header
     for sd_id, vpool_id, sr_id in structure['storagedrivers']:
         driver = StorageDriver()
         driver.vpool = vpools[vpool_id]
         driver.storagerouter = storagerouters[sr_id]
         driver.name = str(sd_id)
         driver.mountpoint = '/'
         driver.cluster_ip = storagerouters[sr_id].ip
         driver.storage_ip = '127.0.0.1'
         driver.storagedriver_id = str(sd_id)
         driver.ports = [1, 2, 3]
         driver.save()
         storagedrivers[sd_id] = driver
     # Unpack (<id>, <sd_id>); service name combines router name and MDS id
     for mds_id, sd_id in structure['mds_services']:
         sd = storagedrivers[sd_id]
         service_name = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
         service = Service()
         service.name = service_name
         service.storagerouter = sd.storagerouter
         service.ports = [mds_id]
         service.type = service_type
         service.save()
         services[service_name] = service
         mds_service = MDSService()
         mds_service.service = service
         mds_service.number = 0
         mds_service.capacity = 10
         mds_service.vpool = sd.vpool
         mds_service.save()
         mds_services[mds_id] = mds_service
     return vpools, storagerouters, storagedrivers, services, mds_services, service_type
Пример #38
0
    def test_happypath(self):
        """
        Validates the happy path; Hourly snapshots are taken with a few manual consistent
        every now an then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and an additional disk
        failure_domain = FailureDomain()
        failure_domain.name = "Test"
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = "BackendType"
        backend_type.code = "BT"
        backend_type.save()
        vpool = VPool()
        vpool.name = "vpool"
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = "PMachine"
        pmachine.username = "******"
        pmachine.ip = "127.0.0.1"
        pmachine.hvtype = "VMWARE"
        pmachine.save()
        storage_router = StorageRouter()
        storage_router.name = "storage_router"
        storage_router.ip = "127.0.0.1"
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        disk = Disk()
        disk.name = "physical_disk_1"
        disk.path = "/dev/non-existent"
        disk.size = 500 * 1024 ** 3
        disk.state = "OK"
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.id = "disk_partition_id"
        disk_partition.disk = disk
        disk_partition.path = "/dev/disk/non-existent"
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = "OK"
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = "/var/tmp"
        disk_partition.save()
        vmachine_1 = VMachine()
        vmachine_1.name = "vmachine_1"
        vmachine_1.devicename = "dummy"
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = "vdisk_1_1"
        vdisk_1_1.volume_id = "vdisk_1_1"
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = "dummy"
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = "vdisk_1_2"
        vdisk_1_2.volume_id = "vdisk_1_2"
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = "dummy"
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = "vmachine_2"
        vmachine_2.devicename = "dummy"
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = "vdisk_2_1"
        vdisk_2_1.volume_id = "vdisk_2_1"
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = "dummy"
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = "vdisk_3"
        vdisk_3.volume_id = "vdisk_3"
        vdisk_3.vpool = vpool
        vdisk_3.devicename = "dummy"
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == "snapshots"][0].timeout = 0

        # Run the testing scenario
        debug = True
        amount_of_days = 50
        base = datetime.now().date()
        day = timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = DeleteSnapshots._make_timestamp(base, day * d)
            print ""
            print "Day cycle: {0}: {1}".format(d, datetime.fromtimestamp(base_timestamp).strftime("%Y-%m-%d"))

            # At the start of the day, delete snapshot policy runs at 00:30
            print "- Deleting snapshots"
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print "- Validating snapshots"
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            print "- Creating snapshots"
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(
                        machineguid=vm.guid,
                        label="ss_i_{0}:00".format(str(h)),
                        is_consistent=False,
                        timestamp=timestamp,
                    )
                    if h in [6, 12, 18]:
                        ts = timestamp + (minute * 30)
                        VMachineController.snapshot(
                            machineguid=vm.guid, label="ss_c_{0}:30".format(str(h)), is_consistent=True, timestamp=ts
                        )

                VDiskController.create_snapshot(
                    diskguid=vdisk_3.guid,
                    metadata={
                        "label": "ss_i_{0}:00".format(str(h)),
                        "is_consistent": False,
                        "timestamp": str(timestamp),
                        "machineguid": None,
                    },
                )
                if h in [6, 12, 18]:
                    ts = timestamp + (minute * 30)
                    VDiskController.create_snapshot(
                        diskguid=vdisk_3.guid,
                        metadata={
                            "label": "ss_c_{0}:30".format(str(h)),
                            "is_consistent": True,
                            "timestamp": str(ts),
                            "machineguid": None,
                        },
                    )
Пример #39
0
    def test_clone_snapshot(self):
        """
        Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted
        """
        # Setup
        # There are 2 disks, second one cloned from a snapshot of the first
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        storage_driver = StorageDriver()
        storage_driver.vpool = vpool
        storage_driver.storagerouter = storage_router
        storage_driver.name = 'storage_driver_1'
        storage_driver.mountpoint = '/'
        storage_driver.cluster_ip = storage_router.ip
        storage_driver.storage_ip = '127.0.0.1'
        storage_driver.storagedriver_id = 'storage_driver_1'
        storage_driver.ports = {'management': 1,
                                'xmlrpc': 2,
                                'dtl': 3,
                                'edge': 4}
        storage_driver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        service = Service()
        service.name = 'service_1'
        service.storagerouter = storage_driver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storage_driver.vpool
        mds_service.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client('storagedriver')

        [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'

        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        base_timestamp = self._make_timestamp(base, day)
        minute = 60
        hour = minute * 60
        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
        clone_vdisk = VDisk()
        clone_vdisk.name = 'clone_vdisk'
        clone_vdisk.volume_id = 'clone_vdisk'
        clone_vdisk.vpool = vpool
        clone_vdisk.devicename = 'dummy'
        clone_vdisk.parentsnapshot = base_snapshot_guid
        clone_vdisk.size = 0
        clone_vdisk.save()
        clone_vdisk.reload_client('storagedriver')

        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_timestamp = self._make_timestamp(base, day * 2)
        ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))
        self.assertIn(base_snapshot_guid, [snap['guid'] for snap in vdisk_1_1.snapshots], 'Snapshot was deleted while there are still clones of it')
Пример #40
0
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.factory = None
        # Start every run from clean persistent and volatile stores
        PersistentFactory.get_client().clean()
        VolatileFactory.get_client().clean()

        # Create groups
        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()
        admin_npg = User()
        admin_npg.username = '******'
        admin_npg.password = hashlib.sha256('admin_npg').hexdigest()
        admin_npg.is_active = True
        admin_npg.group = admin_group
        admin_npg.save()
        admin_na = User()
        admin_na.username = '******'
        admin_na.password = hashlib.sha256('admin_na').hexdigest()
        admin_na.is_active = False  # Inactive admin, used to test disabled accounts
        admin_na.group = admin_group
        admin_na.save()
        user = User()
        user.username = '******'
        user.password = hashlib.sha256('user').hexdigest()
        user.is_active = True
        user.group = viewers_group
        user.save()

        # Create internal OAuth 2 clients
        # Note: admin_npg deliberately gets no client (tests the no-client path)
        admin_client = Client()
        admin_client.ovs_type = 'INTERNAL'
        admin_client.grant_type = 'PASSWORD'
        admin_client.user = admin
        admin_client.save()
        admin_na_client = Client()
        admin_na_client.ovs_type = 'INTERNAL'
        admin_na_client.grant_type = 'PASSWORD'
        admin_na_client.user = admin_na
        admin_na_client.save()
        user_client = Client()
        user_client.ovs_type = 'INTERNAL'
        user_client.grant_type = 'PASSWORD'
        user_client.user = user
        user_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles: admins get all roles, viewers are read-only
        mapping = [
            (admin_group, [read_role, write_role, manage_role]),
            (viewers_group, [read_role])
        ]
        for group, roles in mapping:
            for role in roles:
                rolegroup = RoleGroup()
                rolegroup.group = group
                rolegroup.role = role
                rolegroup.save()
            # Grant the same roles to every OAuth client of every user in the group.
            # Renamed from 'user' to avoid shadowing the User created above.
            for group_user in group.users:
                for role in roles:
                    for client in group_user.clients:
                        roleclient = RoleClient()
                        roleclient.client = client
                        roleclient.role = role
                        roleclient.save()

        # A single StorageRouter representing the local machine.
        # The duplicate machine_id assignment ('storagerouter', immediately
        # overwritten by '1') was removed; '1' matches System._machine_id below.
        storagerouter = StorageRouter()
        storagerouter.ip = '127.0.0.1'
        storagerouter.machine_id = '1'
        storagerouter.rdma_capable = False
        storagerouter.name = 'storagerouter'
        storagerouter.save()

        from django.test import RequestFactory
        cls.factory = RequestFactory()

        # Monkey-patch time.sleep so tests don't actually wait
        fakesleep.monkey_patch()

        Configuration.set('/ovs/framework/plugins/installed', {'generic': [],
                                                               'backends': []})
        Configuration.set('/ovs/framework/cluster_id', 'cluster_id')

        System._machine_id = {'none': '1'}