Example #1
    def __init__(self, vpool_guid, storagedriver_id):
        """
        Initializes the class
        """
        _log_level = LOG_LEVEL_MAPPING[OVSLogger(
            'extensions').getEffectiveLevel()]
        # noinspection PyCallByClass,PyTypeChecker
        storagerouterclient.Logger.setupLogging(
            OVSLogger.load_path('storagerouterclient'), _log_level)
        # noinspection PyArgumentList
        storagerouterclient.Logger.enableLogging()

        self._key = '/ovs/vpools/{0}/hosts/{1}/config'.format(
            vpool_guid, storagedriver_id)
        self._logger = OVSLogger('extensions')
        self._dirty_entries = []

        self.remote_path = Configuration.get_configuration_path(
            self._key).strip('/')
        # Load configuration
        if Configuration.exists(self._key):
            self.configuration = Configuration.get(self._key)
            self.config_missing = False
        else:
            self.configuration = {}
            self.config_missing = True
            self._logger.debug(
                'Could not find config {0}, a new one will be created'.format(
                    self._key))
Example #2
    def __init__(self, ensure_single_container, task):
        """
        Initialize an EnsureSingle container
        :param ensure_single_container: Ensure single arguments container
        :type ensure_single_container: EnsureSingleContainer
        :param task: Task instance
        :type task: celery.AsyncResult
        """
        self.ensure_single_container = ensure_single_container
        # Storage
        self.persistent_key = self.generate_key_for_task(
            ensure_single_container.task_name)
        self.persistent_client = PersistentFactory.get_client()
        self.task_id, self.async_task = self.get_task_id_and_async(task)

        # Logging
        self.logger = Logger('lib')

        # Runtime
        self.now = None
        self.thread_name = None
        self.unittest_mode = None
        self.message = None
        self.runtime_hooks = {}
        self.gather_run_time_info()
Example #3
    def pulse():
        """
        Update the heartbeats for the StorageRouters
        :return: None
        """
        logger = Logger('extensions-generic')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.machine_id == machine_id:
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(
                        node.guid)):
                    node_save = StorageRouter(node.guid)
                    node_save.heartbeats['process'] = current_time
                    node_save.save()
                StorageRouterController.ping.s(
                    node.guid, current_time).apply_async(
                        routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats[
                                'process'] >= HeartBeat.ARP_TIMEOUT:
                            check_output("/usr/sbin/arp -d '{0}'".format(
                                node.name.replace(r"'", r"'\''")),
                                         shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Example #4
 def process_exception(self, request, exception):
     """
     Logs information about the given error
     """
     _ = self, request
     logger = Logger('api')
     if OVSMiddleware.is_own_httpexception(exception):
         return HttpResponse(exception.data,
                             status=exception.status_code,
                             content_type='application/json')
     if isinstance(exception, MissingMandatoryFieldsException):
         return HttpResponse(json.dumps({
             'error':
             'invalid_data',
             'error_description':
             exception.message
         }),
                             status=400,
                             content_type='application/json')
     logger.exception(
         'An unhandled exception occurred: {0}'.format(exception))
     return HttpResponse(json.dumps({
         'error': 'internal_server',
         'error_description': exception.message
     }),
                         status=500,
                         content_type='application/json')
Example #5
 def sync_disk_with_reality(cls,
                            guid=None,
                            ip=None,
                            timeout=None,
                            *args,
                            **kwargs):
     """
     :param guid: guid of the storagerouter
     :type guid: str
     :param ip: ip of the storagerouter
     :type ip: str
     :param timeout: timeout time in seconds
     :type timeout: int
     """
     if guid is not None:
         if ip is not None:
             Logger.warning(
                 'Both storagerouter guid and ip passed, using guid for sync.'
             )
         storagerouter_guid = guid
     elif ip is not None:
         storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(
             ip).guid
     else:
         raise ValueError('No guid or ip passed.')
     task_id = cls.api.post(
         api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid),
         data=None)
     return cls.api.wait_for_task(task_id=task_id, timeout=timeout)
Example #6
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._mutex = volatile_mutex('celery_beat', 10)
     self._logger = Logger('celery')
     self._has_lock = False
     self._lock_name = 'ovs_celery_beat_lock'
     self._entry_name = 'ovs_celery_beat_entries'
     self._persistent = PersistentFactory.get_client()
     self._schedule_info = {}
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     self._logger.debug('DS init')
Example #7
def _log(task, kwargs, storagedriver_id):
    """
    Log an event
    """
    metadata = {
        'storagedriver':
        StorageDriverList.get_by_storagedriver_id(storagedriver_id).guid
    }
    _logger = Logger('volumedriver_event')
    _logger.info('[{0}.{1}] - {2} - {3}'.format(task.__class__.__module__,
                                                task.__class__.__name__,
                                                json.dumps(kwargs),
                                                json.dumps(metadata)))
Example #8
class LogrotateChecks(CIConstants):

    CASE_TYPE = 'AT_QUICK'
    TEST_NAME = "ci_scenario_test_basic_logrotate"
    LOGGER = Logger('scenario-{0}'.format(TEST_NAME))

    def __init__(self):
        pass

    @staticmethod
    @gather_results(CASE_TYPE, LOGGER, TEST_NAME, log_components=[])
    def main(blocked):
        """
        Run all required methods for the test
        :param blocked: was the test blocked by other test?
        :type blocked: bool
        :return: results of test
        :rtype: dict
        """
        _ = blocked
        return LogrotateChecks.validate_basic_log_rotate()

    @staticmethod
    def validate_basic_log_rotate():
        """
        Validate that a basic logrotate script works

        :return:
        """

        LogrotateChecks.LOGGER.info('Starting validating basic logrotate')
        storagerouter_ips = StoragerouterHelper.get_storagerouter_ips()
        assert len(storagerouter_ips) >= 1, "We need at least 1 storagerouter!"
Example #9
def log(log_slow=True):
    """
    Task logger
    :param log_slow: Indicates whether a slow call should be logged
    """
    logger = Logger('api')

    def wrap(f):
        """
        Wrapper function
        """
        @wraps(f)
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)
            logging_start = time.time()

            method_args = list(args)[:]
            method_args = method_args[method_args.index(request) + 1:]

            # Log the call
            metadata = {
                'meta':
                dict((str(key), str(value))
                     for key, value in request.META.iteritems()),
                'request':
                dict((str(key), str(value))
                     for key, value in request.REQUEST.iteritems()),
                'cookies':
                dict((str(key), str(value))
                     for key, value in request.COOKIES.iteritems())
            }
            # Stripping password traces
            for mtype in metadata:
                for key in metadata[mtype]:
                    if 'password' in key:
                        metadata[mtype][key] = '**********************'
            logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__, f.__name__,
                getattr(request, 'client').user_guid if hasattr(
                    request, 'client') else None, json.dumps(method_args),
                json.dumps(kwargs), json.dumps(metadata)))
            logging_duration = time.time() - logging_start

            # Call the function
            start = time.time()
            return_value = f(*args, **kwargs)
            duration = time.time() - start
            if duration > 5 and log_slow is True:
                logger.warning('API call {0}.{1} took {2}s'.format(
                    f.__module__, f.__name__, round(duration, 2)))
            if isinstance(return_value, OVSResponse):
                return_value.timings['logging'] = [logging_duration, 'Logging']
            return return_value

        return new_function

    return wrap
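A hedged usage sketch for the decorator above: the viewset class is an illustrative assumption, while the @log() application follows the signature defined in this example.

# Hedged usage sketch: VDiskViewSet is hypothetical; 'log' is the decorator defined above,
# assumed to be in scope here.
class VDiskViewSet(object):
    @log(log_slow=True)
    def list(self, request):
        # The wrapper logs module, method, user guid and sanitized request metadata,
        # and warns when the call takes longer than 5 seconds.
        return {'data': []}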
Example #10
class ArakoonRemover(object):

    LOGGER = Logger("remove-ci_arakoon_remover")

    def __init__(self):
        pass

    @staticmethod
    @required_arakoon_cluster
    def remove_arakoon_cluster(cluster_name, master_storagerouter_ip):
        """
        Delete a whole arakoon cluster

        :param cluster_name: name of an existing arakoon cluster
        :type cluster_name: str
        :param master_storagerouter_ip: master ip address of an existing arakoon cluster
        :type master_storagerouter_ip: str
        """
        ArakoonRemover.LOGGER.info(
            "Starting removing arakoon cluster with name `{0}`, master_ip `{1}`"
            .format(cluster_name, master_storagerouter_ip))
        arakoon_installer = ArakoonInstaller(cluster_name)
        arakoon_installer.load()
        arakoon_installer.delete_cluster()
        ArakoonRemover.LOGGER.info(
            "Finished removing arakoon cluster with name `{0}`, master_ip `{1}`"
            .format(cluster_name, master_storagerouter_ip))
Example #11
class VPoolValidation(object):

    LOGGER = Logger("validate-ci_role_validate")

    def __init__(self):
        pass

    @staticmethod
    def check_vpool_on_storagerouter(storagerouter_ip, vpool_name):
        """
        Check if the required roles are satisfied

        :param storagerouter_ip: ip address of a storagerouter
        :type storagerouter_ip: str
        :param vpool_name: name of a vpool
        :type vpool_name: str
        :return: is vpool available? True = YES, False = NO
        :rtype: bool
        """

        storagerouter = StoragerouterHelper.get_storagerouter_by_ip(
            storagerouter_ip)
        storagedrivers = StoragedriverHelper.get_storagedrivers_by_storagerouterguid(storagerouter.guid)
        return any(vpool_name in storagedriver.name for storagedriver in storagedrivers)
Example #12
class ServiceFactory(_ServiceFactory):
    """
    Service Factory for the ALBA plugin
    """
    RUN_FILE_DIR = '/opt/OpenvStorage/run'
    SERVICE_CONFIG_KEY = '/ovs/framework/hosts/{0}/services/{1}'
    CONFIG_TEMPLATE_DIR = '/opt/OpenvStorage/config/templates/{0}'
    MONITOR_PREFIXES = ['ovs-']

    _logger = Logger('extensions-service_factory')

    def __init__(self):
        """Init method"""
        raise Exception('This class cannot be instantiated')

    @classmethod
    def _get_system(cls):
        return System

    @classmethod
    def _get_configuration(cls):
        return Configuration

    @classmethod
    def _get_logger_instance(cls):
        return cls._logger
Example #13
class MetadataServerClient(object):
    """
    Builds a MDSClient
    """
    _logger = OVSLogger('extensions')
    _log_level = LOG_LEVEL_MAPPING[_logger.getEffectiveLevel()]
    # noinspection PyCallByClass,PyTypeChecker
    storagerouterclient.Logger.setupLogging(
        OVSLogger.load_path('storagerouterclient'), _log_level)
    # noinspection PyArgumentList
    storagerouterclient.Logger.enableLogging()

    MDS_ROLE = type('MDSRole', (), {
        'MASTER': Role.Master,
        'SLAVE': Role.Slave
    })

    def __init__(self):
        """
        Dummy init method
        """
        pass

    @staticmethod
    def load(service, timeout=20):
        """
        Loads a MDSClient
        :param service: Service for which the MDSClient needs to be loaded
        :type service: ovs.dal.hybrids.service.Service
        :param timeout: All calls performed by this MDSClient instance will time out after this period (in seconds)
        :type timeout: int
        :return: An MDSClient instance for the specified Service
        :rtype: MDSClient
        """
        if service.storagerouter is None:
            raise ValueError(
                'Service {0} does not have a StorageRouter linked to it'.
                format(service.name))

        key = service.guid
        # Create MDSClient instance if no instance has been cached yet or if another timeout has been specified
        if key not in mdsclient_service_cache or timeout != mdsclient_service_cache[
                key]['timeout']:
            try:
                # noinspection PyArgumentList
                mdsclient_service_cache[key] = {
                    'client':
                    MDSClient(timeout_secs=timeout,
                              mds_node_config=MDSNodeConfig(
                                  address=str(service.storagerouter.ip),
                                  port=service.ports[0])),
                    'timeout':
                    timeout
                }
            except RuntimeError:
                MetadataServerClient._logger.exception(
                    'Error loading MDSClient on {0}'.format(
                        service.storagerouter.ip))
                return None
        return mdsclient_service_cache[key]['client']
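A hedged usage sketch for the loader above; obtaining the Service hybrid is outside this example, so it is only passed in as an argument, while the load() call and its None return on failure follow the code shown.

def get_mds_client(service):
    # 'service' is assumed to be an ovs.dal.hybrids.service.Service instance obtained elsewhere.
    client = MetadataServerClient.load(service, timeout=10)
    if client is None:
        # load() returns None when constructing the MDSClient raised a RuntimeError
        raise RuntimeError('No MDS client available for service {0}'.format(service.name))
    return client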
Example #14
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = Logger('oauth2')

    def wrap(f):
        """
        Wrapper function
        """
        @wraps(f)
        def new_function(self, request, *args, **kwargs):
            """
            Wrapped function
            """
            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
            client = VolatileFactory.get_client()
            mutex = volatile_mutex(key)
            try:
                mutex.acquire()
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning(
                            'Call {0} is being throttled with a wait of {1}'.
                            format(key, active_timeout - now))
                        raise HttpTooManyRequestsException(
                            error='rate_limit_timeout',
                            error_description=
                            'Rate limit timeout ({0}s remaining)'.format(
                                round(active_timeout - now, 2)))
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [
                    call for call in rate_info['calls'] if call > (now - per)
                ] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning(
                        'Call {0} is being throttled with a wait of {1}'.
                        format(key, timeout))
                    raise HttpTooManyRequestsException(
                        error='rate_limit_reached',
                        error_description=
                        'Rate limit reached ({0} in last {1}s)'.format(
                            calls, per))
                client.set(key, rate_info)
            finally:
                mutex.release()
            return f(self, request, *args, **kwargs)

        return new_function

    return wrap
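A hedged sketch of how the rate limiter above might be applied; the view class is an assumption, the decorator arguments follow the limit(amount, per, timeout) signature.

# Hedged usage sketch: OAuth2TokenView is hypothetical; 'limit' is the decorator defined above.
class OAuth2TokenView(object):
    @limit(amount=5, per=60.0, timeout=60.0)
    def post(self, request):
        # At most 5 calls per 60 seconds per client IP; exceeding the limit raises
        # HttpTooManyRequestsException and blocks the key for another 60 seconds.
        return {'access_token': None}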
Example #15
class ThreadHelper(object):
    LOGGER = Logger('helpers-ci_threading')

    @staticmethod
    def start_thread_with_event(target, name, args=(), kwargs=None):
        """
        Starts a thread and attaches an event to it.
        The passed target function needs to accept a param 'event' which will contain the stop event object
        :param target: target - usually a method
        :type target: object
        :param name: name of the thread
        :type name: str
        :param args: tuple of arguments
        :type args: tuple
        :param kwargs: dict of keyword arguments for the target
        :type kwargs: dict
        :return: a tuple with the thread and event
        :rtype: tuple(threading.Thread, threading.Event)
        """
        if kwargs is None:
            kwargs = {}
        if 'event' in kwargs:
            raise ValueError('event is a reserved keyword of this function')
        ThreadHelper.LOGGER.info('Starting thread with target {0}'.format(target))
        event = threading.Event()
        kwargs['event'] = event
        thread = threading.Thread(target=target, args=tuple(args), kwargs=kwargs)
        thread.setName(str(name))
        thread.setDaemon(True)
        thread.start()
        return thread, event

    @staticmethod
    def start_thread(target, name, args=(), kwargs=None):
        if kwargs is None:
            kwargs = {}
        ThreadHelper.LOGGER.info('Starting thread with target {0}'.format(target))
        thread = threading.Thread(target=target, args=tuple(args), kwargs=kwargs)
        thread.setName(str(name))
        thread.setDaemon(True)
        thread.start()
        return thread

    @staticmethod
    def stop_evented_threads(thread_pairs, r_semaphore=None, logger=LOGGER, timeout=300):
        for thread_pair in thread_pairs:
            if thread_pair[0].isAlive():
                thread_pair[1].set()
            # Wait again to sync
            logger.info('Syncing threads')
        if r_semaphore is not None:
            start = time.time()
            while r_semaphore.get_counter() < len(thread_pairs):  # Wait for the number of threads we currently have.
                if time.time() - start > timeout:
                    raise RuntimeError('Syncing the thread with the r_semaphore has timed out.')
                time.sleep(0.05)
            r_semaphore.wait()  # Unlock them to let them stop (the object is set -> won't loop)
        # Wait for threads to die
        for thread_pair in thread_pairs:
            thread_pair[0].join()
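A hedged usage sketch for the helper above: the worker function is illustrative, but the contract (the target receives an 'event' kwarg and should stop once it is set) is taken from the docstring.

import time

def _demo_worker(event):
    # Illustrative target: loop until the paired event is set by the caller.
    while not event.is_set():
        time.sleep(0.1)

thread, event = ThreadHelper.start_thread_with_event(_demo_worker, name='demo-worker')
# ... exercise the system under test ...
ThreadHelper.stop_evented_threads([(thread, event)])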
Example #16
class Schedule(object):
    """
    This decorator adds a schedule to a function. All arguments are these from celery's "crontab" class
    """
    _logger = Logger('lib')

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def generate_schedule(self, name):
        """
        Generate a schedule for a Celery task
        :param name: Name of the Celery task to generate a schedule for
        :type name: str
        :return: Crontab and additional information about scheduling
        :rtype: tuple
        """
        Schedule._logger.debug('Generating schedule for {0}'.format(name))
        schedule_key = '/ovs/framework/scheduling/celery'
        try:
            schedules = Configuration.get(key=schedule_key, default={})
        except Exception:
            Schedule._logger.exception(
                'Error loading celery scheduling configuration for {0}'.format(
                    name))
            schedules = {}

        if schedules in ['', None]:  # Can occur when key has once been set and afterwards been emptied
            schedules = {}
        if not isinstance(schedules, dict):
            raise ValueError(
                'Value for key "{0}" should be a dictionary'.format(
                    schedule_key))

        if name in schedules:
            schedule = schedules[name]
            if schedule is None:
                return None, 'disabled by configuration'
            source = 'scheduled from configuration'
        else:
            schedule = self.kwargs
            source = 'scheduled from code'

        schedule_msg = ', '.join([
            '{0}="{1}"'.format(key, value)
            for key, value in schedule.iteritems()
        ])
        Schedule._logger.debug('Generated schedule for {0}: {1}'.format(
            name, schedule_msg))
        try:
            return crontab(**schedule), '{0}: {1}'.format(source, schedule_msg)
        except TypeError:
            Schedule._logger.error(
                'Invalid crontab schedule specified for task name {0}. Schedule: {1}'
                .format(name, schedule_msg))
            raise
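A hedged usage sketch: the task name and crontab arguments below are illustrative; the kwargs are forwarded to celery's crontab(), as the class docstring states, and configuration under /ovs/framework/scheduling/celery can override or disable them.

# Illustrative only: the task name is hypothetical, the kwargs map onto celery.schedules.crontab.
nightly = Schedule(minute='0', hour='2')
crontab_entry, source_info = nightly.generate_schedule('ovs.generic.example_task')
# crontab_entry is None when the configuration explicitly disables the task;
# source_info tells whether the schedule came from configuration or from code.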
Example #17
class StatisticsHelper(object):
    """
    Statistics Helper class
    """
    LOGGER = Logger("helpers-ci_statistics")

    def __init__(self):
        pass

    @staticmethod
    def get_current_memory_usage(storagerouter_ip):
        """
        get resident memory usage of a certain storagerouter (through free -m)

        :param storagerouter_ip: ip address of an existing storagerouter
        :type storagerouter_ip: str
        :return: (current usage, max. total usage)
        :rtype: tuple
        """
        client = SSHClient(storagerouter_ip, username='******')
        result = client.run(
            "MEM=$(free -m | tr -s ' ' | grep Mem); "
            "echo $MEM | cut -d ' ' -f 3; echo $MEM | cut -d ' ' -f 2",
            allow_insecure=True).split()
        return int(result[0]), int(result[1])

    @staticmethod
    def get_current_memory_usage_of_process(storagerouter_ip, pid):
        """
        get resident memory usage of a certain process on a storagerouter (through /proc/<PID>/status)

        VmPeak:   8110620 kB
        VmSize:  3252752 kB
        VmLck:   0 kB
        VmPin:   0 kB
        VmHWM:   4959820 kB
        VmRSS:   570764 kB
        VmData:  3019468 kB
        VmStk:   136 kB
        VmExe:   12464 kB
        VmLib:   58852 kB
        VmPTE:   2644 kB
        VmPMD:   24 kB
        VmSwap:  394224 kB

        :param storagerouter_ip: ip address of an existing storagerouter
        :type storagerouter_ip: str
        :param pid: process ID of the process you want to monitor
        :type pid: int
        :return: current usage
        :rtype: str
        """
        client = SSHClient(storagerouter_ip, username='******')
        return client.run("grep Vm /proc/{0}/status | tr -s ' '".format(pid),
                          allow_insecure=True)
Example #18
class DiskTools(_DiskTools):
    """
    This class contains various helper methods wrt Disk maintenance
    """
    logger = Logger('extensions-generic')

    def __init__(self):
        super(DiskTools, self).__init__()

    @classmethod
    def _get_os_manager(cls):
        return OSFactory.get_manager()
Example #19
        def new_function(*args, **kwargs):
            """
            Wrapped function
            :param args: Arguments without default values
            :param kwargs: Arguments with default values
            """
            # Log the call
            if event_type == 'VOLUMEDRIVER_TASK' and 'storagedriver_id' in kwargs:
                metadata = {
                    'storagedriver':
                    StorageDriverList.get_by_storagedriver_id(
                        kwargs['storagedriver_id']).guid
                }
            else:
                metadata = {}
            _logger = Logger(event_type.lower())
            _logger.info('[{0}.{1}] - {2} - {3} - {4}'.format(
                f.__module__, f.__name__, json.dumps(list(args)),
                json.dumps(kwargs), json.dumps(metadata)))

            # Call the function
            return f(*args, **kwargs)
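The wrapper above is a fragment: event_type and f are free variables bound by an enclosing decorator. A hedged reconstruction of that outer shape (the name and structure are assumptions, modeled on the other log decorators in this collection) could look like:

from functools import wraps

def log_event(event_type):  # hypothetical name for the enclosing decorator factory
    """Binds event_type and wraps the task function with the logging shown above."""
    def wrap(f):
        @wraps(f)
        def new_function(*args, **kwargs):
            # ... body as in the example above ...
            return f(*args, **kwargs)
        return new_function
    return wrap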
Example #20
class AddRemoveBackend(CIConstants):

    CASE_TYPE = 'FUNCTIONAL'
    TEST_NAME = "ci_scenario_add_remove_backend"
    LOGGER = Logger("scenario-{0}".format(TEST_NAME))

    def __init__(self):
        pass

    @staticmethod
    @gather_results(CASE_TYPE, LOGGER, TEST_NAME, log_components=[{'framework': ['ovs-workers']}])
    def main(blocked):
        """
        Run all required methods for the test
        :param blocked: was the test blocked by other test?
        :type blocked: bool
        :return: results of test
        :rtype: dict
        """
        _ = blocked
        AddRemoveBackend.validate_add_remove_backend()
        AddRemoveBackend.validate_add_remove_backend(local=False)

    @classmethod
    def validate_add_remove_backend(cls, backend_name='integrationtests', local=True):
        """
        Validate if adding & removing a backend works

        :param backend_name: name of a new alba backend (DEFAULT=integrationtests)
        :type backend_name: str
        :param local: validate a LOCAL backend if True, a GLOBAL backend otherwise
        :type local: bool
        :return:
        """
        if local:
            backend_name += '-local'
            scaling = 'LOCAL'
        else:
            backend_name += '-global'
            scaling = 'GLOBAL'
        backend_name += '-backend'

        cls.LOGGER.info("Starting creation of backend `{0}`".format(backend_name))
        assert BackendSetup.add_backend(backend_name=backend_name, scaling=scaling), \
            "Backend `{0}` has failed to create".format(backend_name)
        cls.LOGGER.info("Finished creation of backend `{0}`".format(backend_name))
        cls.LOGGER.info("Starting removal of backend `{0}`".format(backend_name))
        assert BackendRemover.remove_backend(albabackend_name=backend_name), \
            "Backend `{0}` has failed to be removed".format(backend_name)
        cls.LOGGER.info("Finished removal of backend `{0}`".format(backend_name))
Example #21
class FwkHandler(object):
    """
    Class handling fwk actions
    """
    LOGGER = Logger('scenario_helpers-fwk_handler')

    @classmethod
    def restart(cls, storagerouters, logger=LOGGER):
        """
        Restarts a list of storagerouters
        :param storagerouters: list of storagerouters
        :param logger: logging instance
        :return: None
        """
        for storagerouter in storagerouters:
            logger.info("Restarting ovs-workers on {0}".format(
                storagerouter.ip))
            client = SSHClient(str(storagerouter.ip),
                               username='******',
                               cached=False)
            client.run(['systemctl', 'restart', 'ovs-workers.service'])

    @classmethod
    def restart_masters(cls):
        """
        Will restart ovs-workers on all master nodes
        :return: None
        """
        cls.restart([sr for sr in StorageRouterList.get_masters()])

    @classmethod
    def restart_slaves(cls):
        """
        Will restart ovs-workers on all slave nodes
        :return: None
        """
        cls.restart([sr for sr in StorageRouterList.get_slaves()])

    @classmethod
    def restart_all(cls):
        """
        Will restart ovs-workers on all nodes
        :return: None
        """
        cls.restart_masters()
        cls.restart_slaves()
Example #22
def log():
    """
    Task logger
    """
    logger = Logger('oauth2')

    def wrap(f):
        """
        Wrapper function
        """
        @wraps(f)
        def new_function(self, request, *args, **kwargs):
            """
            Wrapped function
            """
            # Log the call
            metadata = {
                'meta':
                dict((str(key), str(value))
                     for key, value in request.META.iteritems()),
                'request':
                dict((str(key), str(value))
                     for key, value in request.REQUEST.iteritems()),
                'cookies':
                dict((str(key), str(value))
                     for key, value in request.COOKIES.iteritems())
            }
            # Stripping password traces
            for mtype in metadata:
                for key in metadata[mtype]:
                    if 'password' in key:
                        metadata[mtype][key] = '**********************'
            logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__, f.__name__,
                getattr(request, 'client').user_guid if hasattr(
                    request, 'client') else None, json.dumps(list(args)),
                json.dumps(kwargs), json.dumps(metadata)))

            # Call the function
            return f(self, request, *args, **kwargs)

        return new_function

    return wrap
Example #23
class SupportAgentCache(object):
    """
    This class caches the output of support agent functions and tries to refresh it.
    Refreshing occurs when the cached entry's timestamp is older than the cache refresh time
    The cache object should look like this:
        CACHE = {'object_name': {'time': 15136942,
                                 'content': 'xyz'}}
    """
    # @TODO Think about clearing the cache when an update has been issued

    logger = Logger('extensions-support')

    CACHE_REFRESH_TIME = 60 * 5

    def __init__(self, support_agent):
        self.cache = {}
        self.support_agent = support_agent

    def try_refresh_object(self, object_name, refresh_time=CACHE_REFRESH_TIME):
        """
        :param object_name: name of the function in SupportAgent to call
        :type object_name: str
        :param refresh_time: time after which the content should be refreshed
        :type refresh_time: int
        :return: cache object[object_name]['content']
        :raises AttributeError: if 'object_name' is not a function of the SupportAgent
        """
        if object_name not in self.cache:
            self.cache[object_name] = {'time': 0, 'content': None}
        if time.time() - self.cache[object_name]['time'] >= refresh_time:
            self.cache[object_name]['content'] = getattr(
                self.support_agent, object_name)()
            self.cache[object_name]['time'] = time.time()
            self.logger.debug(
                'Refreshing caching for function {0}. Timestamp {1}'.format(
                    object_name, self.cache[object_name]['time']))
        else:
            self.logger.debug(
                'Returned cached results for function {0}. Timestamp {1}'.
                format(object_name, self.cache[object_name]['time']))
        return self.cache[object_name]['content']
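A hedged usage sketch: the stand-in agent and its method name are illustrative assumptions; only the caching behaviour follows the class above.

class _FakeSupportAgent(object):
    # Stand-in for the real support agent, purely for illustration.
    def get_heartbeat_data(self):
        return {'alive': True}

cache = SupportAgentCache(_FakeSupportAgent())
# The first call executes get_heartbeat_data(); repeats within CACHE_REFRESH_TIME
# (5 minutes) return the cached content instead.
data = cache.try_refresh_object('get_heartbeat_data')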
Example #24
class RoleChecks(CIConstants):

    CASE_TYPE = 'AT_QUICK'
    TEST_NAME = "ci_scenario_add_append_remove_roles"
    LOGGER = Logger('scenario-{0}'.format(TEST_NAME))

    def __init__(self):
        pass

    @staticmethod
    @gather_results(CASE_TYPE,
                    LOGGER,
                    TEST_NAME,
                    log_components=[{
                        'framework': ['ovs-workers']
                    }])
    def main(blocked):
        """
        Run all required methods for the test
        :param blocked: was the test blocked by other test?
        :type blocked: bool
        :return: results of test
        :rtype: dict
        """
        _ = blocked
        return RoleChecks.validate_add_append_remove_roles()

    @staticmethod
    def validate_add_append_remove_roles():
        """
        Validate adding, appending and removing roles

        You need at least 1 free partition on a storagerouter

        :return:
        """

        RoleChecks.LOGGER.info('Starting validating add-append-remove roles')
        storagerouter_ips = StoragerouterHelper.get_storagerouter_ips()
        assert len(storagerouter_ips) >= 1, "We need at least 1 storagerouter!"
Example #25
class AlbaComponentUpdater(_AlbacomponentUpdater):
    """
    Implementation of abstract class to update alba
    """
    logger = Logger('update-alba')

    @staticmethod
    def get_persistent_client():
        """
        Retrieve a persistent client
        Needs to be implemented by the callee
        """
        return PersistentFactory.get_client()

    @classmethod
    def get_node_id(cls):
        """
        use a factory to provide the machine id
        :return:
        """
        return System.get_my_machine_id()
Example #26
class VPoolRemover(CIConstants):

    LOGGER = Logger("remove-ci_vpool_remover")
    REMOVE_VPOOL_TIMEOUT = 500

    @classmethod
    def remove_vpool(cls,
                     vpool_name,
                     storagerouter_ip,
                     timeout=REMOVE_VPOOL_TIMEOUT,
                     *args,
                     **kwargs):
        """
        Removes an existing vpool from a storagerouter
        :param vpool_name: the name of an existing vpool
        :type vpool_name: str
        :param storagerouter_ip: the ip address of an existing storagerouter
        :type storagerouter_ip: str
        :param timeout: max. time to wait for a task to complete
        :type timeout: int
        :return: None
        :rtype: NoneType
        """
        vpool_guid = VPoolHelper.get_vpool_by_name(vpool_name).guid
        storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(
            storagerouter_ip).guid
        data = {"storagerouter_guid": storagerouter_guid}
        task_guid = cls.api.post(
            api='/vpools/{0}/shrink_vpool/'.format(vpool_guid), data=data)
        task_result = cls.api.wait_for_task(task_id=task_guid, timeout=timeout)

        if not task_result[0]:
            error_msg = "Deleting vPool `{0}` on storagerouter `{1}` has failed with error {2}".format(
                vpool_name, storagerouter_ip, task_result[1])
            VPoolRemover.LOGGER.error(error_msg)
            raise RuntimeError(error_msg)
        else:
            VPoolRemover.LOGGER.info(
                "Deleting vPool `{0}` on storagerouter `{1}` should have succeeded"
                .format(vpool_name, storagerouter_ip))
Example #27
class CelerySetup(object):

    LOGGER = Logger("setup-ci_celery_setup")
    SCHEDULED_TASK_CFG = "/ovs/framework/scheduling/celery"

    def __init__(self):
        pass

    @staticmethod
    def override_scheduletasks(configuration):
        """
        Override the scheduled tasks crontab with your own configuration
        :param configuration: configuration to override scheduled tasks
        :type configuration: dict
        :return:
        """
        service_name = 'ovs-watcher-framework'
        Configuration.set(CelerySetup.SCHEDULED_TASK_CFG, configuration)
        fetched_cfg = Configuration.get(CelerySetup.SCHEDULED_TASK_CFG,
                                        configuration)
        if cmp(fetched_cfg, configuration) == 0:
            # restart ovs-watcher-framework on all nodes
            for sr_ip in StoragerouterHelper.get_storagerouter_ips():
                client = SSHClient(sr_ip, username='******')
                service_manager = ServiceFactory.get_manager()
                try:
                    service_manager.restart_service(service_name, client)
                except:
                    return False
            CelerySetup.LOGGER.info(
                "Successfully restarted all `{0}` services!".format(
                    service_name))
            return True
        else:
            CelerySetup.LOGGER.warning(
                "`{0}` config is `{1}` but should be `{2}`".format(
                    CelerySetup.SCHEDULED_TASK_CFG, fetched_cfg,
                    configuration))
            return False
Example #28
class VDiskValidation(object):

    LOGGER = Logger("validate-ci_vdisk_validate")

    def __init__(self):
        pass

    @staticmethod
    @required_vdisk
    def check_required_vdisk(vdisk_name, vpool_name):
        """
        Checks if the given vdisk is present on the vpool

        :param vdisk_name: location of a vdisk on a vpool
                           (e.g. /mnt/vpool/test.raw = test.raw, /mnt/vpool/volumes/test.raw = volumes/test.raw )
        :type vdisk_name: str
        :param vpool_name: name of an existing vpool
        :type vpool_name: str
        :return:
        """

        return vdisk_name, vpool_name
Example #29
class NetworkHelper(object):
    """
    NetworkHelper class
    """
    LOGGER = Logger("helpers-ci_network_helper")

    def __init__(self):
        pass

    @staticmethod
    def validate_ip(ip):
        pattern = re.compile(r"^(?<!\S)((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\b|\.\b){7}(?!\S)$")
        if not pattern.match(ip):
            raise ValueError('Not a valid IP address')

    @staticmethod
    def get_free_port(listener_ip, logger=LOGGER):
        """
        Returns a free port
        :param listener_ip: ip to listen on
        :type listener_ip: str
        :param logger: logging instance
        :type logger: ovs.extensions.generic.logger.Logger
        :return: port number
        :rtype: int
        """
        with remote(listener_ip, [socket]) as rem:
            listening_socket = rem.socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                # Bind to first available port
                listening_socket.bind(('', 0))
                port = listening_socket.getsockname()[1]
                return port
            except socket.error as ex:
                logger.error('Could not bind the socket. Got {0}'.format(str(ex)))
                raise
            finally:
                listening_socket.close()
Example #30
class StoragedriverSetup(object):
    LOGGER = Logger('setup-ci_storagedriver_setup')

    # These will be all possible settings for the StorageDriver. Messing them up is their own responsibility (they should not bypass the API by default!!)
    STORAGEDRIVER_PARAMS = {
        "volume_manager": (dict, None, False),
        "backend_connection_manager": (dict, None, False)
    }

    @staticmethod
    def change_config(vpool_name, vpool_details, storagerouter_ip, *args,
                      **kwargs):

        # Settings volumedriver
        storagedriver_config = vpool_details.get('storagedriver')
        if storagedriver_config is not None:
            ExtensionsToolbox.verify_required_params(
                StoragedriverSetup.STORAGEDRIVER_PARAMS, storagedriver_config)
            StoragedriverSetup.LOGGER.info(
                'Updating volumedriver configuration of vPool `{0}` on storagerouter `{1}`.'
                .format(vpool_name, storagerouter_ip))
            vpool = VPoolHelper.get_vpool_by_name(vpool_name)
            storagedrivers = [
                sd for sd in vpool.storagedrivers
                if sd.storagerouter.ip == storagerouter_ip
            ]
            if not storagedrivers:
                error_msg = 'Unable to find the storagedriver of vPool {0} on storagerouter {1}'.format(
                    vpool_name, storagerouter_ip)
                raise RuntimeError(error_msg)
            storagedriver = storagedrivers[0]
            StoragedriverHelper.change_config(storagedriver,
                                              storagedriver_config)
            vpool.invalidate_dynamics('configuration')
            StoragedriverSetup.LOGGER.info(
                'Updating volumedriver config of vPool `{0}` should have succeeded on storagerouter `{1}`'
                .format(vpool_name, storagerouter_ip))