Example 1
def worker_process_init_handler(args=None, kwargs=None, **kwds):
    """
    Hook for process init
    """
    _ = args, kwargs, kwds
    VolatileFactory.store = None
    PersistentFactory.store = None
    LogHandler.get('extensions', name='ovs_extensions')  # Initialize the extensions logger
Example 2
def authenticated(force=False):
    """
    Decorator that ensures a login is performed when the current session is no longer valid
    :param force: Force a (re)login, as some methods also work when not logged in
    """
    logger = LogHandler.get('extensions', name='vmware sdk')

    def wrapper(function):
        def new_function(self, *args, **kwargs):
            self.__doc__ = function.__doc__
            try:
                if force:
                    self._login()
                return function(self, *args, **kwargs)
            except WebFault as fault:
                if 'The session is not authenticated' in str(fault):
                    logger.debug('Received WebFault authentication failure, logging in...')
                    self._login()
                    return function(self, *args, **kwargs)
                raise
            except NotAuthenticatedException:
                logger.debug('Received NotAuthenticatedException, logging in...')
                self._login()
                return function(self, *args, **kwargs)
        return new_function
    return wrapper
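
A minimal usage sketch for the decorator above (the Sdk class and its methods are hypothetical; only the _login method the decorator relies on is assumed to exist):

class Sdk(object):
    def _login(self):
        pass  # Re-establish the vSphere session here

    @authenticated()
    def list_vms(self):
        return []  # Any SOAP call; a re-login happens transparently on auth failure

    @authenticated(force=True)
    def change_session_settings(self):
        pass  # force=True always performs a (re)login before executing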
Example 3
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)
            method_args = list(args)[:]
            method_args = method_args[method_args.index(request) + 1:]

            # Log the call
            metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                        'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                        'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
            _logger = LogHandler.get('log', name='api')
            _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__,
                f.__name__,
                getattr(request, 'client').user_guid if hasattr(request, 'client') else None,
                json.dumps(method_args),
                json.dumps(kwargs),
                json.dumps(metadata)
            ))

            # Call the function
            start = time.time()
            return_value = f(*args, **kwargs)
            duration = time.time() - start
            if duration > 5 and log_slow is True:
                logger.warning('API call {0}.{1} took {2}s'.format(f.__module__, f.__name__, round(duration, 2)))
            return return_value
Example 4
 def process_exception(self, request, exception):
     """
     Logs information about the given error
     """
     _ = self, request
     logger = LogHandler.get('api', 'middleware')
     logger.exception('An unhandled exception occurred: {0}'.format(exception))
Example 5
 def process_exception(self, request, exception):
     """
     Logs information about the given error
     """
     _ = self, request
     logger = LogHandler.get('api', 'middleware')
     if OVSMiddleware.is_own_httpexception(exception):
         return HttpResponse(exception.data,
                             status=exception.status_code,
                             content_type='application/json')
     if isinstance(exception, MissingMandatoryFieldsException):
         return HttpResponse(json.dumps({'error': 'invalid_data',
                                         'error_description': exception.message}),
                             status=400,
                             content_type='application/json')
     logger.exception('An unhandled exception occurred: {0}'.format(exception))
     return HttpResponse(json.dumps({'error': 'internal_server',
                                     'error_description': exception.message}),
                         status=500,
                         content_type='application/json')
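
This method is wired up in the old-style (pre-1.10) Django middleware chain; a hedged sketch, where the module path of OVSMiddleware is an assumption for illustration:

# settings.py (sketch)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'api.middleware.OVSMiddleware',  # illustrative import path
)

Django invokes process_exception in reverse order of this tuple, so a middleware listed last gets the first chance to turn an exception into a response.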
Example 6
def log_slow_calls(f):
    """
    Wrapper to log the duration when a call takes > 1s
    :param f: Function to wrap
    :return: Wrapped function
    """
    logger = LogHandler.get('extensions', name='etcdconfiguration')

    def new_function(*args, **kwargs):
        """
        Execute function
        :return: Function output
        """
        start = time.time()
        try:
            return f(*args, **kwargs)
        finally:
            key_info = ''
            if 'key' in kwargs:
                key_info = ' (key: {0})'.format(kwargs['key'])
            elif len(args) > 0:
                key_info = ' (key: {0})'.format(args[0])
            duration = time.time() - start
            if duration > 1:
                logger.warning('Call to {0}{1} took {2}s'.format(f.__name__, key_info, duration))
    new_function.__name__ = f.__name__
    new_function.__module__ = f.__module__
    return new_function
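
A quick sketch of the decorator in use; the decorated function is hypothetical, while the timing and key extraction come from the wrapper above:

import time

@log_slow_calls
def get(key):
    time.sleep(1.5)  # Simulate a slow backend read
    return 'value-for-{0}'.format(key)

get(key='/ovs/framework/cluster_id')  # Logs a warning including the key, since 1.5s > 1s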
Example 7
def relay(*args, **kwargs):
    """
    Relays any call to another node.
    Example:
    * A user wants to execute an HTTP GET on /api/storagerouters/
    ** /api/<call>
    * They then execute an HTTP GET on /api/relay/<call>
    ** This translates to /api/relay/storagerouters/
    Parameters:
    * Mandatory: ip, port, client_id, client_secret
    * All other parameters will be passed through to the specified node
    """
    @authenticated()
    @required_roles(['read'])
    @load()
    def _relay(_, ip, port, client_id, client_secret, raw_version, request):
        path = '/{0}'.format(request.path.replace('/api/relay/', ''))
        method = request.META['REQUEST_METHOD'].lower()
        client = OVSClient(ip,
                           port,
                           credentials=(client_id, client_secret),
                           version=raw_version,
                           raw_response=True)
        if not hasattr(client, method):
            raise HttpBadRequestException(
                error='unavailable_call',
                error_description='Method not available in relay')
        client_kwargs = {'params': request.GET}
        if method != 'get':
            client_kwargs['data'] = request.POST
        call_response = getattr(client, method)(path, **client_kwargs)
        response = HttpResponse(call_response.text,
                                content_type='application/json',
                                status=call_response.status_code)
        for header, value in call_response.headers.iteritems():
            response[header] = value
        response['OVS-Relay'] = '{0}:{1}'.format(ip, port)
        return response

    try:
        return _relay(*args, **kwargs)
    except Exception as ex:
        if OVSMiddleware.is_own_httpexception(ex):
            return HttpResponse(ex.data,
                                status=ex.status_code,
                                content_type='application/json')
        message = str(ex)
        status_code = 400
        if hasattr(ex, 'detail'):
            message = ex.detail
        if hasattr(ex, 'status_code'):
            status_code = ex.status_code
        logger = LogHandler.get('api', name='metadata')
        logger.exception('Error relaying call: {0}'.format(message))
        return HttpResponse(json.dumps({
            'error_description': message,
            'error': 'relay_error'
        }),
                            content_type='application/json',
                            status=status_code)
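
Calling the relay could then look as follows; a hedged sketch using the requests library, where host, credentials and TLS settings are placeholders:

import requests

# The four mandatory parameters configure the OVSClient; everything else,
# including the path after /api/relay/, is passed through to the remote node.
response = requests.get('https://local-node/api/relay/storagerouters/',
                        params={'ip': '10.0.0.2',
                                'port': 443,
                                'client_id': 'abc',
                                'client_secret': 'def'},
                        verify=False)
relayed_by = response.headers.get('OVS-Relay')  # e.g. '10.0.0.2:443'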
Example 8
        def new_function(self, request, *args, **kwargs):
            """
            Wrapped function
            """
            # Log the call
            metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                        'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                        'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
            # Stripping password traces
            for mtype in metadata:
                for key in metadata[mtype]:
                    if 'password' in key:
                        metadata[mtype][key] = '**********************'
            _logger = LogHandler.get('log', name='api')
            _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__, f.__name__,
                getattr(request, 'client').user_guid if hasattr(
                    request, 'client') else None, json.dumps(list(args)),
                json.dumps(kwargs), json.dumps(metadata)))

            # Call the function
            return f(self, request, *args, **kwargs)
Example 10
    def pulse():
        """
        Update the heartbeat of the current StorageRouter and check the heartbeats of the other nodes
        :return: None
        """
        logger = LogHandler.get('extensions', name='heartbeat')
        machine_id = System.get_my_machine_id()
        current_time = int(time.time())

        routers = StorageRouterList.get_storagerouters()
        for node in routers:
            if node.machine_id == machine_id:
                with volatile_mutex('storagerouter_heartbeat_{0}'.format(node.guid)):
                    node_save = StorageRouter(node.guid)
                    node_save.heartbeats['process'] = current_time
                    node_save.save()
                StorageRouterController.ping.s(node.guid, current_time).apply_async(routing_key='sr.{0}'.format(machine_id))
            else:
                try:
                    # check timeout of other nodes and clear arp cache
                    if node.heartbeats and 'process' in node.heartbeats:
                        if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                            check_output("/usr/sbin/arp -d '{0}'".format(node.name.replace(r"'", r"'\''")), shell=True)
                except CalledProcessError:
                    logger.exception('Error clearing ARP cache')
Example 11
class OSManager(object):
    """
    Factory class returning specialized classes
    """
    ImplementationClass = None
    _logger = LogHandler.get('extensions', name='osmanager')

    class MetaClass(type):
        """
        Metaclass
        """
        def __getattr__(cls, item):
            """
            Returns the appropriate class
            """
            _ = cls
            if OSManager.ImplementationClass is None:
                try:
                    dist_info = check_output('cat /etc/os-release', shell=True)
                    # All OS distribution classes used in below code should share the exact same interface!
                    if 'Ubuntu' in dist_info:
                        OSManager.ImplementationClass = Ubuntu
                    elif 'CentOS Linux' in dist_info:
                        OSManager.ImplementationClass = Centos
                    else:
                        raise RuntimeError(
                            'There was no known OSManager detected')
                except Exception as ex:
                    OSManager._logger.exception(
                        'Error loading OSManager: {0}'.format(ex))
                    raise
            return getattr(OSManager.ImplementationClass, item)

    __metaclass__ = MetaClass
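
Because attribute access is intercepted at the metaclass level, callers use OSManager directly and distribution detection happens lazily on first use, as in this call taken from another example below:

# The first attribute access runs the /etc/os-release detection once, then
# every lookup is proxied to the Ubuntu or Centos implementation class.
package_path = OSManager.get_openstack_package_base_path()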
Example 12
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)
            method_args = list(args)[:]
            method_args = method_args[method_args.index(request) + 1:]

            # Log the call
            metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                        'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                        'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
            _logger = LogHandler.get('log', name='api')
            _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__, f.__name__,
                getattr(request, 'client').user_guid if hasattr(
                    request, 'client') else None, json.dumps(method_args),
                json.dumps(kwargs), json.dumps(metadata)))

            # Call the function
            start = time.time()
            return_value = f(*args, **kwargs)
            duration = time.time() - start
            if duration > 5 and log_slow is True:
                logger.warning('API call {0}.{1} took {2}s'.format(
                    f.__module__, f.__name__, round(duration, 2)))
            return return_value
Example 13
def log_slow_calls(f):
    """
    Wrapper to log the duration when a call takes > 1s
    :param f: Function to wrap
    :return: Wrapped function
    """
    logger = LogHandler.get("extensions", name="etcdconfiguration")

    def new_function(*args, **kwargs):
        """
        Execute function
        :return: Function output
        """
        start = time.time()
        try:
            return f(*args, **kwargs)
        finally:
            key_info = ""
            if "key" in kwargs:
                key_info = " (key: {0})".format(kwargs["key"])
            elif len(args) > 0:
                key_info = " (key: {0})".format(args[0])
            duration = time.time() - start
            if duration > 1:
                logger.warning("Call to {0}{1} took {2}s".format(f.__name__, key_info, duration))

    new_function.__name__ = f.__name__
    new_function.__module__ = f.__module__
    return new_function
Example 14
def _clean_cache():
    loghandler = LogHandler.get('celery', name='celery')
    loghandler.info('Executing celery "clear_cache" startup script...')
    from ovs.lib.helpers.decorators import ENSURE_SINGLE_KEY
    active = inspect().active()
    active_tasks = []
    if active is not None:
        for tasks in active.itervalues():
            active_tasks += [task['id'] for task in tasks]
    cache = PersistentFactory.get_client()
    for key in cache.prefix(ENSURE_SINGLE_KEY):
        try:
            with volatile_mutex(name=key, wait=5):
                entry = cache.get(key)
                values = entry.get('values', [])
                new_values = []
                for v in values:
                    task_id = v.get('task_id')
                    if task_id is not None and task_id in active_tasks:
                        new_values.append(v)
                if len(new_values) > 0:
                    entry['values'] = new_values
                    cache.set(key, entry)
                    loghandler.info('Updated key {0}'.format(key))
                else:
                    cache.delete(key)
                    loghandler.info('Deleted key {0}'.format(key))
        except KeyNotFoundException:
            pass
    loghandler.info('Executing celery "clear_cache" startup script... done')
Example 15
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = LogHandler.get('api', 'oauth2')

    def wrap(f):
        """
        Wrapper function
        """
        @wraps(f)
        def new_function(self, request, *args, **kwargs):
            """
            Wrapped function
            """
            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
            client = VolatileFactory.get_client()
            mutex = volatile_mutex(key)
            try:
                mutex.acquire()
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning(
                            'Call {0} is being throttled with a wait of {1}'.
                            format(key, active_timeout - now))
                        raise HttpTooManyRequestsException(
                            error='rate_limit_timeout',
                            error_description=
                            'Rate limit timeout ({0}s remaining)'.format(
                                round(active_timeout - now, 2)))
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [
                    call for call in rate_info['calls'] if call > (now - per)
                ] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning(
                        'Call {0} is being throttled with a wait of {1}'.
                        format(key, timeout))
                    raise HttpTooManyRequestsException(
                        error='rate_limit_reached',
                        error_description=
                        'Rate limit reached ({0} in last {1}s)'.format(
                            calls, per))
                client.set(key, rate_info)
            finally:
                mutex.release()
            return f(self, request, *args, **kwargs)

        return new_function

    return wrap
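
A hedged usage sketch, with a hypothetical view class and handler: at most 5 calls per 60 seconds from one source IP, and offenders are locked out for 300 seconds.

class AuthView(object):
    @limit(amount=5, per=60, timeout=300)
    def post(self, request, *args, **kwargs):
        return handle_token_request(request)  # hypothetical handler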
Example 16
 def __init__(self, nodes):
     """
     Initializes the client
     """
     self._logger = LogHandler.get('extensions', 'memcache store')
     self._nodes = nodes
     self._client = memcache.Client(self._nodes, cache_cas=True, socket_timeout=0.5)
     self._lock = Lock()
     self._validate = True
Example 17
class SupportAgent(object):
    """
    Represents the Support client
    """
    _logger = LogHandler.get('support', name='agent')

    def __init__(self):
        """
        Initializes the client
        """
        self._enable_support = Configuration.get('/ovs/framework/support|enablesupport')
        self.interval = Configuration.get('/ovs/framework/support|interval')
        self._url = 'https://monitoring.openvstorage.com/api/support/heartbeat/'
        init_info = check_output('cat /proc/1/comm', shell=True)
        # All service classes used in below code should share the exact same interface!
        if 'init' in init_info:
            version_info = check_output('init --version', shell=True)
            if 'upstart' in version_info:
                self.servicemanager = 'upstart'
            else:
                raise RuntimeError('There was no known service manager detected in /proc/1/comm')
        elif 'systemd' in init_info:
            self.servicemanager = 'systemd'
        else:
            raise RuntimeError('There was no known service manager detected in /proc/1/comm')

    def get_heartbeat_data(self):
        """
        Returns heartbeat data
        """
        data = {'cid': Configuration.get('/ovs/framework/cluster_id'),
                'nid': System.get_my_machine_id(),
                'metadata': {},
                'errors': []}

        try:
            # Versions
            manager = PackageFactory.get_manager()
            data['metadata']['versions'] = manager.get_installed_versions()  # Fallback to check_output
        except Exception as ex:
            data['errors'].append(str(ex))
        try:
            if self.servicemanager == 'upstart':
                services = check_output('initctl list | grep ovs-', shell=True).strip().splitlines()
            else:
                services = check_output('systemctl -l | grep ovs- | tr -s " "', shell=True).strip().splitlines()
            # Service status
            service_data = {}
            for service in services:
                split = service.strip().split(' ')
                split = [part.strip() for part in split if part.strip()]
                while split and not split[0].strip().startswith('ovs-'):
                    split.pop(0)
                service_data[split[0]] = ' '.join(split[1:])
            data['metadata']['services'] = service_data
        except Exception as ex:
            data['errors'].append(str(ex))
Example 18
 def manage_running_tasks(tasklist, timesleep=10):
     """
     Manage a list of running celery tasks
     - discard PENDING tasks after a certain timeout
     - validate RUNNING tasks are actually running
     :param tasklist: Dictionary of tasks to wait {IP address: AsyncResult}
     :type tasklist: dict
     :param timesleep: Sleep between checks; for long-running tasks it's better to
       sleep for a longer period of time to reduce the number of SSH calls
     :type timesleep: int
     :return: results
     :rtype: dict
     """
     logger = LogHandler.get('lib', name='celery toolbox')
     ssh_clients = {}
     tasks_pending = {}
     tasks_pending_timeout = 1800  # 30 minutes
     results = {}
     failed_nodes = []
     while len(tasklist.keys()) > 0:
         for ip, task in tasklist.items():
             if task.state in ('SUCCESS', 'FAILURE'):
                 logger.info('Task {0} finished: {1}'.format(task.id, task.state))
                 results[ip] = task.get(propagate=False)
                 del tasklist[ip]
             elif task.state == 'PENDING':
                 if task.id not in tasks_pending:
                     tasks_pending[task.id] = time.time()
                 else:
                     task_pending_since = tasks_pending[task.id]
                     if time.time() - task_pending_since > tasks_pending_timeout:
                         logger.warning('Task {0} is pending since {1} on node {2}. Task will be revoked'.format(task.id, datetime.datetime.fromtimestamp(task_pending_since), ip))
                         revoke(task.id)
                         del tasklist[ip]
                         del tasks_pending[task.id]
                         failed_nodes.append(ip)
             elif task.state == 'STARTED':
                 if ip not in ssh_clients:
                     ssh_clients[ip] = SSHClient(ip, username='******')
                 client = ssh_clients[ip]
                 if ServiceManager.get_service_status('workers', client) is False:
                     logger.error('Service ovs-workers on node {0} appears halted while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                     revoke(task.id)
                     del tasklist[ip]
                     failed_nodes.append(ip)
                 else:
                     ping_result = task.app.control.inspect().ping()
                     storage_router = StorageRouterList.get_by_ip(ip)
                     if "celery@{0}".format(storage_router.name) not in ping_result:
                         logger.error('Service ovs-workers on node {0} is not reachable via rabbitmq while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                         revoke(task.id)
                         del tasklist[ip]
                         failed_nodes.append(ip)
         if len(tasklist.keys()) > 0:
             time.sleep(timesleep)
     return results, failed_nodes
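
A usage sketch under stated assumptions (SomeController.sometask is a hypothetical celery task; the helper only requires the {IP: AsyncResult} shape documented above):

tasklist = {}
for storagerouter in StorageRouterList.get_storagerouters():
    tasklist[storagerouter.ip] = SomeController.sometask.delay(storagerouter.guid)
# Blocks until every task finished, timed out as PENDING, or had its node marked as failed
results, failed_nodes = manage_running_tasks(tasklist, timesleep=30)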
Example 19
def task_postrun_handler(sender=None, task_id=None, task=None, args=None, kwargs=None, **kwds):
    """
    Hook for celery postrun event
    """
    _ = sender, task, args, kwargs, kwds
    try:
        MessageController.fire(MessageController.Type.TASK_COMPLETE, task_id)
    except Exception as ex:
        loghandler = LogHandler.get('celery', name='celery')
        loghandler.error('Caught error during postrun handler: {0}'.format(ex))
Example 20
 def __init__(self, name, wait=None):
     """
     Creates a volatile mutex object
     """
     self._logger = LogHandler.get('extensions', 'volatile mutex')
     self._volatile = VolatileFactory.get_client()
     self.name = name
     self._has_lock = False
     self._start = 0
     self._wait = wait
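
As the heartbeat and rate-limit examples in this list show, the mutex is typically used as a context manager; node_guid and update_heartbeat are placeholders here:

with volatile_mutex('storagerouter_heartbeat_{0}'.format(node_guid), wait=5):
    update_heartbeat()  # critical section, executed while holding the lock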
Example 22
def relay(*args, **kwargs):
    """
    Relays any call to another node.
    Example:
    * A user wants to execute an HTTP GET on /api/storagerouters/
    ** /api/<call>
    * They then execute an HTTP GET on /api/relay/<call>
    ** This translates to /api/relay/storagerouters/
    Parameters:
    * Mandatory: ip, port, client_id, client_secret
    * All other parameters will be passed through to the specified node
    """

    @authenticated()
    @required_roles(['read'])
    @load()
    def _relay(_, ip, port, client_id, client_secret, raw_version, request):
        path = '/{0}'.format(request.path.replace('/api/relay/', ''))
        method = request.META['REQUEST_METHOD'].lower()
        client = OVSClient(ip, port, credentials=(client_id, client_secret), version=raw_version, raw_response=True)
        if not hasattr(client, method):
            raise HttpBadRequestException(error='unavailable_call',
                                          error_description='Method not available in relay')
        client_kwargs = {'params': request.GET}
        if method != 'get':
            client_kwargs['data'] = request.POST
        call_response = getattr(client, method)(path, **client_kwargs)
        response = HttpResponse(call_response.text,
                                content_type='application/json',
                                status=call_response.status_code)
        for header, value in call_response.headers.iteritems():
            response[header] = value
        response['OVS-Relay'] = '{0}:{1}'.format(ip, port)
        return response

    try:
        return _relay(*args, **kwargs)
    except Exception as ex:
        if OVSMiddleware.is_own_httpexception(ex):
            return HttpResponse(ex.data,
                                status=ex.status_code,
                                content_type='application/json')
        message = str(ex)
        status_code = 400
        if hasattr(ex, 'detail'):
            message = ex.detail
        if hasattr(ex, 'status_code'):
            status_code = ex.status_code
        logger = LogHandler.get('api', name='metadata')
        logger.exception('Error relaying call: {0}'.format(message))
        return HttpResponse(json.dumps({'error_description': message,
                                        'error': 'relay_error'}),
                            content_type='application/json',
                            status=status_code)
Example 23
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._logger = LogHandler.get('celery', name='celery beat')
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = volatile_mutex('celery_beat', 10)
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     self._logger.debug('DS init')
Example 24
def _log(task, kwargs, storagedriver_id):
    """
    Log an event
    """
    metadata = {'storagedriver': StorageDriverList.get_by_storagedriver_id(storagedriver_id).guid}
    _logger = LogHandler.get('log', name='volumedriver_event')
    _logger.info('[{0}.{1}] - {2} - {3}'.format(
        task.__class__.__module__,
        task.__class__.__name__,
        json.dumps(kwargs),
        json.dumps(metadata)
    ))
Example 25
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._logger = LogHandler.get('celery', name='celery beat')
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = volatile_mutex('celery_beat', 10)
     self._schedule_info = {}
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     self._logger.debug('DS init')
Example 26
def _log(task, kwargs, storagedriver_id):
    """
    Log an event
    """
    metadata = {'storagedriver': StorageDriverList.get_by_storagedriver_id(storagedriver_id).guid}
    _logger = LogHandler.get('log', name='volumedriver_event')
    _logger.info('[{0}.{1}] - {2} - {3}'.format(task.__class__.__module__,
                                                task.__class__.__name__,
                                                json.dumps(kwargs),
                                                json.dumps(metadata)))
Example 27
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = LogHandler.get('api')

    def wrap(f):
        """
        Wrapper function
        """
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)

            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning(
                            'Call {0} is being throttled with a wait of {1}'.
                            format(key, active_timeout - now))
                        raise Throttled(wait=active_timeout - now)
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [
                    call for call in rate_info['calls'] if call > (now - per)
                ] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning(
                        'Call {0} is being throttled with a wait of {1}'.
                        format(key, timeout))
                    raise Throttled(wait=timeout)
                client.set(key, rate_info)
            return f(*args, **kwargs)

        new_function.__name__ = f.__name__
        new_function.__module__ = f.__module__
        return new_function

    return wrap
Example 29
 def __init__(self):
     self._logger = LogHandler.get('extensions', name='exportfs')
     self._exports_file = '/etc/exports'
     self._cmd = ['/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs']
     self._restart = [
         '/usr/bin/sudo', '-u', 'root', '/usr/sbin/exportfs', '-ra'
     ]
     self._rpcmountd_stop = [
         '/usr/bin/sudo', '-u', 'root', 'pkill', 'rpc.mountd'
     ]
     self._rpcmountd_start = [
         '/usr/bin/sudo', '-u', 'root', '/usr/sbin/rpc.mountd',
         '--manage-gids'
     ]
Example 30
    def __init__(self, host, login, passwd):
        """
        Initializes the SDK
        """
        self._logger = LogHandler.get('extensions', name='vmware sdk')
        self._host = host
        self._username = login
        self._password = passwd
        self._sessionID = None
        self._check_session = True

        self._cache = ObjectCache()
        self._cache.setduration(weeks=1)

        self._client = Client('https://{0}/sdk/vimService?wsdl'.format(host),
                              cache=self._cache,
                              cachingpolicy=1)
        self._client.set_options(location='https://{0}/sdk'.format(host),
                                 plugins=[ValueExtender()])

        service_reference = self._build_property('ServiceInstance')
        self._serviceContent = self._client.service.RetrieveServiceContent(
            service_reference
        )

        # In case of an ESXi host, this would be 'HostAgent'
        self.is_vcenter = self._serviceContent.about.apiType == 'VirtualCenter'
        if not self.is_vcenter:
            self._login()
            self._esxHost = self._get_object(
                self._serviceContent.rootFolder,
                prop_type='HostSystem',
                traversal={'name': 'FolderTraversalSpec',
                           'type': 'Folder',
                           'path': 'childEntity',
                           'traversal': {'name': 'DatacenterTraversalSpec',
                                         'type': 'Datacenter',
                                         'path': 'hostFolder',
                                         'traversal': {'name': 'DFolderTraversalSpec',
                                                       'type': 'Folder',
                                                       'path': 'childEntity',
                                                       'traversal': {'name': 'ComputeResourceTravelSpec',
                                                                     'type': 'ComputeResource',
                                                                     'path': 'host'}}}},
                properties=['name']
            ).obj_identifier
        else:
            # @TODO: We need to extend all calls to specify the ESXi host where the action needs to be executed.
            # We cannot just assume an ESXi host here, as this is important for certain calls like creating a VM.
            self._esxHost = None
Example 31
class LicenseController(object):
    """
    Validates licenses
    """
    _logger = LogHandler.get('lib', name='license')

    @staticmethod
    @celery.task(name='ovs.license.validate')
    def validate(license_string):
        """
        Validates a license with the various components
        """
        try:
            result = {}
            data = LicenseController._decode(license_string)
            for component in data:
                cdata = data[component]
                name = cdata['name']
                component_data = cdata['data']
                _ = cdata['token']
                valid_until = float(cdata['valid_until']) if 'valid_until' in cdata else None
                if valid_until is not None and valid_until <= time.time():
                    result[component] = False
                    continue
                signature = cdata['signature'] if 'signature' in cdata else None
                validate_functions = Toolbox.fetch_hooks('license', '{0}.validate'.format(component))
                apply_functions = Toolbox.fetch_hooks('license', '{0}.apply'.format(component))
                if len(validate_functions) == 1 and len(apply_functions) == 1:
                    try:
                        valid, metadata = validate_functions[0](component=component, data=component_data, signature=signature)
                    except Exception as ex:
                        LicenseController._logger.debug('Error validating license for {0}: {1}'.format(component, ex))
                        valid = False
                        metadata = None
                    if valid is False:
                        LicenseController._logger.debug('Invalid license for {0}: {1}'.format(component, license_string))
                        result[component] = False
                    else:
                        result[component] = {'valid_until': valid_until,
                                             'metadata': metadata,
                                             'name': name}
                else:
                    LicenseController._logger.debug('No validate nor apply functions found for {0}'.format(component))
                    result[component] = False
            return result
        except Exception as ex:
            LicenseController._logger.exception('Error validating license: {0}'.format(ex))
            raise
Example 32
def limit(amount, per, timeout):
    """
    Rate-limits the decorated call
    """
    logger = LogHandler.get('api')

    def wrap(f):
        """
        Wrapper function
        """

        @wraps(f)
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)

            now = time.time()
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [],
                                             'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        raise HttpTooManyRequestsException(error='rate_limit_timeout',
                                                           error_description='Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2)))
                    else:
                        rate_info['timeout'] = None
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    raise HttpTooManyRequestsException(error='rate_limit_reached',
                                                       error_description='Rate limit reached ({0} in last {1}s)'.format(calls, per))
                client.set(key, rate_info)
            return f(*args, **kwargs)

        return new_function
    return wrap
Example 33
 def __init__(self, name, wait=None):
     """
     Creates a file mutex object
     """
     self._logger = LogHandler.get('extensions', 'file mutex')
     self.name = name
     self._has_lock = False
     self._start = 0
     self._handle = open(self.key(), 'w')
     self._wait = wait
     try:
         os.chmod(
             self.key(), stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP
             | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH)
     except OSError:
         pass
Example 34
    def __init__(self, host='127.0.0.1', login='******'):
        self._logger = LogHandler.get('extensions', name='kvm sdk')
        self._logger.debug('Init libvirt')
        self.states = {libvirt.VIR_DOMAIN_NOSTATE: 'NO STATE',
                       libvirt.VIR_DOMAIN_RUNNING: 'RUNNING',
                       libvirt.VIR_DOMAIN_BLOCKED: 'BLOCKED',
                       libvirt.VIR_DOMAIN_PAUSED: 'PAUSED',
                       libvirt.VIR_DOMAIN_SHUTDOWN: 'SHUTDOWN',
                       libvirt.VIR_DOMAIN_SHUTOFF: 'TURNEDOFF',
                       libvirt.VIR_DOMAIN_CRASHED: 'CRASHED'}

        self.libvirt = libvirt
        self.host = host
        self.login = login
        self._conn = None
        self.ssh_client = SSHClient(self.host, username='******')
        self._logger.debug('Init complete')
Example 35
class MetadataServerClient(object):
    """
    Builds a MDSClient
    """
    _logger = LogHandler.get('extensions', name='storagedriver')
    storagerouterclient.Logger.setupLogging(
        LogHandler.load_path('storagerouterclient'))
    # noinspection PyArgumentList
    storagerouterclient.Logger.enableLogging()

    MDS_ROLE = type('MDSRole', (), {
        'MASTER': Role.Master,
        'SLAVE': Role.Slave
    })

    def __init__(self):
        """
        Dummy init method
        """
        pass

    @staticmethod
    def load(service):
        """
        Loads a MDSClient
        :param service: Service for which the MDSClient needs to be loaded
        """
        if service.storagerouter is None:
            raise ValueError(
                'MDS service {0} does not have a Storage Router linked to it'.
                format(service.name))

        key = service.guid
        if key not in mdsclient_service_cache:
            try:
                # noinspection PyArgumentList
                client = MDSClient(
                    MDSNodeConfig(address=str(service.storagerouter.ip),
                                  port=service.ports[0]))
                mdsclient_service_cache[key] = client
            except RuntimeError as ex:
                MetadataServerClient._logger.error(
                    'Error loading MDSClient on {0}: {1}'.format(
                        service.storagerouter.ip, ex))
                return None
        return mdsclient_service_cache[key]
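
A hedged sketch of loading the cached client (service is assumed to be a DAL Service object with a linked StorageRouter):

client = MetadataServerClient.load(service)
if client is None:  # load() returns None when the MDSClient could not be built
    raise RuntimeError('No MDSClient available for {0}'.format(service.name))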
Example 37
        def new_function(*args, **kwargs):
            """
            Wrapped function
            """
            request = _find_request(args)
            logging_start = time.time()

            method_args = list(args)[:]
            method_args = method_args[method_args.index(request) + 1:]

            # Log the call
            metadata = {'meta': dict((str(key), str(value)) for key, value in request.META.iteritems()),
                        'request': dict((str(key), str(value)) for key, value in request.REQUEST.iteritems()),
                        'cookies': dict((str(key), str(value)) for key, value in request.COOKIES.iteritems())}
            # Stripping password traces
            for mtype in metadata:
                for key in metadata[mtype]:
                    if 'password' in key:
                        metadata[mtype][key] = '**********************'
            _logger = LogHandler.get('log', name='api')
            _logger.info('[{0}.{1}] - {2} - {3} - {4} - {5}'.format(
                f.__module__, f.__name__,
                getattr(request, 'client').user_guid if hasattr(
                    request, 'client') else None, json.dumps(method_args),
                json.dumps(kwargs), json.dumps(metadata)))
            logging_duration = time.time() - logging_start

            # Call the function
            start = time.time()
            return_value = f(*args, **kwargs)
            duration = time.time() - start
            if duration > 5 and log_slow is True:
                logger.warning('API call {0}.{1} took {2}s'.format(
                    f.__module__, f.__name__, round(duration, 2)))
            if isinstance(return_value, OVSResponse):
                return_value.timings['logging'] = [logging_duration, 'Logging']
            return return_value
Example 38
    def __init__(self, config_type, vpool_guid, storagedriver_id):
        """
        Initializes the class
        """
        if config_type != 'storagedriver':
            raise RuntimeError('Invalid configuration type. Allowed: storagedriver')

        storagerouterclient.Logger.setupLogging(LogHandler.load_path('storagerouterclient'))
        # noinspection PyArgumentList
        storagerouterclient.Logger.enableLogging()

        self._logger = LogHandler.get('extensions', name='storagedriver')
        self.config_type = config_type
        self.configuration = {}
        self.key = '/ovs/vpools/{0}/hosts/{1}/config'.format(vpool_guid, storagedriver_id)
        self.remote_path = Configuration.get_configuration_path(self.key).strip('/')
        self.is_new = True
        self.dirty_entries = []
Example 39
class ServiceManager(object):
    """
    Factory class returning specialized classes
    """
    _logger = LogHandler.get('extensions', name='service-manager')
    ImplementationClass = None

    class MetaClass(type):
        """
        Metaclass
        """

        def __getattr__(cls, item):
            """
            Returns the appropriate class
            """
            _ = cls
            if ServiceManager.ImplementationClass is None:
                if os.environ.get('RUNNING_UNITTESTS') == 'True':
                    ServiceManager.ImplementationClass = Upstart
                else:
                    try:
                        init_info = check_output('cat /proc/1/comm', shell=True)
                        # All service classes used in below code should share the exact same interface!
                        if 'init' in init_info:
                            version_info = check_output('init --version', shell=True)
                            if 'upstart' in version_info:
                                ServiceManager.ImplementationClass = Upstart
                            else:
                                raise RuntimeError('The ServiceManager is unrecognizable')
                        elif 'systemd' in init_info:
                            ServiceManager.ImplementationClass = Systemd
                        else:
                            raise RuntimeError('There was no known ServiceManager detected')
                    except Exception as ex:
                        ServiceManager._logger.exception('Error loading ServiceManager: {0}'.format(ex))
                        raise
            return getattr(ServiceManager.ImplementationClass, item)

    __metaclass__ = MetaClass

    @staticmethod
    def reload():
        ServiceManager.ImplementationClass = None
Example 40
    def __init__(self, cinder_client):
        self._logger = LogHandler.get('extensions', name='openstack_mgmt')
        self.client = SSHClient('127.0.0.1', username='******')
        self.cinder_client = cinder_client

        self._NOVA_CONF = '/etc/nova/nova.conf'
        self._CINDER_CONF = '/etc/cinder/cinder.conf'
        self._is_openstack = ServiceManager.has_service(
            OSManager.get_openstack_cinder_service_name(), self.client)
        self._nova_installed = self.client.file_exists(self._NOVA_CONF)
        self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
        self._driver_location = OSManager.get_openstack_package_base_path()
        self._openstack_users = OSManager.get_openstack_users()
        self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'

        try:
            self._is_devstack = 'stack' in str(
                self.client.run(
                    'ps aux | grep SCREEN | grep stack | grep -v grep || true')
            )
        except SystemExit:  # ssh client raises system exit 1
            self._is_devstack = False
        except Exception:
            self._is_devstack = False

        try:
            from cinder import version
            version_string = version.version_string()
            if version_string.startswith('9.0'):
                self._stack_version = 'newton'
            elif version_string.startswith('8.0'):
                self._stack_version = 'mitaka'
            elif version_string.startswith(
                    '2015.2') or version_string.startswith('7.0'):
                self._stack_version = 'liberty'
            elif version_string.startswith('2015.1'):
                self._stack_version = 'kilo'
            elif version_string.startswith('2014.2'):
                self._stack_version = 'juno'
            else:
                raise ValueError(
                    'Unsupported cinder version: {0}'.format(version_string))
        except Exception as ex:
            raise ValueError('Cannot determine cinder version: {0}'.format(ex))
Example 41
 def __init__(self, name, wait=None):
     """
     Creates a file mutex object
     """
     self._logger = LogHandler.get('extensions', 'file mutex')
     self.name = name
     self._has_lock = False
     self._start = 0
     self._handle = open(self.key(), 'w')
     self._wait = wait
     try:
         os.chmod(
             self.key(),
             stat.S_IRUSR | stat.S_IWUSR |
             stat.S_IRGRP | stat.S_IWGRP |
             stat.S_IROTH | stat.S_IWOTH
         )
     except OSError:
         pass
Example 42
class Schedule(object):
    """
    This decorator adds a schedule to a function. All arguments are those of celery's "crontab" class
    """
    _logger = LogHandler.get('lib', name='scheduler')

    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def generate_schedule(self, name):
        """
        Generate a schedule
        :param name: Name to generate
        :return: Crontab and additional information about scheduling
        """
        try:
            schedules = Configuration.get('/ovs/framework/scheduling/celery',
                                          default={})
        except Exception as ex:
            Schedule._logger.error(
                'Error loading celery scheduling configuration for {0}: {1}'.
                format(name, ex))
            schedules = {}
        if name in schedules:
            schedule = schedules[name]
            if schedule is None:
                return None, 'disabled by configuration'
            source = 'scheduled from configuration'
        else:
            schedule = self.kwargs
            source = 'scheduled from code'
        try:
            return crontab(**schedule), '{0}: {1}'.format(
                source, ', '.join([
                    '{0}="{1}"'.format(key, value)
                    for key, value in schedule.iteritems()
                ]))
        except Exception as ex:
            Schedule._logger.error(
                'Could not generate crontab for {0} with data {1} {2}: {3}'.
                format(name, schedule, source, ex))
            return None, 'error in configuration'
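
A usage sketch with an illustrative task name: the kwargs mirror celery's crontab arguments, and the key /ovs/framework/scheduling/celery can override or disable the schedule per task.

schedule = Schedule(minute='0', hour='*/6')
crontab_entry, source_info = schedule.generate_schedule('ovs.generic.mytask')
if crontab_entry is None:
    pass  # Disabled by configuration, or the configuration contained an error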
Example 43
        def new_function(*args, **kwargs):
            """
            Wrapped function
            :param args: Positional arguments
            :param kwargs: Keyword arguments
            """
            # Log the call
            if event_type == 'VOLUMEDRIVER_TASK' and 'storagedriver_id' in kwargs:
                metadata = {'storagedriver': StorageDriverList.get_by_storagedriver_id(kwargs['storagedriver_id']).guid}
            else:
                metadata = {}
            _logger = LogHandler.get('log', name=event_type.lower())
            _logger.info('[{0}.{1}] - {2} - {3} - {4}'.format(
                function.__module__,
                function.__name__,
                json.dumps(list(args)),
                json.dumps(kwargs),
                json.dumps(metadata)
            ))

            # Call the function
            return function(*args, **kwargs)
Example 44
 def process_exception(self, request, exception):
     """
     Logs information about the given error
     """
     _ = self, request
     logger = LogHandler.get('api', 'middleware')
     if OVSMiddleware.is_own_httpexception(exception):
         return HttpResponse(exception.data,
                             status=exception.status_code,
                             content_type='application/json')
     if isinstance(exception, MissingMandatoryFieldsException):
         return HttpResponse(json.dumps({'error': 'invalid_data',
                                         'error_description': exception.message}),
                             status=400,
                             content_type='application/json')
     logger.exception('An unhandled exception occurred: {0}'.format(exception))
     return HttpResponse(
         json.dumps({'error': 'internal_server',
                     'error_description': exception.message}),
         status=500,
         content_type='application/json'
     )
Example 45
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

'''Utility functions'''

import __builtin__
import uuid
import functools
import itertools
from ovs.log.log_handler import LogHandler

LOGGER = LogHandler.get('arakoon_client', 'pyrakoon')
'''Logger for code in this module''' #pylint: disable=W0105


def update_argspec(*argnames): #pylint: disable=R0912
    '''Wrap a callable to use real argument names

    When generating functions at runtime, one often needs to fall back to
    ``*args`` and ``**kwargs`` usage. Using these features requires
    well-documented code though, and renders API documentation tools less
    useful.

    The decorator generated by this function wraps a decorated function,
    which takes ``**kwargs``, into a function which takes the given argument
    names as parameters, and passes them to the decorated function as keyword
    arguments.
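
The snippet is cut off here, but the described behaviour suggests usage along these lines (a sketch under the stated semantics; the decorated function and its helper are invented for illustration):

@update_argspec('client', 'key')
def get(**kwargs):
    '''Retrieve a value'''
    return kwargs['client']._get(kwargs['key'])  # hypothetical helper

# The generated wrapper exposes the signature get(client, key), so
# help(get) and API documentation tools show real argument names.
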
Esempio n. 46
def process(queue, body, mapping):
    """
    Processes the actual received body
    :param queue:   Type of queue to be used
    :param body:    Body of the message
    :param mapping: Mapping between event message extensions and the tasks to execute for them
    """
    logger = LogHandler.get('extensions', name='processor')
    if queue == Configuration.get('/ovs/framework/messagequeue|queues.storagedriver'):
        cache = VolatileFactory.get_client()
        all_extensions = []

        message = FileSystemEvents.EventMessage()
        message.ParseFromString(body)

        # Possible special tags used as `arguments` key:
        # - [NODE_ID]: Replaced by the storagedriver_id as reported by the event
        # - [CLUSTER_ID]: Replaced by the clusterid as reported by the event
        # Possible dedupe key tags:
        # - [EVENT_NAME]: The name of the event message type
        # - [TASK_NAME]: Task method name
        # - [<argument value>]: Any value of the `arguments` dictionary.

        logger.info('Got event, processing...')
        event = None
        for extension in mapping.keys():
            if not message.event.HasExtension(extension):
                continue
            event = message.event.Extensions[extension]
            node_id = message.node_id
            cluster_id = message.cluster_id
            for current_map in mapping[extension]:
                task = current_map['task']
                kwargs = {}
                delay = 0
                routing_key = 'generic'
                for field, target in current_map['arguments'].iteritems():
                    if field == '[NODE_ID]':
                        kwargs[target] = node_id
                    elif field == '[CLUSTER_ID]':
                        kwargs[target] = cluster_id
                    else:
                        kwargs[target] = getattr(event, field)
                if 'options' in current_map:
                    options = current_map['options']
                    if options.get('execonstoragerouter', False):
                        storagedriver = StorageDriverList.get_by_storagedriver_id(node_id)
                        if storagedriver is not None:
                            routing_key = 'sr.{0}'.format(storagedriver.storagerouter.machine_id)
                    delay = options.get('delay', 0)
                    dedupe = options.get('dedupe', False)
                    dedupe_key = options.get('dedupe_key', None)
                    if dedupe is True and dedupe_key is not None:  # We can't dedupe without a key
                        key = 'ovs_dedupe_volumedriver_events_{0}'.format(dedupe_key)
                        key = key.replace('[EVENT_NAME]', extension.full_name)
                        key = key.replace('[TASK_NAME]', task.__class__.__name__)
                        for kwarg_key in kwargs:
                            key = key.replace('[{0}]'.format(kwarg_key), str(kwargs[kwarg_key]))
                        key = key.replace(' ', '_')
                        task_id = cache.get(key)
                        if task_id:
                            # Key exists, task was already scheduled
                            # If task is already running, the revoke message will
                            # be ignored
                            revoke(task_id)
                        _log(task, kwargs, node_id)
                        async_result = task.s(**kwargs).apply_async(
                            countdown=delay,
                            routing_key=routing_key
                        )
                        cache.set(key, async_result.id, 600)  # Store the task id
                        new_task_id = async_result.id
                    else:
                        _log(task, kwargs, node_id)
                        async_result = task.s(**kwargs).apply_async(
                            countdown=delay,
                            routing_key=routing_key
                        )
                        new_task_id = async_result.id
                else:
                    async_result = task.delay(**kwargs)
                    new_task_id = async_result.id
                logger.info('[{0}] {1}({2}) started on {3} with taskid {4}. Delay: {5}s'.format(
                    queue,
                    task.__name__,
                    json.dumps(kwargs),
                    routing_key,
                    new_task_id,
                    delay
                ))
        if event is None:
            message_type = 'unknown'
            if len(all_extensions) == 0:
                all_extensions = _load_extensions()
            for extension in all_extensions:
                if message.event.HasExtension(extension):
                    message_type = extension.full_name
            logger.info('A message with type {0} was received. Skipped.'.format(message_type))
    elif queue == 'notifications.info':
        logger.info('Received notification from openstack...')
        try:
            body = json.loads(body)
            event_type = body['event_type']
            logger.info('Processing notification for event {0}'.format(event_type))
            if event_type == 'volume.update.start':
                volume_id = body['payload']['volume_id']
                display_name = body['payload']['display_name']
                CINDER_VOLUME_UPDATE_CACHE[volume_id] = display_name
            # elif event_type == 'volume.update.end':
            #     volume_id = body['payload']['volume_id']
            #     display_name = body['payload']['display_name']
            #     old_display_name = CINDER_VOLUME_UPDATE_CACHE.get(volume_id)
            #     if old_display_name and old_display_name != display_name:
            #         logger.info('Caught volume rename event')
            #         VDiskController.update_vdisk_name.apply_async(kwargs={'volume_id': volume_id, 'old_name': old_display_name, 'new_name': display_name})
            #         del CINDER_VOLUME_UPDATE_CACHE[volume_id]
        except Exception as ex:
            logger.error('Processing notification failed {0}'.format(ex))
        logger.info('Processed notification from openstack.')
    else:
        raise NotImplementedError('Queue {0} is not yet implemented'.format(queue))
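
Given how `mapping` is consumed above, one entry could look like the sketch below; the protobuf extension, task and field names are purely illustrative assumptions.

# Hypothetical mapping entry (all identifiers assumed for illustration)
mapping = {FileSystemEvents.volume_rename: [            # protobuf extension object
    {'task': VDiskController.rename_from_voldrv,        # celery task to launch
     'arguments': {'name': 'volume_name',               # event field -> task kwarg
                   '[NODE_ID]': 'storagedriver_id'},    # special tag -> task kwarg
     'options': {'delay': 3,
                 'dedupe': True,
                 'dedupe_key': '[TASK_NAME]_[volume_name]',  # expanded with the kwarg value
                 'execonstoragerouter': True}}
]}
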
Esempio n. 47
def load_ovs_logger(**kwargs):
    """Load a logger."""
    if 'logger' in kwargs:
        kwargs['logger'] = LogHandler.get('celery', name='celery')
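
Judging by its signature, this handler is meant to be attached to one of Celery's logging setup signals; a sketch of that wiring, assuming `after_setup_logger` is the intended signal:

from celery.signals import after_setup_logger

# Swap Celery's logger for an OVS LogHandler whenever logging is set up
after_setup_logger.connect(load_ovs_logger)
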
Esempio n. 48
 def post(self, request, *args, **kwargs):
     """
     Handles token post
     """
     logger = LogHandler.get('api', 'oauth2')
     _ = args, kwargs
     if 'grant_type' not in request.POST:
         raise HttpBadRequestException(error='invalid_request',
                                       error_description='No grant type specified')
     grant_type = request.POST['grant_type']
     scopes = None
     if 'scope' in request.POST:
         scopes = RoleList.get_roles_by_codes(request.POST['scope'].split(' '))
     if grant_type == 'password':
         # Resource Owner Password Credentials Grant
         if 'username' not in request.POST or 'password' not in request.POST:
             raise HttpBadRequestException(error='invalid_request',
                                           error_description='Invalid request')
         username = request.POST['username']
         password = request.POST['password']
         user = UserList.get_user_by_username(username)
         if user is None or user.password != hashlib.sha256(password).hexdigest():
             raise HttpBadRequestException(error='invalid_client',
                                           error_description='Invalid client')
         if user.is_active is False:
             raise HttpBadRequestException(error='inactive_user',
                                           error_description='User is inactive')
         clients = [client for client in user.clients if client.ovs_type == 'INTERNAL' and client.grant_type == 'PASSWORD']
         if len(clients) != 1:
             raise HttpBadRequestException(error='unauthorized_client',
                                           error_description='Client is unauthorized')
         client = clients[0]
         try:
             access_token, _ = Toolbox.generate_tokens(client, generate_access=True, scopes=scopes)
             access_token.expiration = int(time.time() + 86400)
             access_token.save()
         except ValueError as error:
             if error.message == 'invalid_scope':
                 raise HttpBadRequestException(error='invalid_scope',
                                               error_description='Invalid scope requested')
             raise
         Toolbox.clean_tokens(client)
         return HttpResponse(json.dumps({'access_token': access_token.access_token,
                                         'token_type': 'bearer',
                                         'expires_in': 86400}),
                             content_type='application/json')
     elif grant_type == 'client_credentials':
         # Client Credentials
         if 'HTTP_AUTHORIZATION' not in request.META:
             raise HttpBadRequestException(error='missing_header',
                                           error_description='Authorization header missing')
         _, password_hash = request.META['HTTP_AUTHORIZATION'].split(' ')
         client_id, client_secret = base64.b64decode(password_hash).split(':', 1)
         try:
             client = Client(client_id)
             if client.grant_type != 'CLIENT_CREDENTIALS':
                 raise HttpBadRequestException(error='invalid_grant',
                                               error_description='The given grant type is not supported')
             if client.client_secret != client_secret:
                 raise HttpBadRequestException(error='invalid_client',
                                               error_description='Invalid client')
             if not client.user.is_active:
                 raise HttpBadRequestException(error='inactive_user',
                                               error_description='User is inactive')
             try:
                 access_token, _ = Toolbox.generate_tokens(client, generate_access=True, scopes=scopes)
             except ValueError as error:
                 if error.message == 'invalid_scope':
                     raise HttpBadRequestException(error='invalid_scope',
                                                   error_description='Invalid scope requested')
                 raise
             try:
                 Toolbox.clean_tokens(client)
             except Exception as error:
                 logger.error('Error during session cleanup: {0}'.format(error))
             return HttpResponse(json.dumps({'access_token': access_token.access_token,
                                             'token_type': 'bearer',
                                             'expires_in': 3600}),
                                 content_type='application/json')
         except HttpBadRequestException:
             raise
         except ObjectNotFoundException as ex:
             logger.warning('Error matching client: {0}'.format(ex))
             raise HttpBadRequestException(error='invalid_client',
                                           error_description='Client could not be found')
         except Exception as ex:
             logger.exception('Error matching client: {0}'.format(ex))
             raise HttpBadRequestException(error='invalid_client',
                                           error_description='Error loading client')
     else:
         raise HttpBadRequestException(error='unsupported_grant_type',
                                       error_description='Unsupported grant type')
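
A sketch of how a client could exercise the two grant types above; the endpoint path, host and credentials are assumptions, and `requests` is used purely for illustration.

import base64
import requests

# Resource Owner Password Credentials grant (endpoint path assumed)
response = requests.post('https://ovs-node/api/oauth2/token/',
                         data={'grant_type': 'password',
                               'username': 'admin',
                               'password': 'secret'})
access_token = response.json()['access_token']  # valid for 86400s per the code above

# Client Credentials grant: credentials travel base64-encoded in the Authorization header
credentials = base64.b64encode('my_client_id:my_client_secret')
response = requests.post('https://ovs-node/api/oauth2/token/',
                         headers={'Authorization': 'Basic {0}'.format(credentials)},
                         data={'grant_type': 'client_credentials'})
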
Esempio n. 49
    import argparse

    parser = argparse.ArgumentParser(
        description="Rabbitmq Event Processor for OVS", formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument("rabbitmq_queue", type=str, help="Rabbitmq queue name")
    parser.add_argument(
        "--durable",
        dest="queue_durable",
        action="store_const",
        default=False,
        const=True,
        help="Declare queue as durable",
    )

    logger = LogHandler.get("extensions", name="consumer")

    args = parser.parse_args()
    try:
        run_event_consumer = False
        my_ip = Configuration.get("/ovs/framework/hosts/{0}/ip".format(System.get_my_machine_id()))
        for endpoint in Configuration.get("/ovs/framework/messagequeue|endpoints"):
            if endpoint.startswith(my_ip):
                run_event_consumer = True

        if run_event_consumer is True:
            # Load mapping
            mapping = {}
            path = "/".join([os.path.dirname(__file__), "mappings"])
            for filename in os.listdir(path):
                if os.path.isfile("/".join([path, filename])) and filename.endswith(".py"):
Esempio n. 50
 def __init__(self):
     """
     Init
     """
     self._logger = LogHandler.get('extensions', name='fstab')
     self.fstab_file = '/etc/fstab'
Esempio n. 51
 def __init__(self):
     """
     Dummy init method
     """
     self._logger = LogHandler.get('extensions', name='watcher')
Esempio n. 52
def ensure_single(task_name, extra_task_names=None, mode='DEFAULT', global_timeout=300):
    """
    Decorator ensuring a new task cannot be started in case a certain task is
    running, scheduled or reserved.

    Keep in mind that validation is executed by the worker itself: if the task is scheduled on
    a worker that is currently processing a "duplicate" task, it will only be validated after the
    first one completes, and it will then execute normally.

    Allowed modes:
     - DEFAULT: De-duplication based on the task's name. If any new task with the same name is scheduled it will be
                discarded
     - DEDUPED: De-duplication based on the task's name and arguments. If a new task with the same name and arguments
                is scheduled while the first one is currently being executed, it will be allowed on the queue (to make
                sure there will be at least one new execution). All subsequent identical tasks will be discarded.
                Tasks with different arguments will be executed in parallel
     - CHAINED: Identical as DEDUPED with the exception that all tasks will be executed in serial.

    :param task_name:        Name of the task to ensure its singularity
    :type task_name:         String

    :param extra_task_names: Extra tasks to take into account
    :type extra_task_names:  List

    :param mode:             Mode of the ensure single. Allowed values: DEFAULT, DEDUPED, CHAINED
    :type mode:              String

    :param global_timeout:   Timeout before raising error (Only applicable in CHAINED mode)
    :type global_timeout:    Integer

    :return:                 Pointer to function
    """
    logger = LogHandler.get('lib', name='ensure single')

    def wrap(function):
        """
        Wrapper function
        :param function: Function to wrap
        """
        def new_function(*args, **kwargs):
            """
            Wrapped function
            :param args: Arguments without default values
            :param kwargs: Arguments with default values
            """
            def log_message(message, level='info'):
                """
                Log a message with some additional information
                :param message: Message to log
                :param level:   Log level
                :return:        None
                """
                if level not in ('info', 'warning', 'debug', 'error', 'exception'):
                    raise ValueError('Unsupported log level "{0}" specified'.format(level))
                complete_message = 'Ensure single {0} mode - ID {1} - {2}'.format(mode, now, message)
                getattr(logger, level)(complete_message)

            def update_value(key, append, value_to_update=None):
                """
                Store the specified value in the PersistentFactory
                :param key:             Key to store the value for
                :param append:          If True, value_to_update is appended to the list; if False, it is removed (or, when no value is given, the element at index 0 is popped)
                :param value_to_update: Value to append to or remove from the list
                :return:                Updated value
                """
                with volatile_mutex(name=key, wait=5):
                    if persistent_client.exists(key):
                        val = persistent_client.get(key)
                        if append is True and value_to_update is not None:
                            val['values'].append(value_to_update)
                        elif append is False and value_to_update is not None:
                            for value_item in val['values']:
                                if value_item == value_to_update:
                                    val['values'].remove(value_item)
                                    break
                        elif append is False and len(val['values']) > 0:
                            val['values'].pop(0)
                        log_message('Amount of jobs pending for key {0}: {1}'.format(key, len(val['values'])))
                        for kwarg in val['values']:
                            log_message('  KWARGS: {0}'.format(kwarg['kwargs']))
                    else:
                        log_message('Setting initial value for key {0}'.format(key))
                        val = {'mode': mode,
                               'values': []}
                    persistent_client.set(key, val)
                return val

            now = '{0}_{1}'.format(int(time.time()), ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10)))
            task_names = [task_name] if extra_task_names is None else [task_name] + extra_task_names
            persistent_key = '{0}_{1}'.format(ENSURE_SINGLE_KEY, task_name)
            persistent_client = PersistentFactory.get_client()

            if mode == 'DEFAULT':
                with volatile_mutex(persistent_key, wait=5):
                    for task in task_names:
                        key_to_check = '{0}_{1}'.format(ENSURE_SINGLE_KEY, task)
                        if persistent_client.exists(key_to_check):
                            log_message('Execution of task {0} discarded'.format(task_name))
                            return None
                    log_message('Setting key {0}'.format(persistent_key))
                    persistent_client.set(persistent_key, {'mode': mode})

                try:
                    output = function(*args, **kwargs)
                    log_message('Task {0} finished successfully'.format(task_name))
                    return output
                finally:
                    with volatile_mutex(persistent_key, wait=5):
                        if persistent_client.exists(persistent_key):
                            log_message('Deleting key {0}'.format(persistent_key))
                            persistent_client.delete(persistent_key)
            elif mode == 'DEDUPED':
                with volatile_mutex(persistent_key, wait=5):
                    if extra_task_names is not None:
                        for task in extra_task_names:
                            key_to_check = '{0}_{1}'.format(ENSURE_SINGLE_KEY, task)
                            if persistent_client.exists(key_to_check):
                                log_message('Execution of task {0} discarded'.format(task_name))
                                return None
                    log_message('Setting key {0}'.format(persistent_key))

                # Update kwargs with args
                timeout = kwargs.pop('ensure_single_timeout') if 'ensure_single_timeout' in kwargs else global_timeout
                function_info = inspect.getargspec(function)
                kwargs_dict = {}
                for index, arg in enumerate(args):
                    kwargs_dict[function_info.args[index]] = arg
                kwargs_dict.update(kwargs)
                params_info = 'with params {0}'.format(kwargs_dict) if kwargs_dict else 'with default params'

                # Set the key in arakoon if non-existent
                value = update_value(key=persistent_key,
                                     append=True)

                # Validate whether another job with same params is being executed
                job_counter = 0
                for item in value['values']:
                    if item['kwargs'] == kwargs_dict:
                        job_counter += 1
                        if job_counter == 2:  # 1st job with same params is being executed, 2nd is scheduled for execution ==> Discard current
                            log_message('Execution of task {0} {1} discarded because of identical parameters'.format(task_name, params_info))
                            return None
                log_message('New task {0} {1} scheduled for execution'.format(task_name, params_info))
                update_value(key=persistent_key,
                             append=True,
                             value_to_update={'kwargs': kwargs_dict})

                # Poll the arakoon to see whether this call is the only in list, if so --> execute, else wait
                counter = 0
                while counter < timeout:
                    if persistent_client.exists(persistent_key):
                        values = persistent_client.get(persistent_key)['values']
                        queued_jobs = [v for v in values if v['kwargs'] == kwargs_dict]
                        if len(queued_jobs) == 1:
                            try:
                                if counter != 0:
                                    current_time = int(time.time())
                                    starting_time = int(now.split('_')[0])
                                    log_message('Task {0} {1} had to wait {2} seconds before being able to start'.format(task_name,
                                                                                                                         params_info,
                                                                                                                         current_time - starting_time))
                                output = function(*args, **kwargs)
                                log_message('Task {0} finished successfully'.format(task_name))
                                return output
                            finally:
                                update_value(key=persistent_key,
                                             append=False,
                                             value_to_update={'kwargs': kwargs_dict})
                    counter += 1
                    time.sleep(1)
                    if counter == timeout:
                        update_value(key=persistent_key,
                                     append=False,
                                     value_to_update={'kwargs': kwargs_dict})
                        log_message('Could not start task {0} {1} within expected time ({2}s). Removed it from queue'.format(task_name, params_info, timeout),
                                    level='error')
                        raise EnsureSingleTimeoutReached('Ensure single {0} mode - ID {1} - Task {2} could not be started within timeout of {3}s'.format(mode,
                                                                                                                 now,
                                                                                                                 task_name,
                                                                                                                 timeout))
            elif mode == 'CHAINED':
                if extra_task_names is not None:
                    log_message('Extra tasks are not allowed in this mode',
                                level='error')
                    raise ValueError('Ensure single {0} mode - ID {1} - Extra tasks are not allowed in this mode'.format(mode, now))

                # Create key to be stored in arakoon and update kwargs with args
                timeout = kwargs.pop('ensure_single_timeout') if 'ensure_single_timeout' in kwargs else global_timeout
                function_info = inspect.getargspec(function)
                kwargs_dict = {}
                for index, arg in enumerate(args):
                    kwargs_dict[function_info.args[index]] = arg
                kwargs_dict.update(kwargs)
                params_info = 'with params {0}'.format(kwargs_dict) if kwargs_dict else 'with default params'

                # Set the key in arakoon if non-existent
                value = update_value(key=persistent_key,
                                     append=True)

                # Validate whether another job with same params is being executed, skip if so
                for item in value['values'][1:]:  # 1st element is processing job, we check all other queued jobs for identical params
                    if item['kwargs'] == kwargs_dict:
                        log_message('Execution of task {0} {1} discarded because of identical parameters'.format(task_name, params_info))
                        return None
                log_message('New task {0} {1} scheduled for execution'.format(task_name, params_info))
                update_value(key=persistent_key,
                             append=True,
                             value_to_update={'kwargs': kwargs_dict,
                                              'timestamp': now})

                # Poll the arakoon to see whether this call is the first in list, if so --> execute, else wait
                first_element = None
                counter = 0
                while counter < timeout:
                    if persistent_client.exists(persistent_key):
                        value = persistent_client.get(persistent_key)
                        first_element = value['values'][0]['timestamp'] if len(value['values']) > 0 else None

                    if first_element == now:
                        output = None
                        try:
                            if counter != 0:
                                current_time = int(time.time())
                                starting_time = int(now.split('_')[0])
                                log_message('Task {0} {1} had to wait {2} seconds before being able to start'.format(task_name,
                                                                                                                     params_info,
                                                                                                                     current_time - starting_time))
                            output = function(*args, **kwargs)
                            log_message('Task {0} finished successfully'.format(task_name))
                        except Exception:
                            log_message('Task {0} {1} failed'.format(task_name, params_info), level='exception')
                            raise
                        finally:
                            update_value(key=persistent_key,
                                         append=False)
                        return output
                    counter += 1
                    time.sleep(1)
                    if counter == timeout:
                        update_value(key=persistent_key,
                                     append=False)
                        log_message('Could not start task {0} {1} within expected time ({2}s). Removed it from queue'.format(task_name, params_info, timeout),
                                    level='error')
                        raise EnsureSingleTimeoutReached('Ensure single {0} mode - ID {1} - Task {2} could not be started within timeout of {3}s'.format(mode,
                                                                                                                                                         now,
                                                                                                                                                         task_name,
                                                                                                                                                         timeout))
            else:
                raise ValueError('Unsupported mode "{0}" provided'.format(mode))

        new_function.__name__ = function.__name__
        new_function.__module__ = function.__module__
        return new_function
    return wrap
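
A sketch of how the decorator is meant to be applied to a Celery task (the task name and cleanup body are assumptions); note that callers can shorten the wait through the `ensure_single_timeout` kwarg, which the decorator pops before invoking the task.

@celery.task(name='ovs.vdisk.delete_snapshots')  # hypothetical task
@ensure_single(task_name='ovs.vdisk.delete_snapshots', mode='CHAINED', global_timeout=600)
def delete_snapshots(timestamp=None):
    pass  # actual snapshot cleanup logic

# Identical invocations queue up and run serially; this one waits at most 120s for its turn
delete_snapshots.delay(timestamp=1234567890, ensure_single_timeout=120)
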
Esempio n. 53
# C0111: Missing docstring
# W0142: Used * or ** magic
# R0912: Too many branches
# C0103: Invalid name
# W0212: Access to a protected member
# R0913: Too many arguments
# W0201: Attribute defined outside __init__
# W0231: __init__ method from base class X is not called
# R0903: Too few public methods
# W0223: Method X is abstract in class Y but not overridden
# R0201: Method could be a function
# W0703: Catch "Exception"
# E1121: Too many positional arguments for function call
# R0904: Too many public methods

LOGGER = LogHandler.get("arakoon_client", "pyrakoon")


class Consistency:
    pass


class Consistent(Consistency):
    def __str__(self):
        return "Consistent"


class NoGuarantee(Consistency):
    def __str__(self):
        return "NoGuarantee"
Esempio n. 54
            try:
                for task in return_data['tasks']:
                    self._process_task(task['code'], task['metadata'], self.servicemanager)
            except Exception, ex:
                SupportAgent._logger.exception('Unexpected error processing tasks: {0}'.format(ex))
                raise
        if 'interval' in return_data:
            interval = return_data['interval']
            if interval != self.interval:
                self.interval = interval
                self._update_config('interval', str(interval))


if __name__ == '__main__':
    logger = LogHandler.get('support', name='agent')
    try:
        if Configuration.get('/ovs/framework/support|enabled') is False:
            print 'Support not enabled'
            sys.exit(0)
        logger.info('Starting up')
        client = SupportAgent()
        while True:
            try:
                client.run()
                time.sleep(client.interval)
            except KeyboardInterrupt:
                raise
            except Exception, exception:
                logger.exception('Unexpected error during run: {0}'.format(exception))
                time.sleep(10)
Esempio n. 55
 def __init__(self, node):
     self._logger = LogHandler.get("extensions", name="asdmanagerclient")
     self.node = node
     self.timeout = 20
     self._unittest_mode = os.environ.get("RUNNING_UNITTESTS") == "True"
     self._log_min_duration = 1
Esempio n. 56
    def run(command, config=None, named_params=None, extra_params=None, client=None, debug=False):
        """
        Executes a command on ALBA
        When --to-json is NOT passed:
            * An error occurs --> exitcode != 0
            * It worked --> exitcode == 0

        When --to-json is passed:
            * An error occurs during verification of the passed parameters --> exitcode != 0
            * An error occurs while executing the command --> exitcode == 0 (error in json output)
            * It worked --> exitcode == 0

        :param command: The command to execute, eg: 'list-namespaces'
        :type command: str
        :param config: The configuration location to be used, eg: 'arakoon://config/ovs/arakoon/ovsdb/config?ini=%2Fopt%2FOpenvStorage%2Fconfig%2Farakoon_cacc.ini'
        :type config: str
        :param named_params: Named parameters passed to the command as '--key=value', eg: {'long-id': ','.join(asd_ids)}
        :type named_params: dict
        :param extra_params: Extra positional parameters appended to the command, eg: [name]
        :type extra_params: list
        :param client: A client on which to execute the command
        :type client: ovs.extensions.generic.sshclient.SSHClient
        :param debug: Log additional output
        :type debug: bool
        :return: The output of the command
        :rtype: dict
        """
        if named_params is None:
            named_params = {}
        if extra_params is None:
            extra_params = []

        logger = LogHandler.get('extensions', name='alba-cli')
        if os.environ.get('RUNNING_UNITTESTS') == 'True':
            # For the unittest, all commands are passed to a mocked Alba
            from ovs.extensions.plugins.tests.alba_mockups import VirtualAlbaBackend
            named_params.update({'config': config})
            named_params.update({'extra_params': extra_params})
            return getattr(VirtualAlbaBackend, command.replace('-', '_'))(**named_params)

        debug_log = []
        try:
            cmd_list = ['/usr/bin/alba', command, '--to-json']
            if config is not None:
                cmd_list.append('--config={0}'.format(config))
            for key, value in named_params.iteritems():
                cmd_list.append('--{0}={1}'.format(key, value))
            cmd_list.extend(extra_params)
            cmd_string = ' '.join(cmd_list)
            debug_log.append('Command: {0}'.format(cmd_string))

            start = time.time()
            try:
                if client is None:
                    try:
                        if not hasattr(select, 'poll'):
                            import subprocess
                            subprocess._has_poll = False  # Damn 'monkey patching'
                        channel = Popen(cmd_list, stdout=PIPE, stderr=PIPE, universal_newlines=True)
                    except OSError as ose:
                        raise CalledProcessError(1, cmd_string, str(ose))
                    output, stderr = channel.communicate()
                    output = re.sub(r'[^\x00-\x7F]+', '', output)
                    stderr_debug = 'stderr: {0}'.format(stderr)
                    stdout_debug = 'stdout: {0}'.format(output)
                    if debug is True:
                        logger.debug(stderr_debug)
                        logger.debug(stdout_debug)
                    debug_log.append(stderr_debug)
                    debug_log.append(stdout_debug)
                    exit_code = channel.returncode
                    if exit_code != 0:  # Raise same error as check_output
                        raise CalledProcessError(exit_code, cmd_string, output)
                else:
                    if debug is True:
                        output, stderr = client.run(cmd_list, return_stderr=True)
                        debug_log.append('stderr: {0}'.format(stderr))
                    else:
                        output = client.run(cmd_list).strip()
                    debug_log.append('stdout: {0}'.format(output))

                output = json.loads(output)
                duration = time.time() - start
                if duration > 0.5:
                    logger.warning('AlbaCLI call {0} took {1}s'.format(command, round(duration, 2)))
            except CalledProcessError as cpe:
                try:
                    output = json.loads(cpe.output)
                except Exception:
                    raise RuntimeError('Executing command {0} failed with output {1}'.format(cmd_string, cpe.output))

            if output['success'] is True:
                return output['result']
            raise RuntimeError(output['error']['message'])

        except Exception as ex:
            logger.exception('Error: {0}'.format(ex))
            # In case there's an exception, we always log
            for debug_line in debug_log:
                logger.debug(debug_line)
            raise
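
A sketch of a call matching the parameter documentation above; the enclosing class is assumed to be the usual AlbaCLI helper, and the config URL is the example given in the docstring.

# Illustrative invocation: runs '/usr/bin/alba list-namespaces --to-json --config=...'
# and returns the parsed 'result' value, raising RuntimeError on failure.
namespaces = AlbaCLI.run(command='list-namespaces',
                         config='arakoon://config/ovs/arakoon/ovsdb/config?ini=%2Fopt%2FOpenvStorage%2Fconfig%2Farakoon_cacc.ini',
                         debug=True)
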