Example 1
    def test_ensure_iterable(self):
        """Test the ensure_iterable function."""
        self.assertEqual(ensure_iterable(2), [2])
        self.assertEqual(ensure_iterable("2"), ["2"])
        self.assertEqual(ensure_iterable([2]), [2])
        self.assertEqual(ensure_iterable([2], iterable=set), {2})
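
The assertions above pin down the contract but not the implementation. A minimal sketch of what ensure_iterable might look like, inferred only from these assertions (the real Canopsis implementation may differ, e.g. in how it treats unicode strings):

def ensure_iterable(value, iterable=list):
    """Return ``value`` as an instance of ``iterable``.

    Strings and non-iterable values are wrapped in a one-element container;
    existing iterables are converted to ``iterable``.
    """
    # strings are treated as scalars, matching the assertions above
    if isinstance(value, str) or not hasattr(value, '__iter__'):
        return iterable([value])

    return iterable(value)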
Example 2
    def register_middleware(cls, protocol=None, data_type=None):
        """Register a middleware class with input protocol name and data_type.

        :param protocol: one or many protocols to add to protocols of input cls
        :type protocol: str or Iterable(protocol)

        :param data_type: one or many data_types to add to data_types of input
            cls.
        :type data_type: str or Iterable(data_type)
        """

        protocols = cls.get_protocols()
        data_types = ensure_iterable(cls.__datatype__, iterable=set)

        if protocol is not None:
            protocol = ensure_iterable(protocol, iterable=set)
            protocols |= protocol

        if data_type is not None:
            data_type = ensure_iterable(data_type, iterable=set)
            data_types |= data_type

        for protocol in protocols:
            _data_types = Middleware.__MIDDLEWARES__.setdefault(protocol, {})

            for datatype in data_types:
                _data_types[datatype] = cls
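
A short behaviour sketch (values are illustrative, not from the Canopsis code base) of why ensure_iterable(..., iterable=set) is used here: both __datatype__ and the protocol argument may be a single string or a collection, and either form ends up as a set that can be merged with |=.

ensure_iterable('json', iterable=set)            # -> {'json'}
ensure_iterable(('json', 'bson'), iterable=set)  # -> {'json', 'bson'}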
Example 3
    def test_ensure_iterable(self):
        """Test the ensure_iterable function."""
        self.assertEqual(ensure_iterable(2), [2])
        self.assertEqual(ensure_iterable("2"), ["2"])
        self.assertEqual(ensure_iterable([2]), [2])
        self.assertEqual(ensure_iterable([2], iterable=set), {2})
Example 4
def response(data, adapt=True):
    """Construct a REST response from input data.

    :param data: data to convert into a REST response.
    :param bool adapt: adapt Canopsis data to Ember (default: True)
    """

    # calculate result_data and total related to data type
    if isinstance(data, tuple):
        result_data = ensure_iterable(data[0])
        total = data[1]

    else:
        result_data = None if data is None else ensure_iterable(data)
        total = 0 if result_data is None else len(result_data)

    if adapt:
        # apply transformation for client
        adapt_canopsis_data_to_ember(result_data)

    result = {
        'total': total,
        'data': result_data,
        'success': True
    }

    return result
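
A usage sketch of the two input shapes the branches above accept (values are illustrative; adapt=False skips the Ember adaptation step):

single = response({'name': 'entity-1'}, adapt=False)
# single == {'total': 1, 'data': [{'name': 'entity-1'}], 'success': True}

paged = response(([{'name': 'a'}, {'name': 'b'}], 42), adapt=False)
# paged == {'total': 42, 'data': [{'name': 'a'}, {'name': 'b'}], 'success': True}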
Example 5
def response(data, adapt=True, success=True):
    """Construct a REST response from input data.

    :param data: data to convert into a REST response.
    :param bool adapt: adapt Canopsis data to Ember (default: True)
    :param bool success: is the response a success (default: True)
    :rtype: dict
    """

    # calculate result_data and total related to data type
    if isinstance(data, tuple):
        result_data = ensure_iterable(data[0])
        total = data[1]

    else:
        result_data = None if data is None else ensure_iterable(data)
        total = 0 if result_data is None else len(result_data)

    if adapt:
        adapt_canopsis_data_to_ember(result_data)

    result = {'total': total, 'data': result_data, 'success': success}

    headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': 0
    }

    for hname in headers:
        BottleResponse.set_header(hname, headers[hname])

    return result
Example 6
    def get_alarms(
        self,
        resolved=True,
        tags=None,
        exclude_tags=None,
        timewindow=None
    ):
        """
        Get alarms from TimedStorage.

        :param resolved: If ``True``, returns only resolved alarms, else
                         returns only unresolved alarms (default: ``True``).
        :type resolved: bool
        :param tags: Tags which must be set on alarm (optional)
        :type tags: str or list

        :param exclude_tags: Tags which must not be set on alarm (optional)
        :type exclude_tags: str or list

        :param timewindow: Time Window used for fetching (optional)
        :type timewindow: canopsis.timeserie.timewindow.TimeWindow

        :returns: Iterable of alarms matching
        """

        query = {}

        if resolved:
            query['resolved'] = {'$ne': None}

        else:
            query['resolved'] = None

        tags_cond = None

        if tags is not None:
            tags_cond = {'$in': ensure_iterable(tags)}

        notags_cond = None

        if exclude_tags is not None:
            notags_cond = {'$nin': ensure_iterable(exclude_tags)}

        if tags_cond is None and notags_cond is not None:
            query['tags'] = notags_cond

        elif tags_cond is not None and notags_cond is None:
            query['tags'] = tags_cond

        elif tags_cond is not None and notags_cond is not None:
            query = {'$and': [
                query, {'tags': tags_cond}, {'tags': notags_cond}
            ]}

        return self[Alerts.ALARM_STORAGE].find(
            _filter=query,
            timewindow=timewindow
        )
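
For reference, a sketch of the MongoDB filter the branches above build for get_alarms(resolved=False, tags=['prod', 'db']) with no exclude_tags (reconstructed from the code; tag names are illustrative):

expected_query = {
    'resolved': None,
    'tags': {'$in': ['prod', 'db']},
}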
Example 7
    def get_shared_data(self, shared_ids):
        """Get all shared data related to input shared ids.

        :param shared_ids: one or more shared ids.
        :type shared_ids: list or str

        :return: depending on input shared_ids::

            - one shared id: one list of shared data
            - list of shared ids: list of list of shared data
        """

        result = []

        sids = ensure_iterable(shared_ids, iterable=set)

        for shared_id in sids:
            query = {CompositeStorage.SHARED: shared_id}
            shared_data = self.get_elements(query=query)
            result.append(shared_data)

        # return only the first result if shared_ids was not an iterable
        if not isiterable(shared_ids, is_str=False):
            result = get_first(result)

        return result
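
A behaviour sketch inferred from the code above (the storage object and ids are illustrative): a single shared id yields one list, a list of ids yields a list of lists.

one = storage.get_shared_data('shared-1')
# -> a single list of shared data (get_first unwraps the outer list)

many = storage.get_shared_data(['shared-1', 'shared-2'])
# -> a list of lists, one per distinct id (set iteration order is not guaranteed)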
Example 8
def send_events(ws, events, exchange='canopsis.events'):
    events = ensure_iterable(events)

    sent_events = []
    failed_events = []
    retry_events = []

    for event in events:
        try:
            ws.amqp_pub.canopsis_event(event, exchange)
            sent_events.append(event)

        except KeyError as exc:
            ws.logger.error('bad event: {}'.format(exc))
            failed_events.append(event)

        except AmqpPublishError as exc:
            ws.logger.error('publish error: {}'.format(exc))
            retry_events.append(event)

    return {
        'sent_events': sent_events,
        'failed_events': failed_events,
        'retry_events': retry_events
    }
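
A usage sketch (ws is assumed to expose amqp_pub and logger as in the code above; the events are illustrative): publish a batch and re-send only the events that hit a publish error.

report = send_events(ws, [event_a, event_b])

if report['retry_events']:
    # only re-publish the events that failed with an AMQP publish error
    send_events(ws, report['retry_events'])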
Example 9
    def load_documents(self, data, collection, filename):
        storage = self.storage.get_backend(collection)
        data = ensure_iterable(data)

        for doc in data:
            if 'loader_id' not in doc:
                self.logger.error(
                    'Missing "loader_id" key in document, skipping')
                self.logger.debug(str(doc))

                continue

            mfilter = {'loader_id': doc['loader_id']}
            doc_exists = storage.find(mfilter).count()

            if doc_exists:
                if not doc.get('loader_no_update', True):
                    storage.update(mfilter, doc, upsert=True)

                else:
                    self.logger.info(u'Document "{0}" not updatable'.format(
                        doc['loader_id']))

            else:
                storage.update(mfilter, doc, upsert=True)
Example 10
    def update_current_alarm(self, alarm, new_value, tags=None):
        """
        Update alarm's history and tags.

        :param alarm: Alarm to update
        :type alarm: dict

        :param new_value: New history to set on alarm
        :type new_value: dict

        :param tags: Tags to add on alarm (optional)
        :type tags: str or list
        """

        storage = self[Alerts.ALARM_STORAGE]

        alarm_id = alarm[storage.DATA_ID]
        alarm_ts = alarm[storage.TIMESTAMP]

        if tags is not None:
            for tag in ensure_iterable(tags):
                if tag not in new_value['tags']:
                    new_value['tags'].append(tag)

        storage.put(alarm_id, new_value, alarm_ts)
Example 11
    def __init__(self,
                 op,
                 name=None,
                 raw_body=False,
                 payload=None,
                 wsgi_params=None,
                 response=response,
                 adapt=True):
        """
        :param op: ws operation for routing a function
        :param str name: ws name
        :param bool raw_body: if True, will set kwargs body to raw request body
        :param payload: body parameter names (won't be generated in routes)
        :type payload: str or list of str
        :param function response: response to apply on decorated function
            result
        :param dict wsgi_params: wsgi parameters which will be passed to the
            wsgi as keyword arguments
        :param bool adapt: Adapt Canopsis<->Ember data (default: True)
        """

        super(route, self).__init__()

        # logger is initialized by WebServer
        self.logger = logging.getLogger('webserver')

        self.op = op
        self.name = name
        self.raw_body = raw_body
        self.payload = ensure_iterable(payload)
        self.response = response
        self.wsgi_params = wsgi_params
        self.adapt = adapt
        self.url = ''
Example 12
    def get_shared_data(self, shared_ids):
        """Get all shared data related to input shared ids.

        :param shared_ids: one or more shared ids.
        :type shared_ids: list or str

        :return: depending on input shared_ids::

            - one shared id: one list of shared data
            - list of shared ids: list of list of shared data
        """

        result = []

        sids = ensure_iterable(shared_ids, iterable=set)

        for shared_id in sids:
            query = {CompositeStorage.SHARED: shared_id}
            shared_data = self.get_elements(query=query)
            result.append(shared_data)

        # return only the first result if shared_ids was not an iterable
        if not isiterable(shared_ids, is_str=False):
            result = get_first(result)

        return result
Example 13
def send_events(ws, events, exchange='canopsis.events'):
    events = ensure_iterable(events)

    sent_events = []
    failed_events = []
    retry_events = []

    for event in events:
        try:
            ws.amqp_pub.canopsis_event(event, exchange)
            sent_events.append(event)

        except KeyError as exc:
            ws.logger.error('bad event: {}'.format(exc))
            failed_events.append(event)

        except AmqpPublishError as exc:
            ws.logger.error('publish error: {}'.format(exc))
            retry_events.append(event)

    return {
        'sent_events': sent_events,
        'failed_events': failed_events,
        'retry_events': retry_events
    }
Example 14
    def __init__(
            self, op, name=None, raw_body=False, payload=None, wsgi_params=None,
            response=response, adapt=True
    ):
        """
        :param op: ws operation for routing a function
        :param str name: ws name
        :param bool raw_body: if True, will set kwargs body to raw request body
        :param payload: body parameter names (won't be generated in routes)
        :type payload: str or list of str
        :param function response: response to apply on decorated function
            result
        :param dict wsgi_params: wsgi parameters which will be passed to the
            wsgi as keyword arguments
        :param bool adapt: Adapt Canopsis<->Ember data (default: True)
        """

        super(route, self).__init__()

        # logger is initialized by WebServer
        self.logger = logging.getLogger('webserver')

        self.op = op
        self.name = name
        self.raw_body = raw_body
        self.payload = ensure_iterable(payload)
        self.response = response
        self.wsgi_params = wsgi_params
        self.adapt = adapt
        self.url = ''
Example 15
def get_previous_step(alarm, steptypes, ts=None):
    """
    Get last step in alarm history.

    :param alarm: Alarm history
    :type alarm: dict

    :param steptypes: Step types wanted
    :type steptypes: str or list

    :param ts: Timestamp to look from (optional)
    :type ts: int

    :returns: Most recent step
    """

    if len(alarm[AlarmField.steps.value]) > 0:
        if ts is None:
            ts = alarm[AlarmField.steps.value][-1]['t'] + 1

        steptypes = ensure_iterable(steptypes)

        for step in reversed(alarm[AlarmField.steps.value]):
            if step['t'] < ts and step['_t'] in steptypes:
                return step

    return None
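
A usage sketch (the alarm value and the step type names are illustrative): look up the most recent state step recorded before the last one.

last_state_step = get_previous_step(alarm, ['statedec', 'stateinc'])
# -> the most recent matching step dict, or None if there is none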
Example 16
def get_previous_step(alarm, steptypes, ts=None):
    """
    Get last step in alarm history.

    :param alarm: Alarm history
    :type alarm: dict

    :param steptypes: Step types wanted
    :type steptypes: str or list

    :param ts: Timestamp to look from (optional)
    :type ts: int

    :returns: Most recent step
    """

    if len(alarm[AlarmField.steps.value]) > 0:
        if ts is None:
            ts = alarm[AlarmField.steps.value][-1]['t'] + 1

        steptypes = ensure_iterable(steptypes)

        for step in reversed(alarm[AlarmField.steps.value]):
            if step['t'] < ts and step['_t'] in steptypes:
                return step

    return None
Example 17
    def load_documents(self, data, collection, filename):
        storage = self.storage.get_backend(collection)
        data = ensure_iterable(data)

        for doc in data:
            if 'loader_id' not in doc:
                self.logger.error(
                    'Missing "loader_id" key in document, skipping'
                )
                self.logger.debug(str(doc))

                continue

            mfilter = {'loader_id': doc['loader_id']}
            doc_exists = storage.find(mfilter).count()

            if doc_exists:
                if not doc.get('loader_no_update', True):
                    storage.update(mfilter, doc, upsert=True)

                else:
                    self.logger.info(u'Document "{0}" not updatable'.format(
                        doc['loader_id']
                    ))

            else:
                storage.update(mfilter, doc, upsert=True)
Example 18
    def update_current_alarm(self, alarm, new_value, tags=None):
        """
        Update alarm's history and tags.

        :param alarm: Alarm to update
        :type alarm: dict

        :param new_value: New history to set on alarm
        :type new_value: dict

        :param tags: Tags to add on alarm (optional)
        :type tags: str or list
        """
        storage = self.alerts_storage

        alarm_id = alarm[storage.DATA_ID]
        alarm_ts = alarm[storage.TIMESTAMP]

        if AlarmField.display_name.value not in new_value:
            display_name = gen_id()
            while self.check_if_display_name_exists(display_name):
                display_name = gen_id()
            new_value[AlarmField.display_name.value] = display_name

        if tags is not None:
            for tag in ensure_iterable(tags):
                if tag not in new_value[AlarmField.tags.value]:
                    new_value[AlarmField.tags.value].append(tag)

        storage.put(alarm_id, new_value, alarm_ts)

        self.watcher_manager.alarm_changed(alarm['data_id'])
Example 19
    def update_current_alarm(self, alarm, new_value, tags=None):
        """
        Update alarm's history and tags.

        :param alarm: Alarm to update
        :type alarm: dict

        :param new_value: New history to set on alarm
        :type new_value: dict

        :param tags: Tags to add on alarm (optional)
        :type tags: str or list
        """
        storage = self.alerts_storage

        alarm_id = alarm[storage.DATA_ID]
        alarm_ts = alarm[storage.TIMESTAMP]

        if AlarmField.display_name.value not in new_value:
            display_name = gen_id()
            while self.check_if_display_name_exists(display_name):
                display_name = gen_id()
            new_value[AlarmField.display_name.value] = display_name

        if tags is not None:
            for tag in ensure_iterable(tags):
                if tag not in new_value[AlarmField.tags.value]:
                    new_value[AlarmField.tags.value].append(tag)

        storage.put(alarm_id, new_value, alarm_ts)

        self.watcher_manager.alarm_changed(alarm['data_id'])
Example 20
    def delete(self, names=None):
        if names is None:
            names = self.list()

        names = ensure_iterable(names)

        for name in names:
            os.remove(self._path(name))
Example 21
    def rest(namespace, ctype, _id=None, body='[]', **kwargs):
        try:
            items = ensure_iterable(loads(body))

        except ValueError as err:
            return HTTPError(500, 'Impossible to parse body: {0}'.format(err))

        return save_records(ws, namespace, ctype, _id, items)
Example 22
    def rest(namespace, ctype, _id=None, body='[]', **kwargs):
        try:
            items = ensure_iterable(json.loads(body))

        except ValueError as err:
            return HTTPError(500, 'Impossible to parse body: {0}'.format(err))

        return save_records(ws, namespace, ctype, _id, items)
Example 23
    def _influx_query(
            self,
            metric_id,
            aggregations=[],
            condition=None,
            groupby=[]
    ):
        query = 'select'

        if not aggregations:
            query = '{} value'.format(query)

        else:
            for aggr in ensure_iterable(aggregations):
                query = '{} {}(value),'.format(query, aggr)

            # remove the trailing ','
            query = query[:-1]

        query = '{} from {}'.format(query, metric_id)

        if condition is not None:
            query = '{} where {}'.format(query, condition)

        if groupby:
            query = '{} group by'.format(query)
            for gb in ensure_iterable(groupby):
                query = '{} {},'.format(query, gb)

            # remove the trailing ','
            query = query[:-1]

        self.logger.debug(u'Processing query `{}`'.format(query))
        result = self.influxdbstg.raw_query(query)

        # If something went wrong (bad metric_id, bad aggregation...),
        # InfluxDBStorage will just return None
        if result is None:
            raise ValueError(
                'Query `{}` failed : unable to retrieve stats'.format(query)
            )

        return result
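
For reference, the query string produced by the concatenations above for a call such as _influx_query('cpu', aggregations=['mean'], condition='time > now() - 1h', groupby=['host']) would be (metric, condition and tag names are illustrative):

expected = 'select mean(value) from cpu where time > now() - 1h group by host'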
Example 24
    def get(self, names, version=-1):

        names = ensure_iterable(names)
        result = []
        for name in names:
            gridout = self.gridfs.get_version(filename=name, version=version)
            fs = MongoFileStream(gridout)
            result.append(fs)

        return result
Example 25
    def unshare_data(self, data, cache=False):
        """Remove share property from input data.

        :param data: one or more data to unshare.
        :param bool cache: use query cache if True (False by default).
        """
        data = ensure_iterable(data)

        for d in data:
            if CompositeStorage.SHARED in d:
                d[CompositeStorage.SHARED] = str(uuid())
                path, name = self.get_path_with_name(d)
                self.put(path=path, name=name, data=d, cache=cache)
Example 26
    def unshare_data(self, data, cache=False):
        """Remove share property from input data.

        :param data: one or more data to unshare.
        :param bool cache: use query cache if True (False by default).
        """
        data = ensure_iterable(data)

        for d in data:
            if CompositeStorage.SHARED in d:
                d[CompositeStorage.SHARED] = str(uuid())
                path, name = self.get_path_with_name(d)
                self.put(path=path, name=name, data=d, cache=cache)
Example 27
def response(data, adapt=True, success=True):
    """Construct a REST response from input data.

    :param data: data to convert into a REST response.
    :param bool adapt: adapt Canopsis data to Ember (default: True)
    :param bool success: is the response a success (default: True)
    :rtype: dict
    """

    # calculate result_data and total related to data type
    if isinstance(data, tuple):
        result_data = ensure_iterable(data[0])
        total = data[1]

    else:
        result_data = None if data is None else ensure_iterable(data)
        total = 0 if result_data is None else len(result_data)

    if adapt:
        adapt_canopsis_data_to_ember(result_data)

    result = {
        'total': total,
        'data': result_data,
        'success': success
    }

    headers = {
        'Cache-Control': 'no-cache, no-store, must-revalidate',
        'Pragma': 'no-cache',
        'Expires': 0
    }

    for hname in headers:
        BottleResponse.set_header(hname, headers[hname])

    return result
Example 28
    def delete(self, names=None):

        if names is None:
            names = self.gridfs.list()

        names = ensure_iterable(names)

        for name in names:
            while True:
                fs = self.get(name)

                if fs is None:
                    break

                self.gridfs.delete(file_id=fs.get_inner_object()._id)
Example 29
    def send_event(event, url=None):
        if ws.enable_crossdomain_send_events and url is not None:
            payload = {
                'event': json.dumps(event)
            }

            response = requests.post(url, data=payload)

            if response.status_code != 200:
                api_response = json.loads(response.text)

                return (api_response['data'], api_response['total'])

            else:
                return HTTPError(response.status_code, response.text)

        else:
            events = ensure_iterable(event)
            exchange = ws.amqp.exchange_name_events

            for event in events:
                if schema.validate(event, 'cevent'):
                    sname = 'cevent.{0}'.format(event['event_type'])

                    if schema.validate(event, sname):
                        if event['event_type'] == 'eue':
                            sname = 'cevent.eue.{0}'.format(
                                event['type_message']
                            )

                            if not schema.validate(event, sname):
                                continue

                        rk = '{0}.{1}.{2}.{3}.{4}'.format(
                            u'{0}'.format(event['connector']),
                            u'{0}'.format(event['connector_name']),
                            u'{0}'.format(event['event_type']),
                            u'{0}'.format(event['source_type']),
                            u'{0}'.format(event['component'])
                        )

                        if event['source_type'] == 'resource':
                            rk = '{0}.{1}'.format(rk, event['resource'])

                        ws.amqp.publish(event, rk, exchange)

            return events
Example 30
    def put_elts(self, elts, graph_ids=None, cache=False):
        """Put graph elements in DB.

        :param elts: graph elements to put in DB.
        :type elts: dict, GraphElement or list of dict/GraphElement.
        :param str graph_ids: element graph id. None if elt is a graph.
        :param bool cache: use query cache if True (False by default).
        :return: corresponding graph elts.
        :rtype: list of GraphElements
        """

        result = []

        # ensure elts is a list
        if isinstance(elts, (dict, GraphElement)):
            elts = [elts]

        for elt in elts:
            gelt = elt
            # in case of dict, get the corresponding graph elt and save it
            if isinstance(gelt, dict):
                gelt = GraphElement.new(**gelt)

                gelt.process(event={})
                gelt.save(manager=self, cache=cache)

            else:  # in case of graphelt, save its serialized form in db
                serialized_elt = gelt.to_dict()
                # put elt value in storage
                self[GraphManager.STORAGE].put_element(
                    _id=elt.id, element=serialized_elt, cache=cache
                )
            # add the graph element to the result
            result.append(gelt)
        # associate all elt ids with all graph ids
        if graph_ids is not None:
            # eliminate duplicate elts
            elt_ids = set([gelt.id for gelt in result])
            # ensure graph_ids is iterable
            graph_ids = ensure_iterable(graph_ids)
            graphs = self[GraphManager.STORAGE].get_elements(ids=graph_ids)
            # add elt ids in elts of graphs
            for graph in graphs:
                graph[Graph.ELTS] = list(graph[Graph.ELTS] | elt_ids)
            # save all graphs
            self[GraphManager.STORAGE].put_elements(elements=graphs)
        return result
Example 31
    def get_protocols(cls):
        """Get all protocols declared in the class hierarchy of cls

        :return: set of protocols registered in the class tree of input cls
        :rtype: set([str])
        """

        protocols = ensure_iterable(cls.__protocol__, iterable=set)

        for base_cls in cls.__bases__:
            if issubclass(base_cls, Middleware):
                base_protocols = base_cls.get_protocols()

                protocols |= base_protocols

        result = protocols

        return result
Example 32
    def send_event(event, url=None):
        if ws.enable_crossdomain_send_events and url is not None:
            payload = {"event": json.dumps(event)}

            response = requests.post(url, data=payload)

            if response.status_code != 200:
                api_response = json.loads(response.text)

                return (api_response["data"], api_response["total"])

            else:
                return HTTPError(response.status_code, response.text)

        else:
            events = ensure_iterable(event)
            exchange = ws.amqp.exchange_name_events

            for event in events:
                if schema.validate(event, "cevent"):
                    sname = "cevent.{0}".format(event["event_type"])

                    if schema.validate(event, sname):
                        if event["event_type"] == "eue":
                            sname = "cevent.eue.{0}".format(event["type_message"])

                            if not schema.validate(event, sname):
                                continue

                        rk = "{0}.{1}.{2}.{3}.{4}".format(
                            event["connector"],
                            event["connector_name"],
                            event["event_type"],
                            event["source_type"],
                            event["component"],
                        )

                        if event["source_type"] == "resource":
                            rk = "{0}.{1}".format(rk, event["resource"])

                        ws.amqp.publish(event, rk, exchange)

            return events
Example 33
    def load(self, path):
        try:
            loaded = []

            for fpath in os.listdir(path):
                if fpath.endswith('.json'):
                    fullpath = os.path.join(path, fpath)

                    with open(fullpath) as f:
                        data = ensure_iterable(json.load(f))

                    loaded += data

        except Exception as err:
            self.logger.error(u'Unable to load JSON files "{0}": {1}'.format(
                path, err))

            loaded = []

        return loaded
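
A usage sketch (loader is an instance of the class above; the path is illustrative): each *.json file may contain either a single document or a list of documents, and ensure_iterable flattens both cases into one list.

docs = loader.load('/opt/canopsis/etc/json_documents')
# -> [] on error, otherwise the concatenation of every loaded file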
Example 34
    def find(entity_ids=None, behaviors=None, start=None, end=None):
        """Find documents related to input entity id(s) and behavior(s).

        :param entity_ids: one or more entity ids.
        :type entity_ids: list or str
        :param behaviors: one or more behavior names.
        :type behaviors: list or str
        :param int start: start timestamp.
        :param int end: end timestamp.
        :return: entity documents with input behaviors.
        :rtype: list
        """

        query = PBehaviorManager.get_query(behaviors)

        entity_ids = ensure_iterable(entity_ids)

        result = pbm.values(
            sources=entity_ids, query=query, dtstart=start, dtend=end
        )

        return result
Example 35
    def find(
        self,
        names=None,
        meta=None,
        sort=None,
        limit=-1,
        skip=0,
        with_meta=False
    ):
        if names is not None:
            names = ensure_iterable(names)

        result = []

        for _, _, files in os.walk(self.uri):
            for filename in files:
                # ignore meta files
                if not filename.endswith(UnixFileStorage.META_EXT):
                    if names is not None and filename not in names:
                        continue

                    if meta is not None:
                        metadata = self.get_meta(filename)

                        if not check(meta, metadata):
                            continue

                    result.append(filename)

        result = result[skip:limit]
        result = [open(filename) for filename in result]

        if with_meta:
            result = [(f, self.get_meta(f.name)) for f in result]

        if sort is not None:
            raise NotImplementedError('sort is not yet supported')

        return result
Example 36
    def load(self, path):
        try:
            loaded = []

            for fpath in os.listdir(path):
                if fpath.endswith('.json'):
                    fullpath = os.path.join(path, fpath)

                    with open(fullpath) as f:
                        data = ensure_iterable(json.load(f))

                    loaded += data

        except Exception as err:
            self.logger.error(u'Unable to load JSON files "{0}": {1}'.format(
                path,
                err
            ))

            loaded = []

        return loaded
Example 37
def send_events(ws, am, events, exchange='canopsis.events'):
    events = ensure_iterable(events)

    sent_events = []
    failed_events = []
    retry_events = []

    for event in events:
        if not is_valid(ws, event):
            ws.logger.error("event {}/{} is invalid".format(
                event.get("resource"), event.get("component")))
            failed_events.append(event)
            continue

        try:
            transformed_event = transform_event(ws, am, event)
        except Exception as e:
            ws.logger.error('Failed to transform event : {}'.format(e))
            failed_events.append(event)
            continue

        try:
            ws.amqp_pub.canopsis_event(transformed_event, exchange)
            sent_events.append(transformed_event)

        except KeyError as exc:
            ws.logger.error('bad event: {}'.format(exc))
            failed_events.append(transformed_event)

        except AmqpPublishError as exc:
            ws.logger.error('publish error: {}'.format(exc))
            retry_events.append(transformed_event)

    return {
        'sent_events': sent_events,
        'failed_events': failed_events,
        'retry_events': retry_events
    }
Example 38
    def get_alarms(
            self,
            resolved=True,
            tags=None,
            exclude_tags=None,
            timewindow=None,
            snoozed=False
    ):
        """
        Get alarms from TimedStorage.

        :param resolved: If ``True``, returns only resolved alarms, else
                         returns only unresolved alarms (default: ``True``).
        :type resolved: bool

        :param tags: Tags which must be set on alarm (optional)
        :type tags: str or list

        :param exclude_tags: Tags which must not be set on alarm (optional)
        :type exclude_tags: str or list

        :param timewindow: Time Window used for fetching (optional)
        :type timewindow: canopsis.timeserie.timewindow.TimeWindow

        :param snoozed: If ``False``, return all non-snoozed alarms, else
                        returns alarms even if they are snoozed.
        :type snoozed: bool

        :returns: Iterable of alarms matching: {alarm_id: [alarm_dict]}
        """

        query = {}

        if resolved:
            query['resolved'] = {'$ne': None}

        else:
            query['resolved'] = None

        tags_cond = None

        if tags is not None:
            tags_cond = {'$all': ensure_iterable(tags)}

        notags_cond = None

        if exclude_tags is not None:
            notags_cond = {'$not': {'$all': ensure_iterable(exclude_tags)}}

        if tags_cond is None and notags_cond is not None:
            query['tags'] = notags_cond

        elif tags_cond is not None and notags_cond is None:
            query['tags'] = tags_cond

        elif tags_cond is not None and notags_cond is not None:
            query = {'$and': [
                query,
                {'tags': tags_cond},
                {'tags': notags_cond}
            ]}

        # used to fetch alarms that were never snoozed OR alarms for which the snooze has expired
        if not snoozed:
            no_snooze_cond = {
                '$or': [
                    {AlarmField.snooze.value: None},
                    {'snooze.val': {'$lte': int(time())}}
                ]
            }
            query = {'$and': [query, no_snooze_cond]}

        alarms_by_entity = self.alerts_storage.find(
            _filter=query,
            timewindow=timewindow
        )

        for entity_id, alarms in alarms_by_entity.items():
            entity = self.context_manager.get_entities_by_id(entity_id)
            try:
                entity = entity[0]
            except IndexError:
                entity = {}

            entity['entity_id'] = entity_id
            for alarm in alarms:
                alarm['entity'] = entity

        return alarms_by_entity
Example 39
class Formulas(object):
    """Class that reads formulas and parse it using EBNF grammar"""
    # map operator symbols to corresponding arithmetic operations
    global epsilon
    epsilon = 1e-12
    opn = {
        "+": operator.add,
        "-": operator.sub,
        "*": operator.mul,
        "/": operator.truediv,
        "^": operator.pow
    }

    fn = {
        "sin": math.sin,
        "cos": math.cos,
        "tan": math.tan,
        "abs": abs,
        "trunc": lambda a: int(a),
        "round": round,
        "max": lambda l: max(float(i) for i in l),
        "min": lambda l: min(float(i) for i in ensure_iterable(l)),
        "sum": lambda l: sum(float(i) for i in l),
        "sgn": lambda a: abs(a) > epsilon and ((a > 0) - (a < 0)) or 0
    }

    def __init__(self, _dict=None):
        self.exprStack = []
        self._bnf = None
        self._dict = _dict  # the dictionary of variable values, e.g. {'x': 2}
        self.variables = _dict

    def push_first(self, strg, loc, toks):
        '''
        Define an action to apply on the matched tokens
        :param strg: is the original parse string
        :param loc: is the location in the string where matching started
        :param toks: is the list of the matched tokens
        '''
        self.exprStack.append(toks[0])

    def push_minus(self, strg, loc, toks):
        '''
        Define an action to apply on the matched tokens
        :param strg: is the original parse string.
        :param loc: is the location in the string where matching started.
        :param toks: is the list of the matched tokens.
        '''
        if toks and toks[0] == '-':
            self.exprStack.append('unary -')

    def _import(self, _dict):
        '''
        Set variables data.
        :param _dict: variables and their values.
        '''
        self._dict = _dict

    def reset(self):
        '''
        Reset the variables and their values.
        '''
        self._dict = {}

    def bnf(self):
        '''
        The BNF grammar is defined below.
        expop   :: '^'
        multop  :: '*' | '/'
        addop   :: '+' | '-'
        integer :: ['+' | '-'] '0'..'9'+
        atom    :: PI | E | real | fn '(' expr ')' | '(' expr ')'
        factor  :: atom [ expop factor ]*
        term    :: factor [ multop factor ]*
        expr    :: term [ addop term ]*
        '''
        if not self._bnf:
            point = Literal(".")
            e = CaselessLiteral("E")
            fnumber = Combine(
                Word("+-" + nums, nums) +
                Optional(point + Optional(Word(nums))) +
                Optional(e + Word("+-" + nums, nums)))
            ident = Word(alphas, alphas + nums + "_$")
            minus = Literal("-")
            plus = Literal("+")
            div = Literal("/")
            mult = Literal("*")
            rpar = Literal(")").suppress()
            lpar = Literal("(").suppress()
            addop = plus | minus
            multop = mult | div
            expop = Literal("^")
            pi = CaselessLiteral("PI")

            expr = Forward()
            atom = (Optional("-") +
                    (pi | e | fnumber | ident + lpar + delimitedList(expr) +
                     rpar).setParseAction(self.push_first) |
                    (lpar + expr.suppress() + rpar)).setParseAction(
                        self.push_minus)

            # The right way to define exponentiation is -> 2^3^2 = 2^(3^2),
            # not (2^3)^2.
            factor = Forward()
            factor << atom + ZeroOrMore(
                (expop + factor).setParseAction(self.push_first))

            term = factor + ZeroOrMore(
                (multop + factor).setParseAction(self.push_first))
            expr << term + ZeroOrMore(
                (addop + term).setParseAction(self.push_first))
            self._bnf = expr
        return self._bnf

    def evaluate_parsing(self, parsing_result):
        '''
        Recursively evaluate the stack of tokens produced by parsing.
        '''
        op = parsing_result.pop()
        if op == 'unary -':
            return -self.evaluate_parsing(parsing_result)

        if op in "+-*/^":
            op2 = self.evaluate_parsing(parsing_result)
            op1 = self.evaluate_parsing(parsing_result)
            return self.opn[op](op1, op2)

        elif op.lower() == "pi":
            return math.pi  # 3.1415926535

        elif op.lower() == "e":
            return math.e  # 2.718281828

        elif op.lower() in self.fn:
            t_op = op.lower()
            if t_op in ('max', 'min', 'sum'):
                if type(parsing_result) is list:
                    return self.fn[t_op](parsing_result)

                return self.fn[t_op](self.evaluate_parsing(parsing_result))

            return self.fn[op](self.evaluate_parsing(parsing_result))

        elif re.search('^[a-zA-Z][a-zA-Z0-9_]*$', op):
            if op in self._dict:
                return self._dict[op]

            else:
                return 0

        elif op[0].isalpha():
            return 0

        else:
            return float(op)

    def evaluate(self, formula):
        '''
        Evaluate the formula
        '''
        if self._dict is not None:
            for k, v in self._dict.iteritems():
                formula = formula.replace(str(k), str(v))

        self.exprStack = []  # reset the stack before each eval
        try:
            results = self.bnf().parseString(formula)
        except ParseException:
            results = ['Parse Failure', formula]
        if len(results) == 0 or results[0] == 'Parse Failure':
            return 'Parse Failure-{}'.format(formula)
        val = self.evaluate_parsing(self.exprStack[:])
        return val
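
A usage sketch of the class above (variable names and values are illustrative; note that evaluate substitutes variables textually before parsing):

f = Formulas({'x': 2, 'y': 3})
f.evaluate('x + y * 2')    # -> 8.0
f.evaluate('(x + y) ^ 2')  # -> 25.0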
Example 40
    def delete(self, names):

        names = ensure_iterable(names)
        for name in names:
            self.gridfs.delete(file_id=name)
Example 41
    def get_alarms(self,
                   resolved=True,
                   tags=None,
                   exclude_tags=None,
                   timewindow=None,
                   snoozed=False):
        """
        Get alarms from TimedStorage.

        :param resolved: If ``True``, returns only resolved alarms, else
                         returns only unresolved alarms (default: ``True``).
        :type resolved: bool

        :param tags: Tags which must be set on alarm (optional)
        :type tags: str or list

        :param exclude_tags: Tags which must not be set on alarm (optional)
        :type exclude_tags: str or list

        :param timewindow: Time Window used for fetching (optional)
        :type timewindow: canopsis.timeserie.timewindow.TimeWindow

        :param snoozed: If ``False``, return all non-snoozed alarms, else
                        returns alarms even if they are snoozed.
        :type snoozed: bool

        :returns: Iterable of alarms matching: {alarm_id: [alarm_dict]}
        """

        query = {}

        if resolved:
            query['resolved'] = {'$ne': None}

        else:
            query['resolved'] = None

        tags_cond = None

        if tags is not None:
            tags_cond = {'$all': ensure_iterable(tags)}

        notags_cond = None

        if exclude_tags is not None:
            notags_cond = {'$not': {'$all': ensure_iterable(exclude_tags)}}

        if tags_cond is None and notags_cond is not None:
            query['tags'] = notags_cond

        elif tags_cond is not None and notags_cond is None:
            query['tags'] = tags_cond

        elif tags_cond is not None and notags_cond is not None:
            query = {
                '$and': [query, {
                    'tags': tags_cond
                }, {
                    'tags': notags_cond
                }]
            }

        # used to fetch alarms that were never snoozed OR alarms for which the snooze has expired
        if not snoozed:
            no_snooze_cond = {
                '$or': [{
                    AlarmField.snooze.value: None
                }, {
                    'snooze.val': {
                        '$lte': int(time())
                    }
                }]
            }
            query = {'$and': [query, no_snooze_cond]}

        alarms_by_entity = self.alerts_storage.find(_filter=query,
                                                    timewindow=timewindow)

        for entity_id, alarms in alarms_by_entity.items():
            entity = self.context_manager.get_entities_by_id(entity_id)
            try:
                entity = entity[0]
            except IndexError:
                entity = {}

            entity['entity_id'] = entity_id
            for alarm in alarms:
                alarm['entity'] = entity

        return alarms_by_entity
Example 42
    def get_alarms(
            self,
            resolved=True,
            tags=None,
            exclude_tags=None,
            timewindow=None,
            snoozed=False
    ):
        """
        Get alarms from TimedStorage.

        :param resolved: If ``True``, returns only resolved alarms, else
                         returns only unresolved alarms (default: ``True``).
        :type resolved: bool

        :param tags: Tags which must be set on alarm (optional)
        :type tags: str or list

        :param exclude_tags: Tags which must not be set on alarm (optional)
        :type exclude_tags: str or list

        :param timewindow: Time Window used for fetching (optional)
        :type timewindow: canopsis.timeserie.timewindow.TimeWindow

        :param snoozed: If ``False``, return all non-snoozed alarms, else
                        returns alarms even if they are snoozed.
        :type snoozed: bool

        :returns: Iterable of alarms matching
        """

        query = {}

        if resolved:
            query['resolved'] = {'$ne': None}

        else:
            query['resolved'] = None

        tags_cond = None

        if tags is not None:
            tags_cond = {'$all': ensure_iterable(tags)}

        notags_cond = None

        if exclude_tags is not None:
            notags_cond = {'$not': {'$all': ensure_iterable(exclude_tags)}}

        if tags_cond is None and notags_cond is not None:
            query['tags'] = notags_cond

        elif tags_cond is not None and notags_cond is None:
            query['tags'] = tags_cond

        elif tags_cond is not None and notags_cond is not None:
            query = {'$and': [
                query,
                {'tags': tags_cond},
                {'tags': notags_cond}
            ]}

        if not snoozed:
            no_snooze_cond = {'$or': [
                    {'snooze': None},
                    {'snooze.val': {'$lte': int(time())}}
                ]
            }
            query = {'$and': [query, no_snooze_cond]}

        alarms_by_entity = self[Alerts.ALARM_STORAGE].find(
            _filter=query,
            timewindow=timewindow
        )

        cm = Context()
        for entity_id, alarms in alarms_by_entity.items():
            entity = cm.get_entity_by_id(entity_id)
            entity['entity_id'] = entity_id
            for alarm in alarms:
                alarm['entity'] = entity

        return alarms_by_entity
Example 43
def get_records(ws, namespace, ctype=None, _id=None, **params):
    options = {
        "limit": 20,
        "start": 0,
        "search": None,
        "filter": None,
        "sort": None,
        "query": None,
        "onlyWritable": False,
        "noInternal": False,
        "ids": [],
        "multi": None,
        "fields": {},
    }

    for key in options.keys():
        options[key] = params.get(key, options[key])

    # Ensure sort always evaluates to list
    sort = options["sort"]

    if not sort:
        sort = []

    else:
        sort = ensure_iterable(sort)

    if isinstance(sort, basestring):  # NOQA
        try:
            sort = loads(sort)
        except ValueError as json_error:
            ws.logger.warning("Unable to parse sort field : {} {}".format(sort, json_error))
            sort = []

    # Generate MongoDB sorting query
    msort = [
        (item["property"], 1 if item["direction"] == "DESC" else -1)
        for item in sort
        if item.get("property", None) is not None
    ]

    # Generate MongoDB filter
    mfilter = {}

    if isinstance(options["filter"], list):
        for item in options["filter"]:
            mfilter[item["property"]] = item["value"]

    elif isinstance(options["filter"], dict):
        mfilter = options["filter"]

    if options["multi"]:
        mfilter["crecord_type"] = {"$in": options["multi"].split(",")}

    elif ctype:
        mfilter["crecord_type"] = ctype

    if options["query"]:
        # FIXME: bad query can't be indexed
        mfilter["crecord_name"] = {"$regex": ".*{0}.*".format(options["query"]), "$options": "i"}

    if options["search"]:
        # FIXME: bad query can't be indexed
        mfilter["_id"] = {"$regex": ".*{0}.*".format(options["search"]), "$options": "i"}

    ids = options["ids"] if not _id else _id.split(",")

    # Perform query
    total = 0
    records = []

    if len(ids) > 0:
        try:
            records = ws.db.get(ids, namespace=namespace)

        except KeyError:
            records = []

        if isinstance(records, Record):
            records = [records]
            total = 1

        elif isinstance(records, list):
            total = len(records)

        else:
            total = 0

        if total == 0:
            return HTTPError(404, "IDs not found: {0}".format(ids))

    else:
        records, total = ws.db.find(
            mfilter, sort=msort, limit=options["limit"], offset=options["start"], with_total=True, namespace=namespace
        )

    # Generate output
    output = []
    noInternal = options["noInternal"]

    for record in records:
        if record:
            # TODO: make use of onlyWritable
            # This can be done with canopsis.old.account, but the goal is to
            # use the new permissions/rights system to do it.

            dump = record.data.get("internal", False) if noInternal else True

            if dump:
                data = record.dump(json=True)
                data["id"] = data["_id"]

                if "next_run_time" in data:
                    data["next_run_time"] = str(data["next_run_time"])

                # TODO: Handle projection in ws.db.find()
                if options["fields"]:
                    for item in data.keys():
                        if item not in options["fields"]:
                            del data[item]

                output.append(data)

    with open("/tmp/lolog", "a+") as fh:
        fh.write("output={},total={}\n".format(output, total))

    return output, total
Example 44
def get_records(ws, namespace, ctype=None, _id=None, **params):
    options = {
        'limit': 20,
        'start': 0,
        'search': None,
        'filter': None,
        'sort': None,
        'query': None,
        'onlyWritable': False,
        'noInternal': False,
        'ids': [],
        'multi': None,
        'fields': {}
    }

    for key in options.keys():
        options[key] = params.get(key, options[key])

    # Ensure sort always evaluates to list
    sort = options['sort']

    if not sort:
        sort = []

    else:
        sort = ensure_iterable(sort)

    if isinstance(sort, basestring):
        try:
            sort = json.loads(sort)
        except ValueError as json_error:
            ws.logger.warning('Unable to parse sort field : {} {}'.format(
                sort, json_error
            ))
            sort = []

    # Generate MongoDB sorting query
    msort = [
        (
            item['property'],
            1 if item['direction'] == 'DESC' else -1
        )
        for item in sort if item.get('property', None) is not None
    ]

    # Generate MongoDB filter
    mfilter = {}

    if isinstance(options['filter'], list):
        for item in options['filter']:
            mfilter[item['property']] = item['value']

    elif isinstance(options['filter'], dict):
        mfilter = options['filter']

    if options['multi']:
        mfilter['crecord_type'] = {
            '$in': options['multi'].split(',')
        }

    elif ctype:
        mfilter['crecord_type'] = ctype

    if options['query']:
        # FIXME: bad query can't be indexed
        mfilter['crecord_name'] = {
            '$regex': '.*{0}.*'.format(options['query']),
            '$options': 'i'
        }

    if options['search']:
        # FIXME: bad query can't be indexed
        mfilter['_id'] = {
            '$regex': '.*{0}.*'.format(options['search']),
            '$options': 'i'
        }

    ids = options['ids'] if not _id else _id.split(',')

    # Perform query
    total = 0
    records = []

    if len(ids) > 0:
        try:
            records = ws.db.get(ids, namespace=namespace)

        except KeyError:
            records = []

        if isinstance(records, Record):
            records = [records]
            total = 1

        elif isinstance(records, list):
            total = len(records)

        else:
            total = 0

        if total == 0:
            return HTTPError(404, 'IDs not found: {0}'.format(ids))

    else:
        records, total = ws.db.find(
            mfilter,
            sort=msort,
            limit=options['limit'],
            offset=options['start'],
            with_total=True,
            namespace=namespace
        )

    # Generate output
    output = []
    noInternal = options['noInternal']

    for record in records:
        if record:
            # TODO: make use of onlyWritable
            # This can be done with canopsis.old.account, but the goal is to
            # use the new permissions/rights system to do it.

            dump = record.data.get('internal', False) if noInternal else True

            if dump:
                data = record.dump(json=True)
                data['id'] = data['_id']

                if 'next_run_time' in data:
                    data['next_run_time'] = str(data['next_run_time'])

                # TODO: Handle projection in ws.db.find()
                if options['fields']:
                    for item in data.keys():
                        if item not in options['fields']:
                            del data[item]

                output.append(data)

    return output, total
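
For reference, the shape of the sort option implied by the list comprehension above (the field name is illustrative); each entry needs a 'property' and a 'direction' key:

params = {'sort': [{'property': 'crecord_name', 'direction': 'ASC'}]}
# -> msort == [('crecord_name', -1)]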
Example 45
def get_records(ws, namespace, ctype=None, _id=None, **params):
    options = {
        'limit': 20,
        'start': 0,
        'search': None,
        'filter': None,
        'sort': None,
        'query': None,
        'onlyWritable': False,
        'noInternal': False,
        'ids': [],
        'multi': None,
        'fields': {}
    }

    for key in options.keys():
        options[key] = params.get(key, options[key])

    # Ensure sort always evaluates to list
    sort = options['sort']

    if not sort:
        sort = []

    else:
        sort = ensure_iterable(sort)

    # Generate MongoDB sorting query
    msort = [(item['property'], 1 if item['direction'] == 'DESC' else -1)
             for item in sort if item.get('property', None) is not None]

    # Generate MongoDB filter
    mfilter = {}

    if isinstance(options['filter'], list):
        for item in options['filter']:
            mfilter[item['property']] = item['value']

    elif isinstance(options['filter'], dict):
        mfilter = options['filter']

    if options['multi']:
        mfilter['crecord_type'] = {'$in': options['multi'].split(',')}

    elif ctype:
        mfilter['crecord_type'] = ctype

    if options['query']:
        # FIXME: bad query can't be indexed
        mfilter['crecord_name'] = {
            '$regex': '.*{0}.*'.format(options['query']),
            '$options': 'i'
        }

    if options['search']:
        # FIXME: bad query can't be indexed
        mfilter['_id'] = {
            '$regex': '.*{0}.*'.format(options['search']),
            '$options': 'i'
        }

    ids = options['ids'] if not _id else _id.split(',')

    # Perform query
    total = 0
    records = []

    if len(ids) > 0:
        try:
            records = ws.db.get(ids, namespace=namespace)

        except KeyError:
            records = []

        if isinstance(records, Record):
            records = [records]
            total = 1

        elif isinstance(records, list):
            total = len(records)

        # TODO: this cannot work
        if total == 0:
            return HTTPError(404, 'IDs not found: {0}'.format(ids))

    else:
        records, total = ws.db.find(mfilter,
                                    sort=msort,
                                    limit=options['limit'],
                                    offset=options['start'],
                                    with_total=True,
                                    namespace=namespace)

    # Generate output
    output = []
    noInternal = options['noInternal']

    for record in records:
        if record:
            # TODO: make use of onlyWritable
            # This can be done with canopsis.old.account, but the goal is to
            # use the new permissions/rights system to do it.

            dump = record.data.get('internal', False) if noInternal else True

            if dump:
                data = record.dump(json=True)
                data['id'] = data['_id']

                if 'next_run_time' in data:
                    data['next_run_time'] = str(data['next_run_time'])

                # TODO: Handle projection in ws.db.find()
                if options['fields']:
                    for item in data.keys():
                        if item not in options['fields']:
                            del data[item]

                output.append(data)

    return output, total