def __setitem__(self, name, vals):
        """
        Set a parameter
        """
        if name not in self._rd:
            raise KeyError(name)
        context = self._pdict.get_context(name)
        if self._shp is None: # Not initialized:
            if isinstance(vals, np.ndarray):
                self._shp = vals.shape
            elif isinstance(vals, list):
                self._shp = (len(vals),)
            else:
                raise BadRequest('No shape was defined')

            log.trace('Set shape to %s', self._shp)
        else:
            if isinstance(vals, np.ndarray):
                validate_equal(vals.shape, self._shp, 'Invalid shape on input')
            elif isinstance(vals, list):
                validate_equal(len(vals), self._shp[0], 'Invalid shape on input')

        dom = self.domain
        paramval = get_value_class(context.param_type, domain_set = dom)
        paramval[:] = vals
        paramval.storage._storage.flags.writeable = False
        self._rd[name] = paramval
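A minimal usage sketch of the setter above; the record container and parameter names are illustrative, not taken from the source:

    import numpy as np

    rdt = RecordDictionaryTool(param_dictionary=pdict)   # hypothetical construction of the container
    rdt['temp'] = np.array([10.1, 10.2, 10.3])           # first assignment fixes the shape to (3,)
    rdt['pressure'] = [100.0, 101.0, 102.0]              # later assignments must match that shape
    rdt['depth'] = np.arange(5)                          # fails shape validation (5 != 3)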
Example #2
 def get_event(self, event_id):
     """
     Returns the event object for given event_id or raises NotFound
     """
     log.trace("Retrieving persistent event for id=%s", event_id)
     event_obj = self.event_store.read(event_id)
     return event_obj
    def outgoing(self, invocation):

        log.trace("PolicyInterceptor.outgoing: %s", invocation.get_arg_value('process', invocation))


        #Check for a field with the ResourceId decorator. If found, set resource-id in the header
        #to that field's value; or, if the decorator specifies a field within an object, use the
        #object's field value (i.e. _id).
        try:
            if isinstance(invocation.message, IonObjectBase):
                decorator = 'ResourceId'
                field = invocation.message.find_field_for_decorator(decorator)
                if field is not None and hasattr(invocation.message,field):
                    deco_value = invocation.message.get_decorator_value(field, decorator)
                    if deco_value:
                        #Assume that if there is a value, then it is specifying a field in the object
                        fld_value = getattr(invocation.message,field)
                        if getattr(fld_value, deco_value) is not None:
                            invocation.headers['resource-id'] = getattr(fld_value, deco_value)
                    else:
                        if getattr(invocation.message,field) is not None:
                            invocation.headers['resource-id'] = getattr(invocation.message,field)

        except Exception as ex:
            log.exception(ex)
Example #4
 def map_cov_rdt(cls, coverage, rdt, field, slice_):
     log.trace('Slice is %s', slice_)
     try:
         n = coverage.get_parameter_values(field, tdoa=slice_)
     except ParameterFunctionException:
         return
     if n is None:
         rdt[field] = [n]
     elif isinstance(n, np.ndarray):
         if coverage.get_data_extents(field)[0] < coverage.num_timesteps:
             log.error(
                 "Misformed coverage detected, padding with fill_value")
             arr_len = utils.slice_shape(slice_,
                                         (coverage.num_timesteps, ))[0]
             fill_arr = np.empty(arr_len - n.shape[0], dtype=n.dtype)
             fill_arr.fill(coverage.get_parameter_context(field).fill_value)
             n = np.append(n, fill_arr)
         elif coverage.get_data_extents(field)[0] > coverage.num_timesteps:
             raise CorruptionError(
                 'The coverage is corrupted:\n\tfield: %s\n\textents: %s\n\ttimesteps: %s'
                 % (field, coverage.get_data_extents(field),
                    coverage.num_timesteps))
         rdt[field] = np.atleast_1d(n)
     else:
         rdt[field] = [n]
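The fill_value padding branch above can be shown in isolation with plain numpy; the values and fill value are made up:

    import numpy as np

    n = np.array([1.0, 2.0, 3.0])                       # values actually returned by the coverage
    arr_len = 5                                         # length the slice implies over num_timesteps
    fill_arr = np.empty(arr_len - n.shape[0], dtype=n.dtype)
    fill_arr.fill(-9999.0)                              # hypothetical fill_value from the parameter context
    n = np.append(n, fill_arr)                          # -> array([1., 2., 3., -9999., -9999.])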
Example #6
    def publish_event_object(self, event_object):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param event_object     the event object to be published
        @retval event_object    the event object which was published
        """
        assert event_object

        topic = self._topic(event_object)
        to_name = (self._send_name.exchange, topic)
        log.trace("Publishing event message to %s", to_name)

        current_time = int(get_ion_ts())

        #Ensure valid created timestamp if supplied
        if event_object.ts_created:

            if not is_valid_ts(event_object.ts_created):
                raise BadRequest("The ts_created value is not a valid timestamp: '%s'" % (event_object.ts_created))

            #Reject events that are too far in the future
            if int(event_object.ts_created) > ( current_time + VALID_EVENT_TIME_PERIOD ):
                raise BadRequest("This ts_created value is too far in the future:'%s'" % (event_object.ts_created))

            #Reject events that are older than specified time
            if int(event_object.ts_created) < (current_time - VALID_EVENT_TIME_PERIOD) :
                raise BadRequest("This ts_created value is too old:'%s'" % (event_object.ts_created))

        else:
            event_object.ts_created = str(current_time)

        #Validate this object
        #TODO - enable this once the resource agent issue sending a dict is figured out
        #event_object._validate()

        #Ensure the event object has a unique id
        if '_id' in event_object:
            raise BadRequest("The event object cannot contain a _id field '%s'" % (event_object))

        #Generate a unique ID for this event
        event_object._id = create_unique_event_id()

        try:
            self.publish(event_object, to_name=to_name)
        except Exception as ex:
            log.exception("Failed to publish event (%s): '%s'" % (ex.message, event_object))
            raise

        try:
            # store published event but only if we specified an event_repo
            if PERSIST_ON_PUBLISH and self.event_repo:
                self.event_repo.put_event(event_object)
        except Exception as ex:
            log.exception("Failed to store published event (%s): '%s'" % (ex.message, event_object))
            raise

        return event_object
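A hedged usage sketch for the publisher above; the event class and field values are illustrative, since concrete event types come from the ION object definitions:

    pub = EventPublisher()                               # assumes the publisher is already wired to an exchange
    event = ResourceModifiedEvent(origin='res_1')        # any Event subclass with an origin; name is made up
    published = pub.publish_event_object(event)          # assigns ts_created and _id, publishes, may persist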
Example #7
    def find_events(self,
                    event_type=None,
                    origin=None,
                    start_ts=None,
                    end_ts=None,
                    id_only=False,
                    **kwargs):
        """
        Returns an ordered list of event objects for given query arguments.
        Return format is list of (event_id, event_key, event object) tuples
        """
        log.trace(
            "Retrieving persistent event for event_type=%s, origin=%s, start_ts=%s, end_ts=%s, descending=%s, limit=%s",
            event_type, origin, start_ts, end_ts,
            kwargs.get("descending", None), kwargs.get("limit", None))
        events = None

        design_name = "event"
        view_name = None
        start_key = []
        end_key = []
        if origin and event_type:
            view_name = "by_origintype"
            start_key = [origin, event_type]
            end_key = [origin, event_type]
        elif origin:
            view_name = "by_origin"
            start_key = [origin]
            end_key = [origin]
        elif event_type:
            view_name = "by_type"
            start_key = [event_type]
            end_key = [event_type]
        elif start_ts or end_ts:
            view_name = "by_time"
            start_key = []
            end_key = []
        else:
            view_name = "by_time"
            if kwargs.get("limit", 0) < 1:
                kwargs["limit"] = 100
                log.warn(
                    "Querying all events, no limit given. Set limit to 100")

        if start_ts:
            start_key.append(start_ts)
        if end_ts:
            end_key.append(end_ts)

        events = self.event_store.find_by_view(design_name,
                                               view_name,
                                               start_key=start_key,
                                               end_key=end_key,
                                               id_only=id_only,
                                               **kwargs)
        return events
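A sketch of a typical query against this method, assuming event_repo is an instance of the containing repository; with both arguments set, the by_origintype view above is used:

    results = event_repo.find_events(event_type='ResourceModifiedEvent',
                                     origin='res_1',
                                     descending=True, limit=50)
    for event_id, event_key, event_obj in results:
        log.info("%s %s", event_id, event_obj.ts_created)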
    def incoming(self,invocation):

        if not self.enabled:
            return invocation

        log.trace("GovernanceInterceptor.incoming: %s", invocation.get_arg_value('process',invocation))

        if self.governance_controller is not None:
            self.governance_controller.process_incoming_message(invocation)

        return invocation
Example #9
 def put_event(self, event):
     """
     Places an event object into the event repository. Retains event_ids if existing.
     Returns event_id of new event.
     """
     log.trace("Store event persistently %s", event)
     if not isinstance(event, Event):
         raise BadRequest("event must be type Event, not %s" % type(event))
     event_id = event.__dict__.pop("_id", None)
     new_event_id, _ = self.event_store.create(event, event_id)
     return new_event_id
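A small usage sketch; the event construction is hypothetical:

    event = ResourceModifiedEvent(origin='res_1')        # any Event subclass; name made up for illustration
    new_id = event_repo.put_event(event)                 # stores the event and returns the id assigned by the store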
Example #11
 def publish(self, msg, stream_id='', stream_route=None):
     '''
     Encapsulates and publishes a message; the message is sent to either the specified
     stream/route or the stream/route specified at instantiation
     '''
     xp = self.xp
     log.trace('Exchange: %s', xp.exchange)
     if stream_route:
         xp = self.container.ex_manager.create_xp(stream_route.exchange_point)
     else:
         stream_route = self.stream_route
     log.trace('Publishing (%s,%s)', xp.exchange, stream_route.routing_key)
     super(StreamPublisher,self).publish(msg, to_name=xp.create_route(stream_route.routing_key), headers={'exchange_point':stream_route.exchange_point, 'stream':stream_id or self.stream_id})
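A minimal publishing sketch; the constructor arguments and the granule are assumptions rather than source code:

    publisher = StreamPublisher(process=proc, stream_id=stream_id)   # assumed constructor signature
    publisher.publish(granule)                                       # uses the route given at instantiation
    publisher.publish(granule, stream_route=other_route)             # or override the exchange point per call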
Example #12
    def close_impl(self):
        """
        Closes the AMQP connection.
        """
        log.trace("BaseChannel.close_impl (%s)", self.get_channel_id())
        if self._transport:

            # the close destroys self._transport, so keep a ref here
            transport = self._transport
            with self._ensure_transport():
                transport.close()

            # set to None now so nothing else tries to use the channel during the callback
            self._transport = None
Example #14
 def publish(self, msg, stream_id='', stream_route=None):
     '''
     Encapsulates and publishes a message; the message is sent to either the specified
     stream/route or the stream/route specified at instantiation
     '''
     xp = self.xp
     xp_route = self.xp_route
     log.trace('Exchange: %s', xp.exchange)
     if stream_route:
         xp = self.container.ex_manager.create_xp(stream_route.exchange_point)
         xp_route = xp.create_route(stream_route.routing_key)
     else:
         stream_route = self.stream_route
     log.trace('Publishing (%s,%s)', xp.exchange, stream_route.routing_key)
     super(StreamPublisher,self).publish(msg, to_name=xp_route, headers={'exchange_point':stream_route.exchange_point, 'stream':stream_id or self.stream_id})
Example #15
    def find_events(self, event_type=None, origin=None, start_ts=None, end_ts=None, id_only=False, **kwargs):
        log.trace(
            "Retrieving persistent event for event_type=%s, origin=%s, start_ts=%s, end_ts=%s, descending=%s, limit=%s",
            event_type,
            origin,
            start_ts,
            end_ts,
            kwargs.get("descending", None),
            kwargs.get("limit", None),
        )
        events = None

        design_name = "event"
        view_name = None
        start_key = []
        end_key = []
        if origin and event_type:
            view_name = "by_origintype"
            start_key = [origin, event_type]
            end_key = [origin, event_type]
        elif origin:
            view_name = "by_origin"
            start_key = [origin]
            end_key = [origin]
        elif event_type:
            view_name = "by_type"
            start_key = [event_type]
            end_key = [event_type]
        elif start_ts or end_ts:
            view_name = "by_time"
            start_key = []
            end_key = []
        else:
            view_name = "by_time"
            if kwargs.get("limit", 0) < 1:
                kwargs["limit"] = 100
                log.warn("Querying all events, no limit given. Set limit to 100")

        if start_ts:
            start_key.append(start_ts)
        if end_ts:
            end_key.append(end_ts)

        events = self.event_store.find_by_view(
            design_name, view_name, start_key=start_key, end_key=end_key, id_only=id_only, **kwargs
        )
        return events
Example #16
    def _set(self, name, vals):
        """
        Set a parameter
        """
        if name not in self.fields:
            raise KeyError(name)

        if vals is None:
            self._rd[name] = None
            return
        context = self._pdict.get_context(name)

        if self._shp is None and isinstance(
                context.param_type,
            (SparseConstantType, ConstantType, ConstantRangeType)):
            self._shp = (1, )
            self._dirty_shape = True

        elif self._shp is None or self._dirty_shape:
            if isinstance(vals, np.ndarray):
                self._shp = (vals.shape[0], )  # Only support 1-d right now
            elif isinstance(vals, list):
                self._shp = (len(vals), )
            else:
                raise BadRequest('No shape was defined')

            log.trace('Set shape to %s', self._shp)
            if self._dirty_shape:
                self._dirty_shape = False
                self._reshape_const()

        else:
            if isinstance(vals, np.ndarray):
                if not vals.shape:
                    raise BadRequest('Invalid shape on input (dimensionless)')
                validate_equal(
                    vals.shape[0], self._shp[0],
                    'Invalid shape on input (%s expecting %s)' %
                    (vals.shape, self._shp))
            elif isinstance(vals, list):
                validate_equal(len(vals), self._shp[0],
                               'Invalid shape on input')

        dom = self.domain
        paramval = self.get_paramval(context.param_type, dom, vals)
        self._rd[name] = paramval
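The dirty-shape handling above means a constant-type parameter set first does not pin the record length; roughly, with hypothetical parameter names:

    rdt['offset'] = [2.5]            # constant type: shape provisionally (1,), marked dirty
    rdt['time'] = np.arange(10)      # first array value fixes the shape to (10,) and reshapes the constant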
    def incoming(self, invocation):

        if "process" in invocation.args:
            log.trace(
                "ConversationMonitorInterceptor.incoming: %s" % invocation.get_arg_value("process", invocation).name
            )
        else:
            log.trace("ConversationMonitorInterceptor.incoming: %s" % invocation)

        invocation.message_annotations[
            GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION
        ] = GovernanceDispatcher.STATUS_STARTED

        conv_msg_type = invocation.headers.get("conv-msg-type", None)
        self_principal = invocation.headers.get("receiver-role", None)
        target_principal = invocation.headers.get("sender-role", None)

        op_type = LocalType.RESV
        if conv_msg_type and self_principal and target_principal:
            # if self_principal:
            #    target_principal, sender_type = invocation.get_message_sender()
            #    target_principal_queue = invocation.get_message_sender_queue()
            #    op_type = LocalType.RESV;
            #
            #    if target_principal=='Unknown':
            #        target_principal = target_principal_queue
            #        self._check(invocation, op_type, self_principal, target_principal)
            #    else: self._check(invocation, op_type, self_principal, target_principal, target_principal_queue)

            self._check(invocation, op_type, self_principal, target_principal)

            if (
                invocation.message_annotations[GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION]
                == GovernanceDispatcher.STATUS_STARTED
            ):
                invocation.message_annotations[
                    GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION
                ] = GovernanceDispatcher.STATUS_COMPLETE
        else:
            self._report_error(
                invocation,
                GovernanceDispatcher.STATUS_SKIPPED,
                "The message cannot be monitored since the conversation roles are not in the headers",
            )

        return invocation
Example #18
    def check_content(self, key, value, content_types):
        split_content_types = []
        if ',' in content_types:
            split_content_types = content_types.split(',')
        else:
            split_content_types.append(content_types)
        log.trace("split_content_types: %s", split_content_types)

        for content_type in split_content_types:
            if type(value).__name__ == content_type.strip():
                return

            # Check for inheritance
            if self.check_inheritance_chain(type(value), content_type):
                return

        raise AttributeError('Invalid value type %s in field "%s.%s", should be one of "%s"' %
                (str(value), type(self).__name__, key, content_types))
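A hedged illustration of the check: a decorator value such as "int, float" accepts either type name, or a subclass via the inheritance check; the call below is made up:

    validator.check_content('depth', 5, 'int, float')       # passes: type name 'int' matches after strip()
    validator.check_content('depth', 'five', 'int, float')  # raises AttributeError: 'str' is not in the list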
 def map_cov_rdt(cls, coverage, rdt, field, slice_):
     log.trace('Slice is %s', slice_)
     try:
         n = coverage.get_parameter_values(field, tdoa=slice_)
     except ParameterFunctionException:
         return
     if n is None:
         rdt[field] = [n]
     elif isinstance(n, np.ndarray):
         if coverage.get_data_extents(field)[0] < coverage.num_timesteps:
             log.error("Misformed coverage detected, padding with fill_value")
             arr_len = utils.slice_shape(slice_, (coverage.num_timesteps,))[0]
             fill_arr = np.empty(arr_len - n.shape[0], dtype=n.dtype)
             fill_arr.fill(coverage.get_parameter_context(field).fill_value)
             n = np.append(n, fill_arr)
         elif coverage.get_data_extents(field)[0] > coverage.num_timesteps:
             raise CorruptionError('The coverage is corrupted:\n\tfield: %s\n\textents: %s\n\ttimesteps: %s' % (field, coverage.get_data_extents(field), coverage.num_timesteps))
         rdt[field] = np.atleast_1d(n)
     else:
         rdt[field] = [n]
    def _set(self, name, vals):
        """
        Set a parameter
        """
        if name not in self.fields:
            raise KeyError(name)

        if vals is None:
            self._rd[name] = None
            return
        context = self._pdict.get_context(name)

        if self._shp is None and isinstance(context.param_type, (SparseConstantType, ConstantType, ConstantRangeType)):
            self._shp = (1,)
            self._dirty_shape = True

        elif self._shp is None or self._dirty_shape:
            if isinstance(vals, np.ndarray):
                self._shp = (vals.shape[0],)  # Only support 1-d right now
            elif isinstance(vals, list):
                self._shp = (len(vals),)
            else:
                raise BadRequest("No shape was defined")

            log.trace("Set shape to %s", self._shp)
            if self._dirty_shape:
                self._dirty_shape = False
                self._reshape_const()

        else:
            if isinstance(vals, np.ndarray):
                if not vals.shape:
                    raise BadRequest("Invalid shape on input (dimensionless)")
                validate_equal(
                    vals.shape[0], self._shp[0], "Invalid shape on input (%s expecting %s)" % (vals.shape, self._shp)
                )
            elif isinstance(vals, list):
                validate_equal(len(vals), self._shp[0], "Invalid shape on input")

        # paramval = self.get_paramval(context.param_type, dom, vals)
        self._rd[name] = vals
Example #21
    def _routing_call(self, call, context, *callargs, **callkwargs):
        """
        Endpoints call into here to synchronize across the entire IonProcess.

        Returns immediately with an AsyncResult that can be waited on. Calls
        are made by the loop in _control_flow. We pass in the calling greenlet so
        exceptions are raised in the correct context.

        @param  call        The call to be made within this ION processes' calling greenlet.
        @param  callargs    The keyword args to pass to the call.
        @param  context     Optional process-context (usually the headers of the incoming call) to be
                            set. Process-context is greenlet-local, and since we're crossing greenlet
                            boundaries, we must set it again in the ION process' calling greenlet.
        """
        ar = AsyncResult()

        if len(callargs) == 0 and len(callkwargs) == 0:
            log.trace("_routing_call got no arguments for the call %s, check your call's parameters", call)

        self._ctrl_queue.put((greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
        return ar
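A sketch of how a caller would use the returned AsyncResult, with a hypothetical callable; the real callers sit in the process endpoint layer:

    ar = ion_process._routing_call(handler_fn, None, request_arg)
    result = ar.get(timeout=10)      # blocks this greenlet until the _control_flow loop has run the call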
    def _set(self, name, vals):
        """
        Set a parameter
        """
        if name not in self.fields:
            raise KeyError(name)

        if vals is None:
            self._rd[name] = None
            return
        context = self._pdict.get_context(name)

        if self._shp is None and (isinstance(context.param_type, ConstantType) or isinstance(context.param_type, ConstantRangeType)):
            self._shp = (1,)
            self._dirty_shape = True
        
        elif self._shp is None or self._dirty_shape:
            if isinstance(vals, np.ndarray):
                self._shp = vals.shape
            elif isinstance(vals, list):
                self._shp = (len(vals),)
            else:
                raise BadRequest('No shape was defined')

            log.trace('Set shape to %s', self._shp)
            if self._dirty_shape:
                self._dirty_shape = False
                self._reshape_const()

        else:
            if isinstance(vals, np.ndarray):
                if not vals.shape:
                    raise BadRequest('Invalid shape on input (dimensionless)')
                validate_equal(vals.shape[0], self._shp[0], 'Invalid shape on input (%s expecting %s)' % (vals.shape, self._shp))
            elif isinstance(vals, list):
                validate_equal(len(vals), self._shp[0], 'Invalid shape on input')

        dom = self.domain
        paramval = self.get_paramval(context.param_type, dom, vals)
        self._rd[name] = paramval
Example #23
    def publish_event_object(self, event_object):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param event_object     the event object to be published
        @retval event_object    the event object which was published
        """
        assert event_object

        topic = self._topic(event_object)
        to_name = (self._send_name.exchange, topic)
        log.trace("Publishing %s event message %s:%s -> %s", event_object.type_, event_object.origin_type, event_object.origin, to_name)

        current_time = int(get_ion_ts())

        #Ensure valid created timestamp if supplied
        if event_object.ts_created:

            if not is_valid_ts(event_object.ts_created):
                raise BadRequest("The ts_created value is not a valid timestamp: '%s'" % (event_object.ts_created))

            #Reject events that are too far in the future
            if int(event_object.ts_created) > ( current_time + VALID_EVENT_TIME_PERIOD ):
                raise BadRequest("This ts_created value is too far in the future:'%s'" % (event_object.ts_created))

            #Reject events that are older than specified time
            if int(event_object.ts_created) < (current_time - VALID_EVENT_TIME_PERIOD) :
                raise BadRequest("This ts_created value is too old:'%s'" % (event_object.ts_created))

        else:
            event_object.ts_created = str(current_time)

        #Validate this object - ideally the validator should pass on problems, but for now just log
        #any errors and keep going, since seeing invalid situations is better than skipping validation.
        try:
            event_object._validate()
        except Exception as e:
            log.exception(e)
Example #24
    def _publish_event(self, event_msg, origin, event_type=None):
        event_type = event_type or self.event_type or event_msg._get_type()
        assert origin and event_type

        topic = self._topic(event_type, origin, base_types=event_msg.base_types,
            sub_type=event_msg.sub_type, origin_type=event_msg.origin_type)
        to_name = (self._send_name.exchange, topic)
        log.trace("Publishing event message to %s", to_name)

        try:
            self.publish(event_msg, to_name=to_name)
        except Exception as ex:
            log.exception("Failed to publish event '%s'" % (event_msg))
            return False

        try:
            # store published event but only if we specified an event_repo
            if PERSIST_ON_PUBLISH and self.event_repo:
                self.event_repo.put_event(event_msg)
        except Exception as ex:
            log.exception("Failed to store published event '%s'" % (event_msg))
            return False

        return True
Example #25
    def _routing_call(self, call, context, *callargs, **callkwargs):
        """
        Endpoints call into here to synchronize across the entire IonProcess.

        Returns immediately with an AsyncResult that can be waited on. Calls
        are made by the loop in _control_flow. We pass in the calling greenlet so
        exceptions are raised in the correct context.

        @param  call        The call to be made within this ION processes' calling greenlet.
        @param  callargs    The keyword args to pass to the call.
        @param  context     Optional process-context (usually the headers of the incoming call) to be
                            set. Process-context is greenlet-local, and since we're crossing greenlet
                            boundaries, we must set it again in the ION process' calling greenlet.
        """
        ar = AsyncResult()

        if len(callargs) == 0 and len(callkwargs) == 0:
            log.trace(
                "_routing_call got no arguments for the call %s, check your call's parameters",
                call)

        self._ctrl_queue.put(
            (greenlet.getcurrent(), ar, call, callargs, callkwargs, context))
        return ar
    def outgoing(self, invocation):

        if "process" in invocation.args:
            log.trace(
                "ConversationMonitorInterceptor.outgoing: %s" % invocation.get_arg_value("process", invocation).name
            )
        else:
            log.trace("ConversationMonitorInterceptor.outgoing: %s" % invocation)

        invocation.message_annotations[
            GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION
        ] = GovernanceDispatcher.STATUS_STARTED

        conv_msg_type = invocation.headers.get("conv-msg-type", None)
        self_principal = invocation.headers.get("sender-role", None)  # TODO - should these be set to default values?
        target_principal = invocation.headers.get("receiver-role", None)
        op_type = LocalType.SEND

        if conv_msg_type and self_principal and target_principal:
            #    target_principal = invocation.get_message_receiver()
            #    op_type = LocalType.SEND;
            self._check(invocation, op_type, self_principal, target_principal)
            if (
                invocation.message_annotations[GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION]
                == GovernanceDispatcher.STATUS_STARTED
            ):
                invocation.message_annotations[
                    GovernanceDispatcher.CONVERSATION__STATUS_ANNOTATION
                ] = GovernanceDispatcher.STATUS_COMPLETE
        else:
            self._report_error(
                invocation,
                GovernanceDispatcher.STATUS_SKIPPED,
                "The message cannot be monitored since the conversation roles are not in the headers",
            )
        return invocation
Example #28
 def put_event(self, event):
     log.trace("Store event persistently %s", event)
     if not isinstance(event, Event):
         raise BadRequest("event must be type Event, not %s" % type(event))
     return self.event_store.create(event)
Example #29
 def get_event(self, event_id):
     log.trace("Retrieving persistent event for id=%s", event_id)
     event_obj = self.event_store.read(event_id)
     return event_obj
Example #30
    def incoming(self, invocation):

        log.trace("PolicyInterceptor.incoming: %s", invocation.get_arg_value('process', invocation))

        #print "========"
        #print invocation.headers

        #If missing the performative header, consider it as a failure message.
        msg_performative = invocation.get_header_value('performative', 'failure')
        message_format = invocation.get_header_value('format', '')
        op = invocation.get_header_value('op', 'unknown')
        process_type = invocation.get_invocation_process_type()
        sender, sender_type = invocation.get_message_sender()

        #TODO - This should be removed once better process security is implemented
        #This fix assumes that all messages that do not specify an actor id are TRUSTED within the system
        policy_loaded = CFG.get_safe('system.load_policy', False)
        if policy_loaded:
            actor_id = invocation.get_header_value('ion-actor-id', None)
        else:
            actor_id = invocation.get_header_value('ion-actor-id', 'anonymous')

        #Only check messages marked as the initial rpc request - TODO - remove the "actor_id is not None" check once headless processes have actor_ids
        if msg_performative == 'request' and actor_id is not None:

            receiver = invocation.get_message_receiver()

            #Can't check policy if the controller is not initialized
            if self.governance_controller is None:
                log.debug("Skipping policy check for %s(%s) since governance_controller is None", receiver, op)
                invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_SKIPPED
                return invocation

            #No need to check for requests from the system actor - should increase performance during startup
            if actor_id == self.governance_controller.system_actor_id:
                log.debug("Skipping policy check for %s(%s) for the system actor", receiver, op)
                invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_SKIPPED
                return invocation


            #Check to see if there is an AlwaysVerifyPolicy decorator
            always_verify_policy = False
            if is_ion_object(message_format):
                try:
                    msg_class = message_classes[message_format]
                    always_verify_policy = has_class_decorator(msg_class,'AlwaysVerifyPolicy')
                except Exception:
                    pass

            #For services only - if this is a sub RPC request from a higher level service that has already been validated and set a token
            #then skip checking policy yet again - should help with performance and to simplify policy
            #All calls from the RMS must be checked
            if not always_verify_policy and process_type == 'service' and sender != 'resource_management' and self.has_valid_token(invocation, PERMIT_SUB_CALLS):
                log.debug("Skipping policy check for service call %s %s since token is valid", receiver, op)
                #print "skipping call to " + receiver + " " + op + " from " + actor_id + " process_type: " + process_type
                invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_SKIPPED
                return invocation

            log.debug("Checking request for %s: %s(%s) from %s  ", process_type, receiver, op, actor_id)

            #Annotate that the message has started policy checking
            invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_STARTED

            ret = None

            #First check for Org boundary policies if the container is configured as such
            org_id = self.governance_controller.get_container_org_boundary_id()
            if org_id is not None:
                ret = self.governance_controller.policy_decision_point_manager.check_resource_request_policies(invocation, org_id)

            if str(ret) != Decision.DENY_STR:
                #Next check endpoint process specific policies
                if process_type == 'agent':
                    ret = self.governance_controller.policy_decision_point_manager.check_agent_request_policies(invocation)

                elif process_type == 'service':
                    ret = self.governance_controller.policy_decision_point_manager.check_service_request_policies(invocation)

            log.debug("Policy Decision: %s", ret)

            #Annotate that the message has completed policy checking
            invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_COMPLETE

            if ret is not None:
                if str(ret) == Decision.DENY_STR:
                    self.annotate_denied_message(invocation)
                else:
                    self.permit_sub_rpc_calls_token(invocation)

        else:
            invocation.message_annotations[GovernanceDispatcher.POLICY__STATUS_ANNOTATION] = GovernanceDispatcher.STATUS_SKIPPED

        return invocation
Example #31
 def put_event(self, event):
     log.trace("Store event persistently %s", event)
     if not isinstance(event, Event):
         raise BadRequest("event must be type Event, not %s" % type(event))
     event_id = event.__dict__.pop("_id", None)
     return self.event_store.create(event, event_id)
Example #32
    def publish_event_object(self, event_object):
        """
        Publishes an event of given type for the given origin. Event_type defaults to an
        event_type set when initializing the EventPublisher. Other kwargs fill out the fields
        of the event. This operation will fail with an exception.
        @param event_object     the event object to be published
        @retval event_object    the event object which was published
        """
        assert event_object

        topic = self._topic(event_object)
        to_name = (self._send_name.exchange, topic)
        log.trace("Publishing event message to %s", to_name)

        current_time = int(get_ion_ts())

        #Ensure valid created timestamp if supplied
        if event_object.ts_created:

            if not is_valid_ts(event_object.ts_created):
                raise BadRequest(
                    "The ts_created value is not a valid timestamp: '%s'" %
                    (event_object.ts_created))

            #Reject events that are too far in the future
            if int(event_object.ts_created) > (current_time +
                                               VALID_EVENT_TIME_PERIOD):
                raise BadRequest(
                    "This ts_created value is too far in the future:'%s'" %
                    (event_object.ts_created))

            #Reject events that are older than specified time
            if int(event_object.ts_created) < (current_time -
                                               VALID_EVENT_TIME_PERIOD):
                raise BadRequest("This ts_created value is too old:'%s'" %
                                 (event_object.ts_created))

        else:
            event_object.ts_created = str(current_time)

        #Validate this object
        #TODO - enable this once the resource agent issue sending a dict is figured out
        #event_object._validate()

        #Ensure the event object has a unique id
        if '_id' in event_object:
            raise BadRequest(
                "The event object cannot contain a _id field '%s'" %
                (event_object))

        #Generate a unique ID for this event
        event_object._id = create_unique_event_id()

        try:
            self.publish(event_object, to_name=to_name)
        except Exception as ex:
            log.exception("Failed to publish event (%s): '%s'" %
                          (ex.message, event_object))
            raise

        try:
            # store published event but only if we specified an event_repo
            if PERSIST_ON_PUBLISH and self.event_repo:
                self.event_repo.put_event(event_object)
        except Exception as ex:
            log.exception("Failed to store published event (%s): '%s'" %
                          (ex.message, event_object))
            raise

        return event_object