def _expect_from_root(self, p_root):
        """
        Start an event subscriber to the given root platform.
        To be called before any action that triggers publications from the root
        platform. It sets self._wait_root_event to a function to be called to
        wait for the event.
        """
        async_result = AsyncResult()

        # subscribe:
        event_type = "DeviceAggregateStatusEvent"

        def consume_event(evt, *args, **kwargs):
            async_result.set(evt)

        sub = EventSubscriber(event_type=event_type,
                              origin=p_root.platform_device_id,
                              callback=consume_event)

        sub.start()
        self._data_subscribers.append(sub)
        sub._ready_event.wait(timeout=CFG.endpoint.receive.timeout)

        log.debug("registered for DeviceAggregateStatusEvent")

        # set new wait function:
        def wait():
            root_evt = async_result.get(timeout=CFG.endpoint.receive.timeout)
            return root_evt

        self._wait_root_event = wait
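A minimal usage sketch of the subscribe-then-trigger-then-wait pattern above; the triggering helper is the _publish_for_child shown later in these examples, and the status names passed to it are placeholders:

    def _example_trigger_and_wait(self, p_root, child_obj):
        # subscribe BEFORE triggering so the event cannot be missed:
        self._expect_from_root(p_root)

        # trigger: publish a fake status event for a child; the root platform
        # should react by publishing its own DeviceAggregateStatusEvent
        # ('AGGREGATE_COMMS' / 'STATUS_WARNING' are placeholder values):
        self._publish_for_child(child_obj, 'AGGREGATE_COMMS', 'STATUS_WARNING')

        # block until the root's event arrives (or timeout):
        evt = self._wait_root_event()
        self.assertEqual('DeviceAggregateStatusEvent', evt.type_)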
Example #2
    def create_mpl_graphs_data_process_definition(self):

        #First look to see if it exists and if not, then create it
        dpd,_ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='mpl_graphs_transform')
        if len(dpd) > 0:
            return dpd[0]

        #Data Process Definition
        log.debug("Create data process definition MatplotlibGraphsTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
            name='mpl_graphs_transform',
            description='Convert data streams to Matplotlib graphs',
            module='ion.processes.data.transforms.viz.matplotlib_graphs',
            class_name='VizTransformMatplotlibGraphs')
        try:
            procdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except Exception as ex:
            self.fail("failed to create new VizTransformMatplotlibGraphs data process definition: %s" %ex)


        pdict_id = self.datasetclient.read_parameter_dictionary_by_name('graph_image_param_dict',id_only=True)
        # create a stream definition for the data
        stream_def_id = self.pubsubclient.create_stream_definition(name='VizTransformMatplotlibGraphs', parameter_dictionary_id=pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(stream_def_id, procdef_id, binding='graph_image_param_dict' )

        return procdef_id
Example #3
    def create_resources_snapshot(self, persist=False, filename=None):
        ds = CouchDataStore(DataStore.DS_RESOURCES, profile=DataStore.DS_PROFILE.RESOURCES, config=CFG, scope=self.sysname)
        all_objs = ds.find_docs_by_view("_all_docs", None, id_only=False)

        log.info("Found %s objects in datastore resources", len(all_objs))

        resources = {}
        associations = {}
        snapshot = dict(resources=resources, associations=associations)

        for obj_id, key, obj in all_objs:
            if obj_id.startswith("_design"):
                continue
            if not isinstance(obj, dict):
                raise Inconsistent("Object of bad type found: %s" % type(obj))
            obj_type = obj.get("type_", None)
            if obj_type == "Association":
                associations[obj_id] = obj.get("ts", None)
            elif obj_type:
                resources[obj_id] = obj.get("ts_updated", None)
            else:
                raise Inconsistent("Object with no type_ found: %s" % obj)

        if persist:
            dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
            path = filename or "interface/rrsnapshot_%s.json" % dtstr
            snapshot_json = json.dumps(snapshot)
            with open(path, "w") as f:
                #yaml.dump(snapshot, f, default_flow_style=False)
                f.write(snapshot_json)

        log.debug("Created resource registry snapshot. %s resources, %s associations", len(resources), len(associations))

        return snapshot
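A hedged sketch of reading back a persisted snapshot; the filename below is a made-up instance of the rrsnapshot_<timestamp>.json pattern used above:

    import json

    with open("interface/rrsnapshot_20130401_120000.json") as f:
        snapshot = json.load(f)

    # snapshot["resources"] maps resource ID -> ts_updated;
    # snapshot["associations"] maps association ID -> ts
    print len(snapshot["resources"]), len(snapshot["associations"])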
Example #4
    def consume_event(self, evt, *args, **kwargs):
        """
        Test callback for events.
        """
        log.debug('Test got event: %s, args: %s, kwargs: %s',
                  str(evt), str(args), str(kwargs))

        if evt.type_ == 'PublicPlatformTelemetryEvent':
            self._telem_evts.append(evt)
            if self._no_telem_evts > 0 and self._no_telem_evts == len(self._telem_evts):
                self._done_telem_evt.set()

        elif evt.type_ == 'RemoteQueueModifiedEvent':
            self._queue_mod_evts.append(evt)
            if self._no_queue_mod_evts > 0 and self._no_queue_mod_evts == len(self._queue_mod_evts):
                self._done_queue_mod_evt.set()

        elif evt.type_ == 'RemoteCommandTransmittedEvent':
            self._cmd_tx_evts.append(evt)
            if self._no_cmd_tx_evts > 0 and self._no_cmd_tx_evts == len(self._cmd_tx_evts):
                self._done_cmd_tx_evt.set()

        elif evt.type_ == 'RemoteCommandResult':
            cmd = evt.command
            self._results_recv[cmd.command_id] = cmd
            if len(self._results_recv) == self._no_requests:
                self._done_cmd_evt.set()
    def _check_computed_attributes_of_extended_product(self, expected_data_product_id = '', extended_data_product = None):

        self.assertEqual(expected_data_product_id, extended_data_product._id)
        log.debug("extended_data_product.computed: %s", extended_data_product.computed)

        # Verify that computed attributes exist for the extended instrument
        self.assertIsInstance(extended_data_product.computed.product_download_size_estimated, ComputedFloatValue)
        self.assertIsInstance(extended_data_product.computed.number_active_subscriptions, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_url, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.stored_data_size, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.recent_granules, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.parameters, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.recent_events, ComputedEventListValue)

        self.assertIsInstance(extended_data_product.computed.provenance, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.user_notification_requests, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.active_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.past_user_subscriptions, ComputedListValue)
        self.assertIsInstance(extended_data_product.computed.last_granule, ComputedDictValue)
        self.assertIsInstance(extended_data_product.computed.is_persisted, ComputedIntValue)
        self.assertIsInstance(extended_data_product.computed.data_contents_updated, ComputedStringValue)
        self.assertIsInstance(extended_data_product.computed.data_datetime, ComputedListValue)

        # exact text here keeps changing to fit UI capabilities.  keep assertion general...
        self.assertEqual( 2, len(extended_data_product.computed.data_datetime.value) )

        notifications = extended_data_product.computed.user_notification_requests.value

        notification = notifications[0]
        self.assertEqual(expected_data_product_id, notification.origin)
        self.assertEqual("data product", notification.origin_type)
        self.assertEqual('DetectionEvent', notification.event_type)
    def handle_attribute_value_event(self, driver_event):
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s",
                      self._platform_id, self._pp.pformat(driver_event.vals_dict))

        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())

        stream_name = driver_event.stream_name

        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn('%r: no publisher configured for stream_name=%r. '
                     'Configured streams are: %s',
                     self._platform_id, stream_name, self._data_publishers.keys())
            return

        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]

        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)

        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)
    def _construct_stream_and_publisher(self, stream_name, stream_config):

        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            log.trace("%r: _construct_stream_and_publisher: "
                      "stream_name:%r, stream_config:\n%s",
                      self._platform_id, stream_name,
                      self._pp.pformat(stream_config))

        decoder = IonObjectDeserializer(obj_registry=get_obj_registry())

        if 'stream_def_dict' not in stream_config:
            # should not happen: PlatformAgent._validate_configuration validates this.
            log.error("'stream_def_dict' key not in configuration for stream %r" % stream_name)
            return

        stream_def_dict = stream_config['stream_def_dict']
        stream_def_dict['type_'] = 'StreamDefinition'
        stream_def_obj = decoder.deserialize(stream_def_dict)
        self._stream_defs[stream_name] = stream_def_obj

        routing_key           = stream_config['routing_key']
        stream_id             = stream_config['stream_id']
        exchange_point        = stream_config['exchange_point']
        parameter_dictionary  = stream_def_dict['parameter_dictionary']
        log.debug("%r: got parameter_dictionary from stream_def_dict", self._platform_id)

        self._data_streams[stream_name] = stream_id
        self._param_dicts[stream_name] = ParameterDictionary.load(parameter_dictionary)
        stream_route = StreamRoute(exchange_point=exchange_point, routing_key=routing_key)
        publisher = self._create_publisher(stream_id, stream_route)
        self._data_publishers[stream_name] = publisher

        log.debug("%r: created publisher for stream_name=%r", self._platform_id, stream_name)
    def _create_notification(self, user_name = '', instrument_id='', product_id=''):
        #--------------------------------------------------------------------------------------
        # Make notification request objects
        #--------------------------------------------------------------------------------------

        notification_request_1 = NotificationRequest(   name= 'notification_1',
            origin=instrument_id,
            origin_type="instrument",
            event_type='ResourceLifecycleEvent')

        notification_request_2 = NotificationRequest(   name='notification_2',
            origin=product_id,
            origin_type="data product",
            event_type='DetectionEvent')

        #--------------------------------------------------------------------------------------
        # Create a user and get the user_id
        #--------------------------------------------------------------------------------------

        user = UserInfo()
        user.name = user_name
        user.contact.email = '%s@example.com' % user_name  # placeholder domain; original address redacted

        user_id, _ = self.rrclient.create(user)

        #--------------------------------------------------------------------------------------
        # Create notification
        #--------------------------------------------------------------------------------------

        self.usernotificationclient.create_notification(notification=notification_request_1, user_id=user_id)
        self.usernotificationclient.create_notification(notification=notification_request_2, user_id=user_id)
        log.debug( "test_activateInstrumentSample: create_user_notifications user_id %s", str(user_id) )

        return user_id
    def go_active(self):
        """
        The main task here is to determine the topology of platforms
        rooted here and then assign the corresponding definition to self._nnode.

        @raise PlatformConnectionException
        """

        # NOTE: The following log.debug DOES NOT show up when running a test
        # with the pycc plugin (--with-pycc)!  (noticed with test_oms_launch).
        log.debug("%r: going active..", self._platform_id)

        # note, we ping the OMS here regardless of the source for the network
        # definition:
        self.ping()

        if self._topology:
            self._nnode = self._build_network_definition_using_topology()
        else:
            self._nnode = self._build_network_definition_using_oms()

        log.debug("%r: go_active completed ok. _nnode:\n%s",
                 self._platform_id, self._nnode.dump())

        self.__gen_diagram()
    def _publish_to_transform(self, stream_id = '', stream_route = None):

        pub = StandaloneStreamPublisher(stream_id, stream_route)
        publish_granule = self._get_new_ctd_L0_packet(stream_definition_id=self.in_stream_def_id_for_L0, length = 5)
        pub.publish(publish_granule)

        log.debug("Published the following granule: %s", publish_granule)
    def _construct_fsm(self, states=PlatformDriverState,
                       events=PlatformDriverEvent,
                       enter_event=PlatformDriverEvent.ENTER,
                       exit_event=PlatformDriverEvent.EXIT):
        """
        Constructs the FSM for the driver. The preparations here are mostly
        related with the UNCONFIGURED, DISCONNECTED, and CONNECTED state
        transitions, with some common handlers for the CONNECTED state.
        Subclasses can override to indicate specific parameters and add new
        handlers (typically for the CONNECTED state).
        """
        log.debug("constructing base platform driver FSM")

        self._fsm = ThreadSafeFSM(states, events, enter_event, exit_event)

        for state in PlatformDriverState.list():
            self._fsm.add_handler(state, enter_event, self._common_state_enter)
            self._fsm.add_handler(state, exit_event, self._common_state_exit)

        # UNCONFIGURED state event handlers:
        self._fsm.add_handler(PlatformDriverState.UNCONFIGURED, PlatformDriverEvent.CONFIGURE, self._handler_unconfigured_configure)

        # DISCONNECTED state event handlers:
        self._fsm.add_handler(PlatformDriverState.DISCONNECTED, PlatformDriverEvent.CONNECT, self._handler_disconnected_connect)
        self._fsm.add_handler(PlatformDriverState.DISCONNECTED, PlatformDriverEvent.DISCONNECT, self._handler_disconnected_disconnect)

        # CONNECTED state event handlers:
        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.DISCONNECT, self._handler_connected_disconnect)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.CONNECTION_LOST, self._handler_connected_connection_lost)

        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.PING, self._handler_connected_ping)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.GET, self._handler_connected_get)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.SET, self._handler_connected_set)
        self._fsm.add_handler(PlatformDriverState.CONNECTED, PlatformDriverEvent.EXECUTE, self._handler_connected_execute)
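As the docstring notes, a subclass would typically chain up to this base _construct_fsm and then register handlers of its own, usually for the CONNECTED state. A sketch under that assumption; the event name and handler below are illustrative, not taken from the source:

    class MyPlatformDriver(PlatformDriver):

        def _construct_fsm(self, states=PlatformDriverState,
                           events=PlatformDriverEvent,
                           enter_event=PlatformDriverEvent.ENTER,
                           exit_event=PlatformDriverEvent.EXIT):
            # build the common UNCONFIGURED/DISCONNECTED/CONNECTED machinery:
            PlatformDriver._construct_fsm(self, states, events,
                                          enter_event, exit_event)

            # subclass-specific CONNECTED handler (names illustrative):
            self._fsm.add_handler(PlatformDriverState.CONNECTED,
                                  events.TURN_ON_PORT,
                                  self._handler_connected_turn_on_port)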
    def __init__(self, agent):
        self._agent = agent

        self._platform_id = agent._platform_id
        self.resource_id  = agent.resource_id
        self._pp          = agent._pp
        self.CFG          = agent.CFG

        # Dictionaries used for data publishing.
        self._data_streams = {}
        self._param_dicts = {}
        self._stream_defs = {}
        self._data_publishers = {}

        self._connection_ID = None
        self._connection_index = {}

        # Set of parameter names received in event notifications but not
        # configured. Allows the corresponding warning to be logged only once.
        self._unconfigured_params = set()

        stream_info = self.CFG.get('stream_config', None)
        if stream_info is None:
            # should not happen: PlatformAgent._validate_configuration validates this.
            log.error("%r: No stream_config given in CFG", self._platform_id)
            return

        for stream_name, stream_config in stream_info.iteritems():
            self._construct_stream_and_publisher(stream_name, stream_config)

        log.debug("%r: PlatformAgentStreamPublisher complete", self._platform_id)
    def _create_interval_timer_with_end_time(self, timer_interval=None, end_time=None):
        '''
        A convenience method to set up an interval timer with an end time
        '''
        self.timer_received_time = 0
        self.timer_interval = timer_interval

        start_time = self.now_utc()
        if not end_time:
            end_time = start_time + 2 * timer_interval + 1

        log.debug("got the end time here!! %s" % end_time)

        # Set up the interval timer. The scheduler will publish event with origin set as "Interval Timer"
        sid = self.ssclient.create_interval_timer(start_time="now",
            interval=self.timer_interval,
            end_time=end_time,
            event_origin="Interval Timer",
            event_subtype="")

        def cleanup_timer(scheduler, schedule_id):
            """
            Do a friendly cancel of the scheduled event.
            If it fails, it's ok.
            """
            try:
                scheduler.cancel_timer(schedule_id)
            except Exception:
                log.warn("Couldn't cancel")

        self.addCleanup(cleanup_timer, self.ssclient, sid)

        return sid
    def _stop_pending_timers(self):
        """
        Safely stops all pending and active timers.

        For all timers still waiting to run, calls kill on them. For active timers, let
        them exit naturally and prevent the reschedule by setting the _no_reschedule flag.
        """
        # prevent reschedules
        self._no_reschedule = True

        gls = []
        for timer_id in self.schedule_entries:
            spawns = self.__get_spawns(timer_id)

            for spawn in spawns:
                gls.append(spawn)
                # only kill spawns that haven't started yet
                if spawn._start_event is not None:
                    spawn.kill()

            log.debug("_stop_pending_timers: timer %s deleted", timer_id)

        self.schedule_entries.clear()

        # wait for running gls to finish up
        gevent.joinall(gls, timeout=10)

        # allow reschedules from here on out
        self._no_reschedule = False
Example #15
    def calculate_conductivity(self, l0, l1):
        CONDWAT_L0 = l0['conductivity']
        TEMPWAT_L1 = l1['temp']
        PRESWAT_L1 = l1['pressure']

        #------------  CALIBRATION COEFFICIENTS FOR CONDUCTIVITY  --------------
        g = self.cond_calibration_coeffs['G']
        h = self.cond_calibration_coeffs['H']
        I = self.cond_calibration_coeffs['I']
        j = self.cond_calibration_coeffs['J']
        CTcor = self.cond_calibration_coeffs['CTCOR']
        CPcor = self.cond_calibration_coeffs['CPCOR']

        log.debug('g %e, h %e, i %e, j %e, CTcor %e, CPcor %e', g, h, I, j, CTcor, CPcor)

        #------------  Computation -------------------------------------
        freq = (CONDWAT_L0 / 256000.0)
        numerator = (g + h * freq**2 + I * freq**3 + j * freq**4)
        denominator = (1 + CTcor * TEMPWAT_L1 + CPcor * PRESWAT_L1)
        log.debug('freq %s, cond = %s / %s', freq, numerator, denominator)
        CONDWAT_L1 = numerator / denominator
#        CONDWAT_L1 = (g + h * freq**2 + I * freq**3 + j * freq**4) / (1 + CTcor * TEMPWAT_L1 + CPcor * PRESWAT_L1)

        #------------------------------------------------------------------------
        # Update the conductivity values
        #------------------------------------------------------------------------
        return CONDWAT_L1
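A hedged usage sketch of the conductivity computation above; the owning class name, calibration coefficients, and raw values are placeholders, not real SBE calibration data:

    transform = SomeCTDTransform()  # hypothetical owner of cond_calibration_coeffs
    transform.cond_calibration_coeffs = {
        'G': -9.72e-1, 'H': 1.38e-1, 'I': -2.4e-4, 'J': 3.0e-5,   # placeholders
        'CTCOR': 3.25e-6, 'CPCOR': -9.57e-8,
    }
    l0 = {'conductivity': 1270000.0}           # raw counts (placeholder)
    l1 = {'temp': 10.0, 'pressure': 100.0}     # degC, dbar (placeholders)
    cond = transform.calculate_conductivity(l0, l1)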
    def _get_computed_events(self, events, add_usernames=True, include_events=False):
        """
        Get events for use in extended resource computed attribute
        @retval ComputedListValue with value list of 4-tuple with Event objects
        """
        events = events or []

        ret = IonObject(OT.ComputedEventListValue)
        ret.value = events
        ret.computed_list = [get_event_computed_attributes(event, include_event=include_events) for event in events]
        ret.status = ComputedValueAvailability.PROVIDED

        if add_usernames:
            try:
                actor_ids = {evt.actor_id for evt in events if evt.actor_id}
                log.debug("Looking up UserInfo for actors: %s" % actor_ids)
                if actor_ids:
                    userinfo_list, assoc_list = self.clients.resource_registry.find_objects_mult(actor_ids,
                                                                                                 predicate=PRED.hasInfo,
                                                                                                 id_only=False)
                    actor_map = {assoc.s: uinfo for uinfo, assoc in zip(userinfo_list, assoc_list)}

                    for evt, evt_cmp in zip(events, ret.computed_list):
                        ui = actor_map.get(evt.actor_id, None)
                        if ui:
                            evt_cmp["event_summary"] += " [%s %s]" % (ui.contact.individual_names_given, ui.contact.individual_name_family)

            except Exception as ex:
                log.exception("Cannot find user names for event actor_ids")

        return ret
    def connect(self, recursion=None):
        """
        Creates a CIOMSClient instance, does a ping to verify connection,
        and starts event dispatch.
        """
        # create CIOMSClient:
        oms_uri = self._driver_config['oms_uri']
        log.debug("%r: creating CIOMSClient instance with oms_uri=%r",
                  self._platform_id, oms_uri)
        self._rsn_oms = CIOMSClientFactory.create_instance(oms_uri)
        log.debug("%r: CIOMSClient instance created: %s",
                  self._platform_id, self._rsn_oms)

        # ping to verify connection:
        self.ping()

        # start event dispatch:
        self._start_event_dispatch()

        # TODO(OOIION-1495) review the following. Commented out for the moment.
        # Note, per the CI-OMS spec ports need to be turned OFF to then proceed
        # with connecting instruments. So we need to determine whether we
        # want to turn all ports ON in this "connect driver" operation,
        # and then add the logic to turn a port OFF before connecting
        # instruments, and then ON again; or, just do the OFF/ON logic in the
        # connect_instrument and disconnect_instrument operations,
        # but not here.
        """
Example #18
def test_plot_1():
    from coverage_model.test.examples import SimplexCoverage
    import matplotlib.pyplot as plt

    cov = SimplexCoverage.load('test_data/usgs.cov')

    log.debug('Plot the \'water_temperature\' and \'streamflow\' for all times')
    wtemp = cov.get_parameter_values('water_temperature')
    wtemp_pc = cov.get_parameter_context('water_temperature')
    sflow = cov.get_parameter_values('streamflow')
    sflow_pc = cov.get_parameter_context('streamflow')
    times = cov.get_parameter_values('time')
    time_pc = cov.get_parameter_context('time')

    fig = plt.figure()
    ax1 = fig.add_subplot(2,1,1)
    ax1.plot(times,wtemp)
    ax1.set_xlabel('{0} ({1})'.format(time_pc.name, time_pc.uom))
    ax1.set_ylabel('{0} ({1})'.format(wtemp_pc.name, wtemp_pc.uom))

    ax2 = fig.add_subplot(2,1,2)
    ax2.plot(times,sflow)
    ax2.set_xlabel('{0} ({1})'.format(time_pc.name, time_pc.uom))
    ax2.set_ylabel('{0} ({1})'.format(sflow_pc.name, sflow_pc.uom))

    plt.show(0)
    def _build_network_definition_using_topology(self):
        """
        Uses self._topology to build the network definition.
        """
        log.debug("%r: _build_network_definition_using_topology: %s",
            self._platform_id, self._topology)

        def build(platform_id, children):
            """
            Returns the root NNode for the given platform_id with its
            children according to the given list.
            """
            nnode = NNode(platform_id)
            if self._agent_device_map:
                self._set_attributes_and_ports_from_agent_device_map(nnode)

            log.debug('Created NNode for %r', platform_id)

            for subplatform_id in children:
                subplatform_children = self._topology.get(subplatform_id, [])
                sub_nnode = build(subplatform_id, subplatform_children)
                nnode.add_subplatform(sub_nnode)

            return nnode

        children = self._topology.get(self._platform_id, [])
        return build(self._platform_id, children)
    def __init__(self, platform_id, driver_config, parent_platform_id=None):
        """
        Creates an OmsPlatformDriver instance.

        @param platform_id Corresponding platform ID
        @param driver_config with required 'oms_uri' entry.
        @param parent_platform_id Platform ID of my parent, if any.
                    This is mainly used for diagnostic purposes
        """
        PlatformDriver.__init__(self, platform_id, driver_config, parent_platform_id)

        if 'oms_uri' not in driver_config:
            raise PlatformDriverException(msg="driver_config does not indicate 'oms_uri'")

        oms_uri = driver_config['oms_uri']
        log.debug("%r: creating OmsClient instance with oms_uri=%r",
            self._platform_id, oms_uri)
        self._oms = OmsClientFactory.create_instance(oms_uri)
        log.debug("%r: OmsClient instance created: %s",
            self._platform_id, self._oms)

        # TODO set-up configuration for notification of events associated
        # with values retrieved during platform resource monitoring

        # _monitors: dict { attr_id: OmsResourceMonitor }
        self._monitors = {}

        # we can instantiate this here as the actual HTTP server is
        # started via the corresponding method.
        self._event_listener = OmsEventListener(self._notify_driver_event)
    def _get_ports_using_agent_device_map(self):
        ports = {}
        for port_id, port in self._nnode.ports.iteritems():
            ports[port_id] = {'comms': port.comms, 'attrs': port.attrs}
        log.debug("%r: _get_ports_using_agent_device_map: %s",
                  self._platform_id, ports)
        return ports
    def _check_granule_from_L2_salinity_transform(self, ar = None):

        granule_from_transform = ar.get(timeout=20)
        log.debug("Got the following granule from the L2 transform: %s", granule_from_transform)

        # Check the algorithm being applied
        self._check_application_of_L2_salinity_algorithm(granule_from_transform)
Example #23
    def enqueue(self, msg):
        """
        Enqueue message for transmission to server.
        """
        log.debug('Client enqueueing message: %s.', msg)
        self._queue.append(msg)
        return len(self._queue)
    def validate_output_granule(self, msg, route, stream_id):
        self.assertIn( stream_id, self._output_stream_ids)

        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.debug('validate_output_granule  rdt: %s', rdt)
        sal_val = rdt['salinity']
        np.testing.assert_array_equal(sal_val, np.array([3]))
Example #25
    def _extract_granule_data(self, granules):
        """
        Pull all data out of all granules and return a dict of values
        """
        result = []
        for granule in granules:
            group = []
            log.debug("Granule: %s", granule)
            rdt = RecordDictionaryTool.load_from_granule(granule)

            # Store the data from each record
            for key, value in rdt.iteritems():
                for i in range(0, len(value)):
                    if len(group) <= i:
                        group.append({})
                    group[i][key] = value[i]

                    # Store the connection information for each record
                    if 'connection_index' not in group[i]:
                        group[i]['connection_index'] = granule.connection_index

                    if 'connection_id' not in group[i]:
                        group[i]['connection_id'] = granule.connection_id

            result += group

        log.debug("extracted granules: %s", pprint.pformat(result))

        return result
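For clarity, the result is one dict per record; e.g. a single granule whose RDT holds time=[0, 1] and temp=[10.1, 10.2] (values invented) would yield:

    # [
    #   {'time': 0, 'temp': 10.1, 'connection_index': 0, 'connection_id': 'c1'},
    #   {'time': 1, 'temp': 10.2, 'connection_index': 0, 'connection_id': 'c1'},
    # ]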
    def find_events(self, origin='', type='', min_datetime='', max_datetime='', limit=-1,
                    descending=False, skip=0, computed=False):
        """
        Returns a list of events that match the specified search criteria.
        If requested, can return a list of EventComputedAttributes with the
        event objects contained. Pagination arguments are supported.

        @param origin         str
        @param min_datetime   str  milliseconds
        @param max_datetime   str  milliseconds
        @param limit          int         (integer limiting the number of results (0 means unlimited))
        @param descending     boolean     (if True, reverse order (of production time) is applied, e.g. most recent first)
        @retval event_list    []
        """
        if limit == 0:
            limit = int(self.CFG.get_safe("service.user_notification.max_events_limit", 1000))
        if max_datetime == "now":
            max_datetime = get_ion_ts()

        event_tuples = self.container.event_repository.find_events(event_type=type, origin=origin,
                                                                   start_ts=min_datetime, end_ts=max_datetime,
                                                                   limit=limit, descending=descending, skip=skip)

        events = [item[2] for item in event_tuples]
        log.debug("find_events found %s events", len(events))

        if computed:
            computed_events = self._get_computed_events(events, include_events=True)
            events = computed_events.computed_list

        return events
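A hedged call sketch; the client handle (self.unsc) and the origin/type values are assumptions for illustration:

    # ten most recent events for a device, wrapped as computed attributes:
    events = self.unsc.find_events(origin=device_id,
                                   type='ResourceAgentStateEvent',
                                   max_datetime="now",
                                   limit=10,
                                   descending=True,
                                   computed=True)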
Example #27
    def _get_checksum(self, platform_id):
        # get checksum from RSN OMS:
        res = self._rsn_oms.get_checksum(platform_id)
        checksum = res[platform_id]
        if log.isEnabledFor(logging.DEBUG):
            log.debug("_rsn_oms: checksum: %s", checksum)
        return checksum
    def _publish_for_child(self, child_obj, status_name, status):
        """
        Publishes a DeviceAggregateStatusEvent from the given platform or
        instrument object.

        NOTE that we just publish on behalf of the child, but the statuses in
        the child itself are *not* set. This is OK for these tests; we just
        need that child's ancestors to react to the event.
        """

        if 'platform_device_id' in child_obj:
            origin = child_obj.platform_device_id
            origin_type = "PlatformDevice"
        else:
            origin = child_obj.instrument_device_id
            origin_type = "InstrumentDevice"

        # create and publish event from the given origin and type:
        evt = dict(event_type='DeviceAggregateStatusEvent',
                   origin_type=origin_type,
                   origin=origin,
                   description="Fake event for testing",
                   status_name=status_name,
                   status=status)

        log.debug("publishing for child %r: evt=%s", origin, evt)
        self._event_publisher.publish_event(**evt)
    def recv_packet(self, msg, stream_route, stream_id):
        '''
        The consumer callback to parse and manage the granule.
        The message is ACK'd once the function returns
        '''
        log.trace('received granule for stream %s', stream_id)

        if msg == {}:
            log.error('Received empty message from stream: %s', stream_id)
            return
        # Message validation
        if not isinstance(msg, Granule):
            log.error('Ingestion received a message that is not a granule: %s', msg)
            return


        rdt = RecordDictionaryTool.load_from_granule(msg)
        if rdt is None:
            log.error('Invalid granule (no RDT) for stream %s', stream_id)
            return
        if not len(rdt):
            log.debug('Empty granule for stream %s', stream_id)
            return

        self.persist_or_timeout(stream_id, rdt)
Example #30
    def create_salinity_doubler_data_process_definition(self):

        #First look to see if it exists and if not, then create it
        dpd,_ = self.rrclient.find_resources(restype=RT.DataProcessDefinition, name='salinity_doubler')
        if len(dpd) > 0:
            return dpd[0]

        # Salinity Doubler: Data Process Definition
        log.debug("Create data process definition SalinityDoublerTransform")
        dpd_obj = IonObject(RT.DataProcessDefinition,
            name='salinity_doubler',
            description='create a salinity doubler data product',
            module='ion.processes.data.transforms.example_double_salinity',
            class_name='SalinityDoubler')
        try:
            salinity_doubler_dprocdef_id = self.dataprocessclient.create_data_process_definition(dpd_obj)
        except Exception as ex:
            self.fail("failed to create new SalinityDoubler data process definition: %s" %ex)


        # create a stream definition for the data from the salinity Transform
        ctd_pdict_id = self.datasetclient.read_parameter_dictionary_by_name('ctd_parsed_param_dict', id_only=True)
        salinity_double_stream_def_id = self.pubsubclient.create_stream_definition(name='SalinityDoubler', parameter_dictionary_id=ctd_pdict_id)
        self.dataprocessclient.assign_stream_definition_to_data_process_definition(salinity_double_stream_def_id, salinity_doubler_dprocdef_id, binding='salinity' )

        return salinity_doubler_dprocdef_id
    def _on_link_up(self):
        """
        Processing on link up event.
        Start client socket.
        ION link availability published when pending commands are transmitted.
        """
        log.debug('%s client connecting to %s:%i',
                  self.__class__.__name__,
                  self._other_host, self._other_port)
        self._client.start(self._other_host, self._other_port)
        self._publisher.publish_event(
            event_type='PublicPlatformTelemetryEvent',
            origin=self._xs_name,
            status=TelemetryStatusType.AVAILABLE)
Example #32
    def _start_remote(self):
        """
        """
        # Create agent config.
        remote_endpoint_config = {
            'other_host': 'localhost',
            'other_port': self._terrestrial_port,
            'this_port': self._remote_port,
            'platform_resource_id': self._remote_platform_id,
            'xs_name': self._xs_name,
            'process': {
                'listen_name': self._remote_listen_name
            }
        }

        # Spawn the remote endpoint process.
        log.debug('Spawning remote endpoint process.')
        self._remote_pid = self._container_client.spawn_process(
            name=self._remote_listen_name,
            module='ion.services.sa.tcaa.remote_endpoint',
            cls='RemoteEndpoint',
            config=remote_endpoint_config)
        log.debug('Remote endpoint pid=%s.', str(self._remote_pid))

        # Create an endpoint client.
        self.re_client = RemoteEndpointClient(process=FakeProcess(),
                                              to_name=self._remote_listen_name)
        log.debug('Got re client %s.', str(self.re_client))

        # Remember the remote port.
        self._remote_port = self.re_client.get_port()
        log.debug('The remote port is: %i.', self._remote_port)
Example #33
    def configure(self, driver_config):
        """
        Configures this driver. In this base class it basically
        calls validate_driver_configuration and then assigns the given
        config to self._driver_config.

        @param driver_config Driver configuration.
        """
        if log.isEnabledFor(logging.DEBUG):
            log.debug("%r: configure: %s" %
                      (self._platform_id, str(driver_config)))

        self.validate_driver_configuration(driver_config)
        self._driver_config = driver_config
Example #34
    def recv_packet(self, packet, stream_route, stream_id):
        """Processes incoming data!!!!
            @param packet granule
            @param stream_route StreamRoute
            @param stream_id str
        """

        log.debug("CTDBP_L0_all received in the stream, %s, the packet: %s", stream_id, packet)

        if packet == {}:
            return
        granules = ctdbp_L0_algorithm.execute([packet], params=self.params)
        for granule in granules:
            self.L0_stream.publish(msg=granule)
Example #35
    def destroy_instance(cls, instance):
        """
        Destroys an instance created with create_instance.
        This is mainly a convenience method to deactivate the simulator when
        run in embedded form.
        """
        cls._inst_count -= 1
        if isinstance(instance, CIOMSSimulator):
            instance._deactivate_simulator()
            log.debug("Embedded CIOMSSimulator instance destroyed")

        # else: nothing needed to do.

        log.debug("destroy_instance: _inst_count = %d", cls._inst_count)
Example #36
    def _event_received(self, event_instance):
        log.trace('%r: received event_instance=%s', self._platform_id,
                  event_instance)

        if self._notifications:
            self._notifications.append(event_instance)
        else:
            self._notifications = [event_instance]

        log.debug('%r: notifying event_instance=%s', self._platform_id,
                  event_instance)

        driver_event = ExternalEventDriverEvent(event_instance)
        self._notify_driver_event(driver_event)
Example #37
    def _start_terrestrial(self):
        """
        """
        # Create terrestrial config.
        terrestrial_endpoint_config = {
            'other_host': 'localhost',
            'other_port': self._remote_port,
            'this_port': self._terrestrial_port,
            'platform_resource_id': self._terrestrial_platform_id,
            'xs_name': self._xs_name,
            'process': {
                'listen_name': self._terrestrial_listen_name
            }
        }

        # Spawn the terrestrial endpoint process.
        log.debug('Spawning terrestrial endpoint process.')
        self._terrestrial_pid = self._container_client.spawn_process(
            name=self._terrestrial_listen_name,
            module='ion.services.sa.tcaa.terrestrial_endpoint',
            cls='TerrestrialEndpoint',
            config=terrestrial_endpoint_config)
        log.debug('Terrestrial endpoint pid=%s.', str(self._terrestrial_pid))

        # Create a terrestrial client.
        self.te_client = TerrestrialEndpointClient(
            process=FakeProcess(), to_name=self._terrestrial_listen_name)
        log.debug('Got te client %s.', str(self.te_client))
        self._terrestrial_port = self.te_client.get_port()
        log.debug('Terrestrial port is: %i', self._terrestrial_port)
    def create_instance(cls, uri=None):
        """
        Creates a CIOMSClient instance.
        Do not forget to call destroy_instance with the returned object when
        you are done with the instance.

        @param uri URI to connect to the RSN OMS server or simulator.
        If None (the default), the value of the OMS environment variable is
        used as the argument. If that is not defined, or if the resulting
        argument is "embsimulator", then a CIOMSSimulator instance is created
        directly and returned. Otherwise, the given argument (or value of the
        OMS environment variable) is used as given to attempt the connection
        with the corresponding XML/RPC server resolvable by that URI.
        """

        if uri is None:
            uri = os.getenv('OMS', 'embsimulator')

        if "embsimulator" == uri:
            # "embedded" simulator, so instantiate CIOMSSimulator here:
            log.debug("Using embedded CIOMSSimulator instance")
            instance = CIOMSSimulator()
        else:
            log.debug("Creating xmlrpclib.ServerProxy: uri=%s", uri)
            instance = xmlrpclib.ServerProxy(uri, allow_none=True)
            log.debug("Created xmlrpclib.ServerProxy: uri=%s", uri)

        cls._inst_count += 1
        log.debug("create_instance: _inst_count = %d", cls._inst_count)
        return instance
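Per the docstring, each create_instance call should be paired with destroy_instance; a minimal sketch:

    client = CIOMSClientFactory.create_instance()    # honors the OMS env var
    try:
        client.hello.ping()                          # verify connectivity
    finally:
        CIOMSClientFactory.destroy_instance(client)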
Example #39
    def _acquire_sample(cls, config, publisher, unlock_new_data_callback, update_new_data_check_attachment):
        """
        Ensures required keys (such as stream_id) are available from config, configures the publisher and then calls:
             BaseDataHandler._constraints_for_new_request (only if config does not contain 'constraints')
             BaseDataHandler._publish_data passing BaseDataHandler._get_data as a parameter
        @param config Dict containing configuration parameters, may include constraints, formatters, etc
        @param publisher the publisher used to publish data
        @param unlock_new_data_callback BaseDataHandler callback function to allow conditional unlocking of the BaseDataHandler._semaphore
        @param update_new_data_check_attachment classmethod to update the external dataset resources file list attachment
        @throws InstrumentParameterException if the data constraints are not a dictionary
        @retval None
        """
        log.debug('start _acquire_sample: config={0}'.format(config))

        cls._init_acquisition_cycle(config)

        constraints = get_safe(config, 'constraints')
        if not constraints:
            gevent.getcurrent().link(unlock_new_data_callback)
            try:
                constraints = cls._constraints_for_new_request(config)
            except NoNewDataWarning:
                #log.info(nndw.message)
                if get_safe(config, 'TESTING'):
                    #log.debug('Publish TestingFinished event')
                    pub = EventPublisher('DeviceCommonLifecycleEvent')
                    pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')
                return

            if constraints is None:
                raise InstrumentParameterException("Data constraints returned from _constraints_for_new_request cannot be None")
            config['constraints'] = constraints
        elif isinstance(constraints, dict):
            addnl_constr = cls._constraints_for_historical_request(config)
            if addnl_constr is not None and isinstance(addnl_constr, dict):
                constraints.update(addnl_constr)
        else:
            raise InstrumentParameterException('Data constraints must be of type \'dict\':  {0}'.format(constraints))

        cls._publish_data(publisher, cls._get_data(config))

        if 'set_new_data_check' in config:
            update_new_data_check_attachment(config['external_dataset_res_id'], config['set_new_data_check'])

        # Publish a 'TestingFinished' event
        if get_safe(config, 'TESTING'):
            #log.debug('Publish TestingFinished event')
            pub = EventPublisher('DeviceCommonLifecycleEvent')
            pub.publish_event(origin='BaseDataHandler._acquire_sample', description='TestingFinished')
Example #40
def test_policy(container, process=FakeProcess()):

    org_client = OrgManagementServiceProcessClient(node=container.node,
                                                   process=process)
    ion_org = org_client.find_org()

    id_client = IdentityManagementServiceProcessClient(node=container.node,
                                                       process=process)

    system_actor = id_client.find_actor_identity_by_name(
        name=CFG.system.system_actor)
    log.info('system actor:' + system_actor._id)

    policy_client = PolicyManagementServiceProcessClient(node=container.node,
                                                         process=process)

    header_roles = get_role_message_headers(
        org_client.find_all_roles_by_user(system_actor._id))

    users = org_client.find_enrolled_users(ion_org._id,
                                           headers={
                                               'ion-actor-id':
                                               system_actor._id,
                                               'ion-actor-roles': header_roles
                                           })
    for u in users:
        log.info(str(u))

    user = id_client.find_actor_identity_by_name(
        '/DC=org/DC=cilogon/C=US/O=ProtectNetwork/CN=Roger Unwin A254')
    log.debug('user_id: ' + user._id)

    roles = org_client.find_roles_by_user(ion_org._id, user._id)
    for r in roles:
        log.info('User UserRole: ' + str(r))

    header_roles = get_role_message_headers(
        org_client.find_all_roles_by_user(user._id))

    try:
        org_client.grant_role(ion_org._id,
                              user._id,
                              'INSTRUMENT_OPERATOR',
                              headers={
                                  'ion-actor-id': user._id,
                                  'ion-actor-roles': header_roles
                              })
    except Exception as e:
        log.info('This grant role should be denied:' + e.message)
Example #41
    def setUp(self):
        """
        @brief Setup test cases.
        """
        log.debug("InstrumentDriverTestCase setUp")

        # Test to ensure we have initialized our test config
        if not self._test_config.initialized:
            raise TestNotInitialized(
                msg="Tests non initialized. Missing InstrumentDriverTestCase.initalize(...)?")

        self.clear_events()
        self.init_comm_config()
Example #42
    def _debug_values_retrieved(self, attr_id, values):  # pragma: no cover
        ln = len(values)
        # just show a couple of elements
        arrstr = "["
        if ln <= 3:
            vals = [str(e) for e in values[:ln]]
            arrstr += ", ".join(vals)
        else:
            vals = [str(e) for e in values[:2]]
            last_e = values[-1]
            arrstr += ", ".join(vals)
            arrstr += ", ..., " + str(last_e)
        arrstr += "]"
        log.debug("%r: attr=%r: values retrieved(%s) = %s", self._platform_id,
                  attr_id, ln, arrstr)
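For example (attribute name and values invented), five retrieved values would be summarized as:

    # values = [1, 2, 3, 4, 5]  ->  log line ends with:
    #   attr='input_voltage': values retrieved(5) = [1, 2, ..., 5]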
Example #43
    def _process_action(self, action):
        log.debug("PD execute action %s", action)
        action_name, action_asyncres, action_kwargs = action

        action_funcname = "_action_%s" % action_name
        action_func = getattr(self, action_funcname, None)
        if not action_func:
            log.warn("Action function not found")
            return
        try:
            action_res = action_func(action_kwargs)
            action_asyncres.set(action_res)
        except Exception as ex:
            log.exception("Error executing action")
            action_asyncres.set_exception(ex)
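The action tuples consumed above are (name, AsyncResult, kwargs); a hedged sketch of the producing side, where the queue attribute and the 'schedule' action are illustrative:

    action_res = AsyncResult()
    self._action_queue.put(("schedule", action_res, dict(process_id=pid)))

    # _process_action will dispatch to self._action_schedule(kwargs) and
    # fulfill the AsyncResult (or set its exception):
    result = action_res.get(timeout=30)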
Example #44
    def on_start(self):
        # Persister thread
        self._persist_greenlet = spawn(self._persister_loop,
                                       self.persist_interval)
        log.debug(
            'EventPersister persist greenlet started in "%s" (interval %s)',
            self.__class__.__name__, self.persist_interval)

        # Event subscription
        self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                         callback=self._on_event,
                                         queue_name="event_persister",
                                         auto_delete=False)

        self.event_sub.start()
Example #45
    def _get_resource_obj(self, res_id, silent=False):
        """Returns a resource object from one of the memory locations for given preload or internal ID"""
        if self.bulk and res_id in self.bulk_resources:
            return self.bulk_resources[res_id]
        elif res_id in self.resource_objs:
            return self.resource_objs[res_id]
        else:
            # Real ID not alias - reverse lookup
            alias_ids = [alias_id for alias_id, int_id in self.resource_ids.iteritems() if int_id == res_id]
            if alias_ids:
                return self.resource_objs[alias_ids[0]]

        if not silent:
            log.debug("_get_resource_obj(): No object found for '%s'", res_id)
        return None
Example #46
    def turn_off_port(self, port_id):
        log.debug("%r: turning off port: port_id=%s", self._platform_id,
                  port_id)

        self._assert_rsn_oms()

        response = self._rsn_oms.turn_off_platform_port(
            self._platform_id, port_id)
        log.debug("%r: turn_off_platform_port response: %s", self._platform_id,
                  response)

        dic_plat = self._verify_platform_id_in_response(response)
        self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform
    def delete_stream(self, stream_id=''):
        '''
        Delete an existing stream.

        @param stream_id The id of the stream.
        @retval success Boolean to indicate successful deletion.
        @throws NotFound when stream doesn't exist.
        @todo Determine if operation was successful for return value.
        '''
        log.debug("Deleting stream id: %s", stream_id)
        stream_obj = self.clients.resource_registry.read(stream_id)
        if stream_obj is None:
            raise NotFound("Stream %s does not exist" % stream_id)

        self.clients.resource_registry.delete(stream_id)
Example #48
    def get_connected_instruments(self, port_id):
        log.debug("%r: get_connected_instruments: port_id=%s",
                  self._platform_id, port_id)

        self._assert_rsn_oms()

        response = self._rsn_oms.get_connected_instruments(
            self._platform_id, port_id)
        log.debug("%r: port_id=%r: get_connected_instruments response: %s",
                  self._platform_id, port_id, response)

        dic_plat = self._verify_platform_id_in_response(response)
        port_dic = self._verify_port_id_in_response(port_id, dic_plat)

        return dic_plat  # note: return the dic for the platform
Example #49
    def _unregister_event_listener(self, url):
        """
        Unregisters given url for all event types.
        """
        log.debug("%r: unregistering event listener: %s", self._platform_id,
                  url)
        try:
            result = self._rsn_oms.event.unregister_event_listener(url)
        except Exception as e:
            raise PlatformConnectionException(
                msg="%r: Cannot unregister_event_listener: %s" %
                (self._platform_id, e))

        log.debug("%r: unregister_event_listener(%r) => %s", self._platform_id,
                  url, result)
Example #50
    def ping(self):
        """
        Verifies communication with external platform returning "PONG" if
        this verification completes OK.

        @retval "PONG" iff all OK.
        @raise PlatformConnectionException Cannot ping external platform or
               got unexpected response.
        """
        log.debug("%r: pinging OMS...", self._platform_id)
        self._assert_rsn_oms()
        try:
            retval = self._rsn_oms.hello.ping()
        except Exception as e:
            raise PlatformConnectionException(msg="Cannot ping %s" % str(e))
Example #51
    def init_comm_config(self):
        """
        @brief Create the comm config object by reading the comm_config.yml file.
        """
        log.info("Initialize comm config")
        config_file = self.comm_config_file()

        log.debug(" -- reading comm config from: %s" % config_file)
        if not os.path.exists(config_file):
            raise TestNoCommConfig(
                msg="Missing comm config.  Try running start_driver or switch_driver")

        self.comm_config = CommConfig.get_config_from_file(config_file)
    def read_ingestion_configuration(self, ingestion_configuration_id=''):
        """Get an existing ingestion configuration object.

        @param ingestion_configuration_id    str
        @retval ingestion_configuration    IngestionConfiguration
        @throws NotFound    if ingestion configuration did not exist
        """
        log.debug("Reading ingestion configuration object id: %s",
                  ingestion_configuration_id)
        ingestion_configuration = self.clients.resource_registry.read(
            ingestion_configuration_id)
        if ingestion_configuration is None:
            raise NotFound("Ingestion configuration %s does not exist" %
                           ingestion_configuration_id)
        return ingestion_configuration
    def assertInstrumentAgentState(self, expected_state, timeout=0):
        end_time = time.time() + timeout

        while True:
            state = self._ia_client.get_agent_state()
            log.debug("assertInstrumentAgentState: IA state = %s, expected state = %s",
                      state, expected_state)
            if state == expected_state:
                return True
            if time.time() >= end_time:
                self.fail(
                    "assertInstrumentAgentState: IA failed to transition to %s state"
                    % expected_state)
            gevent.sleep(1)
Example #54
    def __init__(self, platform_id, rate_secs, attr_defns,
                 get_attribute_values, notify_driver_event):
        """
        Creates a monitor for specific attributes in a given platform.
        Call start to start the monitoring greenlet.

        @param platform_id Platform ID
        @param rate_secs   Monitoring rate in secs
        @param attr_defns  List of attribute definitions
        @param get_attribute_values
                           Function to retrieve attribute values for the specific
                           platform, to be called like this:
                               get_attribute_values(attr_ids, from_time)
        @param notify_driver_event
                           Callback to notify whenever a value is retrieved.
        """
        log.debug("%r: ResourceMonitor entered. rate_secs=%s, attr_defns=%s",
                  platform_id, rate_secs, attr_defns)

        assert platform_id, "must give a valid platform ID"

        self._get_attribute_values = get_attribute_values
        self._platform_id = platform_id
        self._rate_secs = rate_secs
        self._attr_defns = attr_defns
        self._notify_driver_event = notify_driver_event

        # corresponding attribute IDs to be retrieved and "ION System time"
        # compliant timestamp of last retrieved value for each attribute:
        self._attr_ids = []
        self._last_ts = {}
        for attr_defn in self._attr_defns:
            if 'attr_id' in attr_defn:
                attr_id = attr_defn['attr_id']
                self._attr_ids.append(attr_id)
                self._last_ts[attr_id] = None
            else:
                log.warn(
                    "%r: 'attr_id' key expected in attribute definition: %s",
                    self._platform_id, attr_defn)

        self._active = False

        # for debugging purposes
        self._pp = pprint.PrettyPrinter()

        log.debug("%r: ResourceMonitor created. rate_secs=%s, attr_ids=%s",
                  platform_id, rate_secs, self._attr_ids)
Example #55
    def delete_transform(self, transform_id=''):
        """Deletes and stops an existing transform process
        @param transform_id The unique transform identifier
        @throws NotFound when a transform doesn't exist
        """

        # get the transform resource (also verifies it's existence before continuing)
        transform_res = self.read_transform(transform_id=transform_id)
        pid = transform_res.process_id

        # get the resources
        process_definition_ids, _ = self.clients.resource_registry.find_objects(
            transform_id, PRED.hasProcessDefinition, RT.ProcessDefinition,
            True)
        in_subscription_ids, _ = self.clients.resource_registry.find_objects(
            transform_id, PRED.hasSubscription, RT.Subscription, True)
        out_stream_ids, _ = self.clients.resource_registry.find_objects(
            transform_id, PRED.hasOutStream, RT.Stream, True)

        # build a list of all the ids above
        id_list = process_definition_ids + in_subscription_ids + out_stream_ids

        # stop the transform process

        #@note: cancel_process does not raise, nor confirm whether the
        # termination was successful

        self.clients.process_dispatcher.cancel_process(pid)

        log.debug('(%s): Terminated Process (%s)', self.name, pid)

        # delete the associations
        for predicate in [
                PRED.hasProcessDefinition, PRED.hasSubscription,
                PRED.hasOutStream
        ]:
            associations = self.clients.resource_registry.find_associations(
                transform_id, predicate)
            for association in associations:
                self.clients.resource_registry.delete_association(association)

        #@todo: should this service delete the resources, or should DPMS?

        # iterate through the list and delete each
        #for res_id in id_list:
        #    self.clients.resource_registry.delete(res_id)

        self.clients.resource_registry.delete(transform_id)
        return True
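A hedged round-trip sketch for callers; create_transform and its arguments are assumptions about the rest of this service's interface and are not shown in the example.

# hypothetical caller of the transform management service
transform_id = tms_client.create_transform(name='demo_transform',
                                           in_subscription_id=sub_id,
                                           process_definition_id=procdef_id)
# ... later ...
tms_client.delete_transform(transform_id)  # stops the process and removes
                                           # associations before deleting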
Example #56
    def _create_subsequent_deployment(self, prior_dep_info):
        platform_device_obj = IonObject(RT.PlatformDevice,
                                        name='PlatformDevice2',
                                        description='test platform device')
        platform_device_id = self.imsclient.create_platform_device(
            platform_device_obj)

        instrument_device_obj = IonObject(RT.InstrumentDevice,
                                          name='InstrumentDevice2',
                                          description='test instrument device')
        instrument_device_id = self.imsclient.create_instrument_device(
            instrument_device_obj)
        self.rrclient.create_association(platform_device_id, PRED.hasDevice,
                                         instrument_device_id)

        self.imsclient.assign_platform_model_to_platform_device(
            prior_dep_info.platform_model_id, platform_device_id)
        self.imsclient.assign_instrument_model_to_instrument_device(
            prior_dep_info.instrument_model_id, instrument_device_id)

        start = str(int(time.mktime(datetime.datetime(2013, 6,
                                                      1).timetuple())))
        end = str(int(time.mktime(datetime.datetime(2020, 6, 1).timetuple())))
        temporal_bounds = IonObject(OT.TemporalBounds,
                                    name='planned',
                                    start_datetime=start,
                                    end_datetime=end)
        deployment_obj = IonObject(RT.Deployment,
                                   name='TestDeployment2',
                                   description='some new deployment',
                                   context=IonObject(
                                       OT.CabledNodeDeploymentContext),
                                   constraint_list=[temporal_bounds])
        deployment_id = self.omsclient.create_deployment(deployment_obj)

        self.omsclient.assign_site_to_deployment(
            prior_dep_info.platform_site_id, deployment_id)
        self.omsclient.assign_device_to_deployment(
            prior_dep_info.platform_device_id, deployment_id)

        log.debug("test_create_deployment: created deployment id: %s ",
                  str(deployment_id))

        ret = DotDict(instrument_device_id=instrument_device_id,
                      platform_device_id=platform_device_id,
                      deployment_id=deployment_id)

        return ret
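In the calling test, this helper would typically be followed by activating the new deployment; activate_deployment is an assumption about the observatory management client, not part of this example.

# hypothetical follow-up in the calling test
dep2 = self._create_subsequent_deployment(prior_dep_info)
self.omsclient.activate_deployment(dep2.deployment_id)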
Example #57
    def create_couch_instance(self,
                              name='',
                              description='',
                              host='',
                              port=5984,
                              username='',
                              password='',
                              file_system_datastore_id='',
                              persistence_system_id='',
                              config=None):
        """Create an Persistence Instance resource describing a couch instance.

        @param name    str
        @param description    str
        @param host    str
        @param port    int
        @param username    str
        @param password    str
        @param file_system_datastore_id    str
        @param persistence_system_id    str
        @param config    dict of overrides merged into the instance's config
        @retval persistence_instance_id    str
        """
        persistence_instance = PersistenceInstance(
            name=name,
            description=description,
            type=PersistenceType.COUCHDB,
            host=host,
            port=port,
            username=username,
            password=password)
        if config is not None:
            persistence_instance.config.update(config)
        persistence_instance_id, rev = self.clients.resource_registry.create(
            persistence_instance)
        log.debug("persistence_system_id=%s", persistence_system_id)

        if file_system_datastore_id != '':
            self.clients.resource_registry.create_association(
                persistence_instance_id, PRED.hasDatastore,
                file_system_datastore_id)

        if persistence_system_id != '':
            self.clients.resource_registry.create_association(
                persistence_system_id, PRED.hasPersistenceInstance,
                persistence_instance_id)

        return persistence_instance_id
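A sketch of invoking this operation; every identifier and credential below is a placeholder.

# hypothetical call; ps_id would come from an earlier create step
pi_id = self.create_couch_instance(name='couch-01',
                                   description='primary CouchDB store',
                                   host='localhost',
                                   port=5984,
                                   username='admin',
                                   password='secret',
                                   persistence_system_id=ps_id)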
Example #58
    def _start_subscriber_device_status_event(self, origin):
        """
        @param origin    the resource_id associated with the child device
        """
        event_type = "DeviceStatusEvent"
        sub = self._agent._create_event_subscriber(
            event_type=event_type,
            origin=origin,
            callback=self._got_device_status_event)

        with self._lock:
            self._set_event_subscriber(origin, event_type, sub)

        log.debug(
            "%r: registered event subscriber for event_type=%r"
            " coming from origin=%r", self._platform_id, event_type, origin)
Example #59
    def run(self):
        if self._listener == self._dummy_listener:
            log.warn("No listener provided. Using a dummy one")

        log.debug("_Receiver running")

        self._running = True
        while self._running:
            # use a timeout so the loop regularly checks for an end() call
            rlist, wlist, elist = select.select([self._sock], [], [], 0.5)
            if rlist:
                recv_message = self._sock.recv(1024)
                self._parse_message_and_notify_listener(recv_message)

        self._sock.close()
        log.debug("_Receiver.run done.")
    def on_system_restart(self):
        '''
        On system restart, get timer data from the Resource Registry and
        restore the Scheduler state.
        '''
        # Remove all active timers. When this method is called there should
        # not be any active timers, but if it is called from a test this
        # clears any current timers before restoring them from the Resource
        # Registry.
        self._stop_pending_timers()

        # Restore the timer from Resource Registry
        scheduler_entries, _ = self.clients.resource_registry.find_resources(
            RT.SchedulerEntry, id_only=False)
        for scheduler_entry in scheduler_entries:
            self.__schedule(scheduler_entry, scheduler_entry._id)
            log.debug("SchedulerService:on_system_restart: timer restored: " +
                      scheduler_entry._id)
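For context, the entries restored here would have been created through the scheduler's timer API; the method name and arguments below are assumptions for illustration only.

# hypothetical creation of an entry that a later restart would restore
timer_id = scheduler_client.create_interval_timer(start_time='now',
                                                  interval=60,
                                                  event_origin='demo_origin')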