Example #1
 def stop_es(origin, es):
     try:
         self._agent._destroy_event_subscriber(es)
     except Exception as ex:
         log.warn(
             "%r: error destroying event subscriber: origin=%r: %s",
             self._platform_id, origin, ex)
Example #2
    def _get_data(cls, config):
        parser = get_safe(config, 'parser', None)
        ext_dset_res = get_safe(config, 'external_dataset_res', None)
        if ext_dset_res and parser:
            #CBM: Not in use yet...
            #            t_vname = ext_dset_res.dataset_description.parameters['temporal_dimension']
            #            x_vname = ext_dset_res.dataset_description.parameters['zonal_dimension']
            #            y_vname = ext_dset_res.dataset_description.parameters['meridional_dimension']
            #            z_vname = ext_dset_res.dataset_description.parameters['vertical_dimension']
            #            var_lst = ext_dset_res.dataset_description.parameters['variables']

            max_rec = get_safe(config, 'max_records', 1)
            dprod_id = get_safe(config, 'data_producer_id',
                                'unknown data producer')
            tx_yml = get_safe(config, 'taxonomy')
            ttool = TaxyTool.load(
                tx_yml
            )  #CBM: Assertion inside RDT.__setitem__ requires same instance of TaxyTool

            cnt = cls._calc_iter_cnt(len(parser.sensor_map), max_rec)
            for x in xrange(cnt):
                rdt = RecordDictionaryTool(taxonomy=ttool)

                for name in parser.sensor_map:
                    d = parser.data_map[name][x * max_rec:(x + 1) * max_rec]
                    rdt[name] = d

                g = build_granule(data_producer_id=dprod_id,
                                  taxonomy=ttool,
                                  record_dictionary=rdt)
                yield g
        else:
            log.warn('No parser object found in config')
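
`_calc_iter_cnt` is not shown in this example. Given how the loop slices `data_map` into chunks of `max_rec` records, a plausible sketch is a ceiling division (the exact name and semantics of the real helper are assumptions):

    @classmethod
    def _calc_iter_cnt(cls, total, max_rec):
        # sketch only: number of chunks of size max_rec needed to cover total
        return (total + max_rec - 1) // max_rec
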
Example #3
    def _apply_policy(self):

        self.core.apply_policy()

        try:
            new_service_state = _core_hastate_to_service_state(
                self.core.status())
            new_policy = self._policy_dict
            service = self.container.resource_registry.read(self.service_id)

            update_service = False
            if service.state != new_service_state:
                service.state = new_service_state
                update_service = True

            if service.policy != new_policy:
                service.policy = new_policy
                update_service = True

            if update_service is True:
                self.container.resource_registry.update(service)
        except Exception:
            log.warn("%sProblem when updating Service state",
                     self.logprefix,
                     exc_info=True)
Example #4
 def list_persistence(self):
     # Show ingestion streams, workers (active or not) etc
     ingconf_objs, _ = self.rr.find_resources(RT.IngestionConfiguration,
                                              id_only=False)
     if not ingconf_objs:
         log.warn("Could not find system IngestionConfiguration")
     ingconf_obj = ingconf_objs[0]
     scheme = [
         dict(name="Subscriptions",
              query=["find_objects", PRED.hasSubscription],
              recurse=[
                  dict(query=[
                      "find_subjects", PRED.hasSubscription, RT.ExchangeName
                  ],
                       recurse=[
                           dict(query=[
                               "find_objects", PRED.hasIngestionWorker
                           ],
                                recurse=[
                                    dict(query=[
                                        "find_objects",
                                        PRED.hasProcessDefinition
                                    ]),
                                ]),
                       ]),
                  dict(query=["find_objects", PRED.hasStream],
                       recurse=[
                           dict(query=[
                               "find_subjects", PRED.hasStream, RT.Dataset
                           ]),
                       ]),
              ]),
     ]
     self._print_scheme(ingconf_obj, scheme)
Example #5
    def _process_state(self, process):
        # handle incoming process state updates from the real PD service.
        # some states map to ION events while others are ignored.

        process_id = None
        state = None
        if process:
            process_id = process.get('upid')
            state = process.get('state')

        if not (process and process_id and state):
            log.warn("Invalid process state from CEI process dispatcher: %s",
                     process)
            return

        ion_process_state = _PD_PROCESS_STATE_MAP.get(state)
        if not ion_process_state:
            log.debug(
                "Received unknown process state from Process Dispatcher." +
                " process=%s state=%s", process_id, state)
            return

        self.event_pub.publish_event(event_type="ProcessLifecycleEvent",
                                     origin=process_id,
                                     origin_type="DispatchedProcess",
                                     state=ion_process_state)
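
`_PD_PROCESS_STATE_MAP` is defined outside this snippet. An illustrative sketch, assuming CEI-style state strings and ION's ProcessStateEnum (the exact keys and values here are assumptions, not the real table):

    # illustrative only - unmapped states fall through to the log.debug branch above
    _PD_PROCESS_STATE_MAP = {
        '500-RUNNING': ProcessStateEnum.RUNNING,
        '700-TERMINATED': ProcessStateEnum.TERMINATED,
        '850-FAILED': ProcessStateEnum.FAILED,
    }
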
Example #6
    def handle_attribute_value_event(self, driver_event):

        assert isinstance(driver_event, AttributeValueDriverEvent)

        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s", self._platform_id,
                      self._pp.pformat(driver_event.vals_dict))

        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id,
                      driver_event.brief())

        stream_name = driver_event.stream_name

        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn(
                '%r: no publisher configured for stream_name=%r. '
                'Configured streams are: %s', self._platform_id, stream_name,
                self._data_publishers.keys())
            return

        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]

        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)

        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)
Example #7
        def set_ports(pnode):
            platform_id = pnode.platform_id
            port_infos = rsn_oms.get_platform_ports(platform_id)
            if not isinstance(port_infos, dict):
                log.warn("%r: get_platform_ports returned: %s", platform_id,
                         port_infos)
                return

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: port_infos: %s", platform_id, port_infos)

            assert platform_id in port_infos
            ports = port_infos[platform_id]
            for port_id, dic in ports.iteritems():
                port = PortNode(port_id, dic['network'])
                pnode.add_port(port)

                # add connected instruments:
                instrs_res = rsn_oms.get_connected_instruments(
                    platform_id, port_id)
                if not isinstance(instrs_res, dict):
                    log.warn("%r: port_id=%r: get_connected_instruments "
                             "returned: %s" %
                             (platform_id, port_id, instrs_res))
                    continue

                if log.isEnabledFor(logging.TRACE):
                    log.trace("%r: port_id=%r: get_connected_instruments "
                              "returned: %s" %
                              (platform_id, port_id, instrs_res))
                assert platform_id in instrs_res
                assert port_id in instrs_res[platform_id]
                instr = instrs_res[platform_id][port_id]
                for instrument_id, attrs in instr.iteritems():
                    port.add_instrument(InstrumentNode(instrument_id, attrs))
Example #8
    def _get_data_product_metadata(self, data_product_id=""):

        dp_meta_data = {}
        if not data_product_id:
            raise BadRequest("The data_product_id parameter is missing")

        # get the dataset_id and dataset associated with the data_product. Need it to do the data retrieval
        ds_ids,_ = self.clients.resource_registry.find_objects(data_product_id, PRED.hasDataset, RT.Dataset, True)

        if not ds_ids:
            log.warn("Specified data_product does not have an associated dataset")
            return None

        # Start collecting the data to populate the output dictionary
        #time_bounds = self.clients.dataset_management.dataset_bounds(ds_ids[0])['time']
        #dp_meta_data['time_bounds'] = [float(ntplib.ntp_to_system_time(i)) for i in time_bounds]
        time_bounds = self.clients.dataset_management.dataset_temporal_bounds(ds_ids[0])
        dp_meta_data['time_bounds'] = time_bounds
        dp_meta_data['time_steps'] = self.clients.dataset_management.dataset_extents(ds_ids[0])['time'][0]

        # use the associations to get to the parameter dict
        parameter_display_names = {}
        for stream_def in self.clients.resource_registry.find_objects(data_product_id, PRED.hasStreamDefinition, id_only=True)[0]:
            for pdict in self.clients.resource_registry.find_objects(stream_def, PRED.hasParameterDictionary, id_only=True)[0]:
                for context in self.clients.resource_registry.find_objects(pdict, PRED.hasParameterContext, id_only=False)[0]:
                    #display_name = context.display_name
                    parameter_display_names[context.name] = context.display_name

        dp_meta_data['parameter_display_names'] = parameter_display_names

        return simplejson.dumps(dp_meta_data)
Example #9
    def start_resource_monitoring(self):
        """
        Starts greenlets to periodically retrieve values of the attributes
        associated with my platform, and do corresponding event notifications.
        """
        log.debug("%r: start_resource_monitoring", self._platform_id)

        attr_info = self._get_platform_attributes()

        if not attr_info:
            # no attributes to monitor.
            log.debug("%r: NOT starting resource monitoring", self._platform_id)
            return

        log.debug("%r: starting resource monitoring: attr_info=%s",
                  self._platform_id, str(attr_info))

        #
        # TODO attribute grouping so one single greenlet is launched for a
        # group of attributes having same or similar monitoring rate. For
        # simplicity at the moment, start a greenlet per attribute.
        #

        for attr_id, attr_defn in attr_info.iteritems():
            log.debug("%r: dispatching resource monitoring for attr_id=%r attr_defn=%s",
                      self._platform_id, attr_id, attr_defn)
            if 'monitorCycleSeconds' in attr_defn:
                self._start_monitor_greenlet(attr_id, attr_defn)
            else:
                log.warn(
                    "%r: unexpected: attribute info does not contain %r "
                    "for attribute %r. attr_defn = %s",
                        self._platform_id,
                        'monitorCycleSeconds', attr_id, str(attr_defn))
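
`_start_monitor_greenlet` is not included above. A hedged sketch of the per-attribute greenlet the TODO describes, using gevent; `self._active`, `self._retrieve_attribute_value` and `self._monitor_greenlets` are hypothetical names:

    def _start_monitor_greenlet(self, attr_id, attr_defn):
        # sketch: poll one attribute at its declared rate until monitoring stops
        def _loop():
            while self._active:
                self._retrieve_attribute_value(attr_id)  # hypothetical helper
                gevent.sleep(attr_defn['monitorCycleSeconds'])
        self._monitor_greenlets[attr_id] = gevent.spawn(_loop)
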
Example #10
    def _register_service(self):

        definition = self.process_definition
        existing_services, _ = self.container.resource_registry.find_resources(
            restype="Service", name=definition.name)

        if len(existing_services) > 0:
            if len(existing_services) > 1:
                log.warning(
                    "There is more than one service object for %s. Using the first one"
                    % definition.name)
            service_id = existing_services[0]._id
        else:
            svc_obj = Service(name=definition.name,
                              exchange_name=definition.name)
            service_id, _ = self.container.resource_registry.create(svc_obj)

        svcdefs, _ = self.container.resource_registry.find_resources(
            restype="ServiceDefinition", name=definition.name)

        if svcdefs:
            try:
                self.container.resource_registry.create_association(
                    service_id, "hasServiceDefinition", svcdefs[0]._id)
            except BadRequest:
                log.warn(
                    "Failed to associate %s Service and ServiceDefinition. It probably exists.",
                    definition.name)
        else:
            log.error("Cannot find ServiceDefinition resource for %s",
                      definition.name)

        return service_id, definition.name
Example #11
    def _device_removed_event(self, evt):
        """
        Handles the device_removed event to remove associated information and
        status updates, which may result in events being published.
        """

        # the actual child removed is in the values component of the event:
        if isinstance(evt.values, (list, tuple)):
            # normally it will be just one element but handle as array:
            for sub_resource_id in evt.values:
                self._remove_child(sub_resource_id)
        else:
            log.warn(
                "%r: Got device_removed event with invalid values member: %r",
                self._platform_id, evt)
            return

        # finally forward event so ancestors also get notified:
        # only adjustment is that now my platform's resource_id is the origin:
        evt = dict(event_type=evt.type_,
                   sub_type=evt.sub_type,
                   origin_type=evt.origin_type,
                   origin=self.resource_id,
                   description=evt.description,
                   values=evt.values)
        try:
            log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                      self._platform_id, evt)

            self._event_publisher.publish_event(**evt)

        except Exception:
            log.exception('%r: platform agent could not publish event: %s',
                          self._platform_id, evt)
Example #12
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace(
                    "%r: ignoring event type %r. Only handle %r directly",
                    self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info(
                "%r/%s: (%s) status report triggered by diagnostic event:\n"
                "%s\n"
                "%40s : %s\n", self._platform_id, state, self.resource_id,
                statuses, "invalidated_children", invalidated_children)
Example #13
    def _dump_datastore(self, outpath_base, ds_name, clear_dir=True):
        ds = DatastoreFactory.get_datastore(datastore_name=ds_name,
                                            config=self.config,
                                            scope=self.sysname)
        try:
            if not ds.datastore_exists(ds_name):
                log.warn("Datastore does not exist: %s" % ds_name)
                return

            if not os.path.exists(outpath_base):
                os.makedirs(outpath_base)

            outpath = "%s/%s" % (outpath_base, ds_name)
            if not os.path.exists(outpath):
                os.makedirs(outpath)
            if clear_dir:
                [
                    os.remove(os.path.join(outpath, f))
                    for f in os.listdir(outpath)
                ]

            objs = ds.find_docs_by_view("_all_docs", None, id_only=False)
            compact_obj = [obj for obj_id, obj_key, obj in objs]
            compact_obj = ["COMPACTDUMP", compact_obj]
            with open("%s/%s_compact.json" % (outpath, ds_name), 'w') as f:
                json.dump(compact_obj, f)
            numwrites = len(objs)

            log.info("Wrote %s files to %s" % (numwrites, outpath))
        finally:
            ds.close()
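
For reference, the `<ds_name>_compact.json` file written above is a two-element JSON array: the literal marker "COMPACTDUMP" followed by the list of document dicts. The loader in Example #26 keys off this marker. An illustrative shape (values made up):

    compact_obj = ["COMPACTDUMP", [
        {"_id": "doc1", "type_": "Dataset"},
        {"_id": "doc2", "type_": "Stream"},
    ]]
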
Example #14
    def load_datastore(self, path=None, ds_name=None, ignore_errors=True):
        """
        Loads data from files into a datastore
        """
        path = path or "res/preload/default"
        if not os.path.exists(path):
            log.warn("Load path not found: %s" % path)
            return
        if not os.path.isdir(path):
            log.error("Path is not a directory: %s" % path)

        if ds_name:
            # Here we expect path to contain YML files for given datastore
            log.info("DatastoreLoader: LOAD datastore=%s" % ds_name)
            self._load_datastore(path, ds_name, ignore_errors)
        else:
            # Here we expect path to have subdirs that are named according to logical
            # datastores, e.g. "resources"
            log.info("DatastoreLoader: LOAD ALL DATASTORES")
            for fn in os.listdir(path):
                fp = os.path.join(path, fn)
                if not os.path.isdir(fp):
                    log.warn("Item %s is not a directory" % fp)
                    continue
                self._load_datastore(fp, fn, ignore_errors)
Example #15
 def clear_datastore(self, ds_name=None, prefix=None):
     """
     Clears a datastore or a set of datastores of common prefix
     """
     ds = DatastoreFactory.get_datastore(config=self.config,
                                         scope=self.sysname)
     try:
         if ds_name:
             try:
                 ds.delete_datastore(ds_name)
             except NotFound:
                 try:
                     # Try the unscoped version
                     ds1 = DatastoreFactory.get_datastore(
                         config=self.config)
                     ds1.delete_datastore(ds_name)
                 except NotFound:
                     pass
         elif prefix:
             prefix = prefix.lower()
             ds_noscope = DatastoreFactory.get_datastore(config=self.config)
             for dsn in ds_noscope.list_datastores():
                 if dsn.lower().startswith(prefix):
                     ds_noscope.delete_datastore(dsn)
         else:
             log.warn(
                 "Cannot clear datastore without prefix or datastore name")
     finally:
         ds.close()
Example #16
 def dump_datastore(self, path=None, ds_name=None, clear_dir=True):
     """
     Dumps CouchDB datastores into a directory as YML files.
     @param ds_name Logical name (such as "resources") of an ION datastore
     @param path Directory to put dumped datastores into (defaults to
                 "res/preload/local/dump_[timestamp]")
     @param clear_dir if True, delete contents of datastore dump dirs
     """
     if not path:
         dtstr = datetime.datetime.today().strftime('%Y%m%d_%H%M%S')
         path = "res/preload/local/dump_%s" % dtstr
     if ds_name:
         ds = DatastoreFactory.get_datastore(datastore_name=ds_name,
                                             config=self.config,
                                             scope=self.sysname)
         if ds.datastore_exists(ds_name):
             self._dump_datastore(path, ds_name, clear_dir)
         else:
             log.warn("Datastore does not exist")
         ds.close()
     else:
         ds_list = ['resources', 'objects', 'state', 'events']
         for dsn in ds_list:
             self._dump_datastore(path, dsn, clear_dir)
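
A hedged round-trip usage sketch for the datastore admin operations above; the instance name `admin` and the dump directory are illustrative. The dump writes JSON, which is valid YAML, so the loader can read the files back:

    admin.dump_datastore(ds_name="resources")   # writes res/preload/local/dump_<timestamp>/resources
    admin.clear_datastore(ds_name="resources")
    admin.load_datastore(path="res/preload/local/dump_20130101_120000",
                         ds_name="resources")
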
Example #17
    def execute_resource(self, resource_id='', command=None):
        """Execute command on the resource represented by agent.
        @param resource_id The id of the resource agent.
        @param command An AgentCommand containing the command.
        @retval result An AgentCommandResult containing the result.
        @throws BadRequest if the command was malformed.
        @throws NotFound if the command is not available in current state.
        @throws ResourceError if the resource produced an error during execution.

        @param resource_id    str
        @param command    AgentCommand
        @retval result    AgentCommandResult
        @throws BadRequest    if the command was malformed.
        @throws NotFound    if the command is not implemented in the agent.
        @throws ResourceError    if the resource produced an error.
        """
        res_type = self._get_resource_type(resource_id)
        if self._has_agent(res_type):
            rac = ResourceAgentClient(resource_id=resource_id)
            return rac.execute_resource(resource_id=resource_id, command=command)

        cmd_res = None
        res_interface = self._get_type_interface(res_type)

        target = get_safe(res_interface, "commands.%s.execute" % command.command, None)
        if target:
            res = self._call_execute(target, resource_id, res_type, command.args, command.kwargs)
            cmd_res = AgentCommandResult(command_id=command.command_id,
                command=command.command,
                ts_execute=get_ion_ts(),
                status=0)
        else:
            log.warn("execute_resource(): command %s not defined", command.command)

        return cmd_res
Example #18
    def set_resource(self, resource_id='', params=None):
        """Set the value of the given resource parameters.
        @param resource_id The id of the resource agent.
        @param params A dict of resource parameter name-value pairs.
        @throws BadRequest if the command was malformed.
        @throws NotFound if a parameter is not supported by the resource.
        @throws ResourceError if the resource encountered an error while setting
        the parameters.

        @param resource_id    str
        @param params    dict
        @throws BadRequest    if the command was malformed.
        @throws NotFound    if the parameter does not exist.
        @throws ResourceError    if the resource failed while trying to set the parameter.
        """
        res_type = self._get_resource_type(resource_id)
        if self._has_agent(res_type):
            rac = ResourceAgentClient(resource_id=resource_id)
            return rac.set_resource(resource_id=resource_id, params=params)

        res_interface = self._get_type_interface(res_type)

        for param in params:
            setter = get_safe(res_interface, "params.%s.set" % param, None)
            if setter:
                self._call_setter(setter, params[param], resource_id, res_type)
            else:
                log.warn("set_resource(): param %s not defined", param)
Example #19
def list_file_info(base, pattern, name_index=0, type=None):
    """
    Constructs a list of tuples containing information about the files as indicated by the pattern.
    The name_index should correspond to the index in the resulting tuple that contains the name of the file, default is 0
    @param base path for files
    @param pattern regular expression describing file names
    @param name_index
    @param type the type of server (http, ftp, or local file system)
    """

    # If type isn't specified, attempt to determine based on base
    type = type or _get_type(base)

    # Switch on type
    if type == 'http':
        lst = list_file_info_http(base, pattern, name_index)
    elif type == 'ftp':
        lst = list_file_info_ftp(base, pattern)
    elif type == 'fs':
        lst = list_file_info_fs(base, pattern)
    else:
        log.warn('Unknown type specified: {0}'.format(type))
        lst = []

    return lst
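
`_get_type` is defined elsewhere; a minimal sketch of the scheme inference the docstring mentions, assuming simple prefix matching on `base` (the exact rules are an assumption):

    def _get_type(base):
        # infer the server type from the base path/URL prefix
        if base.startswith('http://') or base.startswith('https://'):
            return 'http'
        if base.startswith('ftp://'):
            return 'ftp'
        return 'fs'  # default: local file system
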
Example #20
    def acquire_samples(self, max_samples=0):
        log.debug('Orb_DataAgentPlugin.acquire_samples')
        if os.path.exists(self.data_dir):
            files = os.listdir(self.data_dir)
            cols = []
            rows = []
            for f in files:
                fpath = self.data_dir + f
                with open(fpath) as fh:
                    try:
                        pkt = json.load(fh)
                        if not cols:
                            cols = [str(c['chan']) for c in pkt['channels']]
                        row = self._extract_row(pkt, cols)
                        dims = [len(c) for c in row[:3]]
                        if all(d == 400 for d in dims):
                            rows.append(row)
                        else:
                            log.warning('Inconsistent dimensions %s, %s' %
                                        (str(dims), fpath))
                        fh.close()
                        os.remove(fpath)
                        log.info('sample: ' + fpath)
                    except Exception as ex:
                        log.warn(ex)
                        log.warn('Incomplete packet %s' % fpath)

            if cols and rows:
                coltypes = {}
                for c in cols:
                    coltypes[c] = '400i4'
                cols.append('time')
                samples = dict(cols=cols, data=rows, coltypes=coltypes)
                return samples
Example #21
    def __init__(self, platform_id, notify_driver_event):
        """
        Creates a listener.

        @param notify_driver_event callback to notify events. Must be
                                    provided.
        """

        self._platform_id = platform_id
        self._notify_driver_event = notify_driver_event

        self._http_server = None
        self._url = None

        # _notifications: if not None, [event_instance, ...]
        self._notifications = None

        # _no_notifications: flag only intended for development purposes
        # see ion.agents.platform.rsn.simulator.oms_events
        self._no_notifications = os.getenv("NO_OMS_NOTIFICATIONS") is not None
        if self._no_notifications:  # pragma: no cover
            log.warn(
                "%r: NO_OMS_NOTIFICATIONS env variable defined: "
                "no notifications will be done", self._platform_id)
            self._url = "http://NO_OMS_NOTIFICATIONS"
Example #22
    def _init_buffers(self):
        """
        Initializes self._buffers (empty arrays for each attribute)
        """
        self._buffers = {}
        for attr_key, attr_defn in self._attr_info.iteritems():
            if 'monitor_cycle_seconds' not in attr_defn:
                log.warn("%r: unexpected: attribute info does not contain %r. "
                         "attr_defn = %s",
                         self._platform_id,
                         'monitor_cycle_seconds', attr_defn)
                continue
            if 'ion_parameter_name' not in attr_defn:
                log.warn("%r: unexpected: attribute info does not contain %r. "
                         "attr_defn = %s",
                         self._platform_id,
                         'ion_parameter_name', attr_defn)
                continue

            self._buffers[attr_defn['ion_parameter_name']] = []

            log.debug('*********%r: Created Buffer for =%s',
                      self._platform_id, attr_defn['ion_parameter_name'])
Example #23
    def stop_launched_simulator(cls):
        """
        Utility to stop the process launched with launch_simulator.
        The stop is attempted a couple of times in case of errors (with a few
        seconds of sleep in between).

        @return None if process seems to have been stopped properly.
                Otherwise the exception of the last attempt to stop it.
        """
        if cls._sim_process:
            sim_proc, cls._sim_process = cls._sim_process, None
            attempts = 3
            attempt = 0
            while attempt <= attempts:
                attempt += 1
                log.debug(
                    "[OMSim] stopping launched simulator (attempt=%d) ...",
                    attempt)
                try:
                    sim_proc.stop()
                    log.debug(
                        "[OMSim] simulator process seems to have stopped properly"
                    )
                    return None

                except Exception as ex:
                    if attempt < attempts:
                        sleep(10)
                    else:
                        log.warn(
                            "[OMSim] error while stopping simulator process: %s",
                            ex)
                        return ex
Example #24
    def _policy_thread_loop(self):
        """Single thread runs policy loops, to prevent races
        """
        while not self._policy_loop_event.wait(timeout=0.1):
            # wait until our event is set, up to policy_interval seconds
            self.policy_event.wait(self.policy_interval)
            if self.policy_event.is_set():
                self.policy_event.clear()
                log.debug("%sapplying policy due to event", self.logprefix)
            else:

                # on a regular basis, we check for the current state of each process.
                # this is essentially a hedge against bugs in the HAAgent, or in the
                # ION events system that could prevent us from seeing state changes
                # of processes.
                log.debug(
                    "%sapplying policy due to timer. Reloading process cache first.",
                    self.logprefix)
                try:
                    self.control.reload_processes()
                except (Exception, gevent.Timeout):
                    log.warn(
                        "%sFailed to reload processes from PD. Will retry later.",
                        self.logprefix,
                        exc_info=True)

            try:
                self._apply_policy()
            except (Exception, gevent.Timeout):
                log.warn("%sFailed to apply policy. Will retry later.",
                         self.logprefix,
                         exc_info=True)
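
The 0.1-second `_policy_loop_event.wait` above doubles as loop pacing and shutdown signal: setting the event makes the `while not ...` condition true on the next check. A hedged sketch of the corresponding stop method (the name is an assumption):

    def _stop_policy_loop(self):
        # wake the wait() above and let the loop exit
        self._policy_loop_event.set()
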
Example #25
    def execute_resource(self, resource_id='', command=None):
        """Execute command on the resource represented by agent.
        @param resource_id The id of the resource agent.
        @param command An AgentCommand containing the command.
        @retval result An AgentCommandResult containing the result.
        @throws BadRequest if the command was malformed.
        @throws NotFound if the command is not available in current state.
        @throws ResourceError if the resource produced an error during execution.

        @param resource_id    str
        @param command    AgentCommand
        @retval result    AgentCommandResult
        @throws BadRequest    if the command was malformed.
        @throws NotFound    if the command is not implemented in the agent.
        @throws ResourceError    if the resource produced an error.
        """
        res_type = self._get_resource_type(resource_id)
        if self._has_agent(res_type):
            rac = ResourceAgentClient(resource_id=resource_id)
            return rac.execute_resource(resource_id=resource_id, command=command)

        cmd_res = None
        res_interface = self._get_type_interface(res_type)

        target = get_safe(res_interface, "commands.%s.execute" % command.command, None)
        if target:
            res = self._call_execute(target, resource_id, res_type, command.args, command.kwargs)
            cmd_res = AgentCommandResult(command_id=command.command_id,
                command=command.command,
                ts_execute=get_ion_ts(),
                status=0)
        else:
            log.warn("execute_resource(): command %s not defined", command.command)

        return cmd_res
Example #26
    def _load_datastore(self, path=None, ds_name=None, ignore_errors=True):
        ds = CouchDataStore(ds_name, config=self.config, scope=self.sysname)
        try:
            objects = []
            for fn in os.listdir(path):
                fp = os.path.join(path, fn)
                try:
                    with open(fp, 'r') as f:
                        yaml_text = f.read()
                    obj = yaml.load(yaml_text)
                    if obj and type(obj) is list and obj[0] == "COMPACTDUMP":
                        # compact dumps are ["COMPACTDUMP", [doc, ...]]
                        objects.extend(obj[1])
                    else:
                        objects.append(obj)
                except Exception as ex:
                    if ignore_errors:
                        log.warn("load error id=%s err=%s" % (fn, str(ex)))
                    else:
                        raise ex

            if objects:
                for obj in objects:
                    if "_rev" in obj:
                        del obj["_rev"]
                try:
                    res = ds.create_doc_mult(objects)
                    log.info("DatastoreLoader: Loaded %s objects into %s" %
                             (len(res), ds_name))
                except Exception as ex:
                    if ignore_errors:
                        log.warn("load error id=%s err=%s" % (fn, str(ex)))
                    else:
                        raise ex
        finally:
            ds.close()
Example #27
    def handle_attribute_value_event(self, driver_event):
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s",
                      self._platform_id, self._pp.pformat(driver_event.vals_dict))

        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())

        stream_name = driver_event.stream_name

        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn('%r: no publisher configured for stream_name=%r. '
                     'Configured streams are: %s',
                     self._platform_id, stream_name, self._data_publishers.keys())
            return

        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]

        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)

        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)
Example #28
    def _trigger_func(self, stream_id):

        while True:

            length = random.randint(1,20)

            c = [random.uniform(0.0,75.0)  for i in xrange(length)]

            t = [random.uniform(-1.7, 21.0) for i in xrange(length)]

            p = [random.lognormvariate(1,2) for i in xrange(length)]

            lat = [random.uniform(-90.0, 90.0) for i in xrange(length)]

            lon = [random.uniform(0.0, 360.0) for i in xrange(length)]

            tvar = [self.last_time + i for i in xrange(1,length+1)]

            self.last_time = max(tvar)

            ctd_packet = ctd_stream_packet(stream_id=stream_id,
                c=c, t=t, p=p, lat=lat, lon=lon, time=tvar)

            log.warn('SimpleCtdPublisher sending %d values!' % length)
            self.publisher.publish(ctd_packet)

            time.sleep(2.0)
Example #29
    def on_init(self):
        # Time in between event persists
        self.persist_interval = float(self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

        # List of blacklist entries (dicts); simple entries name a single event_type to skip
        self.persist_blacklist = self.CFG.get_safe("process.event_persister.persist_blacklist", [])

        self._event_type_blacklist = [entry['event_type'] for entry in self.persist_blacklist if entry.get('event_type', None) and len(entry) == 1]
        self._complex_blacklist = [entry for entry in self.persist_blacklist if not (entry.get('event_type', None) and len(entry) == 1)]
        if self._complex_blacklist:
            log.warn("EventPersister does not yet support complex blacklist expressions: %s", self._complex_blacklist)

        # Holds received events FIFO in synchronized queue
        self.event_queue = Queue()

        # Temporarily holds the list of events to persist while a datastore operation is in progress.
        # Events remain here if the datastore operation occasionally fails.
        self.events_to_persist = None

        # Number of unsuccessful attempts to persist in a row
        self.failure_count = 0

        # bookkeeping for timeout greenlet
        self._persist_greenlet = None
        self._terminate_persist = Event() # when set, exits the timeout greenlet

        # The event subscriber
        self.event_sub = None
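
The snippet builds the blacklists but does not show them being applied. A minimal sketch of how the simple event-type blacklist might be consulted before queueing an event (the method name is an assumption; complex expressions are explicitly unsupported per the warning above):

    def _in_blacklist(self, event):
        # skip persisting events whose type is blacklisted outright
        return event.type_ in self._event_type_blacklist
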
Example #30
    def _get_new_ctd_packet(self, stream_id, length):

        rdt = RecordDictionaryTool(taxonomy=tx)

        #Explicitly make these numpy arrays...
        c = numpy.array([random.uniform(0.0,75.0)  for i in xrange(length)]) 
        t = numpy.array([random.uniform(-1.7, 21.0) for i in xrange(length)]) 
        p = numpy.array([random.lognormvariate(1,2) for i in xrange(length)]) 
        lat = numpy.array([random.uniform(-90.0, 90.0) for i in xrange(length)]) 
        lon = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)]) 
        h = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)]) 
        tvar = numpy.array([self.last_time + i for i in xrange(1,length+1)]) 
        self.last_time = max(tvar)

        log.warn('Got time: %s' % str(tvar))
        log.warn('Got t: %s' % str(t))

        rdt['time'] = tvar
        rdt['lat'] = lat
        rdt['lon'] = lon
        rdt['height'] = h
        rdt['temp'] = t
        rdt['cond'] = c
        rdt['pres'] = p

#        rdt['coordinates'] = rdt0
#        rdt['data'] = rdt1

        g = build_granule(data_producer_id=stream_id, taxonomy=tx, record_dictionary=rdt)

        return g
Example #31
    def _trigger_func(self, stream_id):

        point_def = ctd_stream_definition(stream_id=stream_id)
        point_constructor = PointSupplementConstructor(point_definition=point_def)

        while True:

            length = 1

            c = [random.uniform(0.0,75.0)  for i in xrange(length)]

            t = [random.uniform(-1.7, 21.0) for i in xrange(length)]

            p = [random.lognormvariate(1,2) for i in xrange(length)]

            lat = [random.uniform(-90.0, 90.0) for i in xrange(length)]

            lon = [random.uniform(0.0, 360.0) for i in xrange(length)]

            tvar = [self.last_time + i for i in xrange(1,length+1)]

            self.last_time = max(tvar)

            point_id = point_constructor.add_point(time=tvar,location=(lon[0],lat[0]))
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='temperature', values=t)
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='pressure', values=p)
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='conductivity', values=c)

            ctd_packet = point_constructor.get_stream_granule()

            log.warn('SimpleCtdPublisher sending %d values!' % length)
            self.publisher.publish(ctd_packet)

            time.sleep(2.0)
Example #32
    def set_resource(self, resource_id='', params=None):
        """Set the value of the given resource parameters.
        @param resource_id The id of the resource agent.
        @param params A dict of resource parameter name-value pairs.
        @throws BadRequest if the command was malformed.
        @throws NotFound if a parameter is not supported by the resource.
        @throws ResourceError if the resource encountered an error while setting
        the parameters.

        @param resource_id    str
        @param params    dict
        @throws BadRequest    if the command was malformed.
        @throws NotFound    if the parameter does not exist.
        @throws ResourceError    if the resource failed while trying to set the parameter.
        """
        res_type = self._get_resource_type(resource_id)
        if self._has_agent(res_type):
            rac = ResourceAgentClient(resource_id=resource_id)
            return rac.set_resource(resource_id=resource_id, params=params)

        res_interface = self._get_type_interface(res_type)

        for param in params:
            setter = get_safe(res_interface, "params.%s.set" % param, None)
            if setter:
                self._call_setter(setter, params[param], resource_id, res_type)
            else:
                log.warn("set_resource(): param %s not defined", param)
Example #33
    def _start_port_agent(self, instrument_agent_instance_obj=None):
        """
        Construct and start the port agent, ONLY NEEDED FOR INSTRUMENT AGENTS.
        """

        _port_agent_config = instrument_agent_instance_obj.port_agent_config

        # It blocks until the port agent starts up or a timeout
        _pagent = PortAgentProcess.launch_process(_port_agent_config,  test_mode = True)
        pid = _pagent.get_pid()
        port = _pagent.get_data_port()
        cmd_port = _pagent.get_command_port()
        log.info("IMS:_start_pagent returned from PortAgentProcess.launch_process pid: %s ", pid)

        # Hack to get ready for DEMO. Further thought needs to be put into
        # how we pass this config info around.
        host = 'localhost'

        driver_config = instrument_agent_instance_obj.driver_config
        comms_config = driver_config.get('comms_config')
        if comms_config:
            host = comms_config.get('addr')
        else:
            log.warn("No comms_config specified, using '%s'" % host)

        # Configure driver to use port agent port number.
        instrument_agent_instance_obj.driver_config['comms_config'] = {
            'addr' : host,
            'cmd_port' : cmd_port,
            'port' : port
        }
        instrument_agent_instance_obj.driver_config['pagent_pid'] = pid
        self.imsclient.update_instrument_agent_instance(instrument_agent_instance_obj)
        return self.imsclient.read_instrument_agent_instance(instrument_agent_instance_obj._id)
Example #34
    def _set_attributes_and_ports_from_agent_device_map(self, nnode):
        """
        Sets the attributes and ports for the given NNode from
        self._agent_device_map.
        """
        platform_id = nnode.platform_id
        if platform_id not in self._agent_device_map:
            log.warn("%r: no entry in agent_device_map for platform_id",
                     self._platform_id, platform_id)
            return

        device_obj = self._agent_device_map[platform_id]
        log.info("%r: for platform_id=%r device_obj=%s",
                    self._platform_id, platform_id, device_obj)

        attrs = device_obj.platform_monitor_attributes
        ports = device_obj.ports

        for attr_obj in attrs:
            attr = Attr(attr_obj.id, {
                'name': attr_obj.name,
                'monitorCycleSeconds': attr_obj.monitor_rate,
                'units': attr_obj.units,
                })
            nnode.add_attribute(attr)

        for port_obj in ports:
            port = Port(port_obj.port_id, port_obj.ip_address)
            nnode.add_port(port)
Example #35
    def undeclare_exchange_name(self, canonical_name='', exchange_space_id=''):
        """Remove an exhange name resource
        """
        exchange_space_obj = self._validate_resource_id("exchange_space_id", exchange_space_id, RT.ExchangeSpace,
                                                        optional=True)
        # TODO: currently we are using the exchange_name's id as the canonical name and exchange_space_id is unused?
        exchange_name = self.rr.read(canonical_name)
        if not exchange_name:
            raise NotFound("ExchangeName with id %s does not exist" % canonical_name)

        exchange_name_id = exchange_name._id        # yes, this should be same, but let's make it look cleaner

        # Get associated XS first
        exchange_space_list, assocs = self.rr.find_subjects(RT.ExchangeSpace, PRED.hasExchangeName, exchange_name_id)
        if len(exchange_space_list) != 1:
            log.warn("ExchangeName %s has no associated Exchange Space" % exchange_name_id)

        exchange_space = exchange_space_list[0] if exchange_space_list else None

        # Remove association between itself and XS
        for assoc in assocs:
            self.rr.delete_association(assoc._id)

        # Remove XN
        self.rr.delete(exchange_name_id)

        # Call container API
        if exchange_space:
            xs = self.container.ex_manager.create_xs(exchange_space.name, declare=False)
            xn = self.container.ex_manager._create_xn(exchange_name.xn_type, exchange_name.name, xs, declare=False)
            self.container.ex_manager.delete_xn(xn)
Example #36
    def _register_service(self):

        definition = self.process_definition
        existing_services, _ = self.container.resource_registry.find_resources(
            restype="Service", name=definition.name)

        if len(existing_services) > 0:
            if len(existing_services) > 1:
                log.warning("There is more than one service object for %s. Using the first one" % definition.name)
            service_id = existing_services[0]._id
        else:
            svc_obj = Service(name=definition.name, exchange_name=definition.name)
            service_id, _ = self.container.resource_registry.create(svc_obj)

        svcdefs, _ = self.container.resource_registry.find_resources(
            restype="ServiceDefinition", name=definition.name)

        if svcdefs:
            try:
                self.container.resource_registry.create_association(
                    service_id, "hasServiceDefinition", svcdefs[0]._id)
            except BadRequest:
                log.warn("Failed to associate %s Service and ServiceDefinition. It probably exists.",
                    definition.name)
        else:
            log.error("Cannot find ServiceDefinition resource for %s",
                    definition.name)

        return service_id, definition.name
Example #37
    def _policy_thread_loop(self):
        """Single thread runs policy loops, to prevent races
        """
        while True:
            # wait until our event is set, up to policy_interval seconds
            self.policy_event.wait(self.policy_interval)
            if self.policy_event.is_set():
                self.policy_event.clear()
                log.debug("%sapplying policy due to event", self.logprefix)
            else:

                # on a regular basis, we check for the current state of each process.
                # this is essentially a hedge against bugs in the HAAgent, or in the
                # ION events system that could prevent us from seeing state changes
                # of processes.
                log.debug("%sapplying policy due to timer. Reloading process cache first.",
                    self.logprefix)
                try:
                    self.control.reload_processes()
                except (Exception, gevent.Timeout):
                    log.warn("%sFailed to reload processes from PD. Will retry later.",
                        self.logprefix, exc_info=True)

            try:
                self._apply_policy()
            except (Exception, gevent.Timeout):
                log.warn("%sFailed to apply policy. Will retry later.",
                    self.logprefix, exc_info=True)
Example #38
    def _call_plugins(self, method, process, config, **kwargs):
        bootstrap_plugins = config.get_safe("bootstrap_plugins", None)
        if bootstrap_plugins is None:
            log.warn("Bootstrapper called without bootstrap_plugins config")

        # Finding the system actor ID. If found, construct call context headers.
        # This may be called very early in bootstrap with no system actor yet existing
        system_actor, _ = process.container.resource_registry.find_resources(
            RT.ActorIdentity, name=self.CFG.system.system_actor, id_only=False)

        actor_headers = get_system_actor_header(
            system_actor[0] if system_actor else None)

        # Set the call context of the current process
        with process.push_context(actor_headers):

            for plugin_info in bootstrap_plugins:
                plugin_mod, plugin_cls = plugin_info.get(
                    "plugin", [None, None])
                plugin_cfg = plugin_info.get("config", None)
                plugin_cfg = dict_merge(
                    config, plugin_cfg) if plugin_cfg is not None else config

                try:
                    log.info("Bootstrapping plugin %s.%s ...", plugin_mod,
                             plugin_cls)
                    plugin = for_name(plugin_mod, plugin_cls)
                    plugin_func = getattr(plugin, method)
                    plugin_func(process, plugin_cfg, **kwargs)
                except AbortBootstrap as abort:
                    raise
                except Exception as ex:
                    log.exception("Error bootstrapping plugin %s.%s",
                                  plugin_mod, plugin_cls)
Example #39
 def acquire_samples(self, max_samples=0):
     log.debug('Orb_DataAgentPlugin.acquire_samples')
     if os.path.exists(self.data_dir):
         files = os.listdir(self.data_dir)
         cols = []
         rows = []
         for f in files:
             fpath = self.data_dir + f
             with open(fpath) as fh:
                 try:
                     pkt = json.load(fh)
                     if not cols:
                         cols = [str(c['chan']) for c in pkt['channels']]              
                     row = self._extract_row(pkt, cols)
                     dims = [len(c) for c in row[:3]]
                     if all(d==400 for d in dims):
                         rows.append(row)
                     else:
                         log.warning('Inconsistent dimensions %s, %s' % (str(dims), fpath))
                     fh.close()
                     os.remove(fpath)
                     log.info('sample: ' + fpath)
                 except Exception as ex:
                     log.warn(ex)
                     log.warn('Incomplete packet %s' % fpath)
                     
         if cols and rows:
             coltypes = {}
             for c in cols:
                 coltypes[c] = '400i4'
             cols.append('time')
             samples = dict(cols=cols, data=rows, coltypes=coltypes)
             return samples
Example #40
 def shutdown(self):
     if self._spawn_greenlets:
         try:
             gevent.killall(list(self._spawn_greenlets), block=True)
         except Exception:
             log.warn("Ignoring error while killing spawn greenlets", exc_info=True)
         self._spawn_greenlets.clear()
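
A hedged sketch of the bookkeeping this shutdown assumes: greenlets are added to `_spawn_greenlets` when spawned and drop out of the set once they finish (the `_spawn` wrapper is an assumption):

    def _spawn(self, func, *args, **kwargs):
        gl = gevent.spawn(func, *args, **kwargs)
        self._spawn_greenlets.add(gl)
        # remove finished greenlets so shutdown only kills live ones
        gl.link(lambda g: self._spawn_greenlets.discard(g))
        return gl
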
Example #41
    def _get_granule_header_errors(self, granule, granule_def):
        """
        Verify all parameters defined in the header:
        - Stream type
        - Internal timestamp
        """
        errors = []
        granule_dict = self._granule_as_dict(granule)
        granule_timestamp = granule_dict.get('internal_timestamp')
        expected_time = granule_def.get('internal_timestamp')

        # Verify the timestamp
        if granule_timestamp and not expected_time:
            errors.append("granule_timestamp defined in granule, but not expected")
        elif not granule_timestamp and expected_time:
            errors.append("granule_timestamp expected, but not defined in granule")

        # If we have a timestamp AND expect one then compare values
        elif (granule_timestamp and
              granule_timestamp != self._string_to_ntp_date_time(expected_time)):
            errors.append("expected internal_timestamp mismatch, %f != %f (%f)" %
                (self._string_to_ntp_date_time(expected_time), granule_timestamp,
                 self._string_to_ntp_date_time(expected_time)- granule_timestamp))

        # verify the stream name
        granule_stream = granule_dict.get('stream_name')
        if granule_stream:
            expected_stream = self._result_set_header['granule_type']
            if granule_stream != expected_stream:
                errors.append("expected stream name mismatch: %s != %s" %
                              (expected_stream, granule_stream))
        else:
            log.warn("No stream defined in granule.  Not verifying stream name.")

        return errors
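
`_string_to_ntp_date_time` is not shown. A plausible sketch, converting an ISO-8601 string (the input format is an assumption) to seconds since the NTP epoch (1900-01-01), i.e. Unix time plus the 2208988800-second epoch offset:

    import calendar
    from datetime import datetime

    def _string_to_ntp_date_time(self, datetime_str):
        dt = datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%S.%fZ')
        # timegm interprets the struct_time as UTC; keep fractional seconds
        return calendar.timegm(dt.timetuple()) + dt.microsecond / 1e6 + 2208988800.0
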
Example #42
    def _call_plugins(self, method, process, config, **kwargs):
        bootstrap_plugins = config.get_safe("bootstrap_plugins", None)
        if bootstrap_plugins is None:
            log.warn("Bootstrapper called without bootstrap_plugins config")

        # Finding the system actor ID. If found, construct call context headers.
        # This may be called very early in bootstrap with no system actor yet existing
        system_actor, _ = process.container.resource_registry.find_resources(
            RT.ActorIdentity, name=self.CFG.system.system_actor, id_only=True
        )
        system_actor_id = system_actor[0] if system_actor else "anonymous"

        actor_headers = {
            "ion-actor-id": system_actor_id,
            "ion-actor-roles": {"ION": ["ION_MANAGER", "ORG_MANAGER"]} if system_actor else {},
        }

        # Set the call context of the current process
        with process.push_context(actor_headers):

            for plugin_info in bootstrap_plugins:
                plugin_mod, plugin_cls = plugin_info.get("plugin", [None, None])
                plugin_cfg = plugin_info.get("config", None)
                plugin_cfg = dict_merge(config, plugin_cfg) if plugin_cfg is not None else config

                try:
                    log.info("Bootstrapping plugin %s.%s ...", plugin_mod, plugin_cls)
                    plugin = for_name(plugin_mod, plugin_cls)
                    plugin_func = getattr(plugin, method)
                    plugin_func(process, plugin_cfg, **kwargs)
                except AbortBootstrap as abort:
                    raise
                except Exception as ex:
                    log.exception("Error bootstrapping plugin %s.%s", plugin_mod, plugin_cls)
Example #43
    def terminate_realtime_visualization_data(self, query_token=''):
        """This operation terminates and cleans up resources associated with realtime visualization data. This operation requires a
        user specific token which was provided from a previsou request to the init_realtime_visualization operation.

        @param query_token    str
        @throws NotFound    Throws if specified query_token or its visualization product does not exist
        """

        if not query_token:
            raise BadRequest("The query_token parameter is missing")


        subscription_ids = self.clients.resource_registry.find_resources(restype=RT.Subscription, name=query_token, id_only=True)

        if not subscription_ids:
            raise BadRequest("A Subscription object for the query_token parameter %s is not found" % query_token)


        if len(subscription_ids[0]) > 1:
            log.warn("An inconsistent number of Subscription resources associated with the name: %s - using the first one in the list",query_token )

        subscription_id = subscription_ids[0][0]

        self.clients.pubsub_management.deactivate_subscription(subscription_id)

        self.clients.pubsub_management.delete_subscription(subscription_id)

        #Taking advantage of idempotency
        xq = self.container.ex_manager.create_xn_queue(query_token)

        self.container.ex_manager.delete_xn(xq)
Example #44
    def _set_calibration_for_data_product(self, dp_obj, dev_cfg):
        from ion.util.direct_coverage_utils import DirectCoverageAccess
        from coverage_model import SparseConstantType

        log.debug("Setting calibration for data product '%s'", dp_obj.name)
        dataset_ids, _ = self.rr.find_objects(dp_obj, PRED.hasDataset, id_only=True)
        publisher = EventPublisher(OT.InformationContentModifiedEvent)
        if not dataset_ids:
            data_product_management = DataProductManagementServiceProcessClient(process=self)
            log.debug(" Creating dataset for data product %s", dp_obj.name)
            data_product_management.create_dataset_for_data_product(dp_obj._id)
            dataset_ids, _ = self.rr.find_objects(dp_obj, PRED.hasDataset, id_only=True)
            if not dataset_ids:
                raise NotFound('No datasets were found for this data product, ensure that it was created')
        for dataset_id in dataset_ids:
            # Synchronize with ingestion
            with DirectCoverageAccess() as dca:
                cov = dca.get_editable_coverage(dataset_id)
                # Iterate over the calibrations
                for cal_name, contents in dev_cfg.iteritems():
                    if cal_name in cov.list_parameters() and isinstance(cov.get_parameter_context(cal_name).param_type, SparseConstantType):
                        value = float(contents['value'])
                        log.info(' Updating Calibrations for %s in %s', cal_name, dataset_id)
                        cov.set_parameter_values(cal_name, value)
                    else:
                        log.warn(" Calibration %s not found in dataset", cal_name)
                publisher.publish_event(origin=dataset_id, description="Calibrations Updated")
        publisher.close()
        log.info("Calibration set for data product '%s' in %s coverages", dp_obj.name, len(dataset_ids))
Example #45
    def execute(self, granule):
        """Process an incoming granule of L0 conductivity data.
        """
        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)

        conductivity = psd.get_values('conductivity')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))


        # The L1 conductivity data product algorithm takes the L0 conductivity data product and converts it
        # into Siemens per meter (S/m):
        #    SBE 37IM Output Format 0
        #    1) Standard conversion from 5-character hex string to decimal
        #    2) Scaling
        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### The stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(conductivity)):
            scaled_conductivity = (conductivity[i] / 100000.0) - 0.5
            point_id = psc.add_point(time=time[i], location=(longitude[i], latitude[i], height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=scaled_conductivity)

        return psc.close_stream_granule()
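As a quick sanity check of the scaling step, here is the same arithmetic as a standalone sketch, including the hex-to-decimal conversion mentioned in the comment above (the helper name is invented):

def l0_hex_to_l1_conductivity(raw_hex):
    # 1) standard conversion from 5-character hex string to decimal counts
    counts = int(raw_hex, 16)          # e.g. "186A0" -> 100000
    # 2) scaling into Siemens per meter (S/m)
    return (counts / 100000.0) - 0.5   # -> 0.5 S/m for "186A0"

assert l0_hex_to_l1_conductivity("186A0") == 0.5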
Example #46
    def get(self, *args, **kwargs):
        #TODO: Add documentation
        #TODO: Fix raises statements
        """
        Called from:
                      InstrumentAgent._handler_get_params

        @raises InstrumentTimeoutException:
        @raises InstrumentProtocolException:
        @raises NotImplementedException:
        @raises InstrumentParameterException:
        """
        try:
            pnames = args[0]
        except IndexError:
            log.warn("No argument provided to get, return all parameters")
            pnames = [DataHandlerParameter.ALL]

        result = None
        if DataHandlerParameter.ALL in pnames:
            result = self._params
        else:
            if not isinstance(pnames, (list, tuple)):
                raise InstrumentParameterException('Get argument not a list or tuple: {0}'.format(pnames))
            result = {}
            for pn in pnames:
                try:
                    log.debug('Get parameter with key: {0}'.format(pn))
                    result[pn] = self._params[pn]
                except KeyError:
                    log.debug('\'{0}\' not found in self._params'.format(pn))
                    raise InstrumentParameterException('{0} is not a valid parameter for this DataHandler.'.format(pn))

        return result
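A hypothetical call pattern against the accessor above; the parameter name is invented, and only DataHandlerParameter.ALL comes from the code.

# Assuming `dh` is an initialized DataHandler instance:
everything = dh.get()                    # no args: warns, then returns all params
subset = dh.get(['POLLING_INTERVAL'])    # list of names -> {name: value}
# dh.get('POLLING_INTERVAL') raises InstrumentParameterException,
# since a bare string is neither a list nor a tuple.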
Example #47
    def start_agent(self, agent_instance_id, resource_id):
        if not agent_instance_id or not resource_id:
            log.warn("Could not %s agent %s for device %s", self.op, agent_instance_id, resource_id)
            return

        res_obj = self.rr.read(resource_id)

        log.info('Starting agent...')
        if res_obj.type_ == RT.ExternalDatasetAgentInstance or res_obj.type_ == RT.ExternalDataset:
            dams = DataAcquisitionManagementServiceProcessClient(process=self)
            dams.start_external_dataset_agent_instance(agent_instance_id)
        elif res_obj.type_ == RT.InstrumentDevice:
            ims = InstrumentManagementServiceClient()
            ims.start_instrument_agent_instance(agent_instance_id)
        elif res_obj.type_ == RT.PlatformDevice:
            ims = InstrumentManagementServiceClient()
            ims.start_platform_agent_instance(agent_instance_id)
        else:
            raise BadRequest("Attempt to start unsupported agent type: %s" % res_obj.type_)
        log.info('Agent started!')

        activate = self.CFG.get("activate", True)
        if activate:
            log.info('Activating agent...')
            client = ResourceAgentClient(resource_id, process=self)
            client.execute_agent(AgentCommand(command=ResourceAgentEvent.INITIALIZE))
            client.execute_agent(AgentCommand(command=ResourceAgentEvent.GO_ACTIVE))
            client.execute_agent(AgentCommand(command=ResourceAgentEvent.RUN))
            client.execute_resource(command=AgentCommand(command=DriverEvent.START_AUTOSAMPLE))

            log.info('Agent active!')
Example #48
    def _retrieve_attribute_value(self):
        """
        Retrieves the attribute value using the given function and calls
        _values_retrieved with retrieved values.
        """
        attrNames = [self._attr_id]
        from_time = (self._last_ts + _DELTA_TIME) if self._last_ts else 0.0

        if log.isEnabledFor(logging.DEBUG):
            log.debug("CIDEVSA-450 %r: retrieving attribute %r from_time %f",
                      self._platform_id, self._attr_id, from_time)

        retrieved_vals = self._get_attribute_values(attrNames, from_time)

        if log.isEnabledFor(logging.DEBUG):
            log.debug("CIDEVSA-450 %r: retrieved attribute %r values from_time %f = %s",
                      self._platform_id, self._attr_id, from_time, str(retrieved_vals))

        if self._attr_id in retrieved_vals:
            values = retrieved_vals[self._attr_id]
            if values:
                self._values_retrieved(values)

            elif log.isEnabledFor(logging.DEBUG):
                log.debug("CIDEVSA-450 %r: No values reported for attribute=%r from_time=%f",
                    self._platform_id, self._attr_id, from_time)
        else:
            log.warn("CIDEVSA-450 %r: unexpected: response does not include requested attribute %r",
                self._platform_id, self._attr_id)
Example #49
def get_user(username, password, *args, **kwargs):
    """ Callback to assert username/password and establish a user session.
    Used in password authentication flow. Called only during token creation.

    NOTE: If the same username logs in multiple times (e.g. from different windows),
    the most recent session overrides all others, which nevertheless remain valid.
    """
    log.info("OAuth:get_user(%s)", username)
    if username and password:
        try:
            actor_id = ui_instance.idm_client.check_actor_credentials(username, password)
            actor_user = ui_instance.idm_client.read_actor_identity(actor_id)
            if actor_user.details.type_ != OT.UserIdentityDetails:
                log.warn("Bad identity details")
                return None

            # Create user session dict for ActorIdentity
            full_name = actor_user.details.contact.individual_names_given + " " + actor_user.details.contact.individual_name_family
            valid_until = int(get_ion_ts_millis() / 1000 + ui_instance.session_timeout)  # Int with seconds
            user_session = {"actor_id": actor_id, "user_id": actor_id, "username": username, "full_name": full_name, "valid_until": valid_until}
            actor_user.session = user_session

            ui_instance.container.resource_registry.update(actor_user)

            flask.g.actor_id = actor_id
            return user_session
        except NotFound:
            pass
    return None
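For context, a sketch of how such a callback is typically registered for flask-oauthlib's resource-owner password flow; the Flask wiring below is an assumption and not taken from this codebase.

from flask import Flask
from flask_oauthlib.provider import OAuth2Provider

app = Flask(__name__)
oauth = OAuth2Provider(app)

@oauth.usergetter
def get_user(username, password, *args, **kwargs):
    ...  # body as in the example above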
Example #50
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace("%r: ignoring event type %r. Only handle %r directly",
                          self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
                     "%s\n"
                     "%40s : %s\n",
                     self._platform_id, state, self.resource_id, statuses,
                     "invalidated_children", invalidated_children)
Example #51
 def process_packet_cb(packet, route, stream):
     if not isinstance(packet, DataPacket):
         log.warn("Received a non DataPacket message")
     self.recv_packets.append(packet)
     self.recv_rows += len(packet.data["data"])
     log.info("Received data packet #%s: rows=%s, cols=%s", len(self.recv_packets), len(packet.data["data"]),
              packet.data["cols"])
Example #52
    def _device_terminated_event(self, origin):
        """
        Reacts to the notification that a child agent has been terminated.

        - notifies platform to invalidate the child
        - set UNKNOWN for the corresponding child_agg_status
        - update rollup_status and do publication in case of change

        @param origin    the origin (resource_id) of the child
        """

        # notify platform:
        log.debug("%r: notifying agent _child_terminated: origin=%r", self._platform_id, origin)
        self._agent._child_terminated(origin)

        if origin not in self.aparam_child_agg_status:
            log.warn("%r: OOIION-1077 _device_terminated_event: unrecognized origin=%r", self._platform_id, origin)
            return

        log.debug("%r: OOIION-1077 _device_terminated_event: origin=%r", self._platform_id, origin)

        # set entries to UNKNOWN:
        self._initialize_child_agg_status(origin)

        # update rollup_status and publish in case of change:
        for status_name in AggregateStatusType._str_map.keys():
            self._update_rollup_status_and_publish(status_name, origin)
Example #53
    def _device_removed_event(self, evt):
        """
        Handles the device_removed event to remove associated information and
        status updates, which may result in events being published.
        """

        # the actual child removed is in the values component of the event:
        if isinstance(evt.values, (list, tuple)):
            # normally it will be just one element but handle as array:
            for sub_resource_id in evt.values:
                self._remove_child(sub_resource_id)
        else:
            log.warn("%r: Got device_removed event with invalid values member: %r",
                     self._platform_id, evt)
            return

        # finally forward event so ancestors also get notified:
        # only adjustment is that now my platform's resource_id is the origin:
        evt = dict(event_type  = evt.type_,
                   sub_type    = evt.sub_type,
                   origin_type = evt.origin_type,
                   origin      = self.resource_id,
                   description = evt.description,
                   values      = evt.values)
        try:
            log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                      self._platform_id, evt)

            self._event_publisher.publish_event(**evt)

        except Exception:
            log.exception('%r: platform agent could not publish event: %s',
                          self._platform_id, evt)
Example #54
    def on_restart(self, process, config, **kwargs):
        self.process = process
        inst_objs, _ = process.container.resource_registry.find_resources(restype=RT.Instrument, id_only=False)
        active_agents = []
        for inst in inst_objs:
            if len(inst.agent_state) >= 1:
                active_agents.append(inst._id)

        if not active_agents:
            return

        log.info("Restarting %s agents: %s", len(active_agents), active_agents)

        svc_client = ScionManagementProcessClient(process=process)
        for inst_id in active_agents:
            try:
                svc_client.start_agent(inst_id)
            except Exception as ex:
                log.exception("Cannot restart agent for %s" % inst_id)
                if "Agent already active" in ex.message:
                    try:
                        svc_client.stop_agent(inst_id)
                    except Exception:
                        pass
                    try:
                        svc_client.start_agent(inst_id)
                    except Exception:
                        log.warn("Agent stop/start for %s unsuccessful" % inst_id)
Example #55
    def stop_launched_simulator(cls):
        """
        Utility to stop the process launched with launch_simulator.
        The stop is attempted a few times in case of errors (with a few
        seconds of sleep in between).

        @return None if process seems to have been stopped properly.
                Otherwise the exception of the last attempt to stop it.
        """
        if cls._sim_process:
            sim_proc, cls._sim_process = cls._sim_process, None
            attempts = 3
            attempt = 0
            while attempt <= attempts:
                attempt += 1
                log.debug("[OMSim] stopping launched simulator (attempt=%d) ...", attempt)
                try:
                    sim_proc.stop()
                    log.debug("[OMSim] simulator process seems to have stopped properly")
                    return None

                except Exception as ex:
                    if attempt < attempts:
                        sleep(10)
                    else:
                        log.warn("[OMSim] error while stopping simulator process: %s", ex)
                        return ex
Example #56
    def subplatform_launched(self, pa_client, sub_resource_id):
        """
        PlatformAgent calls this to indicate that a child sub-platform has been
        launched.

        - Since the sub-platform may already have been running by the time
        the PlatformAgent is to add it, this method directly gets the
        "rollup_status" and the "child_agg_status" of the child and does the
        updates here.

        NOTE: *no* publications of DeviceAggregateStatusEvent events are done
        because ancestors may not yet have entries for this platform.

        - also does the corresponding "device_added" event publication.

        @param pa_client        sub-platform's resource client
        @param sub_resource_id  sub-platform's resource ID
        """

        self._start_subscriber_resource_agent_lifecycle_event(sub_resource_id)

        # do any updates from sub-platform's rollup_status and child_agg_status:
        try:
            resp = pa_client.get_agent(['child_agg_status', 'rollup_status'])
            child_child_agg_status = resp['child_agg_status']
            child_rollup_status = resp['rollup_status']

            log.trace(
                "%r: retrieved from sub-platform %r: "
                "child_agg_status=%s  rollup_status=%s", self._platform_id,
                sub_resource_id, child_child_agg_status, child_rollup_status)

            with self._lock:

                # take the child's child_agg_status'es:
                for sub_origin, sub_statuses in child_child_agg_status.iteritems():
                    self._prepare_new_child(sub_origin, False, sub_statuses)

                # update my own child_agg_status from the child's rollup_status
                # and also my rollup_status:
                for status_name, status in child_rollup_status.iteritems():
                    self.aparam_child_agg_status[sub_resource_id][status_name] = status
                    self._update_rollup_status(status_name)

            log.trace(
                "%r: my updated child status after processing sub-platform %r: %s",
                self._platform_id, sub_resource_id,
                self.aparam_child_agg_status)

        except Exception as e:
            log.warn(
                "%r: could not get rollup_status or reported rollup_status is "
                "invalid from sub-platform %r: %s", self._platform_id,
                sub_resource_id, e)

        # publish device_added event:
        self.publish_device_added_event(sub_resource_id)
Example #57
    def on_init(self):
        # Time in between event persists
        self.persist_interval = float(
            self.CFG.get_safe("process.event_persister.persist_interval", 1.0))

        self.persist_blacklist = self.CFG.get_safe(
            "process.event_persister.persist_blacklist", {})

        self._event_type_blacklist = [
            entry['event_type'] for entry in self.persist_blacklist
            if entry.get('event_type', None) and len(entry) == 1
        ]
        self._complex_blacklist = [
            entry for entry in self.persist_blacklist
            if not (entry.get('event_type', None) and len(entry) == 1)
        ]
        if self._complex_blacklist:
            log.warn(
                "EventPersister does not yet support complex blacklist expressions: %s",
                self._complex_blacklist)

        # Time in between view refreshes
        self.refresh_interval = float(
            self.CFG.get_safe("process.event_persister.refresh_interval",
                              60.0))

        # Holds received events FIFO in a synchronized queue
        self.event_queue = Queue()

        # Temporarily holds the list of events to persist while a datastore operation is not yet complete.
        # Events to persist remain here if a datastore operation occasionally fails.
        self.events_to_persist = None

        # Number of unsuccessful attempts to persist in a row
        self.failure_count = 0

        # Bookkeeping for greenlets
        self._persist_greenlet = None
        self._terminate_persist = Event()    # when set, exits the persister greenlet
        self._refresh_greenlet = None
        self._terminate_refresh = Event()    # when set, exits the refresher greenlet

        # The event subscriber
        self.event_sub = None

        # Registered event process plugins
        self.process_plugins = {}
        for plugin_name, plugin_cls, plugin_args in PROCESS_PLUGINS:
            try:
                plugin = named_any(plugin_cls)(**plugin_args)
                self.process_plugins[plugin_name] = plugin
                log.info("Loaded event processing plugin %s (%s)", plugin_name,
                         plugin_cls)
            except Exception as ex:
                log.error(
                    "Cannot instantiate event processing plugin %s (%s): %s",
                    plugin_name, plugin_cls, ex)
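A minimal sketch of how the single-key blacklist computed in on_init might be consulted when persisting events; the helper name is an assumption, not the actual persister code.

    def _is_blacklisted(self, event):
        # Only simple single-key expressions are honored; complex entries
        # were warned about in on_init and are ignored here.
        return event.type_ in self._event_type_blacklist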
Example #58
 def _load_uri_aliases(cls):
     try:
         with open(_URI_ALIASES_FILENAME) as f:
             cls._uri_aliases = yaml.load(f)
         if log.isEnabledFor(logging.TRACE):
             log.trace("Loaded CGSN URI aliases = %s" % cls._uri_aliases)
     except Exception as e:
         log.warn("Cannot load %s: %s" % (_URI_ALIASES_FILENAME, e))
         cls._uri_aliases = {}
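For reference, the loader above implies the YAML file parses to a flat alias-to-URI mapping; a hypothetical example of the loaded result (keys and values invented):

 # Hypothetical contents of cls._uri_aliases after loading:
 _uri_aliases = {
     "embsimulator": "embedded://simulator",
     "localsimulator": "http://localhost:7700",
 }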
Example #60
 def _add_ui_refassoc(self, sub_refid, predicate, obj_refid):
     # Create a pre-association based on UI refids (not object IDs)
     if not sub_refid or not obj_refid:
         log.warn("Association not complete: %s (%s) -> %s" %
                  (sub_refid, predicate, obj_refid))
     else:
         refassoc = (sub_refid, predicate, obj_refid)
         self.ref_assocs.append(refassoc)