def _uncamel(cls, name):
     """
     convert CamelCase to camel_case, from http://stackoverflow.com/a/1176023/2063546
     """
     log.trace("name is %s: '%s'" % (type(name).__name__, name))
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
     return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
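For reference, here is a minimal standalone sketch of the same two-step regex conversion; the function name and sample inputs are illustrative, not taken from the source:

import re

def uncamel(name):
    # first pass: put an underscore before an uppercase letter that starts a lowercase run
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # second pass: split a lowercase letter or digit from a following uppercase, then lowercase all
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(uncamel('CamelCase'))        # camel_case
print(uncamel('HTTPServerError'))  # http_server_error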
Example No. 2
    def _get_array_shape_from_slice(self, slice_):
        """
        Calculates and returns the shape of the slice in each dimension of the total domain

        @param slice_   Requested slice
        @return A tuple object denoting the shape of the slice in each dimension of the total domain
        """

        log.debug('Getting array shape for slice_: %s', slice_)

        vals = self.brick_list.values()
        log.trace('vals: %s', vals)
        if len(vals) == 0:
            return 0
        # Calculate the min and max brick value indices for each dimension
        if len(vals[0][1]) > 1:
            min_len = min([min(*x[0][i])+1 for i,x in enumerate(vals)])
            max_len = max([min(*x[0][i])+min(x[3]) for i,x in enumerate(vals)])
        else:
            min_len = min([min(*x[0])+1 for i,x in enumerate(vals)])
            max_len = max([min(*x[0])+min(x[3]) for i,x in enumerate(vals)])

        maxes = [max_len, min_len]

        # Calculate the shape based on the type of slice_
        shp = []
        for i, s in enumerate(slice_):
            if isinstance(s, int):
                shp.append(1)
            elif isinstance(s, (list,tuple)):
                shp.append(len(s))
            elif isinstance(s, slice):
                shp.append(len(range(*s.indices(maxes[i])))) # TODO: Does not support n-dimensional

        return tuple(shp)
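The slice branch above leans on slice.indices() to clamp a requested slice to the dimension's extent; a small illustrative sketch (the dimension length of 20 is an arbitrary assumption):

s = slice(2, 50, 3)
start, stop, step = s.indices(20)     # clamps the open-ended slice to (2, 20, 3)
print(len(range(start, stop, step)))  # 6 -> this dimension's contribution to the shape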
Example No. 3
    def insert_spans(self, uuid, spans, cur):
        log.debug("Inserting spans")
        try:
            for span in spans:
                cols, values = self.span_values(uuid, span)
                dic = dict(zip(cols, values))
                if len(cols) > 0:
                    span_addr = span.address.get_db_str()
                    statement = ''
                    if self._span_exists(span_addr):
                        statement = ''.join(['UPDATE ', self.span_table_name, ' SET '])
                        for k, v in dic.iteritems():
                            statement = ''.join([statement, k, '=', v, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, " WHERE span_address = '", span_addr, "'"])
                    else:
                        statement = """INSERT into """ + self.span_table_name + """ ("""
                        for col in cols:
                            statement = ''.join([statement, col, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, """) VALUES ("""])
                        for val in values:
                            statement = ''.join([statement, val, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, """)"""])

                    log.trace("Inserting span into datastore: %s", statement)
                    with self.span_store.pool.cursor(**self.datastore.cursor_args) as cur:
                        cur.execute(statement)
        except Exception as ex:
            log.warn('Unable to insert spans %s %s', str(spans), ex.message)
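The statements above are assembled by string concatenation; as a design note, the same insert can be expressed with DB-API parameter binding so the driver escapes the values. A minimal sketch using sqlite3 (the table and column names are illustrative; psycopg2 would use %s placeholders instead of ?):

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE span_table (coverage_id TEXT, vertical_range TEXT)')

cols = ['coverage_id', 'vertical_range']          # illustrative column names
values = ['abc123', '[0.0,10.0]']
placeholders = ', '.join(['?'] * len(values))
statement = 'INSERT INTO span_table (%s) VALUES (%s)' % (', '.join(cols), placeholders)
cur.execute(statement, values)                    # values are bound by the driver, not concatenated
conn.commit()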
Example No. 4
    def _load(self):
        try:
            results = DBFactory.get_db().get(self.guid)
            for key in results:
                val = results[key]
                if isinstance(val, basestring) and val.startswith('DICTABLE'):
                    i = val.index('|', 9)
                    smod, sclass = val[9:i].split(':')
                    value = unpack(val[i+1:])
                    module = __import__(smod, fromlist=[sclass])
                    classobj = getattr(module, sclass)
                    value = classobj._fromdict(value)
                elif key in ('root_dir', 'file_path'):
                    # No op - set in constructor
                    continue
                elif key == 'brick_tree':
                    setattr(self, key, RTreeProxy.deserialize(val))
                    continue
                elif key == 'span_collection':
                    unpacked = unpack(val)
                    value = SpanCollectionByFile.from_str(unpacked)
                    log.trace("Reconstructed SpanCollection for %s: %s", self.guid, str(value))
                else:
                    value = unpack(val)

                if isinstance(value, tuple):
                    value = list(value)

                setattr(self, key, value)

        except Exception as e:
            log.error("Caught exception reconstructing metadata for guid %s : %s", self.guid, e.message)
            raise
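For clarity, the 'DICTABLE' branch expects values laid out as a literal prefix, a module:class pair, and a packed payload; a small sketch of that parsing with a made-up value:

val = 'DICTABLE|mypkg.shapes:Circle|<packed-dict-payload>'  # illustrative value
i = val.index('|', 9)                 # 9 skips the 'DICTABLE|' prefix (8 letters plus the bar)
smod, sclass = val[9:i].split(':')    # ('mypkg.shapes', 'Circle')
payload = val[i+1:]                   # what unpack() would be handed
print(smod, sclass, payload)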
Example No. 5
 def typecache_add(self, resource_id, resource_type):
     """
     add an entry to the internal cache of resource types by id
     """
     assert resource_type in RT
     log.trace("typecache_add type %s", resource_type)
     self._type_lookup[resource_id] = resource_type
Example No. 6
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace("%r: ignoring event type %r. Only handle %r directly",
                          self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
                     "%s\n"
                     "%40s : %s\n",
                     self._platform_id, state, self.resource_id, statuses,
                     "invalidated_children", invalidated_children)
Example No. 7
 def _uncamel(self, name):
     """
     convert CamelCase to camel_case, from http://stackoverflow.com/a/1176023/2063546
     """
     log.trace("name is %s: '%s'" % (type(name).__name__, name))
     s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
     return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
Example No. 8
 def typecache_add(self, resource_id, resource_type):
     """
     add an entry to the internal cache of resource types by id
     """
     assert resource_type in RT
     log.trace("typecache_add type %s", resource_type)
     self._type_lookup[resource_id] = resource_type
Example No. 9
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace(
                    "%r: ignoring event type %r. Only handle %r directly",
                    self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info(
                "%r/%s: (%s) status report triggered by diagnostic event:\n"
                "%s\n"
                "%40s : %s\n", self._platform_id, state, self.resource_id,
                statuses, "invalidated_children", invalidated_children)
Example No. 10
    def find_models_fromcache(self, resource_id):
        """
        Find any models associated with a device/site id.  Use a local cache.

        returns a list OR a string, depending on resource type
        """
        if resource_id in self._model_lookup:
            self._modelcache_hits += 1
            log.trace("Modelcache HIT/miss = %s / %s", self._modelcache_hits, self._modelcache_miss)
            return self._model_lookup[resource_id]

        self._modelcache_miss += 1
        log.trace("Modelcache hit/MISS = %s / %s", self._modelcache_hits, self._modelcache_miss)

        rsrc_type = self.get_resource_type(resource_id)
        if RT.PlatformDevice == rsrc_type:
            model = self.RR2.find_platform_model_id_of_platform_device_using_has_model(resource_id)
        elif RT.PlatformSite == rsrc_type:
            model = self.RR2.find_platform_model_ids_of_platform_site_using_has_model(resource_id)
        elif RT.InstrumentDevice == rsrc_type:
            model = self.RR2.find_instrument_model_id_of_instrument_device_using_has_model(resource_id)
        elif RT.InstrumentSite == rsrc_type:
            model = self.RR2.find_instrument_model_ids_of_instrument_site_using_has_model(resource_id)
        else:
            raise AssertionError("Got unexpected type from which to find a model: %s" % rsrc_type)

        self._model_lookup[resource_id] = model

        return model
Example No. 11
 def add_sites(site_ids, model_type):
     for s in site_ids:
         models = self.RR2.find_objects(s, PRED.hasModel, model_type, id_only=True)
         for m in models:
             self.typecache_add(m, model_type)
         log.trace("Found %s %s objects of site", len(models), model_type)
         add_site_models(s, models)
Example No. 12
    def _set_values_to_brick(self, brick_guid, brick_slice, values, value_slice=None):
        brick_file_path = os.path.join(self.brick_path, '{0}.hdf5'.format(brick_guid))
        log.trace('Brick slice to fill: %s', brick_slice)
        log.trace('Value slice to extract: %s', value_slice)

        # Create the HDF5 dataset that represents one brick
        bD = tuple(self.brick_domains[1])
        cD = self.brick_domains[2]
        if value_slice is not None:
            vals = values[value_slice]
        else:
            vals = values

        if values.ndim == 0 and len(values.shape) == 0 and np.iterable(vals): # Prevent single value strings from being iterated
            vals = [vals]

        # Check for object type
        data_type = self.dtype
        fv = self.fill_value

        # Check for object type
        if data_type == '|O8':
            if np.iterable(vals):
                vals = [pack(x) for x in vals]
            else:
                vals = pack(vals)

        if self.inline_data_writes:
            if data_type == '|O8':
                data_type = h5py.special_dtype(vlen=str)
            if 0 in cD or 1 in cD:
                cD = True
            with h5py.File(brick_file_path, 'a') as f:
                # TODO: Due to usage concerns, currently locking chunking to "auto"
                f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)
                f[brick_guid][brick_slice] = vals
        else:
            work_key = brick_guid
            work = (brick_slice, vals)
            work_metrics = (brick_file_path, bD, cD, data_type, fv)
            log.trace('Work key: %s', work_key)
            log.trace('Work metrics: %s', work_metrics)
            log.trace('Work[0]: %s', work[0])

            # If the brick file doesn't exist, 'touch' it to make sure it's immediately available
            if not os.path.exists(brick_file_path):
                if data_type == '|O8':
                    data_type = h5py.special_dtype(vlen=str)
                if 0 in cD or 1 in cD:
                    cD = True
                with h5py.File(brick_file_path, 'a') as f:
                    # TODO: Due to usage concerns, currently locking chunking to "auto"
                    f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)

            if self.auto_flush:
                # Immediately submit work to the dispatcher
                self.brick_dispatcher.put_work(work_key, work_metrics, work)
            else:
                # Queue the work for later flushing
                self._queue_work(work_key, work_metrics, work)
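A minimal sketch of the inline-write path using plain h5py; the file name, dataset name, shape, dtype and fill value below are all illustrative:

import h5py
import numpy as np

with h5py.File('brick_example.hdf5', 'a') as f:
    # require_dataset creates the dataset on first use and reopens it on later calls
    dset = f.require_dataset('brick-guid', shape=(10, 10), dtype='f8', chunks=None, fillvalue=-9999.0)
    dset[0:5, 0:5] = np.ones((5, 5))  # fill one brick_slice worth of values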
Example No. 13
    def _set_values_to_brick(self, brick_guid, brick_slice, values, value_slice=None):
        brick_file_path = os.path.join(self.brick_path, '{0}.hdf5'.format(brick_guid))
        log.trace('Brick slice to fill: %s', brick_slice)
        log.trace('Value slice to extract: %s', value_slice)

        # Create the HDF5 dataset that represents one brick
        bD = tuple(self.brick_domains[1])
        cD = self.brick_domains[2]
        if value_slice is not None:
            vals = values[value_slice]
        else:
            vals = values

        if values.ndim == 0 and len(values.shape) == 0 and np.iterable(vals): # Prevent single value strings from being iterated
            vals = [vals]

        # Check for object type
        data_type = self.dtype
        fv = self.fill_value

        # Check for object type
        if data_type == '|O8':
            if np.iterable(vals):
                vals = [pack(x) for x in vals]
            else:
                vals = pack(vals)

        if self.inline_data_writes:
            if data_type == '|O8':
                data_type = h5py.special_dtype(vlen=str)
            if 0 in cD or 1 in cD:
                cD = True
            with HDFLockingFile(brick_file_path, 'a') as f:
                # TODO: Due to usage concerns, currently locking chunking to "auto"
                f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)
                f[brick_guid][brick_slice] = vals
        else:
            work_key = brick_guid
            work = (brick_slice, vals)
            work_metrics = (brick_file_path, bD, cD, data_type, fv)
            log.trace('Work key: %s', work_key)
            log.trace('Work metrics: %s', work_metrics)
            log.trace('Work[0]: %s', work[0])

            # If the brick file doesn't exist, 'touch' it to make sure it's immediately available
            if not os.path.exists(brick_file_path):
                if data_type == '|O8':
                    data_type = h5py.special_dtype(vlen=str)
                if 0 in cD or 1 in cD:
                    cD = True
                with HDFLockingFile(brick_file_path, 'a') as f:
                    # TODO: Due to usage concerns, currently locking chunking to "auto"
                    f.require_dataset(brick_guid, shape=bD, dtype=data_type, chunks=None, fillvalue=fv)

            if self.auto_flush:
                # Immediately submit work to the dispatcher
                self.brick_dispatcher.put_work(work_key, work_metrics, work)
            else:
                # Queue the work for later flushing
                self._queue_work(work_key, work_metrics, work)
Example No. 14
    def subplatform_launched(self, pa_client, sub_resource_id):
        """
        PlatformAgent calls this to indicate that a child sub-platform has been
        launched.

        - Since the sub-platform may already have been running by the time
        the PlatformAgent adds it, this method directly gets the
        "rollup_status" and the "child_agg_status" of the child and does
        the updates here.

        NOTE: *no* publications of DeviceAggregateStatusEvent events are done
        because ancestors may not yet have entries for this platform.

        - also does the corresponding "device_added" event publication.

        @param pa_client        sub-platform's resource client
        @param sub_resource_id  sub-platform's resource ID
        """

        self._start_subscriber_process_lifecycle_event(sub_resource_id)

        # do any updates from sub-platform's rollup_status and child_agg_status:
        try:
            resp = pa_client.get_agent(['child_agg_status', 'rollup_status'])
            child_child_agg_status = resp['child_agg_status']
            child_rollup_status = resp['rollup_status']

            log.trace(
                "%r: retrieved from sub-platform %r: "
                "child_agg_status=%s  rollup_status=%s", self._platform_id,
                sub_resource_id, child_child_agg_status, child_rollup_status)

            with self._lock:

                # take the child's child_agg_status'es:
                for sub_origin, sub_statuses in child_child_agg_status.iteritems(
                ):
                    self._prepare_new_child(sub_origin, False, sub_statuses)

                # update my own child_agg_status from the child's rollup_status
                # and also my rollup_status:
                for status_name, status in child_rollup_status.iteritems():
                    self.aparam_child_agg_status[sub_resource_id][
                        status_name] = status
                    self._update_rollup_status(status_name)

            log.trace(
                "%r: my updated child status after processing sub-platform %r: %s",
                self._platform_id, sub_resource_id,
                self.aparam_child_agg_status)

        except Exception as e:
            log.warn(
                "%r: could not get rollup_status or reported rollup_status is "
                "invalid from sub-platform %r: %s", self._platform_id,
                sub_resource_id, e)

        # publish device_added event:
        self.publish_device_added_event(sub_resource_id)
Example No. 15
 def start_sampling(self):
     if self._poller:
         raise InstStateError('already polling')
     memento = self._get_state('poller_state')
     config = self.config['poller']
     log.trace('poller config: %r', config)
     self._poller = self._create_plugin(config, args=[config['config'], memento, self.poller_callback, self.exception_callback])
     self._poller.start()
Example No. 16
    def _generate_stream_config(self):
        log.debug("_generate_stream_config for %s", self.agent_instance_obj.name)
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name}

        #retrieve the output products
        # TODO: What about platforms? other things?
        device_id = device_obj._id
        data_product_objs = self.RR2.find_data_products_of_instrument_device_using_has_output_product(device_id)

        stream_config = {}
        for dp in data_product_objs:
            stream_def_id = self.RR2.find_stream_definition_id_of_data_product_using_has_stream_definition(dp._id)
            for stream_name, stream_info_dict in streams_dict.items():
                # read objects from cache to be compared
                pdict = self.RR2.find_resource_by_name(RT.ParameterDictionary, stream_info_dict.get('param_dict_name'))
                stream_def_id = self._find_streamdef_for_dp_and_pdict(dp._id, pdict._id)

                if stream_def_id:
                    #model_param_dict = self.RR2.find_resources_by_name(RT.ParameterDictionary,
                    #                                         stream_info_dict.get('param_dict_name'))[0]
                    #model_param_dict = self._get_param_dict_by_name(stream_info_dict.get('param_dict_name'))
                    #stream_route = self.RR2.read(product_stream_id).stream_route
                    product_stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(dp._id)
                    stream_def = psm.read_stream_definition(stream_def_id)
                    stream_route = psm.read_stream_route(stream_id=product_stream_id)

                    from pyon.core.object import IonObjectSerializer
                    stream_def_dict = IonObjectSerializer().serialize(stream_def)
                    stream_def_dict.pop('type_')

                    if stream_name in stream_config:
                        log.warn("Overwriting stream_config[%s]", stream_name)

                    stream_config[stream_name] = {  'routing_key'           : stream_route.routing_key,  # TODO: Serialize stream_route together
                                                    'stream_id'             : product_stream_id,
                                                    'stream_definition_ref' : stream_def_id,
                                                    'stream_def_dict'       : stream_def_dict,  # This is very large
                                                    'exchange_point'        : stream_route.exchange_point,
                                                    # This is redundant and very large - the param dict is in the stream_def_dict
                                                    #'parameter_dictionary'  : stream_def.parameter_dictionary,

                    }
        if len(stream_config) < len(streams_dict):
            log.warn("Found only %s matching streams by stream definition (%s) than %s defined in the agent (%s).",
                     len(stream_config), stream_config.keys(), len(streams_dict), streams_dict.keys())

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", stream_config)
        return stream_config
Example No. 17
    def _generate_stream_config(self):
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            param_dict_id = dsm.read_parameter_dictionary_by_name(stream_cfg.parameter_dictionary_name,
                                                                  id_only=True)
            stream_def_id = psm.create_stream_definition(parameter_dictionary_id=param_dict_id)
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name,
                                                    'stream_def_id':stream_def_id,
                                                    'records_per_granule': stream_cfg.records_per_granule,
                                                    'granule_publish_rate':stream_cfg.granule_publish_rate,
                                                    'alarms'              :stream_cfg.alarms  }

        #retrieve the output products
        device_id = device_obj._id
        data_product_ids = self.RR2.find_data_product_ids_of_instrument_device_using_has_output_product(device_id)

        out_streams = []
        for product_id in data_product_ids:
            stream_id = self.RR2.find_stream_id_of_data_product(product_id)
            out_streams.append(stream_id)


        stream_config = {}

        log.debug("Creating a stream config got each stream (dataproduct) assoc with this agent/device")
        for product_stream_id in out_streams:

            #get the streamroute object from pubsub by passing the stream_id
            stream_def_id = self.RR2.find_stream_definition_id_of_stream(product_stream_id)

            #match the streamdefs/param dict for this model with the data products attached to this device to know which tag to use
            for model_stream_name, stream_info_dict  in streams_dict.items():

                if self.clients.pubsub_management.compare_stream_definition(stream_info_dict.get('stream_def_id'),
                                                                            stream_def_id):
                    model_param_dict = DatasetManagementService.get_parameter_dictionary_by_name(stream_info_dict.get('param_dict_name'))
                    stream_route = self.clients.pubsub_management.read_stream_route(stream_id=product_stream_id)

                    stream_config[model_stream_name] = {'routing_key'           : stream_route.routing_key,
                                                            'stream_id'             : product_stream_id,
                                                            'stream_definition_ref' : stream_def_id,
                                                            'exchange_point'        : stream_route.exchange_point,
                                                            'parameter_dictionary'  : model_param_dict.dump(),
                                                            'records_per_granule'  : stream_info_dict.get('records_per_granule'),
                                                            'granule_publish_rate'  : stream_info_dict.get('granule_publish_rate'),
                                                            'alarms'                : stream_info_dict.get('alarms')
                    }

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", str(stream_config) )
        return stream_config
Example No. 18
    def _generate_stream_config(self):
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            param_dict_id = dsm.read_parameter_dictionary_by_name(stream_cfg.parameter_dictionary_name,
                                                                  id_only=True)
            stream_def_id = psm.create_stream_definition(parameter_dictionary_id=param_dict_id)
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name,
                                                    'stream_def_id':stream_def_id,
                                                    'records_per_granule': stream_cfg.records_per_granule,
                                                    'granule_publish_rate':stream_cfg.granule_publish_rate,
                                                    'alarms'              :stream_cfg.alarms  }

        #retrieve the output products
        device_id = device_obj._id
        data_product_ids = self.RR2.find_data_product_ids_of_instrument_device_using_has_output_product(device_id)

        out_streams = []
        for product_id in data_product_ids:
            stream_id = self.RR2.find_stream_id_of_data_product(product_id)
            out_streams.append(stream_id)


        stream_config = {}

        log.debug("Creating a stream config got each stream (dataproduct) assoc with this agent/device")
        for product_stream_id in out_streams:

            #get the streamroute object from pubsub by passing the stream_id
            stream_def_id = self.RR2.find_stream_definition_id_of_stream(product_stream_id)

            #match the streamdefs/param dict for this model with the data products attached to this device to know which tag to use
            for model_stream_name, stream_info_dict  in streams_dict.items():

                if self.clients.pubsub_management.compare_stream_definition(stream_info_dict.get('stream_def_id'),
                                                                            stream_def_id):
                    model_param_dict = DatasetManagementService.get_parameter_dictionary_by_name(stream_info_dict.get('param_dict_name'))
                    stream_route = self.clients.pubsub_management.read_stream_route(stream_id=product_stream_id)

                    stream_config[model_stream_name] = {'routing_key'           : stream_route.routing_key,
                                                            'stream_id'             : product_stream_id,
                                                            'stream_definition_ref' : stream_def_id,
                                                            'exchange_point'        : stream_route.exchange_point,
                                                            'parameter_dictionary'  : model_param_dict.dump(),
                                                            'records_per_granule'  : stream_info_dict.get('records_per_granule'),
                                                            'granule_publish_rate'  : stream_info_dict.get('granule_publish_rate'),
                                                            'alarms'                : stream_info_dict.get('alarms')
                    }

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", str(stream_config) )
        return stream_config
Example No. 19
    def _generate_stream_config(self):
        log.debug("_generate_stream_config for %s", self.agent_instance_obj.name)
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name,
                                                    #'stream_def_id':stream_def_id,
                                                    'records_per_granule': stream_cfg.records_per_granule,
                                                    'granule_publish_rate':stream_cfg.granule_publish_rate,
                                                     }

        #retrieve the output products
        device_id = device_obj._id
        data_product_objs = self.RR2.find_data_products_of_instrument_device_using_has_output_product(device_id)

        stream_config = {}
        for d in data_product_objs:
            stream_def_id = self.RR2.find_stream_definition_id_of_data_product_using_has_stream_definition(d._id)
            for model_stream_name, stream_info_dict  in streams_dict.items():
                # read objects from cache to be compared
                pdict = self.RR2.find_resource_by_name(RT.ParameterDictionary, stream_info_dict.get('param_dict_name'))
                stream_def_id = self._meet_in_the_middle(d._id, pdict._id)

                if stream_def_id:
                    #model_param_dict = self.RR2.find_resources_by_name(RT.ParameterDictionary,
                    #                                         stream_info_dict.get('param_dict_name'))[0]
                    #model_param_dict = self._get_param_dict_by_name(stream_info_dict.get('param_dict_name'))
                    #stream_route = self.RR2.read(product_stream_id).stream_route
                    product_stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(d._id)
                    stream_def = psm.read_stream_definition(stream_def_id)
                    stream_route = psm.read_stream_route(stream_id=product_stream_id)
                    
                    from pyon.core.object import IonObjectSerializer
                    stream_def_dict = IonObjectSerializer().serialize(stream_def)
                    sdtype = stream_def_dict.pop('type_')

                    if model_stream_name in stream_config:
                        log.warn("Overwiting stream_config[%s]", model_stream_name)

                    stream_config[model_stream_name] = {'routing_key'           : stream_route.routing_key,
                                                        'stream_id'             : product_stream_id,
                                                        'stream_definition_ref' : stream_def_id,
                                                        'stream_def_dict'       : stream_def_dict,
                                                        'exchange_point'        : stream_route.exchange_point,
                                                        'parameter_dictionary'  : stream_def.parameter_dictionary,
                                                        'records_per_granule'   : stream_info_dict.get('records_per_granule'),
                                                        'granule_publish_rate'  : stream_info_dict.get('granule_publish_rate'),
                    }

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", str(stream_config) )
        return stream_config
Example No. 20
 def add_devices(device_ids, model_type):
     for d in device_ids:
         model = self.RR2.find_object(d,
                                      PRED.hasModel,
                                      model_type,
                                      id_only=True)
         self.typecache_add(model, model_type)
         log.trace("Found 1 %s object of device", model_type)
         add_device_model(d, model)
Example No. 21
 def list_instruments(self):
     """ perform lookup used by ion-ux to generate list of devices """
     out = ServiceApi.find_by_resource_type('InstrumentDevice')
     log.trace('out: %r', out)
     while isinstance(out[0], list):
         log.warn('have list of lists -- should have list of devices!')
         out = out[0]
         log.trace('out: %r', out)
     return out
Example No. 22
    def _prepare_using_csp(self):
        """
        use the previously collected resources in a CSP problem
        """
        site_tree = self.resource_collector.collected_site_tree()
        device_tree = self.resource_collector.collected_device_tree()
        device_models = self.resource_collector.collected_models_by_device()
        site_models = self.resource_collector.collected_models_by_site()

        log.debug("Collected %s device models, %s site models",
                  len(device_models), len(site_models))

        # csp solver can't handle multiple platforms, because it doesn't understand hierarchy.
        #             (parent-platformsite---hasmodel-a, child-platformsite---hasmodel-b)
        # would match (parent-platformdevice-hasmodel-b, child-platformdevice-hasmodel-a)
        #
        # we can avoid this by simply restricting the deployment to 1 platform device/site in this case

        #        n_pdev = sum(RT.PlatformDevice == self.resource_collector.get_resource_type(d) for d in device_models.keys())
        #        if 1 < n_pdev:
        #            raise BadRequest("Deployment activation without port_assignment is limited to 1 PlatformDevice, got %s" % n_pdev)
        #
        #        n_psite = sum(RT.PlatformSite == self.resource_collector.get_resource_type(d) for d in site_models.keys())
        #        if 1 < n_psite:
        #            raise BadRequest("Deployment activation without port_assignment is limited to 1 PlatformSite, got %s" % n_psite)

        solutions = self._get_deployment_csp_solutions(device_tree, site_tree,
                                                       device_models,
                                                       site_models)

        if 1 > len(solutions):
            raise BadRequest(
                "The set of devices could not be mapped to the set of sites, based on matching "
                + "models")  # and streamdefs")

        if 1 == len(solutions):
            log.info(
                "Found one possible way to map devices and sites.  Best case scenario!"
            )
        else:
            log.info("Found %d possible ways to map device and site",
                     len(solutions))
            log.trace("Here is the %s of all of them:",
                      type(solutions).__name__)
            for i, s in enumerate(solutions):
                log.trace("Option %d: %s", i + 1,
                          self._csp_solution_to_string(s))
            uhoh = (
                "The set of devices could be mapped to the set of sites in %s ways based only "
                + "on matching models, and no port assignments were specified."
            ) % len(solutions)
            #raise BadRequest(uhoh)
            log.warn(uhoh + "  PICKING THE FIRST AVAILABLE OPTION.")

        # return list of site_id, device_id
        return [(solutions[0][mk_csp_var(device_id)], device_id)
                for device_id in device_models.keys()]
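As a toy illustration of the device-to-site matching being solved here, a tiny model-matching CSP can be written with the python-constraint package; the package choice and the data below are assumptions for illustration, not the source's actual solver:

from constraint import Problem

device_models = {'dev1': 'modelA', 'dev2': 'modelB'}    # invented devices and models
site_models   = {'site1': 'modelA', 'site2': 'modelB'}  # invented sites and models

problem = Problem()
for dev, model in device_models.items():
    # each device may only be placed at a site whose model matches
    problem.addVariable(dev, [s for s, m in site_models.items() if m == model])
problem.addConstraint(lambda a, b: a != b, list(device_models.keys()))  # no two devices share a site

print(problem.getSolutions())   # [{'dev1': 'site1', 'dev2': 'site2'}]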
Example No. 23
 def _run(self):
     sleep(3)  # wait a bit before first event
     while self._keep_running:
         self.generate_and_notify_event()
         # sleep for a few secs regularly checking we still are running
         secs = 7
         while self._keep_running and secs > 0:
             sleep(0.3)
             secs -= 0.3
     log.trace("event generation stopped.")
Example No. 24
    def add_listener(self, url, event_type):
        assert event_type in EventInfo.EVENT_TYPES

        url_dict = self._listeners[event_type]

        if not url in url_dict:
            url_dict[url] = ntplib.system_to_ntp_time(time.time())
            log.trace("added listener=%s for event_type=%s", url, event_type)

        return url_dict[url]
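ntplib.system_to_ntp_time() simply shifts a Unix timestamp to the NTP epoch (1900-01-01); a quick illustration:

import time
import ntplib

now = time.time()
print(ntplib.system_to_ntp_time(now) - now)   # 2208988800.0, the seconds between the two epochs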
Example No. 25
 def _run(self):
     sleep(3)  # wait a bit before first event
     while self._keep_running:
         self.generate_and_notify_event()
         # sleep for a few secs regularly checking we still are running
         secs = 7
         while self._keep_running and secs > 0:
             sleep(0.3)
             secs -= 0.3
     log.trace("event generation stopped.")
Example No. 26
    def add_listener(self, url, event_type):
        assert event_type in EventInfo.EVENT_TYPES

        url_dict = self._listeners[event_type]

        if not url in url_dict:
            url_dict[url] = ntplib.system_to_ntp_time(time.time())
            log.trace("added listener=%s for event_type=%s", url, event_type)

        return url_dict[url]
Example No. 27
    def subplatform_launched(self, pa_client, sub_resource_id):
        """
        PlatformAgent calls this to indicate that a child sub-platform has been
        launched.

        - Since the sub-platform may already have been running by the time
        the PlatformAgent adds it, this method directly gets the
        "rollup_status" and the "child_agg_status" of the child and does
        the updates here.

        NOTE: *no* publications of DeviceAggregateStatusEvent events are done
        because ancestors may not yet have entries for this platform.

        - also does the corresponding "device_added" event publication.

        @param pa_client        sub-platform's resource client
        @param sub_resource_id  sub-platform's resource ID
        """

        self._start_subscriber_process_lifecycle_event(sub_resource_id)

        # do any updates from sub-platform's rollup_status and child_agg_status:
        try:
            resp = pa_client.get_agent(['child_agg_status', 'rollup_status'])
            child_child_agg_status = resp['child_agg_status']
            child_rollup_status    = resp['rollup_status']

            log.trace("%r: retrieved from sub-platform %r: "
                      "child_agg_status=%s  rollup_status=%s",
                      self._platform_id, sub_resource_id,
                      child_child_agg_status, child_rollup_status)

            with self._lock:

                # take the child's child_agg_status'es:
                for sub_origin, sub_statuses in child_child_agg_status.iteritems():
                    self._prepare_new_child(sub_origin, False, sub_statuses)

                # update my own child_agg_status from the child's rollup_status
                # and also my rollup_status:
                for status_name, status in child_rollup_status.iteritems():
                    self.aparam_child_agg_status[sub_resource_id][status_name] = status
                    self._update_rollup_status(status_name)

            log.trace("%r: my updated child status after processing sub-platform %r: %s",
                      self._platform_id, sub_resource_id,
                      self.aparam_child_agg_status)

        except Exception as e:
            log.warn("%r: could not get rollup_status or reported rollup_status is "
                     "invalid from sub-platform %r: %s",
                     self._platform_id, sub_resource_id, e)

        # publish device_added event:
        self.publish_device_added_event(sub_resource_id)
Example No. 28
        def set_ports(pnode):
            platform_id = pnode.platform_id
            port_infos = rsn_oms.port.get_platform_ports(platform_id)
            if not isinstance(port_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports response is not a dict: %s" % (
                    platform_id, port_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: port_infos: %s", platform_id, port_infos)

            if not platform_id in port_infos:
                raise PlatformDriverException(
                    "%r: get_platform_ports response does not include "
                    "platform_id: %s" % (platform_id, port_infos))

            ports = port_infos[platform_id]

            if not isinstance(ports, dict):
                raise PlatformDriverException(
                    "%r: get_platform_ports: entry for platform_id is "
                    "not a dict: %s" % (platform_id, ports))

            for port_id, dic in ports.iteritems():
                port = PortNode(port_id)
                port.set_state(dic['state'])
                pnode.add_port(port)

                # add connected instruments:
                instrs_res = rsn_oms.instr.get_connected_instruments(platform_id, port_id)
                if not isinstance(instrs_res, dict):
                    log.warn("%r: port_id=%r: get_connected_instruments "
                             "response is not a dict: %s" % (platform_id, port_id, instrs_res))
                    continue

                if log.isEnabledFor(logging.TRACE):
                    log.trace("%r: port_id=%r: get_connected_instruments "
                              "returned: %s" % (platform_id, port_id, instrs_res))

                if not platform_id in instrs_res:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response"
                        "does not have entry for platform_id: %s" % (
                        platform_id, ports))

                if not port_id in instrs_res[platform_id]:
                    raise PlatformDriverException(
                        "%r: port_id=%r: get_connected_instruments response "
                        "for platform_id does not have entry for port_id: %s" % (
                        platform_id, port_id, instrs_res[platform_id]))

                instr = instrs_res[platform_id][port_id]
                for instrument_id, attrs in instr.iteritems():
                    port.add_instrument(InstrumentNode(instrument_id, attrs))
Example No. 29
    def _csp_solution_to_string(self, soln):
        ret = "%s" % type(soln).__name__

        for k, s in soln.iteritems():
            d = unpack_csp_var(k)
            log.trace("reading device %s", d)
            dev_obj = self.resource_collector.read_using_typecache(d)
            log.trace("reading site %s", s)
            site_obj = self.resource_collector.read_using_typecache(s)
            ret = "%s, %s '%s' -> %s '%s'" % (ret, dev_obj._get_type(), d, site_obj._get_type(), s)
        return ret
Example No. 30
                def get_related_resources_fn(input_resource_id, recursion_limit=1024):
                    """
                    This is the function that finds related resources.

                    input_resource_id and recursion_limit are self explanatory

                    The return value is a list of associations.
                    """
                    retval, _ = get_related_resources_h((set([]), set([])), input_resource_id, recursion_limit)
                    log.trace("final_ret is %s", ["%s %s %s" % (a.st, a.p, a.ot) for a in retval])
                    return list(retval)
Example No. 31
                def get_related_resources_fn(input_resource_id, recursion_limit=1024):
                    """
                    This is the function that finds related resources.

                    input_resource_id and recursion_limit are self explanatory

                    The return value is a list of associations.
                    """
                    retval, _ = get_related_resources_h((set([]), set([])), input_resource_id, recursion_limit)
                    log.trace("final_ret is %s", ["%s %s %s" % (a.st, a.p, a.ot) for a in retval])
                    return list(retval)
Example No. 32
 def add_sites(site_ids, model_type):
     for s in site_ids:
         models = self.RR2.find_objects(s,
                                        PRED.hasModel,
                                        model_type,
                                        id_only=True)
         for m in models:
             self.typecache_add(m, model_type)
         log.trace("Found %s %s objects of site", len(models),
                   model_type)
         add_site_models(s, models)
Example No. 33
    def remove_listener(self, url, event_type):
        assert event_type in EventInfo.EVENT_TYPES

        url_dict = self._listeners[event_type]

        unreg_time = 0
        if url in url_dict:
            unreg_time = ntplib.system_to_ntp_time(time.time())
            del url_dict[url]
            log.trace("removed listener=%s for event_type=%s", url, event_type)

        return unreg_time
Example No. 34
    def get_cumulative_status_dict(self,
                                   device_id,
                                   child_device_ids=None,
                                   status_dict=None):

        h_agent, reason = self.get_device_agent(device_id)
        log.trace("Got h_agent = %s, reason = %s", h_agent, reason)
        if None is h_agent:
            log.warn('no agent for device %s, reason=%s', device_id, reason)
            return None, reason

        if status_dict and device_id in status_dict:
            this_status = status_dict.get(device_id, {})
        else:

            # read child agg status
            try:
                #retrieve the platform status from the platform agent
                this_status = h_agent.get_agent(['aggstatus'])['aggstatus']
                log.debug("this_status for %s is %s", device_id, this_status)

            except Unauthorized:
                log.warn(
                    "The requester does not have the proper role to access the status of this agent"
                )
                return None, "InstrumentDevice(get_agent) has been denied"

        out_status = {device_id: this_status}

        if DriverTypingMethod.ByAgent == self.dtm:
            # we're done if the agent doesn't support child_agg_status
            if not "child_agg_status" in [
                    c.name for c in h_agent.get_capabilities()
            ]:
                return out_status, None
        elif DriverTypingMethod.ByRR == self.dtm:
            device_obj = self.RR2.read(device_id)
            if RT.PlatformDevice != device_obj._get_type():
                return out_status, None

        try:
            child_agg_status = h_agent.get_agent(['child_agg_status'
                                                  ])['child_agg_status']
            log.debug('get_cumulative_status_dict child_agg_status : %s',
                      child_agg_status)
            if child_agg_status:
                out_status.update(child_agg_status)
            return out_status, None
        except Unauthorized:
            log.warn(
                "The requester does not have the proper role to access the child_agg_status of this agent"
            )
            return out_status, "Error getting child status: 'child_agg_status' has been denied"
Example No. 35
    def _csp_solution_to_string(self, soln):
        ret = "%s" % type(soln).__name__

        for k, s in soln.iteritems():
            d = unpack_csp_var(k)
            log.trace("reading device %s", d)
            dev_obj = self.resource_collector.read_using_typecache(d)
            log.trace("reading site %s", s)
            site_obj = self.resource_collector.read_using_typecache(s)
            ret = "%s, %s '%s' -> %s '%s'" % (ret, dev_obj._get_type(), d,
                                              site_obj._get_type(), s)
        return ret
Example No. 36
    def remove_listener(self, url, event_type):
        assert event_type in EventInfo.EVENT_TYPES

        url_dict = self._listeners[event_type]

        unreg_time = 0
        if url in url_dict:
            unreg_time = ntplib.system_to_ntp_time(time.time())
            del url_dict[url]
            log.trace("removed listener=%s for event_type=%s", url, event_type)

        return unreg_time
Example No. 37
 def start_sampling(self):
     if self._poller:
         raise InstStateError('already polling')
     memento = self._get_state('poller_state')
     config = self.config['poller']
     log.trace('poller config: %r', config)
     self._poller = self._create_plugin(config,
                                        args=[
                                            config['config'], memento,
                                            self.poller_callback,
                                            self.exception_callback
                                        ])
     self._poller.start()
Example No. 38
 def is_persisted(self, uuid):
     try:
         with self.datastore.pool.cursor(**self.datastore.cursor_args) as cur:
             log.trace(cur.mogrify("""SELECT 1 from """ + self.entity_table_name + " where id=%(uuid)s""",
                                   {'uuid': uuid}))
             cur.execute("""SELECT 1 from """ + self.entity_table_name + " where id=%(uuid)s""", {'uuid': uuid})
             if 0 < cur.rowcount:
                 log.trace("Record exists: %s", uuid)
                 return True
     except Exception as e:
         log.warn('Caught exception checking Postgres existence: %s', e.message)
         return False
     return False
Example No. 39
 def close(self):
     """
     Close any connections required for this datastore.
     """
     log.trace("Closing connection to %s", self.datastore_name)
      # Compatibility between couchdb client 0.8 and 0.9
     if hasattr(self.server.resource.session, 'conns'):
         conns = self.server.resource.session.conns
         self.server.resource.session.conns = {}     # just in case we try to reuse this, for some reason
     else:
         conns = self.server.resource.session.connection_pool.conns
         self.server.resource.session.connection_pool.conns = {}     # just in case we try to reuse this, for some reason
     map(lambda x: map(lambda y: y.close(), x), conns.values())
Example No. 40
                def get_related_resources_h(accum, input_resource_id,
                                            recursion_limit):
                    """
                    This is a recursive helper function that does the work of crawling for related resources

                    The accum is a tuple: (set of associations that are deemed "Related", set of "seen" resources)

                    The input resource id is the current resource being crawled

                    The recursion limit decrements with each recursive call, ending at 0.  So -1 for infinity.

                    The return value is a list of associations
                    """
                    if 0 == recursion_limit:
                        return accum

                    if -1000 > recursion_limit:
                        log.warn(
                            "Terminating related resource recursion, hit -1000"
                        )
                        return accum

                    acc, seen = accum

                    matches = lookup_fn(input_resource_id)
                    log.trace("get_related_resources_h got matches %s", [
                        dict((k, "%s %s %s" % (a.st, a.p, a.ot))
                             for k, a in matches.iteritems())
                    ])

                    unseen = set(matches.keys()) - seen
                    seen.add(input_resource_id)
                    acc = acc | set(matches.values())

                    #if log.isEnabledFor(logging.TRACE):
                    #    summary = {}
                    #    for a in acc:
                    #        label = "%s %s %s" % (a.st, a.p, a.ot)
                    #        if not label in summary: summary[label] = 0
                    #        summary[label] += 1
                    #    log.trace("acc2 is now %s", ["%s x%d" % (k, v) for k, v in summary.iteritems()])

                    def looper(acc2, input_rsrc_id):
                        return get_related_resources_h(acc2, input_rsrc_id,
                                                       recursion_limit - 1)

                    h_ret = reduce(looper, unseen, (acc, seen))
                    #h_ret = reduce(looper, unseen, (acc, seen))
                    #(h_ret_acc, h_ret_seen) = h_ret
                    #log.trace("h_ret is %s", ["%s %s %s" % (a.st, a.p, a.ot) for a in h_ret_acc])
                    return h_ret
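The helper threads its (acc, seen) tuple through reduce; a toy sketch of that accumulator pattern on an invented adjacency map:

from functools import reduce   # built in on Python 2, explicit import on Python 3

graph = {'a': {'b', 'c'}, 'b': {'c'}, 'c': set()}   # invented relationships

def crawl(accum, node):
    acc, seen = accum
    if node in seen:
        return accum
    seen = seen | {node}
    acc = acc | graph[node]
    return reduce(crawl, graph[node] - seen, (acc, seen))

print(reduce(crawl, ['a'], (set(), set())))   # ({'b', 'c'}, {'a', 'b', 'c'})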
Example No. 41
    def _prepare_using_csp(self):
        """
        use the previously collected resources in a CSP problem
        """
        device_models = self.resource_collector.collected_models_by_device()
        site_models = self.resource_collector.collected_models_by_site()

        log.debug("Collected %s device models, %s site models", len(device_models), len(site_models))

        # csp solver can't handle multiple platforms, because it doesn't understand hierarchy.
        #             (parent-platformsite---hasmodel-a, child-platformsite---hasmodel-b)
        # would match (parent-platformdevice-hasmodel-b, child-platformdevice-hasmodel-a)
        #
        # we can avoid this by simply restricting the deployment to 1 platform device/site in this case

        n_pdev = sum(RT.PlatformDevice == self.resource_collector.get_resource_type(d) for d in device_models.keys())
        if 1 < n_pdev:
            raise BadRequest(
                "Deployment activation without port_assignment is limited to 1 PlatformDevice, got %s" % n_pdev
            )

        n_psite = sum(RT.PlatformSite == self.resource_collector.get_resource_type(d) for d in site_models.keys())
        if 1 < n_psite:
            raise BadRequest(
                "Deployment activation without port_assignment is limited to 1 PlatformSite, got %s" % n_psite
            )

        solutions = self._get_deployment_csp_solutions(device_models, site_models)

        if 1 > len(solutions):
            raise BadRequest(
                "The set of devices could not be mapped to the set of sites, based on matching " + "models"
            )  # and streamdefs")

        if 1 < len(solutions):
            log.info("Found %d possible ways to map device and site", len(solutions))
            log.trace("Here is the %s of all of them:", type(solutions).__name__)
            for i, s in enumerate(solutions):
                log.trace("Option %d: %s", i + 1, self._csp_solution_to_string(s))
            raise BadRequest(
                (
                    "The set of devices could be mapped to the set of sites in %s ways based only "
                    + "on matching models, and no port assignments were specified"
                )
                % len(solutions)
            )

        log.info("Found one possible way to map devices and sites.  Best case scenario!")
        # return list of site_id, device_id
        return [(solutions[0][mk_csp_var(device_id)], device_id) for device_id in device_models.keys()]
Example No. 42
    def __init__(self):
        bootstrap_pyon()
        dsm = DatastoreManager()
        self.datastore = dsm.get_datastore(ds_name='coverage')
        if self.datastore is None:
            raise RuntimeError("Unable to load datastore for coverage")
        else:
            self.entity_table_name = self.datastore._get_datastore_name()
            log.trace("Got datastore: %s type %s" % (self.datastore._get_datastore_name(), str(type(self.datastore))))
        self.span_store = dsm.get_datastore(ds_name='coverage_spans')
        if self.span_store is None:
            raise RuntimeError("Unable to load datastore for coverage_spans")
        else:
            self.span_table_name = self.span_store._get_datastore_name()
            log.trace("Got datastore: %s type %s", self.span_store._get_datastore_name(), type(self.span_store))
Example No. 43
    def get_coverage_type(self, uuid):
        try:
            with self.datastore.pool.cursor(**self.datastore.cursor_args) as cur:
                statement = ''.join(['SELECT coverage_type from ', self.entity_table_name, " WHERE id='",
                                     uuid, "'"])
                log.trace(cur.mogrify(statement))
                cur.execute(statement)
                #cur.execute("""SELECT coverage_type from """ + self.entity_table_name + """ where id=%s""", (uuid,))
                row = cur.fetchone()
                val = row[0]
                val = str.decode(val, self.encoding)
                return val
        except Exception as e:
            log.warn('Caught exception extracting coverage type from Postgres: %s', e)
            return ''
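As the commented-out line above hints, the same lookup can be written as a parameterized query so the uuid does not need to be quoted by hand; a minimal sketch assuming the cursor follows the DB-API '%s' placeholder convention:

statement = 'SELECT coverage_type from ' + self.entity_table_name + ' WHERE id=%s'
cur.execute(statement, (uuid,))   # the driver substitutes and escapes uuid
row = cur.fetchone()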
Example No. 44
    def read_using_typecache(self, resource_id):
        """
        RR2.read needs a type to do a cache lookup... so keep a cache of object types, because
        sometimes we don't know if an ID is for a platform or instrument model/device/site
        """
        if resource_id in self._type_lookup:
            self._typecache_hits += 1
            log.trace("Typeache HIT/miss = %s / %s", self._typecache_hits, self._typecache_miss)
            return self.RR2.read(resource_id, self._type_lookup[resource_id])

        self._typecache_miss += 1
        log.trace("Typeache hit/MISS = %s / %s", self._typecache_hits, self._typecache_miss)
        rsrc_obj = self.RR2.read(resource_id)
        self.typecache_add(resource_id, rsrc_obj._get_type())
        return rsrc_obj
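The typecache_add call above is not part of this snippet; a minimal sketch of what such a companion method might look like (hypothetical, assuming the same _type_lookup dict):

    def typecache_add(self, resource_id, resource_type):
        # Hypothetical companion: remember a resource's type so later reads can
        # pass it straight to RR2.read and hit the cache.
        self._type_lookup[resource_id] = resource_type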
Example No. 45
    def flush(self, deep=True):
        if self.is_dirty(deep):
            try:
                # package for storage
                insert_dict = {}
                for k in list(self._dirty):
                    v = getattr(self, k)
                    log.trace('FLUSH: key=%s  v=%s', k, v)
                    if isinstance(v, Dictable):
                        prefix='DICTABLE|{0}:{1}|'.format(v.__module__, v.__class__.__name__)
                        value = prefix + pack(v.dump())
                    elif k == 'brick_tree':
                        if hasattr(self, 'brick_tree') and isinstance(self.brick_tree, RTreeProxy):
                            val = self.brick_tree.serialize()
                            if val != '':
                                insert_dict['brick_tree'] = val
                            continue
                    elif k == 'parameter_metadata':
                        value = pack_parameter_manager_dict(v)
                    else:
                        value = pack(v)

                    insert_dict[k] = value

                    # Update the hash_value in _hmap
                    self._hmap[k] = hash_any(v)

                dirty_spans = self.span_collection.get_dirty_spans()
                if len(dirty_spans) > 0:
                    val = str(self.span_collection)
                    log.trace("Span tuple: %s", val)
                    value = pack(val)
                    insert_dict['span_collection'] = value


                DBFactory.get_db().insert(self.guid, insert_dict, dirty_spans)

                for span in dirty_spans:
                    span.is_dirty = False
                self._dirty.clear()

            except IOError as ex:
                if "unable to create file (File accessability: Unable to open file)" in ex.message:
                    log.info('Issue writing to hdf file during master_manager.flush - this is not likely a huge problem: %s', ex.message)
                else:
                    raise

            super(DbBackedMetadataManager, self).__setattr__('_is_dirty',False)
Example No. 46
    def recv_packet(self, packet, stream_route, stream_id):
        if self._time_to_first:
            log.trace("%s: received granule: %r from stream %r", self._label, packet, stream_id)
        else:
            self._time_to_first = time.time() - self._start
            log.info('%s: received first message after %f seconds', self._label, self._time_to_first)
            if self._agent:
                self._agent.report(self._label, PerformanceResult({'first': self._time_to_first}))
        n = self.increment_count()
        if n > self._next:
            with self._report_lock:
                self._next += self._rate
                end = time.time()
                elapsed = end - self._start
                self._start = end
                self.report(elapsed)
Example No. 47
    def _check_for_files(self):
        filenames = glob.glob(self._path)
        # files, but no change since last time
        if self._last_filename and filenames and filenames[-1] == self._last_filename:
            return None
        # no files yet, just like last time
        if not self._last_filename and not filenames:
            return None
        if self._last_filename:
            position = filenames.index(self._last_filename)  # raises ValueError if file was removed
            out = filenames[position+1:]
        else:
            out = filenames
        self._last_filename = filenames[-1]
        log.trace('found files: %r', out)
        return out
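Because _check_for_files returns None when nothing new has appeared and a list of new filenames otherwise, a caller would typically poll it in a loop; an illustrative sketch (the poll interval, shutdown flag, and per-file handler are assumptions):

    def _poll_loop(self):
        # Illustrative only -- the names below are not from this snippet.
        while self._active:
            new_files = self._check_for_files()
            if new_files:
                for filename in new_files:
                    self._handle_file(filename)
            time.sleep(self._poll_interval)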
Example No. 48
        def _got_process_lifecycle_event(evt, *args, **kwargs):
            with self._lock:
                if not self._active:
                    log.warn(
                        "%r: _got_process_lifecycle_event called but "
                        "manager has been destroyed", self._platform_id)
                    return

                if evt.type_ != "ProcessLifecycleEvent":
                    log.trace(
                        "%r: ignoring event type %r. Only handle "
                        "ProcessLifecycleEvent directly.", self._platform_id,
                        evt.type_)
                    return

                # evt.origin is a PID
                pid = evt.origin

                if not pid in self._rids:
                    log.warn(
                        "%r: OOIION-1077 ignoring event from pid=%r. "
                        "Expecting one of %s", self._platform_id, pid,
                        self._rids.keys())
                    return

                origin = self._rids[pid]

                # # Before the _rids mapping, a preliminary mechanism to check
                # # whether the event came from the expected origin relied on
                # # the process ID having origin as a substring:
                #
                # if not origin in pid:
                #     log.warn("%r: OOIION-1077 ignoring event from origin %r. "
                #              "Expecting an origin containing %r",
                #              self._platform_id, pid, origin)
                #     return
                # # BUT this was definitely weak. Although the PID for an
                # # initial agent process seems to satisfy this assumption,
                # # this is not anymore the case upon a re-start of that agent.

                log.debug(
                    "%r: OOIION-1077  _got_process_lifecycle_event: "
                    "pid=%r origin=%r state=%r(%s)", self._platform_id, pid,
                    origin, ProcessStateEnum._str_map[evt.state], evt.state)

                if evt.state is ProcessStateEnum.TERMINATED:
                    self._device_terminated_event(origin, pid)
Example No. 49
    def instrument_launched(self, ia_client, i_resource_id):
        """
        PlatformAgent calls this to indicate that a child instrument has been
        launched.

        - Since the instrument may already have been running by the time
        the PlatformAgent adds it, this method directly retrieves the
        "aggstatus" of the child and applies the updates here.

        NOTE: *no* DeviceAggregateStatusEvent publications are done here,
        because ancestors may not yet have entries for this platform.

        - Also publishes the corresponding "device_added" event.

        @param ia_client      instrument's resource client
        @param i_resource_id  instrument's resource ID
        """

        self._start_subscriber_process_lifecycle_event(i_resource_id)

        # do any updates from instrument's aggstatus:
        try:
            aggstatus = ia_client.get_agent(['aggstatus'])['aggstatus']

            log.trace("%r: retrieved aggstatus from instrument %r: %s",
                      self._platform_id, i_resource_id, aggstatus)

            with self._lock:
                for status_name, status in aggstatus.iteritems():
                    # update my image of the child's status:
                    self.aparam_child_agg_status[i_resource_id][
                        status_name] = status

                    self._update_rollup_status(status_name)

            log.trace("%r: my updated child status for instrument %r: %s",
                      self._platform_id, i_resource_id,
                      self.aparam_child_agg_status[i_resource_id])

        except Exception as e:
            log.warn(
                "%r: could not get aggstatus or reported aggstatus is "
                "invalid from instrument %r: %s", self._platform_id,
                i_resource_id, e)

        # publish device_added event:
        self.publish_device_added_event(i_resource_id)
Example No. 52
                def lookup_fn(resource_id):
                    """
                    return a dict of related resources as dictated by the pred dict and whitelist
                     - the key is the next resource id to crawl
                     - the value is the entire association
                    """
                    retval = {}

                    for a in master_assn_list:
                        search_sto, search_ots = predicate_dictionary[a.p]

                        if search_sto and a.s == resource_id and a.ot in resource_whitelist:
                            log.trace("lookup_fn matched %s object", a.ot)
                            retval[a.o] = a
                        elif search_ots and a.o == resource_id and a.st in resource_whitelist:
                            log.trace("lookup_fn matched %s subject", a.st)
                            retval[a.s] = a

                    return retval
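lookup_fn unpacks each predicate_dictionary entry into two booleans, "follow this predicate from subject to object" and "follow it from object to subject"; an illustrative (assumed) shape of that dict:

predicate_dictionary = {
    PRED.hasModel:  (True, False),   # crawl device -> model, but not back
    PRED.hasDevice: (True, True),    # crawl in both directions
}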
Example No. 53
def get_brick_slice_nd(slice_, bounds):
    if len(slice_) != len(bounds):
        raise ValueError(
            '\'slice_\' and \'bounds\' must be equal length: len({0}) != len({1})'
            .format(slice_, bounds))

    brick_slice = []
    brick_mm = []
    for x, sl in enumerate(slice_):  # Dimensionality
        log.trace('x=%s  sl=%s', x, sl)
        log.trace('bbnds[%s]: %s', x, bounds[x])
        try:
            bsl, mm = calc_brick_slice_1d(sl, bounds[x])
            brick_slice.append(bsl)
            brick_mm.append(mm)
        except ValueError:
            continue

    return tuple(brick_slice), tuple(brick_mm)
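slice_ and bounds are parallel, per-dimension sequences; an illustrative call, under the assumption that each bounds entry is the (min, max) extent of that dimension (the concrete results depend on calc_brick_slice_1d, which is not shown here):

slice_ = (slice(0, 10), 3)      # a slice in dimension 0, an int index in dimension 1
bounds = ((0, 99), (0, 49))     # assumed (min, max) extents per dimension
brick_slice, brick_mm = get_brick_slice_nd(slice_, bounds)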
Example No. 54
    def _read_profile(self, f):
        line = f.read(11)
        # EOF here is expected -- no more profiles
        if not line:
            return None
        out = dict(records=[])
        while True:
            if line == CTD_END_PROFILE_DATA:
                break
            elif not line:
                # EOF here is bad -- incomplete profile
                raise ParserException(
                    'bad file format -- EOF before reached end of profile')
            out['records'].append(line)
            line = f.read(11)
        # after 'ff'*11 marker, next 8 bytes are start/end times
        out['start'], out['end'] = struct.unpack('>II', f.read(8))
        log.trace('read profile [%d-%d] %d records', out['start'], out['end'],
                  len(out['records']))
        return out
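Since _read_profile returns None at a clean end-of-file and raises ParserException on a truncated profile, a caller can simply loop until None; a minimal sketch, assuming f is the open data file:

    profiles = []
    while True:
        profile = self._read_profile(f)
        if profile is None:      # clean EOF -- no more profiles
            break
        profiles.append(profile)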
Example No. 55
    def _got_device_status_event(self, evt, *args, **kwargs):
        """
        Handles "device_added" and "device_removed" DeviceStatusEvents.
        """

        expected_subtypes = ("device_added", "device_removed",
                             "device_failed_command")

        with self._lock:
            if not self._active:
                log.warn(
                    "%r: _got_device_status_event called but "
                    "manager has been destroyed", self._platform_id)
                return

        # we are only interested in DeviceStatusEvent directly:
        # (note that also subclasses of DeviceStatusEvent will be notified here)
        if evt.type_ != "DeviceStatusEvent":
            log.trace(
                "%r: ignoring event type %r. Only handle DeviceStatusEvent directly.",
                self._platform_id, evt.type_)
            return

        sub_type = evt.sub_type

        log.debug("%r: _got_device_status_event: %s\n sub_type=%r",
                  self._platform_id, evt, evt.sub_type)

        if not sub_type in expected_subtypes:
            log.error(
                "StatusManager._got_device_status_event: Unexpected sub_type=%r. Expecting one of %r"
                % (sub_type, expected_subtypes))
            return

        with self._lock:
            if sub_type == "device_added":
                self._device_added_event(evt)
            elif sub_type == "device_removed":
                self._device_removed_event(evt)
            else:
                self.device_failed_command_event(evt)
Example No. 56
    def __getattr__(self, item):
        """
        anything we can't puzzle out gets passed along to the real RR client
        """

        dynamic_fns = [
            self._make_dynamic_assign_function,                # understand assign_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_assign_single_object_function,  # understand assign_one_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_assign_single_subject_function, # understand assign_x_x_to_one_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_unassign_function,              # understand unassign_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_find_objects_function,          # understand find_x_xs_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subjects_function,         # understand find_x_xs_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_function,           # understand find_x_x_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_function,          # understand find_x_x_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_ids_function,       # understand find_x_x_ids_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_ids_function,      # understand find_x_x_ids_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_id_function,        # understand find_x_x_id_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_id_function,       # understand find_x_x_id_by_y_y_using_some_predicate(o) functions
        ]

        # try parsing against all the dynamic functions to see if one works
        for gen_fn in dynamic_fns:
            fn = gen_fn(item)
            if None is fn:
                log.trace("dynamic function match fail")
            else:
                log.trace("dynamic function match for %s", item)
                return fn

        log.trace("Getting %s attribute from self.RR", item)
        if not hasattr(self.RR, item):
            raise AttributeError((
                "The method '%s' could not be parsed as a dynamic function and does not exist "
                + "in the Resource Registry Client (%s)") %
                                 (item, type(self.RR).__name__))
        ret = getattr(self.RR, item)
        log.trace("Got attribute from self.RR: %s", type(ret).__name__)

        return ret
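The generator functions above recognize method names assembled from resource types and predicates; illustrative calls that would match those patterns (the resource and predicate names, and the rr2 instance, are assumptions):

rr2.assign_instrument_model_to_instrument_device_with_has_model(model_id, device_id)
devices = rr2.find_instrument_devices_by_instrument_model_using_has_model(model_id)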
Example No. 57
        def set_attributes(pnode):
            platform_id = pnode.platform_id
            attr_infos = rsn_oms.attr.get_platform_attributes(platform_id)
            if not isinstance(attr_infos, dict):
                raise PlatformDriverException(
                    "%r: get_platform_attributes returned: %s" % (
                    platform_id, attr_infos))

            if log.isEnabledFor(logging.TRACE):
                log.trace("%r: attr_infos: %s", platform_id, attr_infos)

            if not platform_id in attr_infos:
                raise PlatformDriverException(
                    "%r: get_platform_attributes response does not "
                    "include entry for platform_id: %s" %(
                    platform_id, attr_infos))

            ret_infos = attr_infos[platform_id]
            for attrName, attr_defn in ret_infos.iteritems():
                attr = AttrNode(attrName, attr_defn)
                pnode.add_attribute(attr)
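set_attributes expects get_platform_attributes to return a dict keyed by platform_id, whose value maps attribute names to their definitions; an illustrative (assumed) shape:

attr_infos = {
    'LJ01D': {                                            # hypothetical platform_id
        'input_voltage': {'monitor_cycle_seconds': 5},    # hypothetical attribute definition
    },
}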
Example No. 58
    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a driver by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The driver command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.
        msg = {'cmd':cmd,'args':args,'kwargs':kwargs}
        
        log.debug('Sending command %s.' % str(msg))
        while True:
            try:
                # Attempt command send. Retry if necessary.
                self.zmq_cmd_socket.send_pyobj(msg)
                # msg is a dict, so compare the command name itself.
                if cmd == 'stop_driver_process':
                    return 'driver stopping'

                # Command sent, break out and wait for reply.
                break    

            except zmq.ZMQError:
                # Socket not ready to accept send. Sleep and retry later.
                time.sleep(.5)
            
        log.trace('Awaiting reply.')
        while True:
            try:
                # Attempt reply recv. Retry if necessary.
                reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)
                # Reply received, break and return.
                break
            except zmq.ZMQError:
                # Socket not ready with the reply. Sleep and retry later.
                time.sleep(.5)
            except Exception as e:
                raise SystemError('exception reading from zmq socket: %s' % e)

        # Return the driver reply.
        return reply
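An illustrative use of cmd_dvr (the command name, argument, and client variable are assumptions, not from this snippet):

# Package a 'configure' command, send it to the driver process, and block
# until the reply comes back.
reply = client.cmd_dvr('configure', driver_config)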
Example No. 59
    def poller_callback(self, file_like_object, state_memento):
        log.debug('poller found data to parse')
        try:
            config = self.config['parser']
            parser = self._create_plugin(
                config,
                kwargs=dict(open_file=file_like_object,
                            parse_after=self.latest_granule_time))
            records = parser.get_records(max_count=self.max_records)
            log.trace('have %d records', len(records))
            while records:
                self._asp.on_sample_mult(records)
                # # secretly uses pubsub client
                # rdt = RecordDictionaryTool(param_dictionary=self.parameter_dictionary)
                # for key in records[0]: #assume all dict records have same keys
                #     rdt[key] = [ record[key] for record in records ]
                # g = rdt.to_granule()
                # self.publisher.publish(g)
                records = parser.get_records(max_count=self.max_records)
            self._set_state('poller_state', state_memento)
        except Exception as ex:
            log.error('error handling data', exc_info=True)