def set_configuration(self, config):
        """
        expect configuration to have:
        - parser module/class
        - directory, wildcard to find data files
        - optional timestamp of last granule
        - optional poll rate
        - publish info
        """
        log.warn("DRIVER: set_configuration")
        log.error("Log level: %s", log.getEffectiveLevel())
        log.debug('using configuration: %s', config)
        self.config = config
        self.max_records = get_safe(config, 'max_records', 100)
        self.stream_config = self.CFG.get('stream_config', {})
        if not self.stream_config:
            raise KeyError("stream_config is missing from agent configuration")
        if len(self.stream_config) > 1:
            log.warn("Multiple stream configs found; using the first one")
        stream_cfg = self.stream_config.values()[0]

        stream_id = stream_cfg['stream_id']
        stream_route = IonObject(OT.StreamRoute, routing_key=stream_cfg['routing_key'], exchange_point=stream_cfg['exchange_point'])
        param_dict = stream_cfg['stream_def_dict']['parameter_dictionary']
        self.publisher = StandaloneStreamPublisher(stream_id=stream_id, stream_route=stream_route)
        self.parameter_dictionary = ParameterDictionary.load(param_dict)
        self.time_field = self.parameter_dictionary.get_temporal_context()
        self.latest_granule_time = get_safe(config, 'last_time', 0)
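
For reference, a minimal sketch of the configuration dict this method expects. The parser/file-discovery key names are assumptions based on the docstring; only 'max_records' and 'last_time' are read directly above, and the stream block arrives via self.CFG['stream_config']:

    # Hypothetical driver configuration -- key names for the parser and
    # file-discovery entries are assumptions; adjust to the actual driver.
    example_config = {
        'max_records': 50,                  # cap on records per granule (default 100)
        'last_time': 3600.0,                # timestamp of the last published granule
        'parser_mod': 'mi.dataset.parser',  # assumed key: parser module
        'parser_cls': 'SomeParser',         # assumed key: parser class
        'directory': '/data/incoming',      # assumed key: where to find data files
        'wildcard': '*.dat',                # assumed key: file match pattern
    }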
    def _clear_caches(self):
        log.warn("Clearing caches")
        for r in self._resources_to_cache():
            self.RR2.clear_cached_resource(r)

        for p in self._predicates_to_cache():
            self.RR2.clear_cached_predicate(p)
    def log_scope_call(scope, log_entry, include_stack=True, stack_first_frame=4):
        try:
            if not trace_data["config"].get("enabled", False):
                return

            log_entry["scope"] = scope
            if not "ts" in log_entry:
                log_entry["ts"] = get_ion_ts()
            trace_data["scope_seq"][scope] += 1
            log_entry["seq"] = trace_data["scope_seq"][scope]

            if include_stack:
                stack = inspect.stack()
                frame_num = stack_first_frame
                context = []
                while len(stack) > frame_num and frame_num < 15:
                    exec_line = "%s:%s:%s" % (stack[frame_num][1], stack[frame_num][2], stack[frame_num][3])
                    context.insert(0, exec_line)
                    if exec_line.endswith("_control_flow") or exec_line.endswith("load_ion") or exec_line.endswith("spawn_process")\
                        or exec_line.endswith(":main") or exec_line.endswith(":dispatch_request"):
                        break
                    frame_num += 1
                log_entry["stack"] = context

            trace_data["trace_log"].append(log_entry)
            if len(trace_data["trace_log"]) > trace_data["config"].get("max_entries", DEFAULT_CONFIG["max_entries"]) + 100:
                trace_data["trace_log"] = trace_data["trace_log"][-trace_data["config"].get("max_entries", DEFAULT_CONFIG["max_entries"]):]

            CallTracer.log_trace(log_entry)
        except Exception as ex:
            log.warn("Count not log trace call: %s", log_entry)
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, 'dvr_egg')
            module_name = self._dvr_config['dvr_mod']
            class_name = self._dvr_config['dvr_cls']
            config = self._dvr_config['startup_config']
        except Exception:
            log.error('error in configuration', exc_info=True)
            raise

        egg_name = None
        egg_repo = None
        memento = self._get_state(DSA_STATE_KEY)

        if uri:
            egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
            egg_repo = uri[0:len(uri) - len(egg_name) - 1] if uri.startswith('http') else None

        # log after egg_name/egg_repo are derived so the values are meaningful
        log.warn("Get driver object: %s, %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo, memento)
        log.info("instantiate driver plugin %s.%s", module_name, class_name)
        params = [config, memento, self.publish_callback, self.persist_state_callback, self.exception_callback]
        return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
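
A sketch of the _dvr_config block this method expects (the module, class, and egg URI below are placeholders, not values from the source):

    # Illustrative shape only -- all values are placeholders.
    self._dvr_config = {
        'dvr_egg': 'http://example.org/releases/driver-0.1.0-py2.7.egg',
        'dvr_mod': 'mi.instrument.example.driver',
        'dvr_cls': 'ExampleDriver',
        'startup_config': {},
    }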
    def _device_removed_event(self, evt):
        """
        Handles the device_removed event to remove associated information and
        status updates, which may result in events being published.
        """

        # the actual child removed is in the values component of the event:
        if isinstance(evt.values, (list, tuple)):
            # normally it will be just one element but handle as array:
            for sub_resource_id in evt.values:
                self._remove_child(sub_resource_id)
        else:
            log.warn("%r: Got device_removed event with invalid values member: %r",
                     self._platform_id, evt)
            return

        # finally forward event so ancestors also get notified:
        # only adjustment is that now my platform's resource_id is the origin:
        evt = dict(event_type  = evt.type_,
                   sub_type    = evt.sub_type,
                   origin_type = evt.origin_type,
                   origin      = self.resource_id,
                   description = evt.description,
                   values      = evt.values)
        try:
            log.debug('%r: _device_removed_event: forwarding to ancestors: %s',
                      self._platform_id, evt)

            self._event_publisher.publish_event(**evt)

        except Exception:
            log.exception('%r: platform agent could not publish event: %s',
                          self._platform_id, evt)
    def get_parameter_values(self, param_name, tdoa=None, sdoa=None, return_value=None):
        """
        Retrieve the value for a parameter

        Returns the value from param_name.  Temporal and spatial DomainOfApplication objects can be used to
        constrain the response.  See DomainOfApplication for details.

        @param param_name   The name of the parameter
        @param tdoa The temporal DomainOfApplication
        @param sdoa The spatial DomainOfApplication
        @param return_value If supplied, filled with response value - currently via OVERWRITE
        @throws KeyError    The coverage does not contain a parameter with name 'param_name'
        """
        if param_name not in self._range_value:
            raise KeyError('Parameter \'{0}\' not found in coverage'.format(param_name))

        if return_value is not None:
            log.warn('Provided \'return_value\' will be OVERWRITTEN')

        slice_ = []

        tdoa = get_valid_DomainOfApplication(tdoa, self.temporal_domain.shape.extents)
        log.debug('Temporal doa: %s', tdoa.slices)
        slice_.extend(tdoa.slices)

        if self.spatial_domain is not None:
            sdoa = get_valid_DomainOfApplication(sdoa, self.spatial_domain.shape.extents)
            log.debug('Spatial doa: %s', sdoa.slices)
            slice_.extend(sdoa.slices)

        log.debug('Getting slice: %s', slice_)

        return_value = self._range_value[param_name][slice_]
        return return_value
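
Hypothetical usage, assuming an open coverage cov with a parameter named 'temp', and that plain slices are accepted as a tdoa (get_valid_DomainOfApplication normalizes the input):

    # Fetch the first ten temporal samples of 'temp'; a KeyError is raised
    # if the coverage has no such parameter.
    temps = cov.get_parameter_values('temp', tdoa=slice(0, 10))
    print temps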
    def get_records(self, max_count=MAX_RECORDS_PER_GRANULE):
        records = []
        for ch_num, chunk in enumerate(self._chunks):
            if ch_num < self._chunk_index:
                continue

            before_records = len(records)
            dev_type = chunk["dev_type"]
            dev_parser = DEVICE_TYPES[dev_type]
            try:
                clss = named_any(dev_parser)
                chunk_parser = clss(chunk)
                new_records = chunk_parser.get_records(max_count=max_count)
                start_idx = 0
                if ch_num == self._chunk_index and self._record_index > 0:
                    # Before we stopped in the middle of a chunk
                    new_records = new_records[self._record_index :]
                    start_idx = self._record_index

                self._chunk_index = ch_num
                if before_records + len(new_records) > max_count:
                    records.extend(new_records[: max_count - before_records])
                    self._record_index = start_idx + (max_count - before_records)
                    break
                else:
                    records.extend(new_records)
                    if len(records) == max_count:
                        self._chunk_index += 1
                        self._record_index = 0
                        break

            except Exception as ex:
                log.warn("Error: %s", ex)

        return records
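
Because the parser keeps _chunk_index and _record_index as a resume point, a caller can drain it in fixed-size batches; a sketch (the parser construction and publish step are hypothetical):

    parser = SomeChunkParser(DATA_FILE)   # hypothetical chunk-based parser
    while True:
        batch = parser.get_records(max_count=MAX_RECORDS_PER_GRANULE)
        if not batch:
            break
        publish_granule(batch)            # hypothetical publish step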
    def _stop_driver(self, force=True):
        log.warn("DRIVER: _stop_driver")
        if self._dvr_client:
            self._dvr_client.stop_sampling()

        # Force the driver state to be stored
        self._flush_state()
 def _parse_chunk(self):
     records = []
     if "<ERROR" in self.chunk["content"]:
         log.warn("ERROR chunk")
     else:
         log.debug("chunk: %s", self.chunk)
     return records
    def test_read_package(self):
        """ assert can read and parse a file """
        parser = SIOControllerPackageParser(DATA_FILE)  # no error? then we parsed the file into profiles and records!

        records = parser.get_records()

        log.warn("Got %s particles", len(records))
    def get_server_config(cls, config=None):
        default_server = get_safe(config, "container.datastore.default_server", "postgresql")

        server_cfg = get_safe(config, "server.%s" % default_server, None)
        if not server_cfg:
            # Support tests that mock out the CFG
            pg_cfg = get_safe(config, "server.postgresql", None)
            if pg_cfg:
                server_cfg = pg_cfg
            else:
                raise BadRequest("No datastore config available!")
                # server_cfg = dict(
                #     type='postgresql',
                #     host='localhost',
                #     port=5432,
                #     username='******',
                #     password=None,
                #     admin_username=None,
                #     admin_password=None,
                #     default_database='postgres',
                #     database='ion',
                #     connection_pool_max=5)
        else:
            # HACK for CEI system start compliance:
            # If couchdb password is set and current is empty, use couchdb password instead
            couch_cfg = get_safe(config, "server.couchdb", None)
            if couch_cfg and get_safe(couch_cfg, "password") and not get_safe(server_cfg, "password"):
                server_cfg["admin_username"] = couch_cfg["username"]
                server_cfg["admin_password"] = couch_cfg["password"]
                server_cfg["password"] = couch_cfg["password"]
                if get_safe(couch_cfg, "host") == "couchdb.dev.oceanobservatories.org":
                    server_cfg["host"] = "pg-dev02.oceanobservatories.org"
                log.warn("Substituted username/password using couchdb. New config: %s", server_cfg)

        return server_cfg
    def _extract_parameter_data(self):
        for span in self._cov.get_spans(self.spans):
            intersection = None
            span_np_dict = {}
            for param_name in span.param_dict.keys():
                span_np_dict[param_name] = span.param_dict[param_name].get_data()

                for param in self.view_criteria.criteria.values():
                    if param.param_name in span.param_dict and param.param_name == param_name:
                        indexes = np.argwhere( (span_np_dict[param_name]>=param.value[0]) &
                                               (span_np_dict[param_name]<=param.value[1]) )
                        if len(indexes.shape) > 1:
                            indexes = indexes.ravel()
                        if intersection is None:
                            intersection = indexes
                        else:
                            intersection = np.intersect1d(intersection, indexes)
            for param_name, np_array in span_np_dict.iteritems():
                if param_name in self.np_array_dict:
                    self.np_array_dict[param_name] = np.append(self.np_array_dict[param_name], np_array[intersection])
                else:
                    self.np_array_dict[param_name] = np_array[intersection]

        self.data_size = None
        for key, val in self.np_array_dict.iteritems():
            if self.data_size is None:
                self.data_size = len(val)
            elif len(val) != self.data_size:
                log.warn("Parameter arrays aren't consistent size results may be meaningless")
            if len(val) < self.data_size:
                self.data_size = len(val)
 def define_viewset(self, design_name, design_doc, datastore_name=None, keepviews=False):
     """
     Create or update a design document (i.e. a set of views).
     If the design exists, it is only updated when the view definitions differ, to prevent rebuilding the indexes.
     """
     ds, datastore_name = self._get_datastore(datastore_name)
     doc_name = self._get_design_name(design_name)
     try:
         ds[doc_name] = dict(views=design_doc)
     #except ResourceConflict:
     except Exception:
         # View exists
         old_design = ds[doc_name]
         if not keepviews:
             try:
                 try:
                     del ds[doc_name]
                 except ResourceNotFound:
                     pass
                 ds[doc_name] = dict(views=design_doc)
             except Exception as ex:
                 # In case this gets executed concurrently and 2 processes perform the same creates
                 log.warn("Error defining datastore %s view %s (concurrent create?): %s", datastore_name, doc_name, str(ex))
         else:
             ddiff = DictDiffer(old_design.get("views", {}), design_doc)
             if ddiff.changed():
                 old_design["views"] = design_doc
                 ds.save(old_design)
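
Hypothetical usage with a CouchDB-style design document (the map function body and the datastore handle are illustrative):

    # One view indexing documents by name; stored under the design name
    # returned by _get_design_name("resources").
    design_doc = {
        "by_name": {
            "map": "function(doc) { if (doc.name) { emit(doc.name, null); } }"
        }
    }
    datastore.define_viewset("resources", design_doc)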
    def convert_to_attachments(self):

        assert(self.csv_reader is not None)
        assert(self.qa_zip_obj is not None)

        #create attachment resources for each document in the zip
        log.debug("creating attachment objects")
        attachments = []
        for row in self.csv_reader:
            att_name = row["filename"]
            att_desc = row["description"]
            att_content_type = row["content_type"]
            att_keywords = string.split(row["keywords"], ",")

            if att_name not in self.qa_zip_obj.namelist():
                return None, ("Manifest refers to a file called '%s' which is not in the zip" % att_name)

            attachments.append(IonObject(RT.Attachment,
                                         name=att_name,
                                         description=att_desc,
                                         content=self.qa_zip_obj.read(att_name),
                                         content_type=att_content_type,
                                         keywords=att_keywords,
                                         attachment_type=AttachmentType.BLOB))

        log.debug("Sanity checking manifest vs zip file")
        if len(self.qa_zip_obj.namelist()) - 1 > len(attachments):
            log.warn("There were %d files in the zip but only %d in the manifest",
                     len(self.qa_zip_obj.namelist()) - 1,
                     len(attachments))

        return attachments, ""
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace("%r: ignoring event type %r. Only handle %r directly",
                          self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
                     "%s\n"
                     "%40s : %s\n",
                     self._platform_id, state, self.resource_id, statuses,
                     "invalidated_children", invalidated_children)
    def stop_launched_simulator(cls):
        """
        Utility to stop the process launched with launch_simulator.
        The stop is attempted a couple of times in case of errors (with a few
        seconds of sleep in between).

        @return None if process seems to have been stopped properly.
                Otherwise the exception of the last attempt to stop it.
        """
        if cls._sim_process:
            sim_proc, cls._sim_process = cls._sim_process, None
            attempts = 3
            attempt = 0
            while attempt < attempts:
                attempt += 1
                log.debug("[OMSim] stopping launched simulator (attempt=%d) ...", attempt)
                try:
                    sim_proc.stop()
                    log.debug("[OMSim] simulator process seems to have stopped properly")
                    return None

                except Exception as ex:
                    if attempt < attempts:
                        sleep(10)
                    else:
                        log.warn("[OMSim] error while stopping simulator process: %s", ex)
                        return ex
    def _device_terminated_event(self, origin, pid):
        """
        Handles the ProcessLifecycleEvent TERMINATED event received for the
        given origin:

        - notifies platform to invalidate the associated child
        - removes process lifecycle subscriber associated with the given origin
        - set UNKNOWN for the corresponding child_agg_status
        - update rollup_status and do publication in case of change

        @param origin    the origin (resource_id) associated with the PID used
                         for the subscriber to ProcessLifecycleEvents

        @param pid       the corresponding PID
        """

        # notify platform:
        self._agent._child_terminated(origin)

        if origin not in self.aparam_child_agg_status:
            log.warn("%r: OOIION-1077 _device_terminated_event: unrecognized origin=%r",
                     self._platform_id, origin)
            return

        log.debug("%r: OOIION-1077 _device_terminated_event: origin=%r",
                  self._platform_id, origin)

        self._stop_subscriber_process_lifecycle_event(origin, pid)

        # set entries to UNKNOWN:
        self._initialize_child_agg_status(origin)

        # update rollup_status and publish in case of change:
        for status_name in AggregateStatusType._str_map.keys():
            self._update_rollup_status_and_publish(status_name, origin)
    def flush(self):
        if self.mode == 'r':
            log.warn('SimplePersistenceLayer not open for writing: mode=%s', self.mode)
            return

        log.debug('Flushing MasterManager...')
        self.master_manager.flush()
 def delete_doc_mult(self, object_ids, datastore_name=None, object_type=None):
     ds, datastore_name = self._get_datastore(datastore_name)
     for oid in object_ids:
         try:
             ds.delete(oid)
         except Exception:
             log.warn("Could not delete %s" % oid)
    def insert_spans(self, uuid, spans, cur):
        log.debug("Inserting spans")
        try:
            for span in spans:
                cols, values = self.span_values(uuid, span)
                dic = dict(zip(cols, values))
                if len(cols) > 0:
                    span_addr = span.address.get_db_str()
                    statement = ''
                    if self._span_exists(span_addr):
                        statement = ''.join(['UPDATE ', self.span_table_name, ' SET '])
                        for k, v in dic.iteritems():
                            statement = ''.join([statement, k, '=', v, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, " WHERE span_address = '", span_addr, "'"])
                    else:
                        statement = """INSERT into """ + self.span_table_name + """ ("""
                        for col in cols:
                            statement = ''.join([statement, col, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, """) VALUES ("""])
                        for val in values:
                            statement = ''.join([statement, val, ', '])
                        statement = statement.rstrip(', ')
                        statement = ''.join([statement, """)"""])

                    log.trace("Inserting span into datastore: %s", statement)
                    with self.span_store.pool.cursor(**self.datastore.cursor_args) as cur:
                        cur.execute(statement)
        except Exception as ex:
            log.warn('Unable to insert spans %s %s', str(spans), ex.message)
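
The statements above are assembled by string concatenation, which breaks as soon as a value contains a quote; a parameterized sketch of the UPDATE branch (psycopg2-style placeholders, assuming values holds raw Python values rather than pre-quoted SQL literals):

    # Let the driver handle quoting/escaping via parameter binding.
    set_clause = ', '.join('%s=%%s' % col for col in cols)
    statement = 'UPDATE %s SET %s WHERE span_address = %%s' % (
        self.span_table_name, set_clause)
    cur.execute(statement, list(values) + [span_addr])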
 def stop_es(origin, es):
     log.debug("%r: destroying event subscriber: origin=%r; es=%r",
               self._platform_id, origin, es)
     try:
         self._agent._destroy_event_subscriber(es)
     except Exception as ex:
         log.warn("%r: error destroying event subscriber: origin=%r; es=%r: %s",
                  self._platform_id, origin, es, ex)
 def add_site_models(site_id, model_ids):
     if site_id in site_models:
         log.info("Site '%s' was already collected in deployment '%s'", site_id, deployment_id)
         if model_ids != site_models[site_id]:
             log.warn("Device '%s' being assigned a different model.  old=%s, new=%s",
                      site_id, site_models[site_id], model_ids)
     site_models[site_id] = model_ids
     self._model_lookup[site_id] = model_ids
    def append_parameter(self, parameter_context):
        """
        Append a ParameterContext to the coverage

        @deprecated use a ParameterDictionary during construction of the coverage
        """
        log.warn('SimplexCoverage.append_parameter() is deprecated: use a ParameterDictionary during construction of the coverage')
        self._append_parameter(parameter_context)
 def add_device_model(device_id, model_id):
     if device_id in device_models:
         log.info("Device '%s' was already collected in deployment '%s'", device_id, deployment_id)
         if model_id != device_models[device_id]:
             log.warn("Device '%s' being assigned a different model.  old='%s', new='%s'",
                      device_id, device_models[device_id], model_id)
     device_models[device_id] = model_id
     self._model_lookup[device_id] = model_id
    def __getitem__(self, slice_):
        if not isinstance(slice_, (list,tuple)):
            slice_ = [slice_]
        log.debug('getitem slice_: %s', slice_)

        arr_shp = self._get_array_shape_from_slice(slice_)

        ret_arr = np.empty(arr_shp, dtype=self.dtype)
        ret_arr.fill(self.fill_value)
        ret_origin = [0 for x in range(ret_arr.ndim)]
        log.trace('Shape of returned array: %s', ret_arr.shape)

        if arr_shp == 0:
            return ret_arr

        brick_origin_offset = 0

        bricks = self._bricks_from_slice(slice_)
        log.trace('Slice %s indicates bricks: %s', slice_, bricks)

        for idx, brick_guid in bricks:
            brick_file_path = '{0}/{1}.hdf5'.format(self.brick_path, brick_guid)

            # Figuring out which part of brick to set values - also appropriately increments the ret_origin
            log.trace('Return array origin: %s', ret_origin)
            try:
                brick_slice, value_slice, brick_origin_offset = self._calc_slices(slice_, brick_guid, ret_arr, ret_origin, brick_origin_offset)
                if brick_slice is None:
                    raise ValueError('Brick contains no values for specified slice')
            except ValueError as ve:
                log.warn(ve.message + '; moving to next brick')
                continue

            log.trace('Brick slice to extract: %s', brick_slice)
            log.trace('Value slice to fill: %s', value_slice)

            if not os.path.exists(brick_file_path):
                log.trace('Found virtual brick file: %s', brick_file_path)
            else:
                log.trace('Found real brick file: %s', brick_file_path)

                with h5py.File(brick_file_path, 'r') as brick_file:
                    v = brick_file[brick_guid].__getitem__(*brick_slice)

                # Check if object type
                if self.dtype == '|O8':
                    if not hasattr(v, '__iter__'):
                        v = [v]
                    v = [unpack(x) for x in v]

                ret_arr[value_slice] = v

        if ret_arr.size == 1:
            if ret_arr.ndim==0:
                ret_arr=ret_arr[()]
            else:
                ret_arr=ret_arr[0]
        return ret_arr
    def _generate_stream_config(self):
        log.debug("_generate_stream_config for %s", self.agent_instance_obj.name)
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name,
                                                    #'stream_def_id':stream_def_id,
                                                    'records_per_granule': stream_cfg.records_per_granule,
                                                    'granule_publish_rate':stream_cfg.granule_publish_rate,
                                                     }

        #retrieve the output products
        device_id = device_obj._id
        data_product_objs = self.RR2.find_data_products_of_instrument_device_using_has_output_product(device_id)

        stream_config = {}
        for d in data_product_objs:
            stream_def_id = self.RR2.find_stream_definition_id_of_data_product_using_has_stream_definition(d._id)
            for model_stream_name, stream_info_dict  in streams_dict.items():
                # read objects from cache to be compared
                pdict = self.RR2.find_resource_by_name(RT.ParameterDictionary, stream_info_dict.get('param_dict_name'))
                stream_def_id = self._meet_in_the_middle(d._id, pdict._id)

                if stream_def_id:
                    #model_param_dict = self.RR2.find_resources_by_name(RT.ParameterDictionary,
                    #                                         stream_info_dict.get('param_dict_name'))[0]
                    #model_param_dict = self._get_param_dict_by_name(stream_info_dict.get('param_dict_name'))
                    #stream_route = self.RR2.read(product_stream_id).stream_route
                    product_stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(d._id)
                    stream_def = psm.read_stream_definition(stream_def_id)
                    stream_route = psm.read_stream_route(stream_id=product_stream_id)
                    
                    from pyon.core.object import IonObjectSerializer
                    stream_def_dict = IonObjectSerializer().serialize(stream_def)
                    sdtype = stream_def_dict.pop('type_')

                    if model_stream_name in stream_config:
                        log.warn("Overwiting stream_config[%s]", model_stream_name)

                    stream_config[model_stream_name] = {'routing_key'           : stream_route.routing_key,
                                                        'stream_id'             : product_stream_id,
                                                        'stream_definition_ref' : stream_def_id,
                                                        'stream_def_dict'       : stream_def_dict,
                                                        'exchange_point'        : stream_route.exchange_point,
                                                        'parameter_dictionary'  : stream_def.parameter_dictionary,
                                                        'records_per_granule'   : stream_info_dict.get('records_per_granule'),
                                                        'granule_publish_rate'  : stream_info_dict.get('granule_publish_rate'),
                    }

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", str(stream_config) )
        return stream_config
    def _generate_stream_config(self):
        log.debug("_generate_stream_config for %s", self.agent_instance_obj.name)
        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj  = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            streams_dict[stream_cfg.stream_name] = {'param_dict_name':stream_cfg.parameter_dictionary_name}

        #retrieve the output products
        # TODO: What about platforms? other things?
        device_id = device_obj._id
        data_product_objs = self.RR2.find_data_products_of_instrument_device_using_has_output_product(device_id)

        stream_config = {}
        for dp in data_product_objs:
            stream_def_id = self.RR2.find_stream_definition_id_of_data_product_using_has_stream_definition(dp._id)
            for stream_name, stream_info_dict in streams_dict.items():
                # read objects from cache to be compared
                pdict = self.RR2.find_resource_by_name(RT.ParameterDictionary, stream_info_dict.get('param_dict_name'))
                stream_def_id = self._find_streamdef_for_dp_and_pdict(dp._id, pdict._id)

                if stream_def_id:
                    #model_param_dict = self.RR2.find_resources_by_name(RT.ParameterDictionary,
                    #                                         stream_info_dict.get('param_dict_name'))[0]
                    #model_param_dict = self._get_param_dict_by_name(stream_info_dict.get('param_dict_name'))
                    #stream_route = self.RR2.read(product_stream_id).stream_route
                    product_stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(dp._id)
                    stream_def = psm.read_stream_definition(stream_def_id)
                    stream_route = psm.read_stream_route(stream_id=product_stream_id)

                    from pyon.core.object import IonObjectSerializer
                    stream_def_dict = IonObjectSerializer().serialize(stream_def)
                    stream_def_dict.pop('type_')

                    if stream_name in stream_config:
                        log.warn("Overwriting stream_config[%s]", stream_name)

                    stream_config[stream_name] = {  'routing_key'           : stream_route.routing_key,  # TODO: Serialize stream_route together
                                                    'stream_id'             : product_stream_id,
                                                    'stream_definition_ref' : stream_def_id,
                                                    'stream_def_dict'       : stream_def_dict,  # This is very large
                                                    'exchange_point'        : stream_route.exchange_point,
                                                    # This is redundant and very large - the param dict is in the stream_def_dict
                                                    #'parameter_dictionary'  : stream_def.parameter_dictionary,

                    }
        if len(stream_config) < len(streams_dict):
            log.warn("Found only %s matching streams by stream definition (%s) than %s defined in the agent (%s).",
                     len(stream_config), stream_config.keys(), len(streams_dict), streams_dict.keys())

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", stream_config)
        return stream_config
    def get_dirty_values_async_result(self):
        if self.mode == 'r':
            log.warn('Coverage not open for writing: mode=%s', self.mode)
            from gevent.event import AsyncResult
            ret = AsyncResult()
            ret.set(True)
            return ret

        return self._persistence_layer.get_dirty_values_async_result()
    def pickle_save(cls, cov_obj, file_path, use_ascii=False):
        if not isinstance(cov_obj, AbstractCoverage):
            raise StandardError('cov_obj must be an instance or subclass of AbstractCoverage: object is {0}'.format(type(cov_obj)))

        with open(file_path, 'wb') as f:
            pickle.dump(cov_obj, f, 0 if use_ascii else 2)

        log.info('Saved to pickle \'%s\'', file_path)
        log.warn('\'pickle_save\' and \'pickle_load\' are not 100% safe, use at your own risk!!')
    def flush_values(self):
        if self.mode == 'r':
            log.warn('PersistenceLayer not open for writing: mode=%s', self.mode)
            return

        for k, v in self.value_list.iteritems():
            v.flush_values()

        return self.get_dirty_values_async_result()
    def compute_status_list(self, child_agg_status, keys):
        ret = []
        if not isinstance(child_agg_status, dict):
            return ComputedListValue(
                reason="Top platform's child_agg_status is '%s'" %
                type(child_agg_status).__name__)

        for k in keys:
            # map None to UNKNOWN
            #if not type("") == type(k):
            #    raise BadRequest("attempted to compute_status_list with type(v) = %s : %s" % (type(k), k))
            if k in child_agg_status:
                ret.append(self._crush_status_dict(child_agg_status[k]))
            else:
                log.warn(
                    "Status for device '%s' not found in parent platform's child_agg_status",
                    k)
                ret.append(DeviceStatusType.STATUS_UNKNOWN)

        return ComputedListValue(status=ComputedValueAvailability.PROVIDED,
                                 value=ret)
    def stop(self):
        """
        Stop the process.
        """
        if self._rsn_oms is not None:
            log.debug("[OMSim] x_exit_simulator -> %r", self._rsn_oms.x_exit_simulator())

        if self._process:
            try:
                log.debug("[OMSim] terminating process %s", self._process.pid)
                self._process.send_signal(signal.SIGINT)
                log.debug("[OMSim] waiting process %s", self._process.pid)
                self._process.wait()
                log.debug("[OMSim] process killed")

            except OSError:
                log.warn("[OMSim] Could not stop process, pid: %s" % self._process.pid)

            sleep(4)

        self._process = None
        self._rsn_oms = None
    def generate_config(self):
        """
        create the generic parts of the configuration including resource_id, egg_uri, and org
        """
        if self.generated_config:
            log.warn(
                "Generating config again for the same Instance object (%s)",
                self.agent_instance_obj.name)

        self._check_associations()

        agent_config = self._generate_skeleton_config_block()

        device_obj = self._get_device()
        agent_obj = self._get_agent()

        log.debug("complement agent_config with resource_id")
        if 'agent' not in agent_config:
            agent_config['agent'] = {'resource_id': device_obj._id}
        elif 'resource_id' not in agent_config.get('agent'):
            agent_config['agent']['resource_id'] = device_obj._id

        log.debug("add egg URI if available")
        if agent_obj.driver_uri:
            agent_config['driver_config']['process_type'] = (
                DriverProcessType.EGG, )
            agent_config['driver_config']['dvr_egg'] = agent_obj.driver_uri
        else:
            agent_config['driver_config']['process_type'] = (
                DriverProcessType.PYTHON_MODULE, )

        if log.isEnabledFor(logging.INFO):
            tree = self._summarize_children(agent_config)
            log.info("Children of %s are %s", self.agent_instance_obj.name,
                     tree)

        self.generated_config = True

        return agent_config
    def _device_added_event(self, evt):
        """
        Handles the device_added event to do all related preparations and
        updates statuses.
        """

        # look at the event's origin itself to make sure is included:
        self._prepare_new_child(evt.origin)

        # the actual child added is in the values component of the event:
        if isinstance(evt.values, (list, tuple)):
            # normally it will be just one element
            for sub_resource_id in evt.values:
                self._prepare_new_child(sub_resource_id)
        else:
            log.warn(
                "%r: Got device_added event with invalid values member: %r",
                self._platform_id, evt)
            return

        # finally re-publish event so ancestors also get notified:
        # only adjustment is that now I'm the origin:
        evt = dict(event_type=evt.type_,
                   sub_type=evt.sub_type,
                   origin_type=evt.origin_type,
                   origin=self.resource_id,
                   description=evt.description,
                   values=evt.values)
        try:
            log.debug('%r: _device_added_event: re-publishing: %s',
                      self._platform_id, evt)

            self._event_publisher.publish_event(**evt)

        except Exception:
            log.exception('%r: platform agent could not publish event: %s',
                          self._platform_id, evt)
    def _notify_listener(self, url, event_instance):
        """
        Notifies event to given listener.
        """
        if url == "http://NO_OMS_NOTIFICATIONS":  # pragma: no cover
            # developer convenience -see ion.agents.platform.rsn.oms_event_listener
            return

        log.debug("Notifying event_instance=%s to listener=%s",
                  str(event_instance), url)

        # include url in event instance for diagnostic/debugging purposes:
        event_instance['listener_url'] = url

        # prepare payload (JSON format):
        payload = json.dumps(event_instance, indent=2)
        log.trace("payload=\n%s", payload)
        headers = {"Content-type": "application/json", "Accept": "text/plain"}

        conn = None
        try:
            o = urlparse(url)
            url4conn = o.netloc
            path = o.path

            conn = httplib.HTTPConnection(url4conn)
            conn.request("POST", path, body=payload, headers=headers)
            response = conn.getresponse()
            data = response.read()
            log.trace("RESPONSE: %s, %s, %s", response.status, response.reason,
                      data)
        except Exception as e:
            # the actual listener is no longer there; just log a message
            log.warn("event notification HTTP request failed: %r: %s", url, e)
        finally:
            if conn:
                conn.close()
    def get_cumulative_status_dict(self,
                                   device_id,
                                   child_device_ids=None,
                                   status_dict=None):

        h_agent, reason = self.get_device_agent(device_id)
        log.trace("Got h_agent = %s, reason = %s", h_agent, reason)
        if h_agent is None:
            log.warn('no agent for device %s, reason=%s', device_id, reason)
            return None, reason

        if status_dict and device_id in status_dict:
            this_status = status_dict.get(device_id, {})
        else:

            # read child agg status
            try:
                #retrieve the platform status from the platform agent
                this_status = h_agent.get_agent(['aggstatus'])['aggstatus']
                log.debug("this_status for %s is %s", device_id, this_status)

            except Unauthorized:
                log.warn(
                    "The requester does not have the proper role to access the status of this agent"
                )
                return None, "InstrumentDevice(get_agent) has been denied"

        out_status = {device_id: this_status}

        if DriverTypingMethod.ByAgent == self.dtm:
            # we're done if the agent doesn't support child_agg_status
            if not "child_agg_status" in [
                    c.name for c in h_agent.get_capabilities()
            ]:
                return out_status, None
        elif DriverTypingMethod.ByRR == self.dtm:
            device_obj = self.RR2.read(device_id)
            if RT.PlatformDevice != device_obj._get_type():
                return out_status, None

        try:
            child_agg_status = h_agent.get_agent(['child_agg_status'
                                                  ])['child_agg_status']
            log.debug('get_cumulative_status_dict child_agg_status : %s',
                      child_agg_status)
            if child_agg_status:
                out_status.update(child_agg_status)
            return out_status, None
        except Unauthorized:
            log.warn(
                "The requester does not have the proper role to access the child_agg_status of this agent"
            )
            return out_status, "Error getting child status: 'child_agg_status' has been denied"
    def _build_parsed_values(self):
        """
        Take the velocity data sample and parse it into values with appropriate tags.
        @throws SampleException If there is a problem with sample creation

        typedef struct {
            unsigned char cSync; // sync = 0xa5
            unsigned char cId; // identification (0x01=normal, 0x80=diag)
            unsigned short hSize; // size of structure (words)
            PdClock clock; // date and time
            short hError; // error code:
            unsigned short hAnaIn1; // analog input 1
            unsigned short hBattery; // battery voltage (0.1 V)
            union {
                unsigned short hSoundSpeed; // speed of sound (0.1 m/s)
                unsigned short hAnaIn2; // analog input 2
            } u;
            short hHeading; // compass heading (0.1 deg)
            short hPitch; // compass pitch (0.1 deg)
            short hRoll; // compass roll (0.1 deg)
            unsigned char cPressureMSB; // pressure MSB
            char cStatus; // status:
            unsigned short hPressureLSW; // pressure LSW
            short hTemperature; // temperature (0.01 deg C)
            short hVel[3]; // velocity
            unsigned char cAmp[3]; // amplitude
            char cFill;
            short hChecksum; // checksum
        } PdMeas;
        """
        try:
            unpack_format = (
                ('sync', '<4s'),  # cSync, cId, hSize
                ('timestamp', '6s'),  # PdClock
                (
                    'error', 'H'
                ),  # defined as signed short, but represents bitmap, using unsigned
                ('analog1', 'H'),
                ('battery_voltage', 'H'),
                ('sound_speed', 'H'),
                ('heading', 'h'),
                ('pitch', 'h'),
                ('roll', 'h'),
                ('pressure_msb', 'B'),
                ('status', 'B'
                 ),  # defined as char, but represents bitmap, using unsigned
                ('pressure_lsw', 'H'),
                ('temperature', 'h'),
                ('velocity_beam1', 'h'),
                ('velocity_beam2', 'h'),
                ('velocity_beam3', 'h'),
                ('amplitude_beam1', 'B'),
                ('amplitude_beam2', 'B'),
                ('amplitude_beam3', 'B'),
            )

            data = unpack_from_format(self._data_particle_type, unpack_format,
                                      self.raw_data)

            if not validate_checksum('<20H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)",
                         self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.
                              QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(data.timestamp)
            self.set_internal_timestamp(
                (timestamp - datetime(1900, 1, 1)).total_seconds())

            pressure = data.pressure_msb * 0x10000 + data.pressure_lsw

        except Exception as e:
            log.error(
                'Error creating particle velpt_velocity_data, raw data: %r',
                self.raw_data)
            raise SampleException(e)

        key = AquadoppVelocityDataParticleKey

        result = [{
            VID: key.TIMESTAMP,
            VAL: str(timestamp)
        }, {
            VID: key.ERROR,
            VAL: data.error
        }, {
            VID: key.ANALOG1,
            VAL: data.analog1
        }, {
            VID: key.BATTERY_VOLTAGE,
            VAL: data.battery_voltage
        }, {
            VID: key.SOUND_SPEED_ANALOG2,
            VAL: data.sound_speed
        }, {
            VID: key.HEADING,
            VAL: data.heading
        }, {
            VID: key.PITCH,
            VAL: data.pitch
        }, {
            VID: key.ROLL,
            VAL: data.roll
        }, {
            VID: key.STATUS,
            VAL: data.status
        }, {
            VID: key.PRESSURE,
            VAL: pressure
        }, {
            VID: key.TEMPERATURE,
            VAL: data.temperature
        }, {
            VID: key.VELOCITY_BEAM1,
            VAL: data.velocity_beam1
        }, {
            VID: key.VELOCITY_BEAM2,
            VAL: data.velocity_beam2
        }, {
            VID: key.VELOCITY_BEAM3,
            VAL: data.velocity_beam3
        }, {
            VID: key.AMPLITUDE_BEAM1,
            VAL: data.amplitude_beam1
        }, {
            VID: key.AMPLITUDE_BEAM2,
            VAL: data.amplitude_beam2
        }, {
            VID: key.AMPLITUDE_BEAM3,
            VAL: data.amplitude_beam3
        }]

        return result
 def __init__(self, *args, **kwargs):
     super(TwoDelegateDatasetAgent, self).__init__(*args, **kwargs)
     self._fsm.add_handler(ResourceAgentState.STREAMING,
                           ResourceAgentEvent.EXECUTE_RESOURCE,
                           self._handler_streaming_execute_resource)
     log.warn("DRIVER: __init__")
 def _stop_driver(self, force=True):
     log.warn("DRIVER: _stop_driver")
     self._dvr_client = None
    def _run(self, guid):
        while not self._do_stop:
            try:
                log.debug('%s making work request', guid)
                self.req_sock.send(pack((REQUEST_WORK, guid)))
                msg = None
                while msg is None:
                    try:
                        msg = self.req_sock.recv(zmq.NOBLOCK)
                    except zmq.ZMQError as e:
                        if e.errno == zmq.EAGAIN:
                            if self._do_stop:
                                break
                            else:
                                time.sleep(0.1)
                        else:
                            raise

                if msg is not None:
                    brick_key, brick_metrics, work = unpack(msg)
                    work = list(work)  # lists decode as tuples
                    try:
                        log.debug('*%s*%s* got work for %s, metrics %s: %s',
                                  time.time(), guid, brick_key, brick_metrics,
                                  work)
                        brick_path, bD, cD, data_type, fill_value = brick_metrics
                        if data_type == '|O8':
                            data_type = h5py.special_dtype(vlen=str)
                        # TODO: Uncomment this to properly turn 0 & 1 chunking into True
                        # if 0 in cD or 1 in cD:
                        #     cD = True
                        with h5py.File(brick_path, 'a') as f:
                            # TODO: Due to usage concerns, currently locking chunking to "auto"
                            f.require_dataset(brick_key,
                                              shape=bD,
                                              dtype=data_type,
                                              chunks=None,
                                              fillvalue=fill_value)
                            for w in list(work):  # iterate a copy (shallow; contained objects are not copied)
                                brick_slice, value = w
                                if isinstance(brick_slice, tuple):
                                    brick_slice = list(brick_slice)

                                log.debug('slice_=%s, value=%s', brick_slice,
                                          value)
                                f[brick_key].__setitem__(*brick_slice,
                                                         val=value)
                                # Remove the work AFTER it's completed (i.e. written)
                                work.remove(w)
                        log.debug('*%s*%s* done working on %s', time.time(),
                                  guid, brick_key)
                        self.resp_sock.send(
                            pack((SUCCESS, guid, brick_key, None)))
                    except Exception as ex:
                        log.error('Exception: %s', ex.message)
                        log.warn('%s send failure response with work %s', guid,
                                 work)
                        # TODO: Send the remaining work back
                        self.resp_sock.send(
                            pack((FAILURE, guid, brick_key, work)))
            except Exception as ex:
                log.error('Exception: %s', ex.message)
                log.error('%s send failure response with work %s', guid, None)
                # TODO: Send a response - I don't know what I was working on...
                self.resp_sock.send(pack((FAILURE, guid, None, None)))
def describe_deployments(deployments, context, instruments=None, instrument_status=None):
    """

    @param deployments  list of Deployment resource objects
    @param context  object to get the resource_registry from (e.g. container)
    @param instruments  list of InstrumentDevice resource objects
    @param instrument_status  coindexed list of status for InstrumentDevice to be added to respective Deployment
    @retval list with Deployment info dicts coindexed with argument deployments list
    """
    instruments = instruments or []
    instrument_status = instrument_status or []
    if not deployments:
        return []
    rr = context.resource_registry
    deployment_ids = [d._id for d in deployments]
    descriptions = {}
    for d in deployments:
        descriptions[d._id] = {'is_primary': False}
        # add start, end time
        time_constraint = None
        for constraint in d.constraint_list:
            if constraint.type_ == OT.TemporalBounds:
                if time_constraint:
                    log.warn('deployment %s has more than one time constraint (using first)', d.name)
                else:
                    time_constraint = constraint
        if time_constraint:
            descriptions[d._id]['start_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(time_constraint.start_datetime))) if time_constraint.start_datetime else ""
            descriptions[d._id]['end_time'] = time.strftime(TIME_FORMAT, time.gmtime(
                float(time_constraint.end_datetime))) if time_constraint.end_datetime else ""
        else:
            descriptions[d._id]['start_time'] = descriptions[d._id]['end_time'] = ""

    # first get the all site and instrument objects
    site_ids = []
    objects, associations = rr.find_subjects_mult(objects=deployment_ids, id_only=False)
    if log.isEnabledFor(TRACE):
        log.trace('have %d deployment-associated objects, %d are hasDeployment', len(associations),
                  sum([1 if assoc.p==PRED.hasDeployment else 0 for assoc in associations]))
    for obj, assoc in zip(objects, associations):
        # if this is a hasDeployment association...
        if assoc.p == PRED.hasDeployment:
            description = descriptions[assoc.o]

            # always save the id in one known field (used by UI)
            description['resource_id'] = assoc.o

            # save site or device info in the description
            res_type = obj.type_
            if res_type in (RT.InstrumentSite, RT.PlatformSite):
                description['site_id'] = obj._id
                description['site_name'] = obj.name
                description['site_type'] = res_type
                if obj._id not in site_ids:
                    site_ids.append(obj._id)
            elif res_type in (RT.InstrumentDevice, RT.PlatformDevice):
                description['device_id'] = obj._id
                description['device_name'] = obj.name
                description['device_type'] = res_type
                for instrument, status in zip(instruments, instrument_status):
                    if obj._id == instrument._id:
                        description['device_status'] = status
            else:
                log.warn('unexpected association: %s %s %s %s %s', assoc.st, assoc.s, assoc.p, assoc.ot, assoc.o)

    # Make the code below more robust by ensuring that all description entries are present, even
    # if Deployment is missing some associations (OOIION-1183)
    for d in descriptions.values():
        if "site_id" not in d:
            d['site_id'] = d['site_name'] = d['site_type'] = None
        if "device_id" not in d:
            d['device_id'] = d['device_name'] = d['device_type'] = None

    # now look for hasDevice associations to determine which deployments are "primary" or "active"
    objects2, associations = rr.find_objects_mult(subjects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasDevice', len(associations), sum([1 if assoc.p==PRED.hasDevice else 0 for assoc in associations]))
    for obj, assoc in zip(objects2, associations):
        if assoc.p == PRED.hasDevice:
            found_match = False
            for description in descriptions.itervalues():
                if description.get('site_id', None) == assoc.s and description.get('device_id', None) == assoc.o:
                    if found_match:
                        log.warn('more than one primary deployment for site %s (%s) and device %s (%s)',
                                 assoc.s, description['site_name'], assoc.o, description['device_name'])
                    description['is_primary'] = found_match = True

    # finally get parents of sites using hasSite
    objects3, associations = rr.find_subjects_mult(objects=site_ids)
    if log.isEnabledFor(TRACE):
        log.trace('have %d site-associated objects, %d are hasSite', len(associations), sum([1 if assoc.p==PRED.hasSite else 0 for assoc in associations]))
    for obj, assoc in zip(objects3, associations):
        if assoc.p == PRED.hasSite:
            found_match = False
            for description in descriptions.itervalues():
                if description.get('site_id', None) == assoc.o:
                    if found_match:
                        log.warn('more than one parent for site %s (%s)', assoc.o, description['site_name'])
                    found_match = True
                    description['parent_site_id'] = obj._id
                    description['parent_site_name'] = obj.name
                    description['parent_site_description'] = obj.description

    # convert to a list coindexed with the input deployments
    descriptions_list = [descriptions[d._id] for d in deployments]

    if log.isEnabledFor(DEBUG):
        log.debug('%d deployments, %d associated sites/devices, %d activations, %d missing status',
                  len(deployments), len(objects), len(objects2),
                  sum([0 if 'device_status' in d else 1 for d in descriptions_list]))

    return descriptions_list
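For reference, the description dict built for each deployment by the passes above has this shape; the keys come from the code, but the values here are hypothetical:

example_description = {
    'resource_id': 'dep_001',             # Deployment id (always set)
    'is_primary': True,                   # set when a hasDevice assoc links the site and device
    'start_time': '2013-01-01T00:00:00',  # from TemporalBounds, else ""
    'end_time': '',
    'site_id': 'site_001', 'site_name': 'Shelf Node', 'site_type': 'PlatformSite',
    'device_id': 'dev_001', 'device_name': 'CTD 1', 'device_type': 'InstrumentDevice',
    'device_status': 1,                   # copied from instrument_status for a matching device
    'parent_site_id': 'obs_001',          # populated only when a hasSite parent is found
    'parent_site_name': 'Observatory',
    'parent_site_description': 'hypothetical parent site',
}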
Example #43
    def _build_parsed_values(self):
        """
        Take the velocity data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorVelocityDataParticle: raw data =%r', self.raw_data)

        try:

            unpack_string = '<2s4B2H3h6BH'

            (sync_id, analog_input2_lsb, count, pressure_msb,
             analog_input2_msb, pressure_lsw, analog_input1, velocity_beam1,
             velocity_beam2, velocity_beam3, amplitude_beam1, amplitude_beam2,
             amplitude_beam3, correlation_beam1, correlation_beam2,
             correlation_beam3,
             checksum) = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<11H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)",
                         self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            analog_input2 = analog_input2_msb * 0x100 + analog_input2_lsb
            pressure = pressure_msb * 0x10000 + pressure_lsw

        except Exception as e:
            log.error(
                'Error creating particle vel3d_cd_velocity_data, raw data: %r',
                self.raw_data)
            raise SampleException(e)

        result = [{
            VID: VectorVelocityDataParticleKey.ANALOG_INPUT2,
            VAL: analog_input2
        }, {
            VID: VectorVelocityDataParticleKey.COUNT,
            VAL: count
        }, {
            VID: VectorVelocityDataParticleKey.PRESSURE,
            VAL: pressure
        }, {
            VID: VectorVelocityDataParticleKey.ANALOG_INPUT1,
            VAL: analog_input1
        }, {
            VID: VectorVelocityDataParticleKey.VELOCITY_BEAM1,
            VAL: velocity_beam1
        }, {
            VID: VectorVelocityDataParticleKey.VELOCITY_BEAM2,
            VAL: velocity_beam2
        }, {
            VID: VectorVelocityDataParticleKey.VELOCITY_BEAM3,
            VAL: velocity_beam3
        }, {
            VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM1,
            VAL: amplitude_beam1
        }, {
            VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM2,
            VAL: amplitude_beam2
        }, {
            VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM3,
            VAL: amplitude_beam3
        }, {
            VID: VectorVelocityDataParticleKey.CORRELATION_BEAM1,
            VAL: correlation_beam1
        }, {
            VID: VectorVelocityDataParticleKey.CORRELATION_BEAM2,
            VAL: correlation_beam2
        }, {
            VID: VectorVelocityDataParticleKey.CORRELATION_BEAM3,
            VAL: correlation_beam3
        }]

        log.debug('VectorVelocityDataParticle: particle=%s', result)
        return result
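The validate_checksum helper comes from the driver framework and is not shown in this listing. As a sketch of the kind of little-endian word-sum check these Nortek records use (the helper below and its 0xb58c seed are assumptions about the framework's internals, not its actual code):

import struct

def word_sum_checksum_ok(fmt, raw_data, seed=0xb58c):
    # Hypothetical stand-in for the framework's validate_checksum: sum the
    # 16-bit data words (fmt, e.g. '<11H' covers the 11 data words of the
    # 24-byte velocity record above) and compare against the trailing
    # checksum word.
    words = struct.unpack_from(fmt, raw_data)
    (expected,) = struct.unpack_from('<H', raw_data, struct.calcsize(fmt))
    return (seed + sum(words)) & 0xffff == expected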
    def _collect_agent_instance_associations(self):
        """
        Collect related resources to this agent instance

        Returns a dict of objects necessary to start this instance, keyed on the values of self._lookup_means()
            PRED.hasAgentInstance   -> device_obj
            PRED.hasModel           -> model_obj
            PRED.hasAgentDefinition -> agent_obj
            RT.ProcessDefinition    -> process_def_obj

        """
        assert self.agent_instance_obj

        lookup_means = self._lookup_means()

        assert lookup_means
        assert PRED.hasAgentInstance in lookup_means
        assert PRED.hasModel in lookup_means
        assert PRED.hasAgentDefinition in lookup_means
        #assert PRED.hasProcessDefinition in lookup_means

        lu = lookup_means

        ret = {}

        log.debug("retrieve the associated device")
        device_obj = self.RR2.find_subject(
            subject_type=lu[PRED.hasAgentInstance],
            predicate=PRED.hasAgentInstance,
            object=self.agent_instance_obj._id)

        ret[lu[PRED.hasAgentInstance]] = device_obj
        device_id = device_obj._id

        log.debug("%s '%s' connected to %s '%s' (L4-CI-SA-RQ-363)",
                  lu[PRED.hasAgentInstance], str(device_id),
                  type(self.agent_instance_obj).__name__,
                  str(self.agent_instance_obj._id))

        #        log.debug("retrieve the model associated with the device")
        #        model_obj = self.RR2.find_object(subject=device_id,
        #                                         predicate=PRED.hasModel,
        #                                         object_type=lu[PRED.hasModel])
        #
        #        ret[lu[PRED.hasModel]] = model_obj
        #        model_id = model_obj

        #retrieve the stream info for this model
        #todo: add stream info to the platform model create
        #        streams_dict = platform_models_objs[0].custom_attributes['streams']
        #        if not streams_dict:
        #            raise BadRequest("Device model does not contain stream configuration used in launching the agent. Model: '%s", str(platform_models_objs[0]) )
        #TODO: get the agent from the instance, not from the model
        log.debug("retrieve the agent associated with the model")
        agent_obj = self.RR2.find_object(
            subject=self.agent_instance_obj._id,
            predicate=PRED.hasAgentDefinition,
            object_type=lu[PRED.hasAgentDefinition])

        ret[lu[PRED.hasAgentDefinition]] = agent_obj
        agent_id = agent_obj._id

        if not agent_obj.stream_configurations:
            raise BadRequest(
                "Agent '%s' does not contain the stream configuration needed for launch"
                % str(agent_obj))

        log.debug("retrieve the process definition associated with this agent")
        process_def_obj = self.RR2.find_object(
            subject=agent_id,
            predicate=PRED.hasProcessDefinition,
            object_type=RT.ProcessDefinition)

        ret[RT.ProcessDefinition] = process_def_obj

        #retrieve the output products
        data_product_objs = self.RR2.find_objects(device_id,
                                                  PRED.hasOutputProduct,
                                                  RT.DataProduct,
                                                  id_only=False)

        if not data_product_objs:
            raise NotFound("No output Data Products attached to this Device " +
                           str(device_id))

        #retrieve the streams assoc with each defined output product
        for data_product_obj in data_product_objs:
            product_id = data_product_obj._id
            try:
                self.RR2.find_stream_id_of_data_product_using_has_stream(
                    product_id)  # check one stream per product
            except NotFound:
                errmsg = "Device '%s' (%s) has data products %s.  Data product '%s' (%s) has no stream ID." % \
                    (device_obj.name,
                     device_obj._id,
                     [dp._id for dp in data_product_objs],
                     data_product_obj.name,
                     product_id)
                raise NotFound(errmsg)

            # some products may not be persisted
            try:
                # check one dataset per product
                self.RR2.find_dataset_id_of_data_product_using_has_dataset(
                    product_id)
            except NotFound:
                log.warn(
                    "Data product '%s' of device %s ('%s') does not appear to be persisted -- no dataset",
                    product_id, device_obj.name, device_obj._id)

        self.associated_objects = ret
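To make the lookup_means indirection concrete, a hypothetical mapping for an instrument agent instance is sketched below; the real mapping is supplied by the concrete launcher subclass, so the names and import path are assumptions:

from pyon.public import PRED, RT  # assumed import path

lookup_means = {
    PRED.hasAgentInstance: RT.InstrumentDevice,   # -> device_obj
    PRED.hasModel: RT.InstrumentModel,            # -> model_obj (lookup currently commented out)
    PRED.hasAgentDefinition: RT.InstrumentAgent,  # -> agent_obj
}
# _collect_agent_instance_associations() then fills self.associated_objects with:
#   RT.InstrumentDevice  -> device_obj
#   RT.InstrumentAgent   -> agent_obj
#   RT.ProcessDefinition -> process_def_obj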
    def _generate_stream_config(self):
        log.debug("_generate_stream_config for %s",
                  self.agent_instance_obj.name)
        from pyon.core.object import IonObjectSerializer

        dsm = self.clients.dataset_management
        psm = self.clients.pubsub_management

        agent_obj = self._get_agent()
        device_obj = self._get_device()

        streams_dict = {}
        for stream_cfg in agent_obj.stream_configurations:
            #create a stream def for each param dict to match against the existing data products
            streams_dict[stream_cfg.stream_name] = {
                'param_dict_name': stream_cfg.parameter_dictionary_name
            }

        #retrieve the output products
        # TODO: What about platforms? other things?
        device_id = device_obj._id
        data_product_objs = self.RR2.find_data_products_of_instrument_device_using_has_output_product(
            device_id)

        stream_config = {}
        for d in data_product_objs:
            for stream_name, stream_info_dict in streams_dict.items():
                # read objects from cache to be compared
                pdict = self.RR2.find_resource_by_name(
                    RT.ParameterDictionary,
                    stream_info_dict.get('param_dict_name'))
                stream_def_id = self._find_streamdef_for_dp_and_pdict(
                    d._id, pdict._id)

                if stream_def_id:
                    #model_param_dict = self.RR2.find_resources_by_name(RT.ParameterDictionary,
                    #                                         stream_info_dict.get('param_dict_name'))[0]
                    #model_param_dict = self._get_param_dict_by_name(stream_info_dict.get('param_dict_name'))
                    #stream_route = self.RR2.read(product_stream_id).stream_route
                    product_stream_id = self.RR2.find_stream_id_of_data_product_using_has_stream(
                        d._id)
                    stream_def = psm.read_stream_definition(stream_def_id)
                    stream_route = psm.read_stream_route(
                        stream_id=product_stream_id)

                    stream_def_dict = IonObjectSerializer().serialize(stream_def)
                    stream_def_dict.pop('type_')

                    if stream_name in stream_config:
                        log.warn("Overwriting stream_config[%s]", stream_name)

                    stream_config[stream_name] = {
                        'routing_key': stream_route.routing_key,  # TODO: serialize stream_route together
                        'stream_id': product_stream_id,
                        'stream_definition_ref': stream_def_id,
                        'stream_def_dict': stream_def_dict,
                        'exchange_point': stream_route.exchange_point,
                        # TODO: this is redundant and very large - the param dict is already in stream_def_dict
                        'parameter_dictionary': stream_def.parameter_dictionary,
                    }

        log.debug("Stream config generated")
        log.trace("generate_stream_config: %s", stream_config)
        return stream_config
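For reference, one generated stream_config entry has this shape (keys as built above, hypothetical values):

stream_config = {
    'parsed': {
        'routing_key': 'stream_abc.data',
        'stream_id': 'stream_abc',
        'stream_definition_ref': 'streamdef_001',
        'stream_def_dict': {},        # serialized StreamDefinition minus 'type_'
        'exchange_point': 'science_data',
        'parameter_dictionary': {},   # redundant copy of the param dict (see TODO above)
    },
}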
Example #46
    def _got_device_aggregate_status_event(self, evt, *args, **kwargs):
        """
        Reacts to a DeviceAggregateStatusEvent from a platform's child.

        - notifies platform that child is running in case of any needed revalidation
        - updates the local image of the child status for the corresponding status name
        - updates the rollup status for that status name
        - if this rollup status changes, then a subsequent DeviceAggregateStatusEvent
          is published.

        The consolidation operation is taken from observatory_util.py.

        @param evt    DeviceAggregateStatusEvent from child.
        """

        with self._lock:
            if not self._active:
                log.warn(
                    "%r: _got_device_aggregate_status_event called but "
                    "manager has been destroyed", self._platform_id)
                return

        log.debug("%r: _got_device_aggregate_status_event: %s",
                  self._platform_id, evt)

        if evt.type_ != "DeviceAggregateStatusEvent":
            # should not happen.
            msg = "%r: Got event for different event_type=%r but subscribed to %r" % (
                self._platform_id, evt.type_, "DeviceAggregateStatusEvent")
            log.error(msg)
            raise PlatformException(msg)

        if evt.origin not in self.aparam_child_agg_status:
            # should not happen.
            msg = "%r: got event from unrecognized origin=%s" % (
                self._platform_id, evt.origin)
            log.error(msg)
            raise PlatformException(msg)

        status_name = evt.status_name
        child_origin = evt.origin
        child_status = evt.status

        # tell platform this child is running in case of any needed revalidation:
        self._agent._child_running(child_origin)

        with self._lock:
            old_status = self.aparam_child_agg_status[child_origin][
                status_name]
            if child_status == old_status:
                #
                # My image of the child status is not changing, so nothing to do:
                #
                return

            # update the specific status
            self.aparam_child_agg_status[child_origin][
                status_name] = child_status

            # TODO any need to pass child's alerts_list in the next call? See OOIION-1275
            new_rollup_status = self._update_rollup_status_and_publish(
                status_name, child_origin)

        if new_rollup_status and log.isEnabledFor(
                logging.TRACE):  # pragma: no cover
            self._log_agg_status_update(log.trace, evt, new_rollup_status)
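The consolidation itself lives in observatory_util.py. A minimal sketch of a worst-status-wins rollup of the kind applied here (the severity ordering and import path are assumptions, not the actual observatory_util logic):

from interface.objects import DeviceStatusType  # assumed import path

_SEVERITY = {
    DeviceStatusType.STATUS_OK: 0,
    DeviceStatusType.STATUS_UNKNOWN: 1,
    DeviceStatusType.STATUS_WARNING: 2,
    DeviceStatusType.STATUS_CRITICAL: 3,
}

def consolidate(statuses):
    # Roll a list of child statuses up into one: the most severe status
    # wins; UNKNOWN when there are no children to consolidate.
    if not statuses:
        return DeviceStatusType.STATUS_UNKNOWN
    return max(statuses, key=lambda s: _SEVERITY.get(s, 1))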
Example #47
    def describe_deployments(self, deployments, status_map=None):
        """
        For a list of deployment IDs, generate a list of dicts with information about the deployments
        suitable for the UI table: [ { 'ui_column': 'string_value'... } , ...]
        @param deployments  list of Deployment resource objects
        @param status_map  map of device id to device status dict
        @retval list with Deployment info dicts coindexed with argument deployments list
        """
        dep_info_list = []
        dep_site_map, dep_dev_map = self.get_deployments_relations(
            deployments, return_objects=True)
        site_structure = status_map.get("_system", {}).get(
            "devices", None) if status_map else None

        dep_by_id = {}
        for dep in deployments:
            dep_info = {}
            dep_info_list.append(dep_info)
            dep_by_id[dep._id] = dep_info

            # Set temporal bounds
            temp_const = self.get_temporal_constraint(dep)
            if temp_const:
                dep_info['start_time'] = time.strftime(
                    TIME_FORMAT, time.gmtime(float(temp_const.start_datetime))
                ) if temp_const.start_datetime else ""
                dep_info['end_time'] = time.strftime(
                    TIME_FORMAT, time.gmtime(float(temp_const.end_datetime))
                ) if temp_const.end_datetime else ""
            else:
                dep_info['start_time'] = dep_info['end_time'] = ""

            # Set device information
            device_obj = dep_dev_map.get(dep._id, None)
            if device_obj:
                dep_info['device_id'] = device_obj._id
                dep_info['device_name'] = device_obj.name
                dep_info['device_type'] = device_obj.type_
                dep_info['device_status'] = status_map.get(device_obj._id, {}).get("agg", DeviceStatusType.STATUS_UNKNOWN)
            else:
                log.warn("Deployment %s has no Device", dep._id)
                dep_info['device_id'] = dep_info['device_name'] = dep_info['device_type'] = dep_info['device_status'] = None

            # Set site information
            site_obj = dep_site_map.get(dep._id, None)
            if site_obj:
                dep_info['site_id'] = site_obj._id
                dep_info['site_name'] = site_obj.name
                dep_info['site_type'] = site_obj.type_
            else:
                log.warn("Deployment %s has no Site", dep._id)
                dep_info['site_id'] = dep_info['site_name'] = dep_info[
                    'site_type'] = None

            # Set status information
            if status_map and dep.lcstate == LCS.DEPLOYED:
                dep_info["is_primary"] = DeviceStatusType.STATUS_OK
                if site_structure and site_obj and device_obj and site_obj._id in site_structure:
                    try:
                        # Additionally check deployment date
                        now = time.time()
                        if temp_const and (now < float(temp_const.start_datetime)
                                           or now > float(temp_const.end_datetime)):
                            dep_info["is_primary"] = DeviceStatusType.STATUS_WARNING

                        # Additionally check assoc between site and device
                        site_deps = site_structure[site_obj._id]
                        if not any(True for st, did, dt in site_deps
                                   if did == device_obj._id and dt in (RT.PlatformDevice, RT.InstrumentDevice)):
                            dep_info["is_primary"] = DeviceStatusType.STATUS_WARNING
                    except Exception:
                        log.exception("Error determining site structure")
            else:
                dep_info["is_primary"] = DeviceStatusType.STATUS_UNKNOWN

            # Set site parent - appears unused by the UI currently, so it is not looked up
            parent_site_obj = None
            if parent_site_obj:
                dep_info['parent_site_id'] = parent_site_obj._id
                dep_info['parent_site_name'] = parent_site_obj.name
                dep_info['parent_site_description'] = parent_site_obj.description
            else:
                #log.warn("Deployment %s has no parent Site", dep._id)
                dep_info['parent_site_id'] = dep_info['parent_site_name'] = dep_info['parent_site_description'] = None

        return dep_info_list
Example #48
    def _read_chunk(self, f):
        #log.info("Start parsing chunk @%s", f.tell())
        chunk = {}
        skipped = ""
        b = f.read(1)
        while b != CHUNK_START and b != END_OF_FILE:
            skipped += b
            b = f.read(1)
        if b == END_OF_FILE:
            return None
        if skipped:
            log.warn("Skipped %s bytes: %s", len(skipped), skipped)
        dev_type = f.read(2)
        #log.info("Found dev_type: %s", dev_type)
        if dev_type not in DEVICE_TYPES:
            raise ParserException("Unknown device type: %s" % dev_type)
        chunk["dev_type"] = dev_type

        header = f.read(30)

        cid, inum, spc1, clen, pflag, ts, spc2, bnum, spc3, crc, stx = struct.unpack("5s2s1s4s1s8s1s2s1s4s1s", header)
        if not (spc1 == spc2 == spc3 == "_"):
            raise ParserException("Could not parse spacers")
        if not stx == CONTENT_STX:
            raise ParserException("Could not parse content STX")

        chunk.update(dict(
            controller_id=int(cid),
            inst_num=int(inum),
            content_length=int(clen, 16),
            processing_flag=pflag,
            timestamp=int(ts, 16),
            block_num=int(bnum, 16),
            content_crc=int(crc, 16),
            end=1
        ))
        content = f.read(chunk["content_length"])
        if len(content) != chunk["content_length"]:
            raise ParserException("Content too short: %s instead of %s" % (len(content), chunk["content_length"]))
        if CONTENT_ETX in content:
            log.warn("Content contains ETX marker")
        # if f.read(1) != CONTENT_ETX:
        #     raise ParserException("Content ETX expected")

        chunk["content"] = content

        b = f.read(1)
        if b == CONTENT_ETX and crc16_iso14443b(content):
            # Yay, content is good - can follow on
            chunk["parse_status"] = "OK"
        elif b == CONTENT_ETX:
            chunk["parse_status"] = "bad CRC"
            log.warn("Content length ok but bad CRC")
        else:
            extra = ""
            while b != CONTENT_ETX and b != CHUNK_START and b != END_OF_FILE:
                extra += b
                b = f.read(1)
            if b == END_OF_FILE:
                raise ParserException("Unexpected EOF")
            if b == CHUNK_START:
                f.seek(-1, 1)

            if len(extra) > 0:
                log.warn("Found %s extra content bytes to ETX instead of %s", len(extra), chunk["content_length"])

            content += extra
            chunk["content_length"] = len(content)
            chunk["content"] = content
            chunk["parse_status"] = "content overrun"

        #import pprint
        #pprint.pprint(chunk)

        log.info("Chunk %(dev_type)s %(controller_id)s.%(inst_num)s f=%(processing_flag)s len=%(content_length)s ts=%(timestamp)s bn=%(block_num)s stat=%(parse_status)s" % chunk)

        return chunk
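To illustrate the 30-byte header layout parsed above, here is a self-contained round-trip using the same struct format; every field value and the trailing STX byte are made up (the real CONTENT_STX constant is defined elsewhere in the parser module):

import struct

HEADER_FMT = "5s2s1s4s1s8s1s2s1s4s1s"   # 30 bytes, as unpacked above

header = struct.pack(
    HEADER_FMT,
    "00012",     # controller_id, decimal
    "01",        # inst_num, decimal
    "_",         # spacer
    "004e",      # content_length, hex -> 78 bytes
    "D",         # processing_flag
    "52a3b1c0",  # timestamp, hex seconds
    "_",         # spacer
    "0a",        # block_num, hex -> 10
    "_",         # spacer
    "1f2e",      # content_crc, hex
    "\x02",      # assumed CONTENT_STX byte
)
cid, inum, _, clen, pflag, ts, _, bnum, _, crc, stx = struct.unpack(HEADER_FMT, header)
assert int(clen, 16) == 78 and int(bnum, 16) == 10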
Example #49
    def _build_parsed_values(self):
        """
        Take the system data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorSystemDataParticle: raw data =%r', self.raw_data)

        try:

            unpack_string = '<4s6s2H4h2bHH'

            (sync, timestamp, battery, sound_speed, heading, pitch, roll,
             temperature, error, status, analog_input,
             cksum) = struct.unpack_from(unpack_string, self.raw_data)

            if not validate_checksum('<13H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)",
                         self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(timestamp)
            self.set_internal_timestamp(
                (timestamp - datetime(1900, 1, 1)).total_seconds())

        except Exception as e:
            log.error(
                'Error creating particle vel3d_cd_system_data, raw data: %r',
                self.raw_data)
            raise SampleException(e)

        result = [{
            VID: VectorSystemDataParticleKey.TIMESTAMP,
            VAL: str(timestamp)
        }, {
            VID: VectorSystemDataParticleKey.BATTERY,
            VAL: battery
        }, {
            VID: VectorSystemDataParticleKey.SOUND_SPEED,
            VAL: sound_speed
        }, {
            VID: VectorSystemDataParticleKey.HEADING,
            VAL: heading
        }, {
            VID: VectorSystemDataParticleKey.PITCH,
            VAL: pitch
        }, {
            VID: VectorSystemDataParticleKey.ROLL,
            VAL: roll
        }, {
            VID: VectorSystemDataParticleKey.TEMPERATURE,
            VAL: temperature
        }, {
            VID: VectorSystemDataParticleKey.ERROR,
            VAL: error
        }, {
            VID: VectorSystemDataParticleKey.STATUS,
            VAL: status
        }, {
            VID: VectorSystemDataParticleKey.ANALOG_INPUT,
            VAL: analog_input
        }]

        log.debug('VectorSystemDataParticle: particle=%r', result)

        return result
Example #50
    def _start_subscriber_process_lifecycle_event(self, origin):
        """
        @param origin    Child's resource_id. The associated PID retrieved via
                         ResourceAgentClient._get_agent_process_id is used for
                         the event subscriber itself, but we still index
                         _event_subscribers with the given origin.
        """
        def _got_process_lifecycle_event(evt, *args, **kwargs):
            with self._lock:
                if not self._active:
                    log.warn(
                        "%r: _got_process_lifecycle_event called but "
                        "manager has been destroyed", self._platform_id)
                    return

                if evt.type_ != "ProcessLifecycleEvent":
                    log.trace(
                        "%r: ignoring event type %r. Only handle "
                        "ProcessLifecycleEvent directly.", self._platform_id,
                        evt.type_)
                    return

                # evt.origin is a PID
                pid = evt.origin

                if pid not in self._rids:
                    log.warn(
                        "%r: OOIION-1077 ignoring event from pid=%r. "
                        "Expecting one of %s", self._platform_id, pid,
                        self._rids.keys())
                    return

                origin = self._rids[pid]

                # # Before the _rids mapping, a preliminary mechanism to check
                # # whether the event came from the expected origin relied on
                # # the process ID having origin as a substring:
                #
                # if not origin in pid:
                #     log.warn("%r: OOIION-1077 ignoring event from origin %r. "
                #              "Expecting an origin containing %r",
                #              self._platform_id, pid, origin)
                #     return
                # # BUT this was definitely weak. Although the PID for an
                # # initial agent process seems to satisfy this assumption,
                # # this is not anymore the case upon a re-start of that agent.

                log.debug(
                    "%r: OOIION-1077  _got_process_lifecycle_event: "
                    "pid=%r origin=%r state=%r(%s)", self._platform_id, pid,
                    origin, ProcessStateEnum._str_map[evt.state], evt.state)

                if evt.state == ProcessStateEnum.TERMINATED:
                    self._device_terminated_event(origin, pid)

        # use associated process ID for the subscription:
        pid = ResourceAgentClient._get_agent_process_id(origin)

        if pid is None:
            log.warn(
                "%r: OOIION-1077 ResourceAgentClient._get_agent_process_id"
                " returned None for origin=%r. Subscriber not created.",
                self._platform_id, origin)
            return

        sub = self._agent._create_event_subscriber(
            event_type="ProcessLifecycleEvent",
            origin_type='DispatchedProcess',
            origin=pid,
            callback=_got_process_lifecycle_event)

        with self._lock:
            # but note that we use the given origin as index in _event_subscribers:
            self._event_subscribers[origin] = sub

            # and capture the pid -> origin mapping:
            self._rids[pid] = origin

        log.debug(
            "%r: OOIION-1077 registered ProcessLifecycleEvent subscriber "
            "with pid=%r (origin=%r)", self._platform_id, pid, origin)
Example #51
    def _build_parsed_values(self):
        """
        Take the head config data and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            unpack_string = '<4s2s2H12s176s22sHh2s'
            sync, config, head_freq, head_type, head_serial, system_data, _, num_beams, cksum, _ = struct.unpack(
                unpack_string, self.raw_data)

            if not validate_checksum('<111H', self.raw_data, -4):
                log.warn("Failed checksum in %s from instrument (%r)",
                         self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            config = common.convert_word_to_bit_field(config)
            system_data = base64.b64encode(system_data)
            head_serial = head_serial.split('\x00', 1)[0]

            pressure_sensor = config[-1]
            mag_sensor = config[-2]
            tilt_sensor = config[-3]
            tilt_mount = config[-4]

        except Exception as e:
            log.error('Error creating particle head config, raw data: %r',
                      self.raw_data)
            raise SampleException(e)

        result = [{
            VID: NortekHeadConfigDataParticleKey.PRESSURE_SENSOR,
            VAL: pressure_sensor
        }, {
            VID: NortekHeadConfigDataParticleKey.MAG_SENSOR,
            VAL: mag_sensor
        }, {
            VID: NortekHeadConfigDataParticleKey.TILT_SENSOR,
            VAL: tilt_sensor
        }, {
            VID: NortekHeadConfigDataParticleKey.TILT_SENSOR_MOUNT,
            VAL: tilt_mount
        }, {
            VID: NortekHeadConfigDataParticleKey.HEAD_FREQ,
            VAL: head_freq
        }, {
            VID: NortekHeadConfigDataParticleKey.HEAD_TYPE,
            VAL: head_type
        }, {
            VID: NortekHeadConfigDataParticleKey.HEAD_SERIAL,
            VAL: head_serial
        }, {
            VID: NortekHeadConfigDataParticleKey.SYSTEM_DATA,
            VAL: system_data,
            DataParticleKey.BINARY: True
        }, {
            VID: NortekHeadConfigDataParticleKey.NUM_BEAMS,
            VAL: num_beams
        }]

        log.debug('NortekHeadConfigDataParticle: particle=%r', result)
        return result
Example #52
def dataqc_gradienttest(dat,
                        x,
                        ddatdx,
                        mindx,
                        startdat,
                        toldat,
                        strict_validation=False):
    """
    Description

        Data quality control algorithm testing if changes between successive
        data points fall within a certain range.

        Input data dat are given as a function of coordinate x. The algorithm
        will flag dat values as bad if the change deltaDAT/deltaX between
        successive dat values exceeds thresholds given in ddatdx. Once the
        threshold is exceeded, following dat are considered bad until a dat
        value returns to within toldat of the last known good value.

        It is possible to remove data points that are too close together in x
        coordinates (use mindx).

        By default, the first value of dat is considered good. To change this,
        use startdat and toldat to set as the first good data point the first
        one that comes within toldat of startdat.

    Implemented by:

        2012-07-17: DPS authored by Mathias Lankhorst. Example code provided
        for Matlab.
        2013-04-06: Christopher Wingard. Initial python implementation.

    Usage:

        outdat, outx, outqc = dataqc_gradienttest(dat, x, ddatdx, mindx,
                                                  startdat, toldat);

            where

        outdat = same as dat except that NaNs and values not meeting mindx are
            removed.
        outx = same as x except that NaNs and values not meeting mindx are
            removed.
        outqc = output quality control flags for outdat. 0 means bad data, 1
            means good data.

        dat = input dataset, a numeric real vector.
        x = coordinate (e.g. time, distance) along which dat is given. Must be
            of the same size as dat and strictly increasing.
        ddatdx = two-element vector defining the valid range of ddat/dx
            from one point to the next.
        mindx = scalar. minimum dx for which this test will be applied (data
            that are less than mindx apart will be deleted). defaults to zero
            if NaN/empty.
        startdat = start value (scalar) of dat that is presumed good. defaults
            to first non-NaN value of dat if NaN/empty.
        toldat = tolerance value (scalar) for dat; threshold to within which
            dat must return to be counted as good, after exceeding a ddatdx
            threshold detected bad data.

    References:

        OOI (2012). Data Product Specification for Gradient Test. Document
            Control Number 1341-100010.
            https://alfresco.oceanobservatories.org/ (See: Company Home >> OOI
            >> Controlled >> 1000 System Level >>
            1341-10010_Data_Product_SPEC_GRDTEST_OOI.pdf)
    """

    if strict_validation:
        if not utils.isvector(dat) or not utils.isvector(x):
            raise ValueError('\'dat\' and \'x\' must be vectors')

        if len(dat) != len(x):
            raise ValueError('\'dat\' and \'x\' must be of equal len')

        if not all(np.diff(x) > 0):
            raise ValueError('\'x\' must be monotonically increasing')

    dat = np.asanyarray(dat, dtype=np.float).flatten()
    x = np.asanyarray(x, dtype=np.float).flatten()

    if np.isnan(mindx):
        mindx = 0
    mindx = mindx or 0
    if np.isnan(startdat):
        startdat = 0
    startdat = startdat or 0

    # No strict validation here; these are scalars and must be validated
    # before going into the C-layer
    if not utils.isscalar(mindx):
        raise ValueError("'mindx' must be scalar, NaN, or empty.")
    if not utils.isscalar(startdat):
        raise ValueError("'startdat' must be scalar, NaN, or empty.")

    # Confirm that there are still data points left, else abort:
    if np.abs(x[0] - x[-1]) < mindx:
        out = np.zeros(x.shape)
        out.fill(1)
        log.warn('Too few values to inspect')
        return out

    grad_min = ddatdx[0]
    grad_max = ddatdx[1]
    out = gradientvalues(dat, x, grad_min, grad_max, mindx, startdat, toldat)

    return out
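A minimal usage sketch, assuming this module and its C-layer gradientvalues helper are importable. Note that the function as written returns the single result produced by gradientvalues, while the docstring describes the original Matlab (outdat, outx, outqc) triple:

import numpy as np

dat = np.array([3.0, 3.5, 90.0, 4.0, 4.5])   # one wild point at index 2
x = np.arange(5.0)                           # strictly increasing coordinate
qc = dataqc_gradienttest(dat, x,
                         ddatdx=[-2.0, 2.0],  # valid ddat/dx range
                         mindx=0,             # keep all points
                         startdat=0,          # 0 -> first value presumed good
                         toldat=1.0)          # return-to-good tolerance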