Example 1
 def _reject_all(self):
     while self._msg_buffer:
         msg = self._msg_buffer.pop()
         try:
             msg.reject()
         except:
             log.critical('Failed to reject message')
Example 2
 def _ack_all(self):
     while self._msg_buffer:
         msg = self._msg_buffer.pop()
         try:
             msg.ack()
         except:
             log.critical('Failed to ack message')
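Examples 1 and 2 differ only in which call is made on each buffered message. As a minimal sketch (not part of the original code, assuming the same `log` and `self._msg_buffer` as above, and with `_drain_buffer` as a hypothetical helper name), the drain loop could be factored out and the bare `except` narrowed so the traceback is kept in the log:

 def _drain_buffer(self, action_name):
     # Hypothetical helper (not in the original code): pop every buffered
     # message and apply msg.ack() or msg.reject() to it.
     while self._msg_buffer:
         msg = self._msg_buffer.pop()
         try:
             getattr(msg, action_name)()
         except Exception:
             # Narrower than a bare except, and the traceback is preserved
             log.critical('Failed to %s message', action_name, exc_info=True)

 def _reject_all(self):
     self._drain_buffer('reject')

 def _ack_all(self):
     self._drain_buffer('ack')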
Example 3
 def _reject_all(self):
     while self._msg_buffer:
         msg = self._msg_buffer.pop()
         try:
             msg.reject()
         except:
             log.critical('Failed to reject message')
Example 4
 def _ack_all(self):
     while self._msg_buffer:
         msg = self._msg_buffer.pop()
         try:
             msg.ack()
         except:
             log.critical('Failed to ack message')
Example 5
    def get_coverage_doctor(self, dataset_id, data_product_id=None):
        # Get the associated objects required for rebuilding
        dset_obj = self.get_dataset_object(dataset_id)

        if data_product_id is None:
            # Go find the first data product associated with dataset_id
            data_product_id, _ = self.resource_registry.find_subjects(object=dataset_id, predicate=PRED.hasDataset, id_only=True)
            data_product_id = data_product_id[0] if len(data_product_id) > 0 else None

        if data_product_id is None:
            raise ValueError('Cannot find any Data Products associated with dataset_id \'{0}\''.format(dataset_id))

        dprod_obj = self.get_data_product_object(data_product_id)

        # Get the path to the editable coverage - also ensures ingestion is paused
        cpth = None
        try:
            with self.get_editable_coverage(dataset_id) as cov:
                cpth = cov.persistence_dir
        except IOError as ex:
            fs = 'Unable to open reference coverage: \''
            accessibility_errors = [
                 'unable to create file (File accessability: Unable to open file)',
                 'unable to open file (File accessibilty: Unable to open file)',
                 'unable to open file (File accessability: Unable to open file)']
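            # The spellings above ('accessability', 'accessibilty') appear to be
            # intentional: they are matched verbatim against ex.message below, so
            # they must mirror the exact text of the underlying storage errors.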
            if fs in ex.message:  # The view coverage couldn't load its underlying reference coverage
                cpth = ex.message[len(fs):-1]

            for err in accessibility_errors:
                if err in ex.message:
                    cpth = self.get_coverage_path(dataset_id)
                    self.pause_ingestion(self.get_stream_id(dataset_id))
                    break
            else:
                log.critical("Unmatched error: %s", ex.message)
                raise
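The error handling above leans on Python's for/else: the else branch runs only when the loop finishes without hitting break, i.e. when none of the known accessibility messages matched, and only then is the unmatched error logged and re-raised. A tiny standalone illustration (not from the original code):

    known_errors = ['file locked', 'file missing']
    message = 'disk full'

    for err in known_errors:
        if err in message:
            print('matched known error: ' + err)
            break
    else:
        # Reached only when the loop completed without `break`,
        # i.e. no known error matched the message.
        print('unmatched error: ' + message)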
Example 6
    def run_qc(self, data_product, reference_designator, parameter, qc_mapping):
        '''
        Determines which algorithm the parameter should run, then evaluates the QC
        '''

        # We key off of the OOI Short Name
        # DATAPRD_ALGRTHM_QC
        dp_ident, alg, qc = parameter.ooi_short_name.split('_')
        if dp_ident not in qc_mapping:
            return # No input!
        input_name = qc_mapping[dp_ident]

        try:
            doc = self.container.object_store.read_doc(reference_designator)
        except NotFound:
            return # NO QC lookups found
        if dp_ident not in doc:
            log.critical("Data product %s not in doc", dp_ident)
            return # No data product of this listing in the RD's entry
        # Lookup table has the rows for the QC inputs
        lookup_table = doc[dp_ident]

        # An instance of the coverage is loaded if we need to run an algorithm
        dataset_id = self.get_dataset(data_product)
        coverage = self.get_coverage(dataset_id)
        if not coverage.num_timesteps: # No data = no qc
            coverage.close()
            return

        try:
            # Get the lookup table info then run
            if alg.lower() == 'glblrng':
                row = self.recent_row(lookup_table['global_range'])
                min_value = row['min_value']
                max_value = row['max_value']
                self.process_glblrng(coverage, parameter, input_name, min_value, max_value)

            elif alg.lower() == 'stuckvl':
                row = self.recent_row(lookup_table['stuck_value'])
                resolution = row['resolution']
                N = row['consecutive_values']
                self.process_stuck_value(coverage, parameter, input_name, resolution, N)

            elif alg.lower() == 'trndtst':
                row = self.recent_row(lookup_table['trend_test'])
                ord_n = row['polynomial_order']
                nstd = row['standard_deviation']
                self.process_trend_test(coverage, parameter, input_name, ord_n, nstd)

            elif alg.lower() == 'spketst':
                row = self.recent_row(lookup_table['spike_test'])
                acc = row['accuracy']
                N = row['range_multiplier']
                L = row['window_length']
                self.process_spike_test(coverage, parameter, input_name, acc, N, L)

            elif alg.lower() == "gradtst":
                row = self.recent_row(lookup_table["gradient_test"])
                ddatdx = row["ddatdx"]
                mindx = row["mindx"]
                startdat = row["startdat"]
                if isinstance(startdat, basestring) and not startdat:
                    startdat = np.nan
                if isinstance(mindx, basestring) and not mindx:
                    mindx = np.nan
                toldat = row["toldat"]
                self.process_gradient_test(coverage, parameter, input_name, ddatdx, mindx, startdat, toldat)

            elif alg.lower() == 'loclrng':
                pass

        except KeyError: # No lookup table
            self.set_error(coverage, parameter)

        finally:
            coverage.close()
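The shape run_qc expects from the object store can be read off the lookups above. The key names below come straight from the code in Example 6 (where lookup_table = doc[dp_ident]); the identifier 'TEMPWAT' and all values are illustrative only, and it is assumed that recent_row() selects one row from each list. For reference, an ooi_short_name of 'TEMPWAT_GLBLRNG_QC' splits into dp_ident 'TEMPWAT', alg 'GLBLRNG', qc 'QC':

    # Illustrative shape of the QC lookup document read with
    # object_store.read_doc(reference_designator); values are made up.
    doc = {
        'TEMPWAT': {                                  # dp_ident
            'global_range': [{'min_value': -5.0, 'max_value': 35.0}],
            'stuck_value': [{'resolution': 0.001, 'consecutive_values': 10}],
            'trend_test': [{'polynomial_order': 1, 'standard_deviation': 3.0}],
            'spike_test': [{'accuracy': 0.01, 'range_multiplier': 5,
                            'window_length': 15}],
            'gradient_test': [{'ddatdx': 0.1, 'mindx': '', 'startdat': '',
                               'toldat': 0.05}],
        },
    }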
Example 7
    def run_qc(self, data_product, reference_designator, parameter, qc_mapping):
        '''
        Determines which algorithm the parameter should run, then evaluates the QC
        '''

        # We key off of the OOI Short Name
        # DATAPRD_ALGRTHM_QC
        log.error("Running QC %s (%s)", data_product.name, parameter.name)
        dp_ident, alg, qc = parameter.ooi_short_name.split('_')
        log.error("Identifier: %s", dp_ident)
        log.error("Test: %s", alg)
        if dp_ident not in qc_mapping:
            return # No input!
        input_name = qc_mapping[dp_ident]

        try:
            doc = self.container.object_store.read_doc(reference_designator)
        except NotFound:
            return # NO QC lookups found
        if dp_ident not in doc[reference_designator]:
            log.critical("Data product %s not in doc", dp_ident)
            return # No data product of this listing in the RD's entry
        # Lookup table has the rows for the QC inputs
        lookup_table = doc[reference_designator][dp_ident]
        log.error("lookup table found")

        # An instance of the coverage is loaded if we need to run an algorithm
        dataset_id = self.get_dataset(data_product)
        coverage = self.get_coverage(dataset_id)
        if not coverage.num_timesteps: # No data = no qc
            coverage.close()
            return

        try:
            # Get the lookup table info then run
            if alg.lower() == 'glblrng':
                row = self.recent_row(lookup_table['global_range'])
                min_value = row['min_value']
                max_value = row['max_value']
                self.process_glblrng(coverage, parameter, input_name, min_value, max_value)

            elif alg.lower() == 'stuckvl':
                log.error("Running Stuck Value")
                row = self.recent_row(lookup_table['stuck_value'])
                resolution = row['resolution']
                N = row['consecutive_values']
                self.process_stuck_value(coverage, parameter, input_name, resolution, N)

            elif alg.lower() == 'trndtst':
                log.error("Running Trend Test")
                row = self.recent_row(lookup_table['trend_test'])
                ord_n = row['polynomial_order']
                nstd = row['standard_deviation']
                self.process_trend_test(coverage, parameter, input_name, ord_n, nstd)

            elif alg.lower() == 'spketst':
                log.error("Runnign Spike Test")
                row = self.recent_row(lookup_table['spike_test'])
                acc = row['accuracy']
                N = row['range_multiplier']
                L = row['window_length']
                self.process_spike_test(coverage, parameter, input_name, acc, N, L)
        finally:
            coverage.close()
Example 8
    def run_qc(self, data_product, reference_designator, parameter, qc_mapping,
               parameters):
        '''
        Determines which algorithm the parameter should run, then evaluates the QC

        data_product         - Data Product Resource
        reference_designator - reference designator string
        parameter            - parameter context resource
        qc_mapping           - a dictionary of { data_product_name : parameter_name }
        '''

        # We key off of the OOI Short Name
        # DATAPRD_ALGRTHM_QC
        dp_ident, alg, qc = parameter.ooi_short_name.split('_')
        if dp_ident not in qc_mapping:
            return  # No input!
        input_name = self.calibrated_candidates(data_product, parameter,
                                                qc_mapping, parameters)

        try:
            doc = self.container.object_store.read_doc(reference_designator)
        except NotFound:
            return  # NO QC lookups found
        if dp_ident not in doc:
            log.critical("Data product %s not in doc", dp_ident)
            return  # No data product of this listing in the RD's entry
        # Lookup table has the rows for the QC inputs
        lookup_table = doc[dp_ident]

        # An instance of the coverage is loaded if we need to run an algorithm
        dataset_id = self.get_dataset(data_product)
        coverage = self.get_coverage(dataset_id)
        if not coverage.num_timesteps:  # No data = no qc
            coverage.close()
            return

        try:
            # Get the lookup table info then run
            if alg.lower() == 'glblrng':
                row = self.recent_row(lookup_table['global_range'])
                min_value = row['min_value']
                max_value = row['max_value']
                self.process_glblrng(coverage, parameter, input_name,
                                     min_value, max_value)

            elif alg.lower() == 'stuckvl':
                row = self.recent_row(lookup_table['stuck_value'])
                resolution = row['resolution']
                N = row['consecutive_values']
                self.process_stuck_value(coverage, parameter, input_name,
                                         resolution, N)

            elif alg.lower() == 'trndtst':
                row = self.recent_row(lookup_table['trend_test'])
                ord_n = row['polynomial_order']
                nstd = row['standard_deviation']
                self.process_trend_test(coverage, parameter, input_name, ord_n,
                                        nstd)

            elif alg.lower() == 'spketst':
                row = self.recent_row(lookup_table['spike_test'])
                acc = row['accuracy']
                N = row['range_multiplier']
                L = row['window_length']
                self.process_spike_test(coverage, parameter, input_name, acc,
                                        N, L)

            elif alg.lower() == "gradtst":
                row = self.recent_row(lookup_table["gradient_test"])
                ddatdx = row["ddatdx"]
                mindx = row["mindx"]
                startdat = row["startdat"]
                if isinstance(startdat, basestring) and not startdat:
                    startdat = np.nan
                if isinstance(mindx, basestring) and not mindx:
                    mindx = np.nan
                toldat = row["toldat"]
                self.process_gradient_test(coverage, parameter, input_name,
                                           ddatdx, mindx, startdat, toldat)

            elif alg.lower() == 'loclrng':
                row = self.recent_row(lookup_table["local_range"])
                table = row['table']
                dims = []
                datlimz = []
                for key in table.iterkeys():
                    # Skip the datlims
                    if 'datlim' in key:
                        continue
                    dims.append(key)
                    datlimz.append(table[key])

                datlimz = np.column_stack(datlimz)
                datlim = np.column_stack([table['datlim1'], table['datlim2']])
                self.process_local_range_test(coverage, parameter, input_name,
                                              datlim, datlimz, dims)

        except KeyError:  # No lookup table
            self.set_error(coverage, parameter)

        finally:
            coverage.close()
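In the loclrng branch above, the per-dimension columns of the lookup table are stacked into the 2-D arrays passed to the local-range test. A small worked example with made-up values (only numpy's documented column_stack behaviour is assumed):

    import numpy as np

    # Made-up local-range table: one dimension column plus the two limit columns.
    table = {
        'pressure': [0.0, 50.0, 100.0],
        'datlim1':  [1.0, 2.0, 3.0],
        'datlim2':  [4.0, 5.0, 6.0],
    }

    dims, datlimz = [], []
    for key in table:
        if 'datlim' in key:          # skip the limit columns themselves
            continue
        dims.append(key)
        datlimz.append(table[key])

    datlimz = np.column_stack(datlimz)                              # shape (3, 1)
    datlim = np.column_stack([table['datlim1'], table['datlim2']])  # shape (3, 2)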