def _get_new_ctd_packet(self, length):

        rdt = RecordDictionaryTool(stream_definition_id=self.stream_def._id)
        #Explicitly make these numpy arrays...
        c = numpy.array([random.uniform(0.0, 75.0) for i in xrange(length)])
        t = numpy.array([random.uniform(-1.7, 21.0) for i in xrange(length)])
        p = numpy.array([random.lognormvariate(1, 2) for i in xrange(length)])
        lat = numpy.array(
            [random.uniform(-90.0, 90.0) for i in xrange(length)])
        lon = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)])
        h = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)])

        start_time = ntplib.system_to_ntp_time(time.time()) - (length + 1)
        tvar = numpy.array([start_time + i for i in xrange(1, length + 1)])

        rdt['time'] = tvar
        rdt['lat'] = lat
        rdt['lon'] = lon
        rdt['temp'] = t
        rdt['conductivity'] = c
        rdt['pressure'] = p

        #        rdt['coordinates'] = rdt0
        #        rdt['data'] = rdt1

        g = rdt.to_granule(data_producer_id=self.id)

        return g
Example 2
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        conductivity = rdt['conductivity']
        pressure = rdt['pressure']
        temperature = rdt['temp']

        longitude = rdt['lon'] if rdt['lon'] is not None else 0
        latitude = rdt['lat'] if rdt['lat'] is not None else 0

        sp = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)

        log.debug("Density algorithm calculated the sp (practical salinity) values: %s", sp)

        sa = SA_from_SP(sp, pressure, longitude, latitude)

        log.debug("Density algorithm calculated the sa (actual salinity) values: %s", sa)

        dens_value = rho(sa, temperature, pressure)

        for key, value in rdt.iteritems():
            if key in out_rdt:
                if key=='conductivity' or key=='temp' or key=='pressure':
                    continue
                out_rdt[key] = value[:]

        out_rdt['density'] = dens_value

        log.debug("Density algorithm returning density values: %s", out_rdt['density'])

        return out_rdt.to_granule()
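The density computation above is a fixed three-call chain: conductivity ratio to practical salinity, practical salinity to absolute salinity, then in-situ density. A minimal standalone sketch of that chain with synthetic inputs; the import path is an assumption (the examples only show the call signatures and the cte.C3515 constant):

    import numpy
    from seawater.gibbs import SP_from_cndr, SA_from_SP, rho, cte  # assumed import path

    conductivity = numpy.array([42.914, 40.0])   # mS/cm
    temperature = numpy.array([15.0, 10.0])      # deg C
    pressure = numpy.array([0.0, 100.0])         # dbar
    longitude, latitude = 0.0, 0.0

    sp = SP_from_cndr(r=conductivity / cte.C3515, t=temperature, p=pressure)  # practical salinity
    sa = SA_from_SP(sp, pressure, longitude, latitude)                        # absolute salinity
    density = rho(sa, temperature, pressure)                                  # in-situ density, kg/m^3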
Example 3
    def execute(input=None,
                context=None,
                config=None,
                params=None,
                state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        conductivity = rdt['conductivity']
        pressure = rdt['pressure']
        temperature = rdt['temp']

        sal_value = SP_from_cndr(r=conductivity / cte.C3515,
                                 t=temperature,
                                 p=pressure)

        log.debug(
            "Salinity algorithm calculated the sp (practical salinity) values: %s",
            sal_value)

        for key, value in rdt.iteritems():
            if key in out_rdt:
                if key == 'conductivity' or key == 'temp' or key == 'pressure':
                    continue
                out_rdt[key] = value[:]

        out_rdt['salinity'] = sal_value

        return out_rdt.to_granule()
Example 4
    def validate_multiple_vis_queue_messages(self, msg1, msg2):

        assertions = self.assertTrue

        # verify that the salinity in msg2 is a result of content from msg1
        rdt1 = RecordDictionaryTool.load_from_granule(msg1)
        rdt2 = RecordDictionaryTool.load_from_granule(msg2)

        # msg1 should not have salinity
        # assertions(rdt1['salinity'] == None)

        conductivity = rdt1['conductivity']
        pressure = rdt1['pressure']
        temperature = rdt1['temp']

        msg1_sal_value = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)
        msg2_sal_value = rdt2['salinity']
        b = msg1_sal_value == msg2_sal_value

        if isinstance(b,bool):
            assertions(b)
        else:
            assertions(b.all())

        return
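The isinstance(b, bool) branch above exists because == is overloaded: comparing plain Python scalars yields a single bool, while comparing numpy arrays yields an element-wise boolean array that must be collapsed with .all(). A minimal sketch of the two cases:

    import numpy

    scalar_cmp = 1.0 == 1.0                                   # plain bool
    array_cmp = numpy.array([1, 2]) == numpy.array([1, 2])    # element-wise ndarray

    assert isinstance(scalar_cmp, bool)
    assert not isinstance(array_cmp, bool)
    assert array_cmp.all()                                    # collapse to a single truth value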
Example 5
    def check_salinity_algorithm_execution(self, publish_granule,
                                           granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        sal_value = SP_from_cndr(r=conductivity / cte.C3515,
                                 t=temperature,
                                 p=pressure)

        out_salinity = output_rdt_transform['salinity']

        #-----------------------------------------------------------------------------
        # Check that the output data from the transform has the correct density values
        #-----------------------------------------------------------------------------
        self.assertTrue(numpy.array_equal(sal_value, out_salinity))
Example 6
    def execute(input=None,
                context=None,
                config=None,
                params=None,
                state=None):
        '''
        @param input Granule
        @retval result Granule
        '''

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        conductivity = rdt['conductivity']
        cond_value = (conductivity / 100000.0) - 0.5

        for key, value in rdt.iteritems():
            if key in out_rdt:
                out_rdt[key] = value[:]

        # Update the conductivity values
        out_rdt['conductivity'] = cond_value

        # build the granule for conductivity
        return out_rdt.to_granule()
Example 7
    def validate_multiple_vis_queue_messages(self, msg1, msg2):

        assertions = self.assertTrue

        # verify that the salinity in msg2 is a result of content from msg1
        rdt1 = RecordDictionaryTool.load_from_granule(msg1)
        rdt2 = RecordDictionaryTool.load_from_granule(msg2)

        # msg1 should not have salinity
        # assertions(rdt1['salinity'] == None)

        conductivity = rdt1['conductivity']
        pressure = rdt1['pressure']
        temperature = rdt1['temp']

        msg1_sal_value = SP_from_cndr(r=conductivity / cte.C3515,
                                      t=temperature,
                                      p=pressure)
        msg2_sal_value = rdt2['salinity']
        b = msg1_sal_value == msg2_sal_value

        if isinstance(b, bool):
            assertions(b)
        else:
            assertions(b.all())

        return
Example 8
    def check_density_algorithm_execution(self, publish_granule, granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        longitude = input_rdt_to_transform['lon']
        latitude = input_rdt_to_transform['lat']

        sp = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)
        sa = SA_from_SP(sp, pressure, longitude, latitude)
        dens_value = rho(sa, temperature, pressure)

        out_density = output_rdt_transform['density']

        log.debug("density output from the transform: %s", out_density)
        log.debug("values of density expected from the transform: %s", dens_value)

        numpy.testing.assert_array_almost_equal(out_density, dens_value, decimal=3)
Example 9
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params['stream_def_id'])

        # Fill the time values
        out_rdt['time'] = rdt['time']

        # The calibration coefficients
        temp_calibration_coeffs= params['calibration_coeffs']['temp_calibration_coeffs']
        pres_calibration_coeffs= params['calibration_coeffs']['pres_calibration_coeffs']
        cond_calibration_coeffs = params['calibration_coeffs']['cond_calibration_coeffs']

        log.debug("params['calibration_coeffs']: %s", params['calibration_coeffs'])

        # Set the temperature values for the output granule
        out_rdt = CTDBP_L1_TransformAlgorithm.calculate_temperature(    input_rdt = rdt,
                                                                        out_rdt = out_rdt,
                                                                        temp_calibration_coeffs= temp_calibration_coeffs )

        # Set the pressure values for the output granule
        out_rdt = CTDBP_L1_TransformAlgorithm.calculate_pressure(   input_rdt= rdt,
                                                                    out_rdt = out_rdt,
                                                                    pres_calibration_coeffs= pres_calibration_coeffs)

        # Set the conductivity values for the output granule
        # Note that since the conductivity calculation depends on whether TEMPWAT_L1, PRESWAT_L1 have been calculated, we need to do this last
        out_rdt = CTDBP_L1_TransformAlgorithm.calculate_conductivity(   input_rdt = rdt,
                                                                        out_rdt = out_rdt,
                                                                        cond_calibration_coeffs = cond_calibration_coeffs
        )

        # build the granule for the L1 stream
        return out_rdt.to_granule()
Example 10
    def handle_attribute_value_event(self, driver_event):
        if log.isEnabledFor(logging.TRACE):  # pragma: no cover
            # show driver_event as retrieved (driver_event.vals_dict might be large)
            log.trace("%r: driver_event = %s", self._platform_id, driver_event)
            log.trace("%r: vals_dict:\n%s",
                      self._platform_id, self._pp.pformat(driver_event.vals_dict))

        elif log.isEnabledFor(logging.DEBUG):  # pragma: no cover
            log.debug("%r: driver_event = %s", self._platform_id, driver_event.brief())

        stream_name = driver_event.stream_name

        publisher = self._data_publishers.get(stream_name, None)
        if not publisher:
            log.warn('%r: no publisher configured for stream_name=%r. '
                     'Configured streams are: %s',
                     self._platform_id, stream_name, self._data_publishers.keys())
            return

        param_dict = self._param_dicts[stream_name]
        stream_def = self._stream_defs[stream_name]

        if isinstance(stream_def, str):
            rdt = RecordDictionaryTool(param_dictionary=param_dict.dump(),
                                       stream_definition_id=stream_def)
        else:
            rdt = RecordDictionaryTool(stream_definition=stream_def)

        self._publish_granule_with_multiple_params(publisher, driver_event,
                                                   param_dict, rdt)
Example 11
    def _get_data(cls, config):
        new_flst = get_safe(config, 'constraints.new_files', [])
        hdr_cnt = get_safe(config, 'header_count', SlocumParser.DEFAULT_HEADER_SIZE)
        for f in new_flst:
            try:
                parser = SlocumParser(f[0], hdr_cnt)
                #CBM: Not in use yet...
    #            ext_dset_res = get_safe(config, 'external_dataset_res', None)
    #            t_vname = ext_dset_res.dataset_description.parameters['temporal_dimension']
    #            x_vname = ext_dset_res.dataset_description.parameters['zonal_dimension']
    #            y_vname = ext_dset_res.dataset_description.parameters['meridional_dimension']
    #            z_vname = ext_dset_res.dataset_description.parameters['vertical_dimension']
    #            var_lst = ext_dset_res.dataset_description.parameters['variables']

                max_rec = get_safe(config, 'max_records', 1)
                dprod_id = get_safe(config, 'data_producer_id', 'unknown data producer')
                #tx_yml = get_safe(config, 'taxonomy')
                #ttool = TaxyTool.load(tx_yml) #CBM: Assertion inside RDT.__setitem__ requires same instance of TaxyTool
                pdict = ParameterDictionary.load(get_safe(config, 'param_dictionary'))

                cnt = calculate_iteration_count(len(parser.sensor_map), max_rec)
                for x in xrange(cnt):
                    #rdt = RecordDictionaryTool(taxonomy=ttool)
                    rdt = RecordDictionaryTool(param_dictionary=pdict)

                    for name in parser.sensor_map:
                        d = parser.data_map[name][x*max_rec:(x+1)*max_rec]
                        rdt[name]=d

                    #g = build_granule(data_producer_id=dprod_id, taxonomy=ttool, record_dictionary=rdt)
                    g = rdt.to_granule()
                    yield g
            except SlocumParseException as spe:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error('Error parsing data file \'{0}\': {1}'.format(f, spe))
Example 12
    def recv_packet(self, packet, stream_route, stream_id):
        if packet == {}:
            return

        l0_values = RecordDictionaryTool.load_from_granule(packet)
        l1_values = RecordDictionaryTool(
            stream_definition_id=self.stream_definition_id)
        log.debug(
            "CTDBP L1 transform using L0 values: tempurature %s, pressure %s, conductivity %s",
            l0_values['temperature'], l0_values['pressure'],
            l0_values['conductivity'])

        #for key, value in 'lat', 'lon', 'time', ...:   <-- do we want to be a little more specific here?
        for key, value in l0_values.iteritems():
            if key in l1_values:
                l1_values[key] = value[:]

        l1_values['temp'] = self.calculate_temperature(l0=l0_values)
        l1_values['pressure'] = self.calculate_pressure(l0=l0_values)
        l1_values['conductivity'] = self.calculate_conductivity(l0=l0_values,
                                                                l1=l1_values)

        log.debug(
            'calculated L1 values: temp %s, pressure %s, conductivity %s',
            l1_values['temp'], l1_values['pressure'],
            l1_values['conductivity'])
        self.publisher.publish(msg=l1_values.to_granule())
Example 13
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        conductivity = rdt["conductivity"]
        pressure = rdt["pressure"]
        temperature = rdt["temp"]

        longitude = rdt["lon"] if rdt["lon"] is not None else 0
        latitude = rdt["lat"] if rdt["lat"] is not None else 0

        sp = SP_from_cndr(r=conductivity / cte.C3515, t=temperature, p=pressure)

        log.debug("Density algorithm calculated the sp (practical salinity) values: %s", sp)

        sa = SA_from_SP(sp, pressure, longitude, latitude)

        log.debug("Density algorithm calculated the sa (actual salinity) values: %s", sa)

        dens_value = rho(sa, temperature, pressure)

        for key, value in rdt.iteritems():
            if key in out_rdt:
                if key == "conductivity" or key == "temp" or key == "pressure":
                    continue
                out_rdt[key] = value[:]

        out_rdt["density"] = dens_value

        log.debug("Density algorithm returning density values: %s", out_rdt["density"])

        return out_rdt.to_granule()
Example 14
    def _get_new_ctd_packet(self, length):

        rdt = RecordDictionaryTool(stream_definition_id=self.stream_def._id)
        #Explicitly make these numpy arrays...
        c = numpy.array([random.uniform(0.0, 75.0) for i in xrange(length)])
        t = numpy.array([random.uniform(-1.7, 21.0) for i in xrange(length)])
        p = numpy.array([random.lognormvariate(1, 2) for i in xrange(length)])
        lat = numpy.array([random.uniform(-90.0, 90.0) for i in xrange(length)])
        lon = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)])
        h = numpy.array([random.uniform(0.0, 360.0) for i in xrange(length)])

        start_time = ntplib.system_to_ntp_time(time.time()) - (length + 1)
        tvar = numpy.array([start_time + i for i in xrange(1,length+1)])


        rdt['time'] = tvar
        rdt['lat'] = lat
        rdt['lon'] = lon
        rdt['temp'] = t
        rdt['conductivity'] = c
        rdt['pressure'] = p

#        rdt['coordinates'] = rdt0
#        rdt['data'] = rdt1

        g = rdt.to_granule(data_producer_id=self.id)

        return g
Example 15
    def check_density_algorithm_execution(self, publish_granule, granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        longitude = input_rdt_to_transform['lon']
        latitude = input_rdt_to_transform['lat']

        sp = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)
        sa = SA_from_SP(sp, pressure, longitude, latitude)
        dens_value = rho(sa, temperature, pressure)

        out_density = output_rdt_transform['density']

        #-----------------------------------------------------------------------------
        # Check that the output data from the transform has the correct density values
        #-----------------------------------------------------------------------------
        # Compare element-wise; the original `.all() == .all()` check passed
        # whenever both arrays were merely truthy.
        numpy.testing.assert_array_almost_equal(dens_value, out_density, decimal=3)
Example 16
    def check_density_algorithm_execution(self, publish_granule,
                                          granule_from_transform):

        #------------------------------------------------------------------
        # Calculate the correct density from the input granule data
        #------------------------------------------------------------------
        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        conductivity = input_rdt_to_transform['conductivity']
        pressure = input_rdt_to_transform['pressure']
        temperature = input_rdt_to_transform['temp']

        longitude = input_rdt_to_transform['lon']
        latitude = input_rdt_to_transform['lat']

        sp = SP_from_cndr(r=conductivity / cte.C3515,
                          t=temperature,
                          p=pressure)
        sa = SA_from_SP(sp, pressure, longitude, latitude)
        dens_value = rho(sa, temperature, pressure)

        out_density = output_rdt_transform['density']

        log.debug("density output from the transform: %s", out_density)
        log.debug("values of density expected from the transform: %s",
                  dens_value)

        numpy.testing.assert_array_almost_equal(out_density,
                                                dens_value,
                                                decimal=3)
Example 17
    def test_granule_publish(self):
        log.debug("test_granule_publish ")
        self.loggerpids = []


        #retrieve the param dict from the repository
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
        stream_definition_id = self.pubsubclient.create_stream_definition('parsed stream', parameter_dictionary_id=pdict_id)


        tdom, sdom = time_series_domain()

        dp_obj = IonObject(RT.DataProduct,
            name=str(uuid.uuid4()),
            description='ctd stream test',
            temporal_domain = tdom.dump(),
            spatial_domain = sdom.dump())

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=stream_definition_id)


        # Retrieve the id of the output stream of the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug( 'test_granule_publish: Data product streams1 = %s', stream_ids)

        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        #create the publisher from the stream route
        stream_route = self.pubsubclient.read_stream_route(stream_ids[0])
        publisher = StandaloneStreamPublisher(stream_ids[0], stream_route)

        # this is one sample from the ctd driver
        tomato = {"driver_timestamp": 3555971105.1268806, "instrument_id": "ABC-123", "pkt_format_id": "JSON_Data", "pkt_version": 1, "preferred_timestamp": "driver_timestamp", "quality_flag": "ok", "stream_name": "parsed", "values": [{"value": 22.9304, "value_id": "temp"}, {"value": 51.57381, "value_id": "conductivity"}, {"value": 915.551, "value_id": "pressure"}]}

        for value in tomato['values']:
            log.debug("test_granule_publish: Looping tomato values  key: %s    val: %s ", str(value['value']), str(value['value_id']))

            if value['value_id'] in rdt:
                rdt[value['value_id']] = numpy.array( [ value['value'] ] )
                log.debug("test_granule_publish: Added data item  %s  val: %s ", str(value['value']), str(value['value_id']) )

        g = rdt.to_granule()

        publisher.publish(g)

        gevent.sleep(3)

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)
  
        #--------------------------------------------------------------------------------
        # Cleanup data products
        #--------------------------------------------------------------------------------
        dp_ids, _ = self.rrclient.find_resources(restype=RT.DataProduct, id_only=True)

        for dp_id in dp_ids:
            self.dataproductclient.delete_data_product(dp_id)
Example 18
    def _get_data(cls, config):
        """
        Retrieves config['constraints']['count'] number of random samples of length config['constraints']['array_len']
        @param config Dict of configuration parameters - must contain ['constraints']['count'] and ['constraints']['array_len']
        """
        ext_dset_res = get_safe(config, 'external_dataset_res', None)

        # Get the Dataset object from the config (should have been instantiated in _init_acquisition_cycle)
        ds = get_safe(config, 'dataset_object')

        if ext_dset_res and ds:
            t_vname = ext_dset_res.dataset_description.parameters['temporal_dimension']
            x_vname = ext_dset_res.dataset_description.parameters['zonal_dimension']
            y_vname = ext_dset_res.dataset_description.parameters['meridional_dimension']
            z_vname = ext_dset_res.dataset_description.parameters['vertical_dimension']
            var_lst = ext_dset_res.dataset_description.parameters['variables']

            t_slice = get_safe(config, 'constraints.temporal_slice', (slice(0, 1)))
            #TODO: Using 'eval' here is BAD - need to find a less sketchy way to pass constraints
            if isinstance(t_slice, str):
                t_slice = eval(t_slice)

            lon = ds.variables[x_vname][:]
            lat = ds.variables[y_vname][:]
            z = ds.variables[z_vname][:]

            t_arr = ds.variables[t_vname][t_slice]
            data_arrays = {}
            for varn in var_lst:
                data_arrays[varn] = ds.variables[varn][t_slice]

            max_rec = get_safe(config, 'max_records', 1)
            #dprod_id = get_safe(config, 'data_producer_id', 'unknown data producer')

            stream_def = get_safe(config, 'stream_def')

            cnt = calculate_iteration_count(t_arr.size, max_rec)
            for x in xrange(cnt):
                ta = t_arr[x * max_rec:(x + 1) * max_rec]

                # Make a 'master' RecDict
                rdt = RecordDictionaryTool(stream_definition_id=stream_def)

                # Assign coordinate values to the RecDict
                rdt[x_vname] = lon
                rdt[y_vname] = lat
                rdt[z_vname] = z

                # Assign data values to the RecDict
                rdt[t_vname] = ta
                for key, arr in data_arrays.iteritems():
                    d = arr[x * max_rec:(x + 1) * max_rec]
                    rdt[key] = d

                g = rdt.to_granule()
                yield g

            ds.close()
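calculate_iteration_count drives the chunking in the loop above: t_arr is split into max_rec-sized slices, one granule per slice. Its source isn't shown here; the sketch below assumes it is plain ceiling division, which matches how the slices are taken:

    def calculate_iteration_count_sketch(total_records, max_rec):
        # One iteration per full chunk, plus one more for any remainder.
        cnt = total_records // max_rec
        if total_records % max_rec:
            cnt += 1
        return cnt

    assert calculate_iteration_count_sketch(10, 4) == 3   # slices 0:4, 4:8, 8:10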
Example 19
    def test_granule_publish(self):
        log.debug("test_granule_publish ")
        self.loggerpids = []


        #retrieve the param dict from the repository
        pdict_id = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict',id_only=True)
        stream_definition_id = self.pubsubclient.create_stream_definition('parsed stream', parameter_dictionary_id=pdict_id)



        dp_obj = IonObject(RT.DataProduct,
            name=str(uuid.uuid4()),
            description='ctd stream test')

        data_product_id1 = self.dpclient.create_data_product(data_product=dp_obj, stream_definition_id=stream_definition_id)


        # Retrieve the id of the output stream of the out Data Product
        stream_ids, _ = self.rrclient.find_objects(data_product_id1, PRED.hasStream, None, True)
        log.debug( 'test_granule_publish: Data product streams1 = %s', stream_ids)

        pid = self.create_logger('ctd_parsed', stream_ids[0] )
        self.loggerpids.append(pid)

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        #create the publisher from the stream route
        stream_route = self.pubsubclient.read_stream_route(stream_ids[0])
        publisher = StandaloneStreamPublisher(stream_ids[0], stream_route)

        # this is one sample from the ctd driver
        tomato = {"driver_timestamp": 3555971105.1268806, "instrument_id": "ABC-123", "pkt_format_id": "JSON_Data", "pkt_version": 1, "preferred_timestamp": "driver_timestamp", "quality_flag": "ok", "stream_name": "parsed", "values": [{"value": 22.9304, "value_id": "temp"}, {"value": 51.57381, "value_id": "conductivity"}, {"value": 915.551, "value_id": "pressure"}]}

        for value in tomato['values']:
            log.debug("test_granule_publish: Looping tomato values  key: %s    val: %s ", str(value['value']), str(value['value_id']))

            if value['value_id'] in rdt:
                rdt[value['value_id']] = numpy.array( [ value['value'] ] )
                log.debug("test_granule_publish: Added data item  %s  val: %s ", str(value['value']), str(value['value_id']) )

        g = rdt.to_granule()

        publisher.publish(g)

        gevent.sleep(3)

        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)
  
        #--------------------------------------------------------------------------------
        # Cleanup data products
        #--------------------------------------------------------------------------------
        dp_ids, _ = self.rrclient.find_resources(restype=RT.DataProduct, id_only=True)

        for dp_id in dp_ids:
            self.dataproductclient.delete_data_product(dp_id)
Example 20
    def check_pres_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['pressure']
        input_data = input_rdt_to_transform['pressure']

        self.assertTrue(numpy.array_equal(input_data, output_data))
Example 21
    def check_cond_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform["conductivity"]
        input_data = input_rdt_to_transform["conductivity"]

        self.assertTrue(numpy.array_equal((input_data / 100000.0) - 0.5, output_data))
Example 22
    def check_temp_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['temp']
        input_data = input_rdt_to_transform['temp']

        self.assertTrue(numpy.array_equal((input_data / 10000.0) - 10, output_data))
Example 23
    def check_pres_algorithm_execution(self, publish_granule, granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(granule_from_transform)

        output_data = output_rdt_transform['pressure']
        input_data = input_rdt_to_transform['pressure']

        self.assertTrue(numpy.array_equal((input_data/ 100.0) + 0.5,output_data))
Example 24
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        new_flst = get_safe(config, 'constraints.new_files', [])
        parser_mod = get_safe(config, 'parser_mod', '')
        parser_cls = get_safe(config, 'parser_cls', '')

        module = __import__(parser_mod, fromlist=[parser_cls])
        classobj = getattr(module, parser_cls)

        for f in new_flst:
            try:
                size = os.stat(f[0]).st_size
                try:
                    #find the new data check index in config
                    index = -1
                    for ndc in config['set_new_data_check']:
                        if ndc[0] == f[0]:
                            index = config['set_new_data_check'].index(ndc)
                            break
                except:
                    log.error('File name not found in attachment')

                parser = classobj(f[0], f[3])

                max_rec = get_safe(config, 'max_records', 1)
                stream_def = get_safe(config, 'stream_def')
                while True:
                    particles = parser.get_records(max_count=max_rec)
                    if not particles:
                        break

                    rdt = RecordDictionaryTool(stream_definition_id=stream_def)

                    populate_rdt(rdt, particles)

                    g = rdt.to_granule()

                    # TODO: record files already read for future additions...
                    #                    #update new data check with the latest file position
                    if 'set_new_data_check' in config and index > -1:
                        # WRONG: should only record this after file finished parsing,
                        # but may not have another yield at that point to trigger update
                        config['set_new_data_check'][index] = (f[0], f[1],
                                                               f[2], size)

                    yield g

#                parser.close()

            except Exception as ex:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error('Error parsing data file \'{0}\': {1}'.format(f, ex))
Example 25
    def _get_data(cls, config):
        """
        Iterable function that acquires data from a source iteratively based on constraints provided by config
        Passed into BaseDataHandler._publish_data and iterated to publish samples.
        @param config dict containing configuration parameters, may include constraints, formatters, etc
        @retval an iterable that returns well-formed Granule objects on each iteration
        """
        new_flst = get_safe(config, "constraints.new_files", [])
        parser_mod = get_safe(config, "parser_mod", "")
        parser_cls = get_safe(config, "parser_cls", "")

        module = __import__(parser_mod, fromlist=[parser_cls])
        classobj = getattr(module, parser_cls)

        for f in new_flst:
            try:
                size = os.stat(f[0]).st_size
                try:
                    # find the new data check index in config
                    index = -1
                    for ndc in config["set_new_data_check"]:
                        if ndc[0] == f[0]:
                            index = config["set_new_data_check"].index(ndc)
                            break
                except:
                    log.error("File name not found in attachment")

                parser = classobj(f[0], f[3])

                max_rec = get_safe(config, "max_records", 1)
                stream_def = get_safe(config, "stream_def")
                while True:
                    particles = parser.get_records(max_count=max_rec)
                    if not particles:
                        break

                    rdt = RecordDictionaryTool(stream_definition_id=stream_def)

                    populate_rdt(rdt, particles)

                    g = rdt.to_granule()

                    # TODO: record files already read for future additions...
                    #                    #update new data check with the latest file position
                    if "set_new_data_check" in config and index > -1:
                        # WRONG: should only record this after file finished parsing,
                        # but may not have another yield at that point to trigger update
                        config["set_new_data_check"][index] = (f[0], f[1], f[2], size)

                    yield g

            #                parser.close()

            except Exception as ex:
                # TODO: Decide what to do here, raise an exception or carry on
                log.error("Error parsing data file '{0}': {1}".format(f, ex))
Example 26
    def test_stream_ingestion_worker(self):
        self.start_ingestion_worker()

        context_ids, time_ctxt = self._create_param_contexts()
        pdict_id = self.dataset_management_client.create_parameter_dictionary(
            name='stream_ingestion_pdict',
            parameter_context_ids=context_ids,
            temporal_context='ingestion_timestamp')
        self.addCleanup(
            self.dataset_management_client.delete_parameter_dictionary,
            pdict_id)

        dataset_id = self.dataset_management_client.create_dataset(
            name='fake_dataset',
            description='fake_dataset',
            stream_id=self.stream_id,
            spatial_domain=self.spatial_dom.dump(),
            temporal_domain=self.time_dom.dump(),
            parameter_dictionary_id=pdict_id)
        self.addCleanup(self.dataset_management_client.delete_dataset,
                        dataset_id)

        self.cov = self._create_coverage(dataset_id=dataset_id,
                                         parameter_dict_id=pdict_id,
                                         time_dom=self.time_dom,
                                         spatial_dom=self.spatial_dom)
        self.addCleanup(self.cov.close)

        rdt = RecordDictionaryTool(stream_definition_id=self.stream_def_id)
        rdt['conductivity'] = 1
        rdt['pressure'] = 2
        rdt['salinity'] = 3

        self.start_listener(dataset_id)

        self.publisher.publish(rdt.to_granule())
        self.data_modified = Event()
        self.data_modified.wait(30)

        cov = self.get_coverage(dataset_id)
        self.assertIsNotNone(cov.get_parameter_values('raw'))

        deserializer = IonObjectDeserializer(obj_registry=get_obj_registry())

        granule = retrieve_stream(dataset_id)
        rdt_complex = RecordDictionaryTool.load_from_granule(granule)
        rdt_complex['raw'] = [
            deserializer.deserialize(i) for i in rdt_complex['raw']
        ]
        for gran in rdt_complex['raw']:
            rdt_new = RecordDictionaryTool.load_from_granule(gran)
            self.assertIn(1, rdt_new['conductivity'])
            self.assertIn(2, rdt_new['pressure'])
            self.assertIn(3, rdt_new['salinity'])

        cov.close()
Example 27
    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        for field in rdt:
            rdt[field] = numpy.array([random.uniform(0.0, 75.0) for i in xrange(length)])

        g = rdt.to_granule()

        return g
Example 28
    def test_event_transform_worker(self):
        self.data_process_objs = []
        self._output_stream_ids = []
        self.event_verified = Event()


        # test that a data process (type: data-product-in / event-out) can be defined and launched.
        # verify that event fields are correctly populated


        self.parameter_dict_id = self.dataset_management_client.read_parameter_dictionary_by_name(name='ctd_parsed_param_dict', id_only=True)

        # create the StreamDefinition
        self.stream_def_id = self.pubsub_client.create_stream_definition(name='stream_def', parameter_dictionary_id=self.parameter_dict_id)
        self.addCleanup(self.pubsub_client.delete_stream_definition, self.stream_def_id)

        # create the DataProduct
        input_dp_obj = IonObject(  RT.DataProduct, name='input_data_product', description='input test stream',
                                             temporal_domain = self.time_dom.dump(),  spatial_domain = self.spatial_dom.dump())
        self.input_dp_id = self.dataproductclient.create_data_product(data_product=input_dp_obj,  stream_definition_id=self.stream_def_id)

        # retrieve the Stream for this data product
        stream_ids, assoc_ids = self.rrclient.find_objects(self.input_dp_id, PRED.hasStream, RT.Stream, True)
        self.stream_id = stream_ids[0]

        # create the DPD and two DPs
        self.event_data_process_id = self.create_event_data_processes()

        # retrieve subscription from data process
        subscription_objs, _ = self.rrclient.find_objects(subject=self.event_data_process_id, predicate=PRED.hasSubscription, object_type=RT.Subscription, id_only=False)
        log.debug('test_event_transform_worker subscription_obj:  %s', subscription_objs[0])

        # create a queue to catch the published granules
        self.subscription_id = self.pubsub_client.create_subscription(name='parsed_subscription', stream_ids=[self.stream_id], exchange_name=subscription_objs[0].exchange_name)
        self.addCleanup(self.pubsub_client.delete_subscription, self.subscription_id)

        self.pubsub_client.activate_subscription(self.subscription_id)
        self.addCleanup(self.pubsub_client.deactivate_subscription, self.subscription_id)

        stream_route = self.pubsub_client.read_stream_route(self.stream_id)
        self.publisher = StandaloneStreamPublisher(stream_id=self.stream_id, stream_route=stream_route )

        self.start_event_transform_listener()

        self.data_modified = Event()

        rdt = RecordDictionaryTool(stream_definition_id=self.stream_def_id)
        rdt['time']         = [0] # time should always come first
        rdt['conductivity'] = [1]
        rdt['pressure']     = [2]
        rdt['salinity']     = [8]

        self.publisher.publish(rdt.to_granule())

        self.assertTrue(self.event_verified.wait(self.wait_time))
Example 29
    def recv_packet(self, msg, stream_route, stream_id):
        ''' receive packet for ingestion '''
        log.debug('received granule for stream %s', stream_id)

        if msg == {}:
            log.error('Received empty message from stream: %s', stream_id)
            return
        # Message validation
        if not isinstance(msg, Granule):
            log.error('Ingestion received a message that is not a granule: %s', msg)
            return


        rdt = RecordDictionaryTool.load_from_granule(msg)
        if rdt is None:
            log.error('Invalid granule (no RDT) for stream %s', stream_id)
            return
        if not len(rdt):
            log.debug('Empty granule for stream %s', stream_id)
            return

        dp_id_list = self.retrieve_dataprocess_for_stream(stream_id)

        for dp_id in dp_id_list:

            function, argument_list = self.retrieve_function_and_define_args(dp_id)

            args = []
            rdt = RecordDictionaryTool.load_from_granule(msg)

            #create the input arguments list
            #todo: this logic is tied to the example function, generalize
            for func_param, record_param in argument_list.iteritems():
                args.append(rdt[record_param])
            try:
                #run the calc
                #todo: nothing in the data process resource to specify multi-out map
                result = function(*args)

                out_stream_definition, output_parameter = self.retrieve_dp_output_params(dp_id)

                rdt = RecordDictionaryTool(stream_definition_id=out_stream_definition)
                publisher = self._publisher_map.get(dp_id,'')

                rdt[ output_parameter ] = result

                if publisher:
                    publisher.publish(rdt.to_granule())
                else:
                    log.error('Publisher not found for data process %s', dp_id)

                self.update_dp_metrics( dp_id )

            except ImportError:
                log.error('Error running transform')
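One caveat in the loop above: the positional argument list is built by iterating argument_list.iteritems(), so it only lines up with the wrapped function's signature if the dictionary yields entries in signature order, which plain Python 2 dicts do not guarantee. A hypothetical sketch of the same mapping done by keyword instead (all names here are illustrative, not from the source):

    def example_salinity(conductivity, pressure, temp):
        # Placeholder standing in for the registered transform function.
        return conductivity - pressure + temp

    # Maps function parameter name -> record dictionary key.
    argument_list = {'conductivity': 'conductivity', 'pressure': 'pressure', 'temp': 'temp'}
    record = {'conductivity': 50.0, 'pressure': 10.0, 'temp': 15.0}

    # Binding by keyword makes dict iteration order irrelevant.
    kwargs = dict((func_param, record[record_param])
                  for func_param, record_param in argument_list.items())
    result = example_salinity(**kwargs)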
Example 30
    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        for field in rdt:
            rdt[field] = numpy.array(
                [random.uniform(0.0, 75.0) for i in xrange(length)])

        g = rdt.to_granule()

        return g
Example 31
    def test_transform_worker(self):
        self.loggerpids = []
        self.data_process_objs = []
        self._output_stream_ids = []

        self.start_transform_worker()

        self.parameter_dict_id = self.dataset_management_client.read_parameter_dictionary_by_name(name='ctd_parsed_param_dict', id_only=True)

        # create the StreamDefinition
        self.stream_def_id = self.pubsub_client.create_stream_definition(name='stream_def', parameter_dictionary_id=self.parameter_dict_id)
        self.addCleanup(self.pubsub_client.delete_stream_definition, self.stream_def_id)

        # create the DataProduct
        input_dp_obj = IonObject(  RT.DataProduct, name='input_data_product', description='input test stream',
                                             temporal_domain = self.time_dom.dump(),  spatial_domain = self.spatial_dom.dump())
        self.input_dp_id = self.dataproductclient.create_data_product(data_product=input_dp_obj,  stream_definition_id=self.stream_def_id)

        #retrieve the Stream for this data product
        stream_ids, assoc_ids = self.rrclient.find_objects(self.input_dp_id, PRED.hasStream, RT.Stream, True)
        self.stream_id = stream_ids[0]

        #create a queue to catch the published granules
        self.subscription_id = self.pubsub_client.create_subscription(name='parsed_subscription', stream_ids=[self.stream_id], exchange_name='parsed_subscription')
        self.addCleanup(self.pubsub_client.delete_subscription, self.subscription_id)

        self.pubsub_client.activate_subscription(self.subscription_id)
        self.addCleanup(self.pubsub_client.deactivate_subscription, self.subscription_id)

        stream_route = self.pubsub_client.read_stream_route(self.stream_id)
        self.publisher = StandaloneStreamPublisher(stream_id=self.stream_id, stream_route=stream_route )

        self.start_event_listener()

        self.dp_list = self.create_data_processes()

        self.data_modified = Event()
        self.data_modified.wait(5)

        rdt = RecordDictionaryTool(stream_definition_id=self.stream_def_id)
        rdt['time']         = [0] # time should always come first
        rdt['conductivity'] = [1]
        rdt['pressure']     = [2]
        rdt['salinity']     = [8]

        self.publisher.publish(rdt.to_granule())


        self.data_modified.wait(5)

        # Cleanup processes
        for pid in self.loggerpids:
            self.processdispatchclient.cancel_process(pid)
Example 32
    def _get_new_ctd_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)
        rdt['time'] = numpy.arange(self.i, self.i+length)

        for field in rdt:
            if isinstance(rdt._pdict.get_context(field).param_type, QuantityType):
                rdt[field] = numpy.array([random.uniform(0.0,75.0)  for i in xrange(length)])

        g = rdt.to_granule()
        self.i+=length

        return g
Example 33
    def check_temp_algorithm_execution(self, publish_granule,
                                       granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        output_data = output_rdt_transform['temp']
        input_data = input_rdt_to_transform['temp']

        self.assertTrue(
            numpy.array_equal(((input_data / 10000.0) - 10), output_data))
Example 34
    def check_cond_algorithm_execution(self, publish_granule,
                                       granule_from_transform):

        input_rdt_to_transform = RecordDictionaryTool.load_from_granule(
            publish_granule)
        output_rdt_transform = RecordDictionaryTool.load_from_granule(
            granule_from_transform)

        output_data = output_rdt_transform['conductivity']
        input_data = input_rdt_to_transform['conductivity']

        self.assertTrue(
            numpy.array_equal((input_data / 100000.0) - 0.5, output_data))
Example 35
    def _get_new_ctd_L0_packet(self, stream_definition_id, length):

        rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)
        rdt['time'] = numpy.arange(self.i, self.i+length)

        for field in rdt:
            if isinstance(rdt._pdict.get_context(field).param_type, QuantityType):
                rdt[field] = numpy.array([random.uniform(0.0,75.0)  for i in xrange(length)])

        g = rdt.to_granule()
        self.i+=length

        return g
Example 36
    def _publish_granules(self, stream_id=None, stream_route=None, values=None, number=None, length=None):

        pub = StandaloneStreamPublisher(stream_id, stream_route)

        stream_def = self.pubsub_management.read_stream_definition(stream_id=stream_id)
        stream_def_id = stream_def._id
        rdt = RecordDictionaryTool(stream_definition_id=stream_def_id)

        for i in xrange(number):
            rdt['input_voltage'] = values
            rdt['preferred_timestamp'] = numpy.array([random.uniform(0,1000)  for l in xrange(length)])
            g = rdt.to_granule()
            pub.publish(g)
Example 37
    def _trigger_func(self, stream_id):

        self.last_time = 0

        parameter_dictionary = self._create_parameter()

        #@todo - add lots of comments in here
        while not self.finished.is_set():

            length = 10

            #Explicitly make these numpy arrays...
            c = numpy.array(
                [random.uniform(0.0, 75.0) for i in xrange(length)])

            t = numpy.array(
                [random.uniform(-1.7, 21.0) for i in xrange(length)])

            p = numpy.array(
                [random.lognormvariate(1, 2) for i in xrange(length)])

            lat = numpy.array(
                [random.uniform(-90.0, 90.0) for i in xrange(length)])

            lon = numpy.array(
                [random.uniform(0.0, 360.0) for i in xrange(length)])

            tvar = numpy.array(
                [self.last_time + i for i in xrange(1, length + 1)])

            self.last_time = max(tvar)

            rdt = RecordDictionaryTool(param_dictionary=parameter_dictionary)

            rdt['temp'] = t  # ExampleDataProducer_algorithm.execute(t)
            rdt['conductivity'] = c  # ExampleDataProducer_algorithm.execute(c)
            rdt['pressure'] = p  # ExampleDataProducer_algorithm.execute(p)
            rdt['time'] = tvar
            rdt['lat'] = lat
            rdt['lon'] = lon

            log.info("logging published Record Dictionary:\n %s",
                     rdt.pretty_print())

            g = rdt.to_granule()

            log.info('Sending %d values!' % length)
            if (isinstance(g, Granule)):
                self.publish(g, stream_id)

            time.sleep(2.0)
Example 38
    def _trigger_func(self, stream_id):
        log.debug("SimpleCtdDataProducer:_trigger_func ")

        parameter_dictionary = get_param_dict('ctd_parsed_param_dict')
        rdt = RecordDictionaryTool(param_dictionary=parameter_dictionary)

        # The base SimpleCtdPublisher provides a gevent Event that indicates when the process is being
        # shut down. We can use a simple pattern here to accomplish both a safe shutdown of this loop
        # when the process shuts down *AND* do the timeout between loops in a very safe/efficient fashion.
        #
        # By using this instead of a sleep in the loop itself, we can immediately interrupt this loop when
        # the process is being shut down instead of having to wait for the sleep to terminate.
        while not self.finished.wait(timeout=2):

            length = 10

            #Explicitly make these numpy arrays...
            c = numpy.array(
                [random.uniform(0.0, 75.0) for i in xrange(length)])

            t = numpy.array(
                [random.uniform(-1.7, 21.0) for i in xrange(length)])

            p = numpy.array(
                [random.lognormvariate(1, 2) for i in xrange(length)])

            lat = numpy.array(
                [random.uniform(-90.0, 90.0) for i in xrange(length)])

            lon = numpy.array(
                [random.uniform(0.0, 360.0) for i in xrange(length)])

            h = numpy.array(
                [random.uniform(0.0, 360.0) for i in xrange(length)])

            tvar = numpy.array(
                [self.last_time + i for i in xrange(1, length + 1)])

            self.last_time = max(tvar)

            rdt['time'] = tvar
            rdt['lat'] = lat
            rdt['lon'] = lon
            rdt['temp'] = t
            rdt['conductivity'] = c
            rdt['pressure'] = p

            g = rdt.to_granule()
            log.debug('SimpleCtdDataProducer: Sending %d values!' % length)
            self.publisher.publish(g)
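The shutdown pattern the comment in Example 38 describes is worth seeing in isolation: Event.wait(timeout) doubles as an interruptible sleep, returning False on timeout and True as soon as the event is set, so the loop exits immediately on shutdown instead of draining a sleep. A minimal sketch using gevent:

    import gevent
    from gevent.event import Event

    finished = Event()

    def producer_loop():
        ticks = 0
        while not finished.wait(timeout=2):   # sleeps up to 2s, wakes instantly on set()
            ticks += 1
        return ticks

    worker = gevent.spawn(producer_loop)
    gevent.sleep(5)
    finished.set()                            # interrupts the wait; no sleep left to drain
    worker.join()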
Example 39
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        pressure = rdt['pressure']
        pres_value = (pressure / 100.0) + 0.5

        for key, value in rdt.iteritems():
            if key in out_rdt:
                out_rdt[key] = value[:]

        out_rdt['pressure'] = pres_value

        return out_rdt.to_granule()
Example 40
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        pressure = rdt['pressure']
        pres_value = (pressure / 100.0) + 0.5

        for key, value in rdt.iteritems():
            if key in out_rdt:
                out_rdt[key] = value[:]

        out_rdt['pressure'] = pres_value

        return out_rdt.to_granule()
Example 41
    def execute(input=None, context=None, config=None, params=None, state=None):

        rdt = RecordDictionaryTool.load_from_granule(input)
        out_rdt = RecordDictionaryTool(stream_definition_id=params)

        temperature = rdt['temp']
        temp_value = (temperature / 10000.0) - 10

        for key, value in rdt.iteritems():
            if key in out_rdt:
                out_rdt[key] = value[:]

        out_rdt['temp'] = temp_value

        return out_rdt.to_granule()
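Examples 39 through 41 apply the same shape of linear rescaling to pressure and temperature, and Example 6 does the same for conductivity. The three L0-to-L1 conversions collected in one place, with the scale factors taken directly from those examples:

    import numpy

    def l0_to_l1(conductivity, temp, pressure):
        # Scale factors as used by the transforms above.
        cond_l1 = (conductivity / 100000.0) - 0.5
        temp_l1 = (temp / 10000.0) - 10
        pres_l1 = (pressure / 100.0) + 0.5
        return cond_l1, temp_l1, pres_l1

    c, t, p = l0_to_l1(numpy.array([100000.0]),
                       numpy.array([200000.0]),
                       numpy.array([1000.0]))
    # c -> [0.5], t -> [10.0], p -> [10.5]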
Example 42
    def add_granule(self, stream_id, granule):
        '''
        Appends the granule's data to the coverage and persists it.
        '''
        #--------------------------------------------------------------------------------
        # Coverage determination and appending
        #--------------------------------------------------------------------------------
        dataset_id = self.get_dataset(stream_id)
        if not dataset_id:
            log.error('No dataset could be determined on this stream: %s', stream_id)
            return
        coverage = self.get_coverage(stream_id)
        if not coverage:
            log.error('Could not persist coverage from granule, coverage is None')
            return
        #--------------------------------------------------------------------------------
        # Actual persistence
        #-------------------------------------------------------------------------------- 
        rdt = RecordDictionaryTool.load_from_granule(granule)
        elements = len(rdt)
        if not elements:
            return
        coverage.insert_timesteps(elements)
        start_index = coverage.num_timesteps - elements

        for k,v in rdt.iteritems():
            if k == 'image_obj':
                log.trace( '%s:', k)
            else:
                log.trace( '%s: %s', k, v)

            slice_ = slice(start_index, None)
            coverage.set_parameter_values(param_name=k, tdoa=slice_, value=v)
            coverage.flush()
Example 43
    def validate_highcharts_transform_results(self, results):

        assertions = self.assertTrue

        # if it's just one granule, wrap it in a list so the following for loop covers both cases
        if isinstance(results, Granule):
            results = [results]

        for g in results:

            if isinstance(g,Granule):

                rdt = RecordDictionaryTool.load_from_granule(g)
                hc_data_arr = get_safe(rdt, 'hc_data')

                if hc_data_arr is None:
                    log.debug("hc_data in granule is None")
                    continue

                assertions(len(hc_data_arr) >= 0) # Need to come up with a better check

                hc_data = hc_data_arr[0]
                assertions(len(hc_data) >= 0)

                assertions(len(hc_data[0]["name"]) >= 0)
                assertions(len(hc_data[0]["data"]) >= 0)
Example 44
    def validate_output_granule(self, msg, route, stream_id):
        self.assertIn( stream_id, self._output_stream_ids)

        rdt = RecordDictionaryTool.load_from_granule(msg)
        log.debug('validate_output_granule  rdt: %s', rdt)
        sal_val = rdt['salinity']
        np.testing.assert_array_equal(sal_val, np.array([3]))
Example 45
    def execute(input=None,
                context=None,
                config=None,
                params=None,
                state=None):
        """
        @param input granule
        @retval result_list list of dictionaries containing granules as values
        """

        result_list = []
        for x in input:
            rdt = RecordDictionaryTool.load_from_granule(x)

            conductivity = rdt['conductivity']
            pressure = rdt['pressure']
            temperature = rdt['temperature']
            time = rdt['time']

            # build the granule for conductivity, temperature and pressure
            granule = ctdbp_L0_algorithm._build_granule(
                stream_definition_id=params['L0_stream'],
                field_names=[
                    'conductivity', 'pressure', 'temperature', 'time'
                ],  # these are the field names for the output record dictionary
                values=[conductivity, pressure, temperature, time])

            result_list.append(granule)

        return result_list
Example n. 46
    def validate_mpl_graphs_transform_results(self, results):

        cc = self.container
        assertions = self.assertTrue

        # if it's just one granule, wrap it in a list so the loop below handles both cases
        if isinstance(results, Granule):
            results = [results]

        found_data = False
        for g in results:
            if isinstance(g, Granule):
                rdt = RecordDictionaryTool.load_from_granule(g)

                graphs = get_safe(rdt, 'matplotlib_graphs')

                if graphs is None:
                    continue

                for graph in graphs[0]:

                    # At this point only dictionaries containing image data should be passed
                    # For some reason non-dictionary values are filtering through.
                    if not isinstance(graph, dict):
                        continue

                    assertions(
                        graph['viz_product_type'] == 'matplotlib_graphs')
                    # check to see if the list (numpy array) contains actual images
                    assertions(
                        imghdr.what(graph['image_name'], h=graph['image_obj'])
                        == 'png')
                    found_data = True
        return found_data
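
The imghdr check above sniffs the image bytes rather than a file on disk. A quick standalone illustration; the payload below is just the standard PNG signature padded with zeros:

import imghdr

# imghdr.what(filename, h=bytes) ignores the filename when h is supplied
png_bytes = b'\x89PNG\r\n\x1a\n' + b'\x00' * 16
assert imghdr.what('image_name_is_ignored.png', h=png_bytes) == 'png'
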
Example n. 47
    def validate_google_dt_transform_results(self, results):

        cc = self.container
        assertions = self.assertTrue

        # if it's just one granule, wrap it in a list so the loop below handles both cases
        if isinstance(results, Granule):
            results = [results]

        for g in results:

            if isinstance(g, Granule):

                tx = TaxyTool.load_from_granule(g)
                rdt = RecordDictionaryTool.load_from_granule(g)

                gdt_data = get_safe(rdt, 'google_dt_components')

                # If this granule does not contain google dt data, skip it
                if gdt_data is None:
                    continue

                gdt = gdt_data[0]

                assertions(gdt['viz_product_type'] == 'google_dt')
                assertions(len(gdt['data_description']) > 0)  # Need to come up with a better check
                assertions(len(gdt['data_content']) > 0)
Example n. 48
    def recv_packet(self, msg, stream_route, stream_id):
        '''
        The consumer callback to parse and manage the granule.
        The message is ACK'd once the function returns
        '''
        log.trace('received granule for stream %s', stream_id)

        if msg == {}:
            log.error('Received empty message from stream: %s', stream_id)
            return
        # Message validation
        if not isinstance(msg, Granule):
            log.error('Ingestion received a message that is not a granule: %s',
                      msg)
            return

        rdt = RecordDictionaryTool.load_from_granule(msg)
        if rdt is None:
            log.error('Invalid granule (no RDT) for stream %s', stream_id)
            return
        if not len(rdt):
            log.debug('Empty granule for stream %s', stream_id)
            return

        self.persist_or_timeout(stream_id, rdt)
    def execute(input=None, context=None, config=None, params=None, state=None):
        """
        Find whether the input data has values that are out of range

        @param input granule
        @param context parameter context
        @param config DotDict
        @param params list
        @param state
        @return bad_values, bad_value_times tuple of lists
        """

        rdt = RecordDictionaryTool.load_from_granule(input)

        # Retrieve the variable name, the timestamp field name, and the range of valid values from the config
        valid_values = config.get_safe('valid_values', [-100, 100])
        variable_name = config.get_safe('variable_name', 'input_voltage')
        time_field_name = config.get_safe('time_field_name', 'preferred_timestamp')

        # These lists collect the bad values and the timestamps at which they occurred
        bad_values = []
        bad_value_times = []

        # retrieve the values and the times from the record dictionary
        values = rdt[variable_name][:]
        times = rdt[time_field_name][:]

        for val, t in zip(values, times):
            if val < valid_values[0] or val > valid_values[1]:
                bad_values.append(val)
                bad_value_times.append(t)

        # return the list of bad values and their timestamps
        return bad_values, bad_value_times
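
The same range check, stripped of the granule and config plumbing, on plain numpy arrays. The values are made up so that one violation falls at each end of the range:

import numpy as np

values = np.array([-150.0, 12.0, 99.0, 250.0])
times = np.array([0.0, 1.0, 2.0, 3.0])
valid_values = [-100, 100]

bad_values = []
bad_value_times = []
for val, t in zip(values, times):
    if val < valid_values[0] or val > valid_values[1]:
        bad_values.append(val)
        bad_value_times.append(t)

assert bad_values == [-150.0, 250.0]
assert bad_value_times == [0.0, 3.0]
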
Example n. 53
    def _execute_transform(self, msg, streams):
        stream_in_id, stream_out_id = streams
        stream_def_in = self.read_stream_def(stream_in_id)
        stream_def_out = self.read_stream_def(stream_out_id)

        rdt_temp = self._merge_rdt(stream_def_in, stream_def_out)
        
        rdt_in = RecordDictionaryTool.load_from_granule(msg)
        for field in rdt_temp.fields:
            if not isinstance(rdt_temp._pdict.get_context(field).param_type, ParameterFunctionType):
                try:
                    rdt_temp[field] = rdt_in[field]
                except KeyError:
                    pass

        rdt_temp.fetch_lookup_values()

        for lookup_field in rdt_temp.lookup_values():
            stored_value = self._get_lookup_value(rdt_temp.context(lookup_field).lookup_value)
            if stored_value is not None:
                rdt_temp[lookup_field] = stored_value
        
        for field in rdt_temp.fields:
            if isinstance(rdt_temp._pdict.get_context(field).param_type, ParameterFunctionType):
                # The self-assignment is intentional: reading the field evaluates
                # the parameter function, and assigning stores the computed values
                rdt_temp[field] = rdt_temp[field]

        
        rdt_out = RecordDictionaryTool(stream_definition_id=stream_def_out._id)

        for field in rdt_out.fields:
            rdt_out[field] = rdt_temp[field]
        
        return rdt_out 
Example n. 54
    def publish_loop(self):

        sine_ampl = 2.0  # Amplitude in both directions
        samples = 60

        startTime = time.time()
        count = samples  # initialize to something other than zero

        self.dataset_management = DatasetManagementServiceClient(
            node=self.container.node)

        while not self.finished.is_set():
            count = time.time() - startTime
            sine_curr_deg = (count % samples) * 360 / samples

            c = numpy.array(
                [sine_ampl * math.sin(math.radians(sine_curr_deg))])
            t = numpy.array(
                [sine_ampl * 2 * math.sin(math.radians(sine_curr_deg + 45))])
            p = numpy.array(
                [sine_ampl * 4 * math.sin(math.radians(sine_curr_deg + 60))])

            lat = numpy.array([32.8])
            lon = numpy.array([-119.6])
            # convert time to ntp time. Standard notation in the system
            tvar = numpy.array([ntplib.system_to_ntp_time(time.time())])

            parameter_dictionary = self._create_parameter()
            #parameter_dictionary = self.dataset_management.read_parameter_dictionary_by_name('ctd_parsed_param_dict')

            rdt = RecordDictionaryTool(param_dictionary=parameter_dictionary)

            h = numpy.array([random.uniform(0.0, 360.0)])

            rdt['time'] = tvar
            rdt['lat'] = lat
            rdt['lon'] = lon
            rdt['temp'] = t
            rdt['conductivity'] = c
            rdt['pressure'] = p

            g = rdt.to_granule(data_producer_id=self.id)

            log.info('SinusoidalCtdPublisher sending 1 record!')
            self.publisher.publish(g, self.stream_id)

            time.sleep(1.0)
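
The sine generation inside publish_loop, isolated so it can run without the ION plumbing. Same amplitude and sample count as above; one scalar per channel per tick:

import math
import time

sine_ampl = 2.0
samples = 60
start_time = time.time()

count = time.time() - start_time
sine_curr_deg = (count % samples) * 360 / samples
c = sine_ampl * math.sin(math.radians(sine_curr_deg))           # conductivity
t = sine_ampl * 2 * math.sin(math.radians(sine_curr_deg + 45))  # temperature
p = sine_ampl * 4 * math.sin(math.radians(sine_curr_deg + 60))  # pressure
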
Example n. 55
    def _merge_rdt(self, stream_def_in, stream_def_out):
        incoming_pdict_dump = stream_def_in.parameter_dictionary
        outgoing_pdict_dump = stream_def_out.parameter_dictionary

        merged_pdict = self._merge_pdicts(incoming_pdict_dump,
                                          outgoing_pdict_dump)
        rdt_temp = RecordDictionaryTool(param_dictionary=merged_pdict)
        return rdt_temp
Example n. 56
    def _build_granule(stream_definition_id=None,
                       field_name='',
                       value=None,
                       time=None):
        '''
        @param stream_definition_id str
        @param field_name str
        @param value numpy.array
        @param time numpy.array

        @retval Granule
        '''
        root_rdt = RecordDictionaryTool(
            stream_definition_id=stream_definition_id)
        root_rdt[field_name] = value
        root_rdt['time'] = time

        return root_rdt.to_granule()
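
A hedged usage sketch: the arrays are made up, and '<stream_def_id>' is a placeholder for a stream definition registered with the pubsub service, so the call itself is shown commented out rather than as something runnable here.

import numpy as np

salinity = np.array([35.1, 35.2, 35.3])
times = np.array([3600.0, 3601.0, 3602.0])
# granule = _build_granule(stream_definition_id='<stream_def_id>',
#                          field_name='salinity',
#                          value=salinity,
#                          time=times)
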
Example n. 57
    def _check_application_of_L1_algorithm(self, granule=None):
        """ Check the algorithm applied by the L1 transform """
        rdt = RecordDictionaryTool.load_from_granule(granule)

        list_of_expected_keys = ['time', 'pressure', 'conductivity', 'temp']

        for key in list_of_expected_keys:
            self.assertIn(key, rdt)
Example n. 58
    def _check_application_of_L2_salinity_algorithm(self, granule=None):
        """ Check the algorithm applied by the L2 transform """
        rdt = RecordDictionaryTool.load_from_granule(granule)

        list_of_expected_keys = ['time', 'salinity']

        for key in list_of_expected_keys:
            self.assertIn(key, rdt)