def _trigger_func(self, stream_id):

        #point_def = usgs_stream_definition(stream_id=stream_id)

        damsP = DataAcquisitionManagementServicePlaceholder()
        dsh = damsP.get_data_handler(ds_id=USGS)
        dsh._block_size = 1

        lon = 0
        lon_iter = dsh.acquire_data(var_name='lon')
        for lon_vn, lon_slice_, lon_rng, lon_data in lon_iter:
            lon = lon_data[0]
        lat = 0
        lat_iter = dsh.acquire_data(var_name='lat')
        for lat_vn, lat_slice_, lat_rng, lat_data in lat_iter:
            lat = lat_data[0]

        location = (lon,lat)

        data_iter = dsh.acquire_data(var_name='time')

        index = 0
        for vn, slice_, rng, data in data_iter: #loop through each time step
            # fresh constructor per granule: close_stream_granule() finalizes the
            # supplement, so one constructor cannot be reused across packets
            point_constructor = PointSupplementConstructor(point_definition=self._usgs_def, stream_id=stream_id)
            point_id = point_constructor.add_point(time=data[0], location=location)

            temp_iter = dsh.acquire_data(var_name=['water_temperature','water_height','streamflow'], slice_=index)
            for temp_vn, temp_slice_, temp_rng, temp_data in temp_iter:
                point_constructor.add_scalar_point_coverage(point_id=point_id, coverage_id=temp_vn, value=temp_data[0])

            ctd_packet = point_constructor.close_stream_granule()

            self.publisher.publish(ctd_packet)

            index += 1
    def execute(self, granule):
        """Processes incoming data!!!!
        """
        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)

        conductivity = psd.get_values('conductivity')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))


        # The L1 conductivity data product algorithm takes the L0 conductivity data product and converts it
        # into Siemens per meter (S/m)
        #    SBE 37IM Output Format 0
        #    1) Standard conversion from 5-character hex string to decimal
        #    2) Scaling: C [S/m] = (cdec / 100,000) - 0.5
        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(conductivity)):
            scaled_conductivity = (conductivity[i] / 100000.0) - 0.5
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=scaled_conductivity)

        return psc.close_stream_granule()
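The scaling loop above assumes the hex-to-decimal step has already happened upstream, so the L0 input is numeric. As a standalone illustration of the full SBE 37IM Output Format 0 conversion described in the comment, here is a minimal sketch; the function name and the sample value are made up, and the gain/offset come from the loop above:

def sbe37im_conductivity_to_si(c_hex):
    """Sketch: 5-character SBE 37IM hex conductivity string to S/m."""
    c_dec = int(c_hex, 16)            # 1) standard hex-to-decimal conversion
    return (c_dec / 100000.0) - 0.5   # 2) scaling, as in the loop above

print(sbe37im_conductivity_to_si('61EB5'))  # 0x61EB5 == 401077 -> ~3.51 S/m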
Example #3
    def make_points(self,definition,stream_id='I am very special', N=100):
        from prototype.sci_data.constructor_apis import PointSupplementConstructor
        import numpy as np
        import random


        definition.stream_resource_id = stream_id


        total = N
        n = 10 # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1,n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i+=1
                point_id = psc.add_point(time=i, location=(0,0,0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0,scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            # the encoded hdf payload is already attached at
            # granule.identifiables[definition.data_stream_id].values
            yield granule
        return
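A quick driver for the generator above, as a hypothetical sketch: proc stands in for an instance of whatever class defines make_points, and SBE37_CDM_stream_definition is the stream definition helper used in the other examples on this page.

definition = SBE37_CDM_stream_definition()
granules = list(proc.make_points(definition, stream_id='test stream', N=25))
# 25 points are split into granules of 1 to 10 points, so at least 3 granules
assert len(granules) >= 3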
    def execute(self, granule):
        """
        Example process to double the salinity value
        """

        #  pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        salinity = psd.get_values('salinity')

        salinity *= 2.0

        print('Doubled salinity: %s' % str(salinity))


        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])

        for i in xrange(len(salinity)):
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='salinity', value=salinity[i])

        return psc.close_stream_granule()
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        rdt = RecordDictionaryTool.load_from_granule(granule)
        #todo: use only flat dicts for now, may change later...
#        rdt0 = rdt['coordinates']
#        rdt1 = rdt['data']

        pressure = get_safe(rdt, 'pres')

        longitude = get_safe(rdt, 'lon')
        latitude = get_safe(rdt, 'lat')
        time = get_safe(rdt, 'time')
        height = get_safe(rdt, 'height')

        log.warn('Got pressure: %s' % str(pressure))


        # L1
        # 1) The algorithm input is the L0 pressure data product (p_hex) and, in the case of the SBE 37IM, the pressure range (P_rng) from metadata.
        # 2) Convert the hexadecimal string to a decimal string
        # 3) For the SBE 37IM only, convert the pressure range (P_rng, input from metadata) from psia to dbar
        # 4) Perform scaling operation
        #    SBE 37IM
        #    L1 pressure data product (in dbar):


        # todo: get pressure range from metadata (if present) and include in the calculation.
        # Until then the L1 product is a pass-through of the L0 pressure.
        scaled_pressure = pressure

        root_rdt = RecordDictionaryTool(taxonomy=self.tx)

        #todo: use only flat dicts for now, may change later...
#        data_rdt = RecordDictionaryTool(taxonomy=self.tx)
#        coord_rdt = RecordDictionaryTool(taxonomy=self.tx)

        root_rdt['pres'] = scaled_pressure
        root_rdt['time'] = time
        root_rdt['lat'] = latitude
        root_rdt['lon'] = longitude
        root_rdt['height'] = height

#        root_rdt['coordinates'] = coord_rdt
#        root_rdt['data'] = data_rdt

        return build_granule(data_producer_id='ctd_L1_pressure', taxonomy=self.tx, record_dictionary=root_rdt)
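The numbered steps in the comment block never make it into the loop (the L1 value stays a pass-through), so here is a standalone sketch of what they describe. The psia-to-dbar factor is the standard 1 psi = 0.689476 dbar; the final scaling line is only a placeholder, since these examples never show the actual SBE 37IM formula:

PSIA_TO_DBAR = 0.689476  # 1 psi == 0.689476 decibar

def sbe37im_pressure_to_dbar(p_hex, p_rng_psia):
    """Sketch of the L1 pressure steps; the final scaling is assumed."""
    p_dec = int(p_hex, 16)                   # 2) hex string to decimal
    p_rng_dbar = p_rng_psia * PSIA_TO_DBAR   # 3) psia -> dbar
    # 4) placeholder scaling: map the full 5-char hex range onto P_rng
    return p_dec * p_rng_dbar / float(0xFFFFF)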
    def make_some_data(self):
        import numpy as np

        stream_id = 'I am very special'
        definition = SBE37_CDM_stream_definition()
        definition.stream_resource_id = stream_id

        self.couch.create(definition)

        total = 200
        n = 10 # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1,n)

            psc = PointSupplementConstructor(point_definition=definition, stream_id=stream_id)
            for x in xrange(r):
                i+=1
                point_id = psc.add_point(time=i, location=(0,0,0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=np.random.normal(loc=48.0,scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[definition.data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            # binary mode: the value string is an encoded hdf payload
            with open(FileSystem.get_hierarchical_url(FS.CACHE, '%s.hdf5' % sha1), 'wb') as f:
                f.write(hdf_string)
            granule.identifiables[definition.data_stream_id].values = ''
            self.couch.create(granule)
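The hdf handling above is simple content-addressed storage: the payload is written to a cache file named by its own sha1, and the granule stored in couch keeps only an empty placeholder in its place. The round trip in isolation (FileSystem and FS are the same prototype utilities used by the method above):

import hashlib

payload = b'example-bytes'  # stands in for the encoded hdf string
key = hashlib.sha1(payload).hexdigest().upper()
path = FileSystem.get_hierarchical_url(FS.CACHE, '%s.hdf5' % key)
with open(path, 'wb') as f:
    f.write(payload)
with open(path, 'rb') as f:
    assert hashlib.sha1(f.read()).hexdigest().upper() == key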
Example #7
    def _trigger_func(self, stream_id):

        point_def = ctd_stream_definition(stream_id=stream_id)

        while True:

            # fresh constructor per packet so each granule carries only the
            # points added in this iteration
            point_constructor = PointSupplementConstructor(point_definition=point_def)

            length = 1

            c = [random.uniform(0.0,75.0)  for i in xrange(length)]

            t = [random.uniform(-1.7, 21.0) for i in xrange(length)]

            p = [random.lognormvariate(1,2) for i in xrange(length)]

            lat = [random.uniform(-90.0, 90.0) for i in xrange(length)]

            lon = [random.uniform(0.0, 360.0) for i in xrange(length)]

            tvar = [self.last_time + i for i in xrange(1,length+1)]

            self.last_time = max(tvar)

            point_id = point_constructor.add_point(time=tvar,location=(lon[0],lat[0]))
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='temperature', values=t)
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='pressure', values=p)
            point_constructor.add_point_coverage(point_id=point_id, coverage_id='conductivity', values=c)

            ctd_packet = point_constructor.get_stream_granule()

            log.warn('SimpleCtdPublisher sending %d values!' % length)
            self.publisher.publish(ctd_packet)

            time.sleep(2.0)
Example #9
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)


        conductivity = psd.get_values('conductivity')
        pressure = psd.get_values('pressure')
        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')



        log.warn('Got conductivity: %s' % str(conductivity))
        log.warn('Got pressure: %s' % str(pressure))
        log.warn('Got temperature: %s' % str(temperature))


        sp = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)

        sa = SA_from_SP(sp, pressure, longitude, latitude)

        density = rho(sa, temperature, pressure)

        log.warn('Got density: %s' % str(density))

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(density)):
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='density', value=density[i])

        return psc.close_stream_granule()
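The density chain above comes from the old python-seawater Gibbs module layout these examples appear to use: SP_from_cndr turns a conductivity ratio into Practical Salinity, SA_from_SP turns Practical Salinity into Absolute Salinity, and rho evaluates in-situ density. A scalar sketch, assuming that package is installed and that conductivity is in the same units as cte.C3515 (the conductivity of standard seawater, about 42.914 mS/cm):

from seawater.gibbs import SP_from_cndr, SA_from_SP, rho
from seawater import constants as cte

conductivity = 42.9   # mS/cm; made-up value near standard seawater
temperature = 15.0    # deg C
pressure = 0.0        # dbar
longitude, latitude = -124.0, 44.0

sp = SP_from_cndr(r=conductivity / cte.C3515, t=temperature, p=pressure)
sa = SA_from_SP(sp, pressure, longitude, latitude)
print(rho(sa, temperature, pressure))  # in-situ density in kg/m^3, ~1026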
def _create_packet(definition):
    """
    Create a ctd_packet for scientific data
    """

    point_def = definition
    psc = PointSupplementConstructor(point_definition=point_def)

    point_id = psc.add_point(time=1, location=(0,0,0))

    psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=random.normalvariate(mu=8.888, sigma=2.0))
    psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=float(10.0))
    psc.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=float(0.0))

    ctd_packet = psc.close_stream_granule()
    return ctd_packet
    def _trigger_func(self, stream_id):

        point_def = ctd_stream_definition(stream_id=stream_id)

        while True:

            # fresh constructor per packet so each granule carries only the
            # points added in this iteration
            point_constructor = PointSupplementConstructor(
                point_definition=point_def)

            length = 1

            c = [random.uniform(0.0, 75.0) for i in xrange(length)]

            t = [random.uniform(-1.7, 21.0) for i in xrange(length)]

            p = [random.lognormvariate(1, 2) for i in xrange(length)]

            lat = [random.uniform(-90.0, 90.0) for i in xrange(length)]

            lon = [random.uniform(0.0, 360.0) for i in xrange(length)]

            tvar = [self.last_time + i for i in xrange(1, length + 1)]

            self.last_time = max(tvar)

            point_id = point_constructor.add_point(time=tvar,
                                                   location=(lon[0], lat[0]))
            point_constructor.add_point_coverage(point_id=point_id,
                                                 coverage_id='temperature',
                                                 values=t)
            point_constructor.add_point_coverage(point_id=point_id,
                                                 coverage_id='pressure',
                                                 values=p)
            point_constructor.add_point_coverage(point_id=point_id,
                                                 coverage_id='conductivity',
                                                 values=c)

            ctd_packet = point_constructor.get_stream_granule()

            log.info('SimpleCtdPublisher sending %d values!' % length)
            self.publisher.publish(ctd_packet)

            time.sleep(2.0)
Example #12
    def make_some_data(self):
        import numpy as np

        stream_id = 'I am very special'
        definition = SBE37_CDM_stream_definition()
        definition.stream_resource_id = stream_id

        self.couch.create(definition)

        total = 200
        n = 10  # at most n records per granule
        i = 0

        while i < total:
            r = random.randint(1, n)

            psc = PointSupplementConstructor(point_definition=definition,
                                             stream_id=stream_id)
            for x in xrange(r):
                i += 1
                point_id = psc.add_point(time=i, location=(0, 0, 0))
                psc.add_scalar_point_coverage(
                    point_id=point_id,
                    coverage_id='temperature',
                    value=np.random.normal(loc=48.0, scale=4.0, size=1)[0])
                psc.add_scalar_point_coverage(point_id=point_id,
                                              coverage_id='pressure',
                                              value=np.float32(1.0))
                psc.add_scalar_point_coverage(point_id=point_id,
                                              coverage_id='conductivity',
                                              value=np.float32(2.0))
            granule = psc.close_stream_granule()
            hdf_string = granule.identifiables[
                definition.data_stream_id].values
            sha1 = hashlib.sha1(hdf_string).hexdigest().upper()
            # binary mode: the value string is an encoded hdf payload
            with open(
                    FileSystem.get_hierarchical_url(FS.CACHE,
                                                    '%s.hdf5' % sha1),
                    'wb') as f:
                f.write(hdf_string)
            granule.identifiables[definition.data_stream_id].values = ''
            self.couch.create(granule)
Example #13
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)


        pressure = psd.get_values('pressure')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got pressure: %s' % str(pressure))


        # L1
        # 1) The algorithm input is the L0 pressure data product (p_hex) and, in the case of the SBE 37IM, the pressure range (P_rng) from metadata.
        # 2) Convert the hexadecimal string to a decimal string
        # 3) For the SBE 37IM only, convert the pressure range (P_rng, input from metadata) from psia to dbar
        # 4) Perform scaling operation
        #    SBE 37IM
        #    L1 pressure data product (in dbar):


        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(pressure)):
            #todo: get pressure range from metadata (if present) and include in calc
            scaled_pressure = pressure[i]
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=scaled_pressure)

        return psc.close_stream_granule()
    def execute(self, granule):
        """
        Example process to double the salinity value
        """

        #  pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=granule)

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        salinity = psd.get_values('salinity')

        salinity *= 2.0

        print('Doubled salinity: %s' % str(salinity))

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(
            point_definition=self.outgoing_stream_def,
            stream_id=self.streams['output'])

        for i in xrange(len(salinity)):
            point_id = psc.add_point(time=time[i],
                                     location=(longitude[i], latitude[i],
                                               height[i]))
            psc.add_scalar_point_coverage(point_id=point_id,
                                          coverage_id='salinity',
                                          value=salinity[i])

        return psc.close_stream_granule()
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)


        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got temperature: %s' % str(temperature))


        # The L1 temperature data product algorithm takes the L0 temperature data product and converts it into Celsius.
        # Once the hexadecimal string is converted to decimal, only scaling (dividing by a factor and adding an offset) is
        # required to produce the correct decimal representation of the data in Celsius.
        # The scaling function differs by CTD make/model as described below.
        #    SBE 37IM, Output Format 0
        #    1) Standard conversion from 5-character hex string (Thex) to decimal (tdec)
        #    2) Scaling: T [C] = (tdec / 10,000) - 10

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(temperature)):
            scaled_temperature = (temperature[i] / 10000.0) - 10
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=scaled_temperature)

        return psc.close_stream_granule()
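The temperature scaling is concrete enough to check by hand. A standalone sketch of the two steps in the comment block, with a made-up raw value (the function name is illustrative, not part of the API):

def sbe37im_temperature_to_celsius(t_hex):
    """Sketch: 5-character SBE 37IM hex temperature string to deg C."""
    t_dec = int(t_hex, 16)          # 1) standard hex-to-decimal conversion
    return (t_dec / 10000.0) - 10   # 2) scaling: T [C] = (tdec / 10,000) - 10

print(sbe37im_temperature_to_celsius('4C6D9'))  # 0x4C6D9 == 313049 -> ~21.3 C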
def _create_packet(definition):
    """
    Create a ctd_packet for scientific data
    """

    point_def = definition
    psc = PointSupplementConstructor(point_definition=point_def)

    point_id = psc.add_point(time=1, location=(0, 0, 0))

    psc.add_scalar_point_coverage(point_id=point_id,
                                  coverage_id='temperature',
                                  value=random.normalvariate(mu=8.888,
                                                             sigma=2.0))
    psc.add_scalar_point_coverage(point_id=point_id,
                                  coverage_id='pressure',
                                  value=float(10.0))
    psc.add_scalar_point_coverage(point_id=point_id,
                                  coverage_id='conductivity',
                                  value=float(0.0))

    ctd_packet = psc.close_stream_granule()
    return ctd_packet
Example #17
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=granule)

        conductivity = psd.get_values('conductivity')
        pressure = psd.get_values('pressure')
        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))
        log.warn('Got pressure: %s' % str(pressure))
        log.warn('Got temperature: %s' % str(temperature))

        sp = SP_from_cndr(r=conductivity / cte.C3515,
                          t=temperature,
                          p=pressure)

        sa = SA_from_SP(sp, pressure, longitude, latitude)

        density = rho(sa, temperature, pressure)

        log.warn('Got density: %s' % str(density))

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(
            point_definition=self.outgoing_stream_def,
            stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(density)):
            point_id = psc.add_point(time=time[i],
                                     location=(longitude[i], latitude[i],
                                               height[i]))
            psc.add_scalar_point_coverage(point_id=point_id,
                                          coverage_id='density',
                                          value=density[i])

        return psc.close_stream_granule()
Example #18
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=granule)

        pressure = psd.get_values('pressure')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got pressure: %s' % str(pressure))

        # L1
        # 1) The algorithm input is the L0 pressure data product (p_hex) and, in the case of the SBE 37IM, the pressure range (P_rng) from metadata.
        # 2) Convert the hexadecimal string to a decimal string
        # 3) For the SBE 37IM only, convert the pressure range (P_rng, input from metadata) from psia to dbar
        # 4) Perform scaling operation
        #    SBE 37IM
        #    L1 pressure data product (in dbar):

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(
            point_definition=self.outgoing_stream_def,
            stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(pressure)):
            #todo: get pressure range from metadata (if present) and include in calc
            scaled_pressure = pressure[i]
            point_id = psc.add_point(time=time[i],
                                     location=(longitude[i], latitude[i],
                                               height[i]))
            psc.add_scalar_point_coverage(point_id=point_id,
                                          coverage_id='pressure',
                                          value=scaled_pressure)

        return psc.close_stream_granule()
Example #19
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=granule)

        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got temperature: %s' % str(temperature))

        # The L1 temperature data product algorithm takes the L0 temperature data product and converts it into Celsius.
        # Once the hexadecimal string is converted to decimal, only scaling (dividing by a factor and adding an offset) is
        # required to produce the correct decimal representation of the data in Celsius.
        # The scaling function differs by CTD make/model as described below.
        #    SBE 37IM, Output Format 0
        #    1) Standard conversion from 5-character hex string (Thex) to decimal (tdec)
        #    2) Scaling: T [C] = (tdec / 10,000) - 10

        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(
            point_definition=self.outgoing_stream_def,
            stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(temperature)):
            scaled_temperature = (temperature[i] / 10000.0) - 10
            point_id = psc.add_point(time=time[i],
                                     location=(longitude[i], latitude[i],
                                               height[i]))
            psc.add_scalar_point_coverage(point_id=point_id,
                                          coverage_id='temperature',
                                          value=scaled_temperature)

        return psc.close_stream_granule()
    def execute(self, granule):
        """Processes incoming data!!!!
        """
        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=granule)

        conductivity = psd.get_values('conductivity')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))

        # The L1 conductivity data product algorithm takes the L0 conductivity data product and converts it
        # into Siemens per meter (S/m)
        #    SBE 37IM Output Format 0
        #    1) Standard conversion from 5-character hex string to decimal
        #    2) Scaling: C [S/m] = (cdec / 100,000) - 0.5
        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(
            point_definition=self.outgoing_stream_def,
            stream_id=self.streams['output'])
        ### Assumes the config argument for output streams is known and there is only one 'output'.
        ### the stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(conductivity)):
            scaled_conductivity = (conductivity[i] / 100000.0) - 0.5
            point_id = psc.add_point(time=time[i],
                                     location=(longitude[i], latitude[i],
                                               height[i]))
            psc.add_scalar_point_coverage(point_id=point_id,
                                          coverage_id='conductivity',
                                          value=scaled_conductivity)

        return psc.close_stream_granule()
    def _trigger_func(self, stream_id):

        #point_def = usgs_stream_definition(stream_id=stream_id)

        damsP = DataAcquisitionManagementServicePlaceholder()
        dsh = damsP.get_data_handler(ds_id=USGS)
        dsh._block_size = 1

        lon = 0
        lon_iter = dsh.acquire_data(var_name='lon')
        for lon_vn, lon_slice_, lon_rng, lon_data in lon_iter:
            lon = lon_data[0]
        lat = 0
        lat_iter = dsh.acquire_data(var_name='lat')
        for lat_vn, lat_slice_, lat_rng, lat_data in lat_iter:
            lat = lat_data[0]

        location = (lon,lat)

        data_iter = dsh.acquire_data(var_name='time')

        index = 0
        for vn, slice_, rng, data in data_iter: #loop through each time step
            # fresh constructor per granule: close_stream_granule() finalizes the
            # supplement, so one constructor cannot be reused across packets
            point_constructor = PointSupplementConstructor(point_definition=self._usgs_def, stream_id=stream_id)
            point_id = point_constructor.add_point(time=data[0], location=location)

            temp_iter = dsh.acquire_data(var_name=['water_temperature','water_height','streamflow'], slice_=index)
            for temp_vn, temp_slice_, temp_rng, temp_data in temp_iter:
                point_constructor.add_scalar_point_coverage(point_id=point_id, coverage_id=temp_vn, value=temp_data[0])

            ctd_packet = point_constructor.close_stream_granule()

            self.publisher.publish(ctd_packet)

            index += 1

#        time.sleep(2.0)
Example #22
    def execute(self, granule):
        """Processes incoming data!!!!
        """

        # Use the deconstructor to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=granule)


        conductivity = psd.get_values('conductivity')
        pressure = psd.get_values('pressure')
        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')



        log.warn('Got conductivity: %s' % str(conductivity))
        log.warn('Got pressure: %s' % str(pressure))
        log.warn('Got temperature: %s' % str(temperature))


        salinity = SP_from_cndr(r=conductivity/cte.C3515, t=temperature, p=pressure)

        log.warn('Got salinity: %s' % str(salinity))


        # Use the constructor to put data into a granule
        psc = PointSupplementConstructor(point_definition=self.outgoing_stream_def, stream_id=self.streams['output'])

        for i in xrange(len(salinity)):
            point_id = psc.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc.add_scalar_point_coverage(point_id=point_id, coverage_id='salinity', value=salinity[i])

        return psc.close_stream_granule()
Example #23
    def process(self, packet):
        """Processes incoming data!!!!
        """

        # Use the PointSupplementStreamParser to pull data from a granule
        psd = PointSupplementStreamParser(
            stream_definition=self.incoming_stream_def, stream_granule=packet)

        conductivity = psd.get_values('conductivity')
        pressure = psd.get_values('pressure')
        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))
        log.warn('Got pressure: %s' % str(pressure))
        log.warn('Got temperature: %s' % str(temperature))

        # do L0 scaling here.....

        # Use the constructor to put data into a granule

        psc_conductivity = PointSupplementConstructor(
            point_definition=self.outgoing_stream_conductivity,
            stream_id=self.streams['conductivity'])

        psc_pressure = PointSupplementConstructor(
            point_definition=self.outgoing_stream_pressure,
            stream_id=self.streams['pressure'])

        psc_temperature = PointSupplementConstructor(
            point_definition=self.outgoing_stream_temperature,
            stream_id=self.streams['temperature'])

        ### The stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(conductivity)):
            point_id = psc_conductivity.add_point(time=time[i],
                                                  location=(longitude[i],
                                                            latitude[i],
                                                            height[i]))
            psc_conductivity.add_scalar_point_coverage(
                point_id=point_id,
                coverage_id='conductivity',
                value=conductivity[i])
        self.conductivity.publish(psc_conductivity.close_stream_granule())

        for i in xrange(len(pressure)):
            point_id = psc_pressure.add_point(time=time[i],
                                              location=(longitude[i],
                                                        latitude[i],
                                                        height[i]))
            psc_pressure.add_scalar_point_coverage(point_id=point_id,
                                                   coverage_id='pressure',
                                                   value=pressure[i])
        self.pressure.publish(psc_pressure.close_stream_granule())

        for i in xrange(len(temperature)):
            point_id = psc_temperature.add_point(time=time[i],
                                                 location=(longitude[i],
                                                           latitude[i],
                                                           height[i]))
            psc_temperature.add_scalar_point_coverage(
                point_id=point_id,
                coverage_id='temperature',
                value=temperature[i])
        self.temperature.publish(psc_temperature.close_stream_granule())

        return
Example #24
def ctd_stream_packet(stream_id=None, c=None, t=None, p=None, lat=None, lon=None, height=None, time=None, create_hdf=True):
    """
    ###
    ### This method is deprecated!
    ###


    This is a simple interface for creating a packet of ctd data for a stream defined by ctd_stream_definition.
    The string names of the content are tightly coupled to that definition.
    To send actual data you must have hdf5, numpy and h5py installed.

    @brief Build a demo ctd data packet as an ion object. All value arguments are optional, but any argument provided
    should have the same length.
    
    @param stream_id should be the same as the stream_id for the definition - the stream resource ID
    @param c is a list, tuple or ndarray of conductivity values
    @param t is a list, tuple or ndarray of temperature values
    @param p is a list, tuple or ndarray of pressure values
    @param lat is a list, tuple or ndarray of latitude values
    @param lon is a list, tuple or ndarray of longitude values
    @param height is a list, tuple or ndarray of height values
    @param time is a list, tuple or ndarray of time values

    """

    stream_def = ctd_stream_definition(stream_id=stream_id)

    psc = PointSupplementConstructor(point_definition=stream_def, stream_id=stream_id)


    assert time
    assert lat
    assert lon

    def listify(value):
        if hasattr(value, '__iter__'):
            return value
        else:
            return [value]

    length = False


    if c is not None:
        c = listify(c)
        if length:
            assert length == len(c), 'Conductivity input is the wrong length'
        else:
            length = len(c)

    if t is not None:
        t = listify(t)
        if length:
            assert length == len(t), 'Temperature input is the wrong length'
        else:
            length = len(t)

    if p is not None:
        p = listify(p)
        if length:
            assert length == len(p), 'Pressure input is the wrong length'
        else:
            length = len(p)

    if lat is not None:
        lat = listify(lat)
        if length:
            if 1 == len(lat):
                lat = lat*length
        else:
            length = len(lat)
    else:
        raise RuntimeError('Did not specify latitude')

    if lon is not None:
        lon = listify(lon)
        if length:
            if 1 == len(lon):
                lon = lon*length
        else:
            length = len(lon)
    else:
        raise RuntimeError('Did not specify longitude')

    if height is not None:
        height = listify(height)
        if length:
            if 1 == len(height):
                height = height*length
        else:
            length = len(height)
    else:
        height = [0,]*length

    if time is not None:
        time = listify(time)
        if length:
            if 1 == len(time):
                time = time*length
        else:
            length = len(time)
    else:
        raise RuntimeError('Did not specify time')


    for idx, time_val in enumerate(time):

        p_id = psc.add_point(time=time_val, location=(lon[idx], lat[idx], height[idx]))

        #putting the if inside the loop is slow - but this is a test method only!
        if t:
            psc.add_scalar_point_coverage(point_id=p_id, coverage_id='temperature', value=t[idx])
        if p:
            psc.add_scalar_point_coverage(point_id=p_id, coverage_id='pressure', value=p[idx])
        if c:
            psc.add_scalar_point_coverage(point_id=p_id, coverage_id='conductivity', value=c[idx])



    granule = psc.close_stream_granule()

    if not create_hdf:
        # Remove the hdf string from the granule

        data_stream = granule.identifiables['data_stream']
        data_stream.values = ''


    return granule
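A hypothetical call to the deprecated helper above, with a made-up stream id. Note how the scalar lat/lon are expanded to the common length by the listify/length bookkeeping:

packet = ctd_stream_packet(
    stream_id='ctd_stream_1',  # should match the id used for the definition
    c=[35.2, 35.3, 35.1],
    t=[10.1, 10.2, 10.0],
    p=[10.0, 10.0, 10.0],
    lat=44.6,                  # scalar: replicated to length 3
    lon=-124.1,                # scalar: replicated to length 3
    time=[0.0, 1.0, 2.0],
    create_hdf=False)          # granule is built, then the hdf string is stripped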
Example #25
    def process(self, packet):

        """Processes incoming data!!!!
        """

        # Use the PointSupplementStreamParser to pull data from a granule
        psd = PointSupplementStreamParser(stream_definition=self.incoming_stream_def, stream_granule=packet)


        conductivity = psd.get_values('conductivity')
        pressure = psd.get_values('pressure')
        temperature = psd.get_values('temperature')

        longitude = psd.get_values('longitude')
        latitude = psd.get_values('latitude')
        height = psd.get_values('height')
        time = psd.get_values('time')

        log.warn('Got conductivity: %s' % str(conductivity))
        log.warn('Got pressure: %s' % str(pressure))
        log.warn('Got temperature: %s' % str(temperature))


        # do L0 scaling here.....


        # Use the constructor to put data into a granule

        psc_conductivity = PointSupplementConstructor(point_definition=self.outgoing_stream_conductivity, stream_id=self.streams['conductivity'])

        psc_pressure = PointSupplementConstructor(point_definition=self.outgoing_stream_pressure, stream_id=self.streams['pressure'])

        psc_temperature = PointSupplementConstructor(point_definition=self.outgoing_stream_temperature, stream_id=self.streams['temperature'])

        ### The stream id is part of the metadata which must go in each stream granule - this is awkward to do at the
        ### application level like this!

        for i in xrange(len(conductivity)):
            point_id = psc_conductivity.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc_conductivity.add_scalar_point_coverage(point_id=point_id, coverage_id='conductivity', value=conductivity[i])
        self.conductivity.publish(psc_conductivity.close_stream_granule())

        for i in xrange(len(pressure)):
            point_id = psc_pressure.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc_pressure.add_scalar_point_coverage(point_id=point_id, coverage_id='pressure', value=pressure[i])
        self.pressure.publish(psc_pressure.close_stream_granule())

        for i in xrange(len(temperature)):
            point_id = psc_temperature.add_point(time=time[i],location=(longitude[i],latitude[i],height[i]))
            psc_temperature.add_scalar_point_coverage(point_id=point_id, coverage_id='temperature', value=temperature[i])
        self.temperature.publish(psc_temperature.close_stream_granule())

        return
Example #26
    def _merge(self, msgs):
        '''
        @brief Merges all the granules and datasets into one large dataset (Union)
        @param msgs raw granules from couch
        @return complete dataset
        @description
             n
        D := U [ msgs_i ]
            i=0
        '''
        granule = None
        file_list = list()
        count = len(msgs)
        used_vals = list()

        #-------------------------------------------------------------------------------------
        # Merge each granule to another granule one by one.
        # After each merge operation keep track of what files belong where on the timeline
        #-------------------------------------------------------------------------------------

        for i in xrange(count):
            if i == 0:
                granule = msgs[0]['granule']
                psc = PointSupplementConstructor(
                    point_definition=self.definition)

                res = ReplayProcess.merge_granule(definition=self.definition,
                                                  granule1=granule,
                                                  granule2=None)
                granule = res['granule']
                file_pair = res['files']
                log.debug('file_pair: %s', file_pair)

                if file_pair[0] not in file_list and file_pair[0][
                        0] not in used_vals:
                    file_list.append(tuple(file_pair[0]))
                    used_vals.append(file_pair[0][0])

            else:
                res = ReplayProcess.merge_granule(definition=self.definition,
                                                  granule1=granule,
                                                  granule2=msgs[i]['granule'])

                granule = res['granule']
                file_pair = res['files']
                log.debug('file_pair: %s', file_pair)

                if file_pair[0] not in file_list and file_pair[0][
                        0] not in used_vals:
                    file_list.append(tuple(file_pair[0]))
                    used_vals.append(file_pair[0][0])
                if file_pair[1] not in file_list and file_pair[1][
                        0] not in used_vals:
                    file_list.append(tuple(file_pair[1]))
                    used_vals.append(file_pair[1][0])

        if not granule:
            return
        log.debug('file_list: %s', file_list)
        #-------------------------------------------------------------------------------------
        # Order the lists using a stable sort from python (by the first value in the tuples
        # Then peel off just the file names
        # Then get the appropriate URL for the file using FileSystem
        #-------------------------------------------------------------------------------------
        file_list.sort()
        file_list = list(i[1] for i in file_list)
        file_list = list([
            FileSystem.get_hierarchical_url(FS.CACHE, '%s' % i)
            for i in file_list
        ])

        pairs = self._pair_up(granule)
        var_names = list([i[0] for i in pairs])

        record_count = granule.identifiables[self.element_count_id].value
        codec = HDFEncoder()
        log.debug('acquire_data:')
        log.debug('\tfile_list: %s', file_list)
        log.debug('\tfields: %s', var_names)
        log.debug('\trecords: %s', record_count)

        data = acquire_data(file_list, var_names, record_count).next()

        for row, value in data.iteritems():
            value_path = self._find_vp(pairs, row)
            codec.add_hdf_dataset(value_path, nparray=value['values'])
            #-------------------------------------------------------------------------------------
            # Debugging
            #-------------------------------------------------------------------------------------
            log.debug('row: %s', row)
            log.debug('value path: %s', value_path)
            log.debug('value: %s', value['values'])

        hdf_string = codec.encoder_close()
        self._patch_granule(granule, hdf_string)
        return granule
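The file bookkeeping in _merge leans on a small idiom: collect (key, filename) tuples, let the stable tuple sort order them by their first element, then peel off just the names. In isolation:

file_list = [(3, 'c.hdf5'), (1, 'a.hdf5'), (2, 'b.hdf5')]
file_list.sort()                           # tuples sort by first element
file_list = list(i[1] for i in file_list)  # peel off just the file names
print(file_list)                           # ['a.hdf5', 'b.hdf5', 'c.hdf5']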