def test_exclude_data(self):
        ctd_ds = xr.open_dataset(os.path.join(DATA_DIR, self.ctdpf_fn), decode_times=False)
        ctd_ds = ctd_ds[['obs', 'time', 'deployment', 'temperature', 'pressure',
                         'pressure_temp', 'conductivity', 'ext_volt0']]

        times = ctd_ds.time.values

        ctd_stream_dataset = StreamDataset(self.ctdpf_sk, {}, [], 'UNIT')
        ctd_stream_dataset.events = self.ctd_events
        ctd_stream_dataset._insert_dataset(ctd_ds)

        ctd_stream_dataset.exclude_flagged_data()
        np.testing.assert_array_equal(times, ctd_stream_dataset.datasets[2].time.values)

        # exclude a bit
        start = ntplib.ntp_to_system_time(times[0]) * 1000
        stop = ntplib.ntp_to_system_time(times[100]) * 1000
        anno = self._create_exclusion_anno(start, stop)
        ctd_stream_dataset.annotation_store.add_annotations([anno])

        ctd_stream_dataset.exclude_flagged_data()
        np.testing.assert_array_equal(times[101:], ctd_stream_dataset.datasets[2].time.values)

        # exclude everything
        start = ntplib.ntp_to_system_time(times[0]) * 1000
        stop = ntplib.ntp_to_system_time(times[-1]) * 1000
        anno = self._create_exclusion_anno(start, stop)
        ctd_stream_dataset.annotation_store.add_annotations([anno])

        ctd_stream_dataset.exclude_flagged_data()
        self.assertNotIn(2, ctd_stream_dataset.datasets)
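
The start/stop bounds above are NTP seconds converted to Unix epoch milliseconds. A minimal standalone sketch of that conversion (ntplib applies the fixed 2,208,988,800-second offset between the 1900 NTP epoch and the 1970 Unix epoch):

import ntplib

def ntp_to_millis(ntp_seconds):
    # ntplib.ntp_to_system_time subtracts the NTP/Unix epoch offset
    return ntplib.ntp_to_system_time(ntp_seconds) * 1000

# e.g. ntp_to_millis(3600000000.0) -> 1391011200000.0 (2014-01-29T16:00:00Z)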
Example #2
    def test_exclude_data(self):
        ctd_ds = xr.open_dataset(os.path.join(DATA_DIR, self.ctdpf_fn), decode_times=False)
        ctd_ds = ctd_ds[['obs', 'time', 'deployment', 'temperature', 'pressure',
                         'pressure_temp', 'conductivity', 'ext_volt0']]

        times = ctd_ds.time.values
        store = AnnotationStore()

        ctd_stream_dataset = StreamDataset(self.ctdpf_sk, {}, [], 'UNIT')
        ctd_stream_dataset.events = self.ctd_events
        ctd_stream_dataset._insert_dataset(ctd_ds)
        
        ctd_stream_dataset.exclude_flagged_data(store)
        np.testing.assert_array_equal(times, ctd_stream_dataset.datasets[2].time.values)

        # exclude a bit
        start = ntplib.ntp_to_system_time(times[0]) * 1000
        stop = ntplib.ntp_to_system_time(times[100]) * 1000
        anno = self._create_exclusion_anno(self.ctdpf_sk, start, stop)
        store.add_annotations([anno])

        ctd_stream_dataset.exclude_flagged_data(store)
        np.testing.assert_array_equal(times[101:], ctd_stream_dataset.datasets[2].time.values)

        # exclude everything
        start = ntplib.ntp_to_system_time(times[0]) * 1000
        stop = ntplib.ntp_to_system_time(times[-1]) * 1000
        anno = self._create_exclusion_anno(self.ctdpf_sk, start, stop)
        store.add_annotations([anno])

        ctd_stream_dataset.exclude_flagged_data(store)
        self.assertNotIn(2, ctd_stream_dataset.datasets)
Example #3
    def as_millis(self):
        """
        Return the start/stop times in milliseconds since 1-1-1970
        :return: (start, stop)
        """
        return (int(ntplib.ntp_to_system_time(self.start) * 1000),
                int(ntplib.ntp_to_system_time(self.stop) * 1000))
Example #4
    def __restore_ntp_pck(self, ntp: NTP) -> NTP:
        self.log.debug('Send timestamp for reconstruction: ' + str(ntp.sent))
        sent_time_stamp = datetime.fromtimestamp(
            ntplib.ntp_to_system_time(ntp.sent))
        sent_time_stamp = sent_time_stamp.replace(year=datetime.now().year)
        sent_time_stamp_as_ntp = ntplib.system_to_ntp_time(
            sent_time_stamp.timestamp())
        ntp.sent = sent_time_stamp_as_ntp
        self.log.debug('Send timestamp after reconstruction: ' + str(ntp.sent))
        pck = CP3Package(ntp)

        if NTPMode.from_bit_string(pck.mode()) is NTPMode.CLIENT:
            self.log.debug("Restored in Client mode")
            ntp.ref = 0
            ntp.orig = 0
            ntp.recv = 0
        if NTPMode.from_bit_string(pck.mode()) is NTPMode.SERVER \
                or NTPMode.from_bit_string(pck.mode()) is NTPMode.BROADCAST_SERVER:
            self.log.debug("Restored in Server mode")
            origin_last_32 = pck.origin_timestamp()[32:64]
            received_last_32 = pck.receive_timestamp()[32:64]
            transmit_first_32 = pck.origin_timestamp()[0:32]

            pck.set_origin_timestamp(transmit_first_32 + origin_last_32)
            pck.set_receive_timestamp(transmit_first_32 + received_last_32)
            ntp = pck.ntp()
        self.log.debug("Reconstruction complete.")
        #ntp.show()
        return ntp
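
The year-rewrite trick above (NTP seconds -> local datetime -> replace the year -> back to NTP) recurs in several of these examples; a minimal sketch using only ntplib and the standard library (like the original, it goes through local time via datetime.fromtimestamp):

import ntplib
from datetime import datetime

def restore_year(ntp_seconds, year):
    # NTP -> Unix seconds, rewrite the year, then back to NTP seconds
    dt = datetime.fromtimestamp(ntplib.ntp_to_system_time(ntp_seconds))
    return ntplib.system_to_ntp_time(dt.replace(year=year).timestamp())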
Example #5
    def test_instrument_autosample_samples(self):
        """
        @brief Test for putting instrument in 'auto-sample' state and receiving samples
        """
        self.assert_initialize_driver()

        # command the instrument to auto-sample mode.
        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.START_AUTOSAMPLE)

        self.assert_current_state(ProtocolStates.AUTOSAMPLE)
           
        # wait for some samples to be generated
        log.debug('test_instrument_start_stop_autosample: waiting 5 seconds for samples')
        gevent.sleep(5)

        # Verify we received at least 2 samples.
        sample_events = [evt for evt in self.events if evt['type'] == DriverAsyncEvent.SAMPLE]
        log.debug('test_instrument_start_stop_autosample: # of samples = %d' % len(sample_events))
        for sample in sample_events:
            if sample['value'].find(DataParticleType.SAMPLE) != -1:
                log.debug('parsed sample=%s\n' %sample)
                sample_dict = eval(sample['value'])     # turn string into dictionary
                values = sample_dict['values']          # get particle dictionary
                # pull timestamp out of particle
                ntp_timestamp = [item for item in values if item["value_id"] == "timestamp"][0]['value']
                float_timestamp = ntplib.ntp_to_system_time(ntp_timestamp)
                log.debug('dt=%s' %time.ctime(float_timestamp))
        self.assertTrue(len(sample_events) >= 2)

        # stop autosample and return to command mode
        self.driver_client.cmd_dvr('execute_resource', ProtocolEvent.STOP_AUTOSAMPLE)
                
        self.assert_current_state(ProtocolStates.COMMAND)
Example #6
def ntp_to_ISO_date(ntp_time):
    try:
        ntp_time = float(ntp_time)
        dt = datetime.datetime.utcfromtimestamp(ntplib.ntp_to_system_time(ntp_time))
        return dt.isoformat()
    except Exception:
        return str(ntp_time)
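
Example usage; the output follows directly from the fixed NTP/Unix offset, and bad input falls through the except clause:

print(ntp_to_ISO_date(3600000000.0))  # 2014-01-29T16:00:00
print(ntp_to_ISO_date('garbage'))     # 'garbage' -- returned as str(ntp_time)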
Example #7
    def test_restore_pck_server_pck_other_timestamps_filled_with_send(self):
        # Arrange
        client = CP3Handler('')
        pck = CP3Package()
        first_32 = '11111111111111111111000000011111'
        origin_last_32 = '11111111111110011110111111111111'
        received_last_32 = '11111111110111011110111111111111'
        transmit = '1111111111111111111111111111111111111111111111111111111111111111'
        pck.set_origin_timestamp(first_32 + origin_last_32)
        pck.set_receive_timestamp(first_32 + received_last_32)
        pck.set_transmit_timestamp(transmit)
        pck.set_mode(NTPMode.to_bit_string(NTPMode.SERVER))

        # Act
        result_pck = RawNTP(client.restore_pck(pck))

        # Assert
        self.assertNotEqual(result_pck.transmit_timestamp(), transmit)
        self.assertEqual(result_pck.origin_timestamp()[32:64], origin_last_32)
        self.assertEqual(result_pck.receive_timestamp()[32:64],
                         received_last_32)
        # Origin and receive share the same reconstructed 32-bit prefix,
        # so comparing the two prefixes covers both timestamps.
        self.assertEqual(result_pck.receive_timestamp()[0:32],
                         result_pck.origin_timestamp()[0:32])
        self.assertEqual(
            datetime.fromtimestamp(
                ntplib.ntp_to_system_time(result_pck.ntp().sent)).year,
            datetime.now().year)
Example #8
    def _set_cp3_mode(self, year: int):
        ntp = self.ntp()
        time = ntplib.system_to_ntp_time(datetime.fromtimestamp(ntplib.ntp_to_system_time(ntp.sent))
                                         .replace(year=year).timestamp())
        ntp.sent = time
        raw = RawNTP(ntp)
        self.set_transmit_timestamp(raw.transmit_timestamp())
Example #9
def ntp_2_ion_ts(ntp_time):
    """
    Converts an NTP time into ION system timestamp.
    The converse operation is ion_ts_2_ntp(ion_ts).

    Note: this should probably be a utility provided by pyon.

    @see https://jira.oceanobservatories.org/tasks/browse/OOIION-631

    @param ntp_time
            float representing an NTP time.

    @retval str representing an integer number, the millis in UNIX epoch,
            corresponding to the given NTP time. This is calculated as
            str( int(round(ntplib.ntp_to_system_time(ntp_time) * 1000)) )
    """

    # convert to system time:
    sys_time = ntplib.ntp_to_system_time(ntp_time)

    # convert to milliseconds rounding to the nearest integer:
    sys_time = int(round(sys_time * 1000))

    # return as str
    return str(sys_time)
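
A forward-direction illustration (ion_ts_2_ntp is referenced in the docstring but not shown here):

print(ntp_2_ion_ts(3600000000.5))  # '1391011200500' -- Unix millis as a string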
Example #11
def ntp_to_datetime(ntp_time):
    try:
        ntp_time = float(ntp_time)
        unix_time = ntplib.ntp_to_system_time(ntp_time)
        dt = datetime.datetime.utcfromtimestamp(unix_time)
        return dt
    except (ValueError, TypeError):
        return None
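
Example usage; non-numeric input is caught by the ValueError/TypeError handler:

print(ntp_to_datetime(3600000000.0))    # 2014-01-29 16:00:00
print(ntp_to_datetime('not a number'))  # None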
Example #13
    def _last_sync(self) -> float:
        """Get the time of the last sync.

        Returns
        -------
        The amplifier time of the last sync, converted from NTP to system (Unix) time.
        """

        return ntp_to_system_time(self._mstime)
Example #14
    def test_restore_pck_client_pck_with_changed_year(self):
        # Arrange
        client = CP3Handler('')
        pck = init_ntp_pck()
        pck.sent = ntplib.system_to_ntp_time(
            datetime.fromtimestamp(ntplib.ntp_to_system_time(
                pck.sent)).replace(year=2006).timestamp())

        # Act
        result_pck = client.restore_pck(CP3Package(ntp_pck=pck))

        # Assert
        self.assertEqual(result_pck.ref, 0)
        self.assertEqual(result_pck.recv, 0)
        self.assertEqual(result_pck.orig, 0)
        self.assertEqual(
            datetime.fromtimestamp(ntplib.ntp_to_system_time(
                result_pck.sent)).year,
            datetime.now().year)
Example #15
def ntp_to_string(timestamp, time_format=DATE_FORMAT):
    """
    takes an NTP timestamp (seconds since 1900/1/1) and outputs in provided format
    :param timestamp: ntp timestamp
    :param time_format: datetime compatible time string format
    :return: 
    """
    unix_time = ntplib.ntp_to_system_time(timestamp)
    dt = datetime.datetime.utcfromtimestamp(unix_time)
    return dt.strftime(time_format)
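
Usage sketch, assuming DATE_FORMAT (defined elsewhere) is something like '%Y-%m-%d %H:%M:%S':

print(ntp_to_string(3600000000.0, time_format='%Y-%m-%d %H:%M:%S'))  # 2014-01-29 16:00:00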
Example #17
    def test_set_cp3_mode_2_correct_datetime_set(self):
        # Arrange
        pck = CP3Package(ntp_pck=init_ntp_pck())

        # Act
        pck.set_cp3_mode_2()

        # Assert
        time = datetime.fromtimestamp(ntplib.ntp_to_system_time(pck.ntp().sent)).year
        self.assertEqual(time, 2000)
Example #18
    def resync(self) -> float:
        """Re-synchronize with the amplifier

        Returns
        -------
        The current NTP epoch time
        """
        t = (datetime.now(timezone.utc) - unix_epoch).total_seconds()
        data = system_to_ntp_time(t)
        self._mstime = self._command('NTPReturnClock', data)
        # TODO: remove this debug info
        print('Sent local time: ' + format_time(t))
        amptime = ntp_to_system_time(self._mstime)
        print('Received amp time: ' + format_time(amptime))
        return self._mstime
Example #19
    def test_helpers(self):
        """Helper methods tests."""
        client = ntplib.NTPClient()

        time.sleep(self.POLL_DELAY)
        info = client.request(self.NTP_SERVER)

        self.assertEqual(int(info.tx_time), ntplib.ntp_to_system_time(
                         ntplib.system_to_ntp_time(int(info.tx_time))))

        self.assertTrue(isinstance(ntplib.leap_to_text(info.leap), str))
        self.assertTrue(isinstance(ntplib.mode_to_text(info.mode), str))
        self.assertTrue(isinstance(ntplib.stratum_to_text(info.stratum), str))
        self.assertTrue(isinstance(ntplib.ref_id_to_text(info.ref_id,
                                                         info.stratum), str))
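
The round-trip assertion holds because the two helpers apply the same constant offset in opposite directions; a server-free sketch:

import ntplib

t = 1391011200  # Unix seconds
assert ntplib.ntp_to_system_time(ntplib.system_to_ntp_time(t)) == t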
Example #20
def flatten(particle):
    for each in particle.get('values', []):
        particle[each['value_id']] = each['value']
    del (particle['values'])
    del (particle['quality_flag'])
    del (particle['stream_name'])
    del (particle['pkt_format_id'])
    del (particle['pkt_version'])
    particle['time'] = particle.get(particle.get('preferred_timestamp', {}))
    particle['ingestion_timestamp'] = ntplib.system_to_ntp_time(time.time())
    ts = ntplib.ntp_to_system_time(particle['time'])
    ts = time.gmtime(ts)
    particle['refdesig'] = 'TEST'
    particle['year'] = ts.tm_year
    particle['jday'] = ts.tm_yday
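
A hedged sketch of the particle shape flatten() expects (the field names are inferred from the code above; the values are hypothetical):

import time
import ntplib

particle = {
    'values': [{'value_id': 'port_timestamp', 'value': 3600000000.0}],  # hypothetical sample
    'quality_flag': 'ok',
    'stream_name': 'raw',
    'pkt_format_id': 'JSON_Data',
    'pkt_version': 1,
    'preferred_timestamp': 'port_timestamp',
}
flatten(particle)                          # mutates in place, returns None
print(particle['year'], particle['jday'])  # 2014 29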
Example #23
    def execute(input=None,
                context=None,
                config=None,
                params=None,
                state=None):

        stream_definition_id = params

        #init stuff
        var_tuple = []
        data_description = []
        data_table_content = []
        gdt_allowed_numerical_types = [
            'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
            'uint64', 'float32', 'float64', 'str'
        ]

        rdt = RecordDictionaryTool.load_from_granule(input)
        data_description = []

        if stream_definition_id is None:
            log.error(
                "GoogleDT transform: Need an output stream definition to process graphs"
            )
            return None

        fields = []
        if config and config['parameters']:
            fields = config['parameters']
        else:
            fields = rdt.fields

        # if time was null or missing, do not process
        if 'time' not in rdt: return None
        if rdt['time'] is None:
            return None

        time_fill_value = 0.0  # should be derived from the granule's param dict.
        data_description.append(('time', 'number', 'time'))

        for field in fields:
            if field == 'time':
                continue

            # If a config block was passed, consider only the params listed in it
            if config and config['parameters']:
                if not field in config['parameters']:
                    continue

            # only consider fields which are supposed to be numbers.
            if (rdt[field] != None) and (rdt[field].dtype
                                         not in gdt_allowed_numerical_types):
                continue

            data_description.append((field, 'number', field))

        for i in xrange(len(rdt)):
            varTuple = []

            # Time goes first; skip the row if its time is the fill value. Retrieval returns 0 secs for malformed entries
            if rdt['time'][i] == time_fill_value:
                continue
            # convert timestamp from instrument to a UNIX timestamp since that's what Google DT expects
            varTuple.append(ntplib.ntp_to_system_time(rdt['time'][i]))

            for dd in data_description:
                field = dd[0]
                # ignore time since its been already added
                if field == None or field == 'time':
                    continue

                if rdt[field] == None or rdt[field][i] == None:
                    varTuple.append(0.0)
                else:
                    varTuple.append(rdt[field][i])

            # Append the tuples to the data table
            if len(varTuple) > 0:
                data_table_content.append(varTuple)

        out_rdt = RecordDictionaryTool(
            stream_definition_id=stream_definition_id)

        # Prepare granule content
        out_dict = {
            "viz_product_type": "google_dt",
            "data_description": data_description,
            "data_content": data_table_content
        }

        out_rdt["google_dt_components"] = np.array([out_dict])

        log.debug('Google DT transform: Sending a granule')

        out_granule = out_rdt.to_granule()

        return out_granule
Example #24
    def _process_visualization_message(self, messages):

        gdt_description = None
        gdt_content = []
        viz_product_type = ''

        for message in messages:

            if message == None:
                continue

            message_data = message.body

            if isinstance(message_data,Granule):

                rdt = RecordDictionaryTool.load_from_granule(message_data)
                gdt_components = get_safe(rdt, 'google_dt_components')

                # IF this granule does not contain google dt, skip
                if gdt_components is None:
                    continue

                gdt_component = gdt_components[0]
                viz_product_type = gdt_component['viz_product_type']

                # Process Google DataTable messages
                if viz_product_type == 'google_dt':

                    # If the data description is being put together for the first time,
                    # switch the time format from float to datetime
                    if (gdt_description == None):
                        temp_gdt_description = gdt_component['data_description']
                        gdt_description = [('time', 'datetime', 'time')]

                        for idx in range(1,len(temp_gdt_description)):
                            # for some weird reason need to force convert to tuples
                            temp_arr = temp_gdt_description[idx]
                            if temp_arr != None and temp_arr[0] != 'time':
                                gdt_description.append((temp_arr[0], temp_arr[1], temp_arr[2]))

                    # append all content to one big array
                    temp_gdt_content = gdt_component['data_content']
                    for tempTuple in temp_gdt_content:
                        # sometimes there are inexplicable empty tuples in the content. Drop them
                        if tempTuple == [] or len(tempTuple) == 0:
                            continue

                        varTuple = []

                        # Adjust the NTP timestamp from instruments to a standard datetime
                        #varTuple.append(datetime.fromtimestamp(tempTuple[0]))
                        varTuple.append(datetime.fromtimestamp(ntplib.ntp_to_system_time(tempTuple[0])))

                        for idx in range(1,len(tempTuple)):
                            # some silly numpy format won't go away so need to cast numbers to floats
                            if(gdt_description[idx][1] == 'number'):
                                varTuple.append((float)(tempTuple[idx]))
                            else:
                                varTuple.append(tempTuple[idx])

                        gdt_content.append(varTuple)


                #TODO - what to do if this is not a valid visualization message?


        # Now that all the messages have been parsed, any last processing should be done here
        if viz_product_type == "google_dt":
            # Using the description and content, build the google data table
            gdt = gviz_api.DataTable(gdt_description)
            gdt.LoadData(gdt_content)

            return gdt.ToJSonResponse()

        return None
Example #25
    def render_graphs(cls,
                      graph_data,
                      stream_definition_id,
                      fileName=None,
                      resolution=None):
        # init Matplotlib with passed parameters
        x_res = y_res = 100  # some default in case nothing is provided
        if resolution:
            x_res, y_res = resolution.split('x')
            x_res = int(x_res)
            y_res = int(y_res)
        fig = Figure(figsize=(x_res / 100, y_res / 100), dpi=100, frameon=True)
        ax = fig.add_subplot(111)
        canvas = FigureCanvas(fig)
        imgInMem = StringIO.StringIO()

        # Guess xticks for number of labels along x-axis. Place a label every 100 pixels
        ax.locator_params(x_res / 100)

        # If there's no data, wait
        # For the simple case of testing, lets plot all time variant variables one at a time
        xAxisVar = 'time'

        # Prepare the set of y axis variables that will be plotted. This needs to be smarter and passed as
        # config variable to the transform
        yAxisVars = []
        for varName, varData in graph_data.iteritems():
            if varName == 'time' or varName == 'height' or varName == 'longitude' or varName == 'latitude':
                continue
            yAxisVars.append(varName)

        # Do an error check for incorrect time values. Ignore all time == fill_values
        time_fill_value = 0.0
        clean_data_flag = False
        while not clean_data_flag:
            clean_data_flag = True
            # Go through the data and see if we can find empty time entries. Delete it along
            # with all corresponding variables
            for idx in xrange(len(graph_data[xAxisVar])):
                if graph_data[xAxisVar][idx] == time_fill_value:
                    graph_data[xAxisVar].pop(idx)
                    for varName in yAxisVars:
                        try:
                            graph_data[varName].pop(idx)
                        except IndexError:
                            # Do nothing really. Must be a malformed granule
                            pass
                    clean_data_flag = False
                    break

        #xAxisFloatData = graph_data[xAxisVar]
        xAxisFloatData = [
            datetime.fromtimestamp(ntplib.ntp_to_system_time(t))
            for t in graph_data[xAxisVar]
        ]
        #print " >>>>>>>>>>>>>>>> xAxisFloatData : ", xAxisFloatData

        idx = 0
        for varName in yAxisVars:
            yAxisFloatData = graph_data[varName]

            # Generate the plot
            ax.plot(xAxisFloatData,
                    yAxisFloatData,
                    cls.line_style(idx),
                    label=varName)
            idx += 1

        yAxisLabel = ""
        # generate a filename for the output image
        for varName in yAxisVars:
            if yAxisLabel:
                yAxisLabel = yAxisLabel + "-" + varName
            else:
                yAxisLabel = varName

        if not fileName:
            fileName = yAxisLabel + '_vs_' + xAxisVar

        fileName = fileName + "." + mpl_output_image_format

        # Choose a small font for the legend
        legend_font_prop = FontProperties()
        legend_font_prop.set_size('small')
        ax.set_xlabel(xAxisVar)
        ax.set_ylabel(yAxisLabel)
        ax.set_title(yAxisLabel + ' vs ' + xAxisVar)
        ax.set_autoscale_on(False)
        ax.legend(loc='upper left',
                  ncol=3,
                  fancybox=True,
                  shadow=True,
                  prop=legend_font_prop)

        # Date formatting
        # matplotlib date format object
        #hfmt = dates.DateFormatter('%m/%d %H:%M')
        #ax.xaxis.set_major_locator(dates.MinuteLocator())
        #ax.xaxis.set_major_formatter(hfmt)

        # Save the figure to the in memory file
        canvas.print_figure(imgInMem, format=mpl_output_image_format)
        imgInMem.seek(0)

        # Create output dictionary from the param dict
        out_rdt = RecordDictionaryTool(
            stream_definition_id=stream_definition_id)

        # Prepare granule content
        #out_dict = {}
        out_rdt["viz_product_type"] = ["matplotlib_graphs"]
        out_rdt["image_obj"] = [imgInMem.getvalue()]
        out_rdt["image_name"] = [fileName]
        out_rdt["content_type"] = ["image/png"]

        #print " >>>>>>>>>> OUT_IMAGE_NAME : ", out_rdt['image_name']

        #out_rdt["graph_image_param_dict"] = np.array([out_dict])
        return out_rdt.to_granule()
Example #26
    def execute(input=None, context=None, config=None, params=None, state=None):

        stream_definition_id = params

        #init stuff
        data_description = []
        data_table_content = []
        gdt_allowed_numerical_types = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
                                       'uint64', 'float32', 'float64','str']
        # TODO : Move this in to container parameter
        default_precision = 5

        rdt = RecordDictionaryTool.load_from_granule(input)


        data_description = []
        # Build a local precisions and fill value dictionary to use for parsing data correctly

        precisions = {}
        fill_values = {}
        for field in rdt.fields:
            precision_str = rdt.context(field).precision
            if not precision_str:
                precisions[field] = default_precision
            else:
                try:
                    precisions[field] = int(precision_str)
                except ValueError:
                    precisions[field] = default_precision


            fill_values[field] = rdt.fill_value(field)

        if stream_definition_id is None:
            log.error("GoogleDT transform: Need an output stream definition to process graphs")
            return None

        fields = []
        fields = rdt.fields


        # if time was null or missing, do not process
        if 'time' not in rdt: return None
        if rdt['time'] is None:
            return None

        time_fill_value = 0.0 # should be derived from the granule's param dict.
        data_description.append(('time','number','time'))

        import re
        for field in fields:

            if field == rdt.temporal_parameter:
                continue

            # If a config block was passed, consider only the params listed in it
            if config and 'parameters' in config and len(config['parameters']) > 0:
                if field not in config['parameters']:
                    log.info("Skipping %s since it was not present in the list of allowed parameters", field)
                    continue

            # only consider fields which are allowed.
            if rdt[field] == None:
                continue

            # Check if visibility is false (system generated params)
            if hasattr(rdt.context(field),'visible') and not rdt.context(field).visible:
                continue

            # If it's a QC parameter ignore it
            if field.endswith('_qc'):
                continue

            # Handle string type or if its an unknown type, convert to string
            context = rdt.context(field)
            if (rdt[field].dtype == 'string' or rdt[field].dtype not in gdt_allowed_numerical_types):
                data_description.append((field, 'string', field ))
            elif (isinstance(context.param_type, ArrayType) or isinstance(context.param_type,ParameterFunctionType)) and len(rdt[field].shape)>1:
                for i in xrange(rdt[field].shape[1]):
                    data_description.append(('%s[%s]' % (field,i), 'number', '%s[%s]' % (field,i), {'precision':str(precisions[field])}))
            else:
                data_description.append((field, 'number', field, {'precision':str(precisions[field])} ))


        for i in xrange(len(rdt)):
            varTuple = []

            # Time goes first; skip the row if its time is the fill value. Retrieval returns 0 secs for malformed entries
            if rdt['time'][i] == time_fill_value:
                continue
            # convert timestamp from instrument to a UNIX timestamp since that's what Google DT expects
            varTuple.append(ntplib.ntp_to_system_time(rdt['time'][i]))

            for dd in data_description:
                field = dd[0]
                field_type = dd[1]
                # ignore time since its been already added
                if field == None or field == 'time':
                    continue

                if re.match(r'.*\[\d+\]', field):
                    field, j = re.match(r'(.*)\[(\d+)\]', field).groups()
                    j = int(j)
                    varTuple.append(float(rdt[field][i][j]))
                elif rdt[field] == None or rdt[field][i] == None:
                    varTuple.append(None)
                else:
                    if(field_type == 'number'):
                        if rdt[field][i] == None or rdt[field][i] == fill_values[field]:
                            varTuple.append(None)
                        else:
                            # Adjust float for precision
                            if (precisions[field] == None):
                                varTuple.append(float(rdt[field][i]))
                            else:
                                varTuple.append(round(float(rdt[field][i]), precisions[field]))

                    # if field type is string, there are two possibilities. Either it really is a string or
                    # its an object that needs to be converted to string.
                    if(field_type == 'string'):
                        varTuple.append(str(rdt[field][i]))

            # Append the tuples to the data table
            if len(varTuple) > 0:
                data_table_content.append(varTuple)

        out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        # Prepare granule content
        out_dict = {"viz_product_type" : "google_dt",
                    "data_description" : data_description,
                    "data_content" : data_table_content}

        out_rdt["google_dt_components"] = np.array([out_dict])
        out_rdt["viz_timestamp"] = TimeUtils.ts_to_units(rdt.context(rdt.temporal_parameter).uom, time.time())

        log.debug('Google DT transform: Sending a granule')
        out_granule = out_rdt.to_granule()

        return out_granule
Example #27
    def render_graphs(cls, rdt, graph_data, stream_definition_id, fileName=None, resolution=None, time_field='time'):
        # init Matplotlib with passed parameters
        x_res = y_res = 100 # some default in case nothing is provided
        if resolution:
            x_res, y_res = resolution.split('x')
            x_res = int(x_res)
            y_res = int(y_res)
        fig = Figure(figsize=(x_res / 100, y_res / 100), dpi=100, frameon=True)
        ax = fig.add_subplot(111)
        canvas = FigureCanvas(fig)
        imgInMem = StringIO.StringIO()

        # Guess xticks for number of labels along x-axis. Place a label every 100 pixels
        ax.locator_params(x_res/100)

        # If there's no data, wait
        # For the simple case of testing, lets plot all time variant variables one at a time
        xAxisVar = time_field

        # Prepare the set of y axis variables that will be plotted. This needs to be smarter and passed as
        # config variable to the transform
        yAxisVars = []
        for varName, varData in graph_data.iteritems():
            if varName == time_field or varName == 'height' or varName == 'longitude' or varName == 'latitude':
                continue
            yAxisVars.append(varName)


        # Do an error check for incorrect time values. Ignore all time == fill_values
        time_fill_value = 0.0
        clean_data_flag = False
        while not clean_data_flag:
            clean_data_flag = True
            # Go through the data and see if we can find empty time entries. Delete it along
            # with all corresponding variables
            for idx in xrange(len(graph_data[xAxisVar])):
                if graph_data[xAxisVar][idx] == time_fill_value:
                    graph_data[xAxisVar].pop(idx)
                    for varName in yAxisVars:
                        try:
                            graph_data[varName].pop(idx)
                        except IndexError:
                            # Do nothing really. Must be a malformed granule
                            pass
                    clean_data_flag = False
                    break

        #xAxisFloatData = graph_data[xAxisVar]
        xAxisFloatData = [datetime.fromtimestamp(ntplib.ntp_to_system_time(t)) for t in graph_data[xAxisVar]]
        #print " >>>>>>>>>>>>>>>> xAxisFloatData : ", xAxisFloatData

        idx = 0
        for varName in yAxisVars:
            yAxisFloatData = graph_data[varName]

            # Generate the plot
            ax.plot(xAxisFloatData, yAxisFloatData, cls.line_style(idx), label=varName)
            idx += 1

        yAxisLabel = ""
        # generate a filename for the output image
        for varName in yAxisVars:
            if yAxisLabel:
                yAxisLabel = yAxisLabel + "-" + varName
            else:
                yAxisLabel = varName

        if not fileName:
            fileName = yAxisLabel + '_vs_' + xAxisVar

        fileName = fileName + "." + mpl_output_image_format

        # Choose a small font for the legend
        legend_font_prop = FontProperties()
        legend_font_prop.set_size('small')
        ax.set_xlabel(xAxisVar)
        ax.set_ylabel(yAxisLabel)
        ax.set_title(yAxisLabel + ' vs ' + xAxisVar)
        ax.set_autoscale_on(False)
        ax.legend(loc='upper left', ncol=3, fancybox=True, shadow=True, prop=legend_font_prop)

        # Date formatting
        # matplotlib date format object
        #hfmt = dates.DateFormatter('%m/%d %H:%M')
        #ax.xaxis.set_major_locator(dates.MinuteLocator())
        #ax.xaxis.set_major_formatter(hfmt)

        # Save the figure to the in memory file
        canvas.print_figure(imgInMem, format=mpl_output_image_format)
        imgInMem.seek(0)

        # Create output dictionary from the param dict
        out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        # Prepare granule content
        #out_dict = {}
        out_rdt["viz_product_type"] = ["matplotlib_graphs"]
        out_rdt["image_obj"] = [imgInMem.getvalue()]
        out_rdt["image_name"] = [fileName]
        out_rdt["content_type"] = ["image/png"]

        #print " >>>>>>>>>> OUT_IMAGE_NAME : ", out_rdt['image_name']
        #out_rdt["graph_image_param_dict"] = np.array([out_dict])
        out_rdt["viz_timestamp"] = TimeUtils.ts_to_units(rdt.context(rdt.temporal_parameter).uom, time.time())

        return out_rdt.to_granule()
Example #28
    def execute(input=None,
                context=None,
                config=None,
                params=None,
                state=None):

        stream_definition_id = params

        #init stuff
        rdt_for_nones = {}
        data_table_content = []
        gdt_allowed_numerical_types = [
            'int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
            'uint64', 'float32', 'float64', 'str'
        ]
        # TODO : Move this in to container parameter
        default_precision = 5

        rdt = RecordDictionaryTool.load_from_granule(input)

        data_description = []
        # Build a local precisions and fill value dictionary to use for parsing data correctly

        precisions = {}
        fill_values = {}
        for field in rdt.fields:
            precision_str = rdt.context(field).precision
            if not precision_str:
                precisions[field] = default_precision
            else:
                try:
                    precisions[field] = int(precision_str)
                except ValueError:
                    precisions[field] = default_precision

            fill_values[field] = rdt.fill_value(field)

        if stream_definition_id is None:
            log.error(
                "GoogleDT transform: Need an output stream definition to process graphs"
            )
            return None

        fields = []
        fields = rdt.fields

        # Ascertain temporal field. Use 'time' as backup
        time_field = rdt.temporal_parameter or 'time'
        #if 'time' not in rdt: return None
        if rdt[time_field] is None:
            return None

        #time_fill_value = 0.0 # should be derived from the granule's param dict.
        time_fill_value = fill_values[
            time_field]  # should be derived from the granule's param dict.
        total_num_of_records = len(rdt[time_field])
        data_description.append(('time', 'number', 'time'))

        ###### DEBUG ##########
        #for field in fields:
        #    if hasattr(rdt.context(field),'visible'):
        #        print "  >>>>>>>> '", field, "' [visible = ", rdt.context(field).visible,"] : ", rdt[field]
        #    else:
        #        print "  >>>>>>>> '", field, "' [visible = NOT SPECIFIED] : ", rdt[field]

        import re
        for field in fields:

            if field == time_field:
                continue

            # If a config block was passed, consider only the params listed in it
            if config and 'parameters' in config and len(
                    config['parameters']) > 0:
                if field not in config['parameters']:
                    log.info(
                        "Skipping %s since it was not present in the list of allowed parameters",
                        field)
                    continue

            # If the value is none, assign it a small one fill_value array for now to generate description,
            # Actual array of fill_values will be assigned later
            rdt_field = rdt[field]
            if rdt_field == None:
                rdt_for_nones[field] = np.array([fill_values[field]] *
                                                total_num_of_records)
                rdt_field = rdt_for_nones[field]

            # Check if visibility is false (system generated params) or not specified explicitly
            if hasattr(rdt.context(field),
                       'visible') and not rdt.context(field).visible:
                continue

            # If it's a QC parameter ignore it
            if field.endswith('_qc'):
                continue

            # Handle string type or if its an unknown type, convert to string
            context = rdt.context(field)
            if (rdt_field.dtype == 'string'
                    or rdt_field.dtype not in gdt_allowed_numerical_types):
                data_description.append((field, 'string', field))
            elif (isinstance(context.param_type, ArrayType) or isinstance(
                    context.param_type,
                    ParameterFunctionType)) and len(rdt_field.shape) > 1:
                for i in xrange(rdt_field.shape[1]):
                    data_description.append(('%s[%s]' % (field, i), 'number',
                                             '%s[%s]' % (field, i), {
                                                 'precision':
                                                 str(precisions[field])
                                             }))
            else:
                data_description.append((field, 'number', field, {
                    'precision': str(precisions[field])
                }))

        for i in xrange(len(rdt)):
            varTuple = []

            # Time goes first; skip the row if its time is the fill value. Retrieval returns 0 secs for malformed entries
            if rdt[time_field][i] == time_fill_value:
                continue
            # convert timestamp from instrument to a UNIX timestamp since that's what Google DT expects
            varTuple.append(ntplib.ntp_to_system_time(rdt[time_field][i]))

            for dd in data_description:
                field = dd[0]
                field_type = dd[1]
                # ignore time since its been already added
                if field == None or field == time_field:
                    continue
                """
                rdt_field = rdt[field]
                if rdt_field == None:
                    rdt_field = np.array([fill_values[field]] * total_num_of_records)
                """

                if re.match(r'.*\[\d+\]', field):
                    field, j = re.match(r'(.*)\[(\d+)\]', field).groups()
                    j = int(j)
                    varTuple.append(float(rdt[field][i][j]))
                else:
                    if (field_type == 'number'):
                        if rdt[field] == None or rdt[field][i] == fill_values[
                                field]:
                            varTuple.append(None)
                        else:
                            # Adjust float for precision
                            if (precisions[field] == None):
                                varTuple.append(float(rdt[field][i]))
                            else:
                                varTuple.append(
                                    round(float(rdt[field][i]),
                                          precisions[field]))

                    # if field type is string, there are two possibilities. Either it really is a string or
                    # its an object that needs to be converted to string.
                    if (field_type == 'string'):
                        if rdt[field] == None or rdt[field][i] == fill_values[
                                field]:
                            varTuple.append(None)
                        else:
                            varTuple.append(str(rdt[field][i]))

            # Append the tuples to the data table
            if len(varTuple) > 0:
                data_table_content.append(varTuple)

        out_rdt = RecordDictionaryTool(
            stream_definition_id=stream_definition_id)

        # Prepare granule content
        out_dict = {
            "viz_product_type": "google_dt",
            "data_description": data_description,
            "data_content": data_table_content
        }

        out_rdt["google_dt_components"] = np.array([out_dict])
        out_rdt["viz_timestamp"] = TimeUtils.ts_to_units(
            rdt.context(time_field).uom, time.time())

        log.debug('Google DT transform: Sending a granule')
        out_granule = out_rdt.to_granule()

        return out_granule
Example #29
    def execute(input=None, context=None, config=None, params=None, state=None):

        stream_definition_id = params

        #init stuff
        var_tuple = []
        data_description = []
        data_table_content = []
        gdt_allowed_numerical_types = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
                                       'uint64', 'float32', 'float64','str']

        rdt = RecordDictionaryTool.load_from_granule(input)
        data_description = []

        if stream_definition_id is None:
            log.error("GoogleDT transform: Need an output stream definition to process graphs")
            return None

        fields = []
        if config and config['parameters']:
            fields = config['parameters']
        else:
            fields = rdt.fields

        # if time was null or missing, do not process
        if 'time' not in rdt: return None
        if rdt['time'] is None:
            return None

        time_fill_value = 0.0 # should be derived from the granule's param dict.
        data_description.append(('time','number','time'))

        for field in fields:
            if field == 'time':
                continue

            # If a config block was passed, consider only the params listed in it
            if config and config['parameters']:
                if not field in config['parameters']:
                    continue

            # only consider fields which are supposed to be numbers.
            if (rdt[field] != None) and (rdt[field].dtype not in gdt_allowed_numerical_types):
                continue

            data_description.append((field, 'number', field))

        for i in xrange(len(rdt)):
            varTuple = []

            # Time goes first; skip the row if its time is the fill value. Retrieval returns 0 secs for malformed entries
            if rdt['time'][i] == time_fill_value:
                continue
            # convert timestamp from instrument to a UNIX timestamp since that's what Google DT expects
            varTuple.append(ntplib.ntp_to_system_time(rdt['time'][i]))

            for dd in data_description:
                field = dd[0]
                # ignore time since its been already added
                if field == None or field == 'time':
                    continue

                if rdt[field] == None or rdt[field][i] == None:
                    varTuple.append(0.0)
                else:
                    varTuple.append(rdt[field][i])

            # Append the tuples to the data table
            if len(varTuple) > 0:
                data_table_content.append(varTuple)

        out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        # Prepare granule content
        out_dict = {"viz_product_type" : "google_dt",
                    "data_description" : data_description,
                    "data_content" : data_table_content}

        out_rdt["google_dt_components"] = np.array([out_dict])

        log.debug('Google DT transform: Sending a granule')

        out_granule = out_rdt.to_granule()

        return out_granule
Example #30
    def execute(input=None, context=None, config=None, params=None, state=None):

        stream_definition_id = params

        #init stuff
        var_tuple = []
        data_description = []
        data_table_content = []
        gdt_allowed_numerical_types = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
                                       'uint64', 'float32', 'float64','str']
        # TODO : Move this in to container parameter
        default_precision = 5

        rdt = RecordDictionaryTool.load_from_granule(input)
        data_description = []
        # Build a local precisions and fill value dictionary to use for parsing data correctly

        precisions = {}
        fill_values = {}
        for field in rdt._pdict:
            _precision_str = rdt._pdict.get_context(field).precision
            if _precision_str == None or _precision_str == '':
                precisions[field] = default_precision
            else:
                precisions[field] = int(_precision_str)

            _fv_str = rdt._pdict.get_context(field).fill_value
            if _fv_str == None or _fv_str == '':
                fill_values[field] = None
            else:
                fill_values[field] = int(_fv_str)


        if stream_definition_id is None:
            log.error("GoogleDT transform: Need an output stream definition to process graphs")
            return None

        fields = []
        if config and config['parameters']:
            fields = config['parameters']
        else:
            fields = rdt.fields

        # if time was null or missing, do not process
        if 'time' not in rdt: return None
        if rdt['time'] is None:
            return None

        time_fill_value = 0.0 # should be derived from the granule's param dict.
        data_description.append(('time','number','time'))

        for field in fields:
            if field == 'time':
                continue

            # If a config block was passed, consider only the params listed in it
            if config and config['parameters']:
                if not field in config['parameters']:
                    continue

            # only consider fields which are allowed.
            if rdt[field] == None:
                continue
            if (rdt[field] != None) and (rdt[field].dtype not in gdt_allowed_numerical_types):
                print ">>>>>>>>>>>>>> DONT KNOW HOW TO HANDLE : ", rdt[field].dtype
                continue

            if (rdt[field].dtype == 'string'):
                data_description.append((field, 'string', field ))
            else:
                data_description.append((field, 'number', field, {'precision':str(precisions[field])} ))

        for i in xrange(len(rdt)):
            varTuple = []

            # Time goes first; skip the row if its time is the fill value. Retrieval returns 0 secs for malformed entries
            if rdt['time'][i] == time_fill_value:
                continue
            # convert timestamp from instrument to a UNIX timestamp since that's what Google DT expects
            varTuple.append(ntplib.ntp_to_system_time(rdt['time'][i]))

            for dd in data_description:
                field = dd[0]
                field_type = dd[1]
                # ignore time since its been already added
                if field == None or field == 'time':
                    continue

                if rdt[field] == None or rdt[field][i] == None:
                    varTuple.append(None)
                else:
                    if(field_type == 'number'):
                        if rdt[field][i] == None or rdt[field][i] == fill_values[field]:
                            varTuple.append(None)
                        else:
                            # Adjust float for precision
                            if (precisions[field] == None):
                                varTuple.append(float(rdt[field][i]))
                            else:
                                varTuple.append(round(float(rdt[field][i]), precisions[field]))
                    else:
                        varTuple.append(rdt[field][i])

            # Append the tuples to the data table
            if len(varTuple) > 0:
                data_table_content.append(varTuple)

        out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        # Prepare granule content
        out_dict = {"viz_product_type" : "google_dt",
                    "data_description" : data_description,
                    "data_content" : data_table_content}

        out_rdt["google_dt_components"] = np.array([out_dict])

        log.debug('Google DT transform: Sending a granule')

        out_granule = out_rdt.to_granule()

        return out_granule
Example #31
    def execute(input=None, context=None, config=None, params=None, state=None):

        stream_definition_id = params

        #init stuff
        rdt_for_nones = {}
        hc_data = []
        normalized_ts = []
        hc_allowed_numerical_types = ['int8', 'int16', 'int32', 'int64', 'uint8', 'uint16', 'uint32',
                                       'uint64', 'float32', 'float64','str']
        # TODO : Move this in to container parameter
        default_precision = 5

        rdt = RecordDictionaryTool.load_from_granule(input)
        # Build a local precisions and fill value dictionary to use for parsing data correctly

        precisions = {}
        fill_values = {}
        for field in rdt.fields:
            precision_str = rdt.context(field).precision
            if not precision_str:
                precisions[field] = default_precision
            else:
                try:
                    precisions[field] = int(precision_str)
                except ValueError:
                    precisions[field] = default_precision


            fill_values[field] = rdt.fill_value(field)

        if stream_definition_id is None:
            log.error("HighCharts transform: Need an output stream definition to process graphs")
            return None

        fields = []
        fields = rdt.fields

        # Ascertain temporal field. Use 'time' as backup
        time_field = rdt.temporal_parameter or 'time'
        #if 'time' not in rdt: return None
        if rdt[time_field] is None:
            return None

        #time_fill_value = 0.0 # should be derived from the granule's param dict.
        time_fill_value = fill_values[time_field] # should be derived from the granule's param dict.
        total_num_of_records = len(rdt[time_field])

        # convert timestamps from ntp to system
        count = 0
        normalized_ts = [None] * total_num_of_records
        for ts in rdt[time_field]:
            if ts == time_fill_value:
                normalized_ts[count] = time_fill_value
            else:
                normalized_ts[count] = float(ntplib.ntp_to_system_time(ts))
            count += 1

        ###### DEBUG ##########
        #for field in fields:
        #    if hasattr(rdt.context(field),'visible'):
        #        print "  >>>>>>>> '", field, "' [visible = ", rdt.context(field).visible,"] : ", rdt[field]
        #    else:
        #        print "  >>>>>>>> '", field, "' [visible = NOT SPECIFIED] : ", rdt[field]

        # Convert the fields in to HC series format
        import re
        for field in fields:
            field_precision = precisions[field]

            if field == time_field:
                continue

            # If a config block was passed, consider only the params listed in it
            if config and 'parameters' in config and len(config['parameters']) > 0:
                if field not in config['parameters']:
                    log.info("Skipping %s since it was not present in the list of allowed parameters", field)
                    continue

            # If the value is none, assign it a small one fill_value array for now to generate description,
            # Actual array of fill_values will be assigned later
            rdt_field = rdt[field]
            if rdt_field == None:
                rdt_for_nones[field] = np.array([fill_values[field]] * total_num_of_records)
                #rdt_for_nones[field] = [fill_values[field]] * total_num_of_records
                rdt_field = rdt_for_nones[field]

            # Check if visibility is false (system generated params) or not specified explicitly
            if hasattr(rdt.context(field),'visible') and not rdt.context(field).visible:
                continue

            # If it's a QC parameter ignore it
            if field.endswith('_qc'):
                continue

            # NOTE: The following lines branch heavily in order to pull the major decisions outside the
            #       primary loops responsible for arranging data in the structures to be sent to the charts.
            #       This is better than making those decisions once per record entry.

            # Handle arrays by splitting them into individual parameters
            context = rdt.context(field)
            if (isinstance(context.param_type, ArrayType) or isinstance(context.param_type,ParameterFunctionType)) and len(rdt_field.shape)>1:
                if (rdt_field.dtype == 'string' or rdt_field.dtype not in hc_allowed_numerical_types):
                    for i in xrange(rdt_field.shape[1]):
                        series = {}
                        series["name"] = field + "[" + str(i) + "]"
                        series["visible"] = False
                        series["data"] = VizTransformHighChartsAlgorithm.form_series_data_str(normalized_ts, rdt_field, i, time_fill_value, fill_values[field])
                        hc_data.append(series)

                else: # Assume its a float or number
                    for i in xrange(rdt_field.shape[1]):
                        series = {}
                        series["name"] = field + "[" + str(i) + "]"
                        series["visible"] = True
                        series["tooltip"] = {"valueDecimals":field_precision}
                        series["data"] = VizTransformHighChartsAlgorithm.form_series_data_num(normalized_ts, rdt_field, i, time_fill_value, fill_values[field], field_precision)
                        hc_data.append(series)
            else:
                if (rdt_field.dtype == 'string' or rdt_field.dtype not in hc_allowed_numerical_types):
                    series = {}
                    series["name"] = field
                    series["visible"] = False
                    series["data"] = VizTransformHighChartsAlgorithm.form_series_data_str(normalized_ts, rdt_field, None, time_fill_value, fill_values[field])

                else:
                    series = {}
                    series["name"] = field
                    series["tooltip"] = {"valueDecimals":field_precision}
                    series["visible"] = True
                    series["data"] = VizTransformHighChartsAlgorithm.form_series_data_num(normalized_ts, rdt_field, None, time_fill_value, fill_values[field], field_precision)

                # Append series to the hc data
                hc_data.append(series)

        # Prep the outgoing granule
        out_rdt = RecordDictionaryTool(stream_definition_id=stream_definition_id)

        #out_dict = {"hc_data": hc_data}
        #out_rdt["hc_data"] = np.array([out_dict])

        out_rdt["hc_data"] = [hc_data]
        out_rdt["viz_timestamp"] = TimeUtils.ts_to_units(rdt.context(time_field).uom, time.time())

        log.debug('HighCharts Transform: Sending a granule')
        out_granule = out_rdt.to_granule()

        return out_granule
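
The per-record timestamp loop above could also be vectorized, since ntp_to_system_time is just a constant subtraction; a sketch assuming a numpy float array of NTP seconds and the same fill-value convention:

import numpy as np
import ntplib

def normalize_ts(ntp_times, fill_value):
    ts = np.asarray(ntp_times, dtype='float64')
    out = ts - ntplib.system_to_ntp_time(0.0)  # subtract the NTP/Unix offset
    out[ts == fill_value] = fill_value         # fill values pass through untouched
    return out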
Example #32
    def _extract_transmit_year(self) -> int:
        year = datetime.fromtimestamp(ntplib.ntp_to_system_time(self.ntp().sent)).year
        return year
Example #33
def ntp_to_datestring(ntp_time):
    try:
        ntp_time = float(ntp_time)
        return datetime.datetime.utcfromtimestamp(ntplib.ntp_to_system_time(ntp_time)).strftime("%Y-%m-%d %H:%M:%S")
    except Exception:
        return str(ntp_time)
Example #34
def ntptime_to_string(t):
    t = ntplib.ntp_to_system_time(t)
    millis = '%f' % (t-int(t))
    millis = millis[1:5]
    return time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(t)) + millis + 'Z'
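
The millis slicing keeps the dot plus three fractional digits, e.g.:

print(ntptime_to_string(3600000000.25))  # 2014-01-29T16:00:00.250Z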