def _build_parsed_values(self):
        """
        Take the velocity header data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorVelocityHeaderDataParticle: raw data =%r', self.raw_data)

        try:
            unpack_string = '<4s6sH8B20sH'
            sync, timestamp, number_of_records, noise1, noise2, noise3, _, correlation1, correlation2, correlation3, _,\
                _, cksum = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<20H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(timestamp)
            self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())

        except Exception as e:
            log.error('Error creating particle vel3d_cd_data_header, raw data: %r', self.raw_data)
            raise SampleException(e)

        result = [{VID: VectorVelocityHeaderDataParticleKey.TIMESTAMP, VAL: str(timestamp)},
                  {VID: VectorVelocityHeaderDataParticleKey.NUMBER_OF_RECORDS, VAL: number_of_records},
                  {VID: VectorVelocityHeaderDataParticleKey.NOISE1, VAL: noise1},
                  {VID: VectorVelocityHeaderDataParticleKey.NOISE2, VAL: noise2},
                  {VID: VectorVelocityHeaderDataParticleKey.NOISE3, VAL: noise3},
                  {VID: VectorVelocityHeaderDataParticleKey.CORRELATION1, VAL: correlation1},
                  {VID: VectorVelocityHeaderDataParticleKey.CORRELATION2, VAL: correlation2},
                  {VID: VectorVelocityHeaderDataParticleKey.CORRELATION3, VAL: correlation3}]

        log.debug('VectorVelocityHeaderDataParticle: particle=%s', result)
        return result
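The particle above leans on a shared validate_checksum helper. A minimal sketch of such a helper, assuming the Nortek convention that the trailing 16-bit checksum equals 0xB58C plus the sum of every preceding 16-bit word (the driver framework's real helper may differ):

import struct

def validate_checksum(fmt, raw_data, checksum_offset=-2):
    # Sketch only: `fmt` covers the 16-bit words preceding the checksum;
    # the checksum itself sits `checksum_offset` bytes from the end.
    words = struct.unpack_from(fmt, raw_data)
    expected = (0xB58C + sum(words)) & 0xFFFF
    received, = struct.unpack_from('<H', raw_data, len(raw_data) + checksum_offset)
    return expected == received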
Example 2
    def launch(self):
        """
        Launches the simulator process as indicated by _COMMAND.

        @return (rsn_oms, uri) A pair with the CIOMSSimulator instance and the
                associated URI to establish connection with it.
        """
        log.debug("[OMSim] Launching: %s", _COMMAND)

        self._process = self._spawn(_COMMAND)

        if not self._process or not self.poll():
            msg = "[OMSim] Failed to launch simulator: %s" % _COMMAND
            log.error(msg)
            raise Exception(msg)

        log.debug("[OMSim] process started, pid: %s", self.getpid())

        # give it some time to start up
        sleep(5)

        # get URI:
        uri = None
        with open("logs/rsn_oms_simulator.yml", buffering=1) as f:
            # we expect one of the first few lines to be of the form:
            # rsn_oms_simulator_uri=xxxx
            # where xxxx is the uri -- see oms_simulator_server.
            while uri is None:
                line = f.readline()
                if line.startswith("rsn_oms_simulator_uri="):
                    uri = line[len("rsn_oms_simulator_uri="):].strip()

        self._rsn_oms = CIOMSClientFactory.create_instance(uri)
        return self._rsn_oms, uri
def main():
    # Get the command line arguments
    options = docopt.docopt(__doc__)
    subsites = options.get('<subsites>')
    deployments = options.get('<deployments>')
    dates = options.get('<dates>')
    keep_temp_files = options.get('--keep')
    process_mode = options.get('--process')
    all_subsites = options.get('--all')
    zplsc_datafile = options.get('<zplsc_datafile>')

    if subsites is not None:
        subsites = subsites.split(" ")

    if deployments is not None:
        deployments = deployments.split(" ")
    else:
        deployments = []

    if dates is not None:
        dates = dates.split(" ")

    try:
        echogram_generator = ZPLSCEchogramGenerator(subsites, deployments, dates, keep_temp_files,
                                                    zplsc_datafile, process_mode, all_subsites)
        echogram_generator.generate_zplsc_echograms()
        log.info('Echogram processing completed successfully!')

    except ValueError:
        log.error('Invalid command line parameters: exiting Echogram Generator')
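docopt builds the options dictionary read above from the module docstring. A hypothetical usage block consistent with the keys this main() consumes (the real script's docstring may differ):

"""ZPLSC Echogram Generator.

Usage:
    zplsc_echogram_generator [--keep] [--process] [--all] [<subsites>] [<deployments>] [<dates>]
    zplsc_echogram_generator [--keep] <zplsc_datafile>

Options:
    --keep     Keep the temporary intermediate raw data files.
    --process  Process only the most recent deployment per subsite.
    --all      Generate echograms for every subsite in the configuration file.
"""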
    def construct_protocol(self, proto_module):
        module = importlib.import_module(proto_module)
        if hasattr(module, 'create_playback_protocol'):
            return module.create_playback_protocol(self.handle_event)

        log.error('Unable to import and create playback protocol from module: %r', module)
        sys.exit(1)
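construct_protocol only requires that the named module expose a create_playback_protocol factory. A minimal stand-in module (purely illustrative) satisfying that contract:

class PlaybackProtocol(object):
    def __init__(self, handle_event):
        self.handle_event = handle_event

def create_playback_protocol(handle_event):
    # This is the factory construct_protocol looks up via hasattr().
    return PlaybackProtocol(handle_event)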
Example 5
    def _process_packet(self):
        chunk = self._filehandle.read(1024)
        if chunk != '':
            self.buffer += chunk
            new_index = 0
            for match in self.ooi_ts_regex.finditer(self.buffer):
                payload = match.group(2)
                try:
                    packet_time = string_to_ntp_date_time(match.group(1))
                    header = PacketHeader(
                        packet_type=PacketType.FROM_INSTRUMENT,
                        payload_size=len(payload),
                        packet_time=packet_time)
                    header.set_checksum(payload)
                    packet = PlaybackPacket(payload=payload, header=header)
                    self.callback(packet)
                except ValueError:
                    log.error('Unable to extract timestamp from record: %r' %
                              match.group())
                new_index = match.end()

            if new_index > 0:
                self.buffer = self.buffer[new_index:]

            if len(self.buffer) > self.MAXBUF:
                self.buffer = self.buffer[-self.MAXBUF:]

            return True

        return False
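The loop depends on ooi_ts_regex, where group(1) is the record timestamp string and group(2) the raw payload. An illustrative pattern with that group layout, assuming records wrapped in OOI-TS markers (the archive's actual framing may differ):

import re

ooi_ts_regex = re.compile(r'<OOI-TS (.+?) TS>\r?\n(.*?)<\\OOI-TS>', re.DOTALL)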
    def get_deployment_dirs(self, subsite):
        """
        This method will determine the deployment directories for the subsite
        passed in.

        :param subsite: The subsite of the ZPLSC instrument.
        :return: deployments: The list of deployment directories.
        """

        # Generate a temporary deployment list to maintain the integrity of the
        # original list for subsequent subsite processing.
        deployments = [deployment for deployment in self.deployments]
        if not deployments:
            # Generate the subsite portion of the raw data path and get all the files in the subsite path.
            subsite_path = os.path.join(self.raw_data_dir, subsite.upper())
            deployment_dirs = self.get_dir_contents(subsite_path)

            # Generate the list of deployment numbers from the deployment directory names.
            deployment_list = [(DEPLOYMENT_DIR_MATCHER.match(ddir)).group(1)
                               for ddir in deployment_dirs if DEPLOYMENT_DIR_MATCHER.match(ddir) is not None]

            if self.process_mode:
                deployment_list = [deployment_list[-1]]

            for deployment in deployment_list:
                try:
                    deployments.append(int(deployment))

                except ValueError as ex:
                    log.error('Invalid deployment number: %s: %s', deployment, ex.message)
                    break

        return deployments
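DEPLOYMENT_DIR_MATCHER is assumed to capture the numeric portion of a deployment directory name in group(1), which the loop above converts with int(). A hypothetical pattern of that shape, assuming directories named like R00001:

import re

DEPLOYMENT_DIR_MATCHER = re.compile(r'R(\d{5})')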
    def parse_dates(echogram_dates):
        """
        Parse the date(s) passed in for proper format and convert them to a
        datetime object. Also, determine and set whether the entire month
        of echograms will be generated.

        :param echogram_dates: List of dates in string format.
        :return: parsed_dates: List of dates in the datetime object format.
        """

        parsed_dates = {}

        if echogram_dates is not None:
            for echogram_date in echogram_dates:
                date_regex = DATE_YYYY_MM_DD_REGEX_MATCHER.match(echogram_date)
                if date_regex:
                    year = int(date_regex.group(1))
                    month = int(date_regex.group(2))

                    if date_regex.lastindex == 3:
                        day = int(date_regex.group(3))
                        converted_date = date(year, month, day)

                        # Indicate this date is not an entire month.
                        parsed_dates[converted_date] = False
                    else:
                        converted_date = date(year, month, 1)

                        # No day was given: flag this date as an entire month.
                        parsed_dates[converted_date] = True

                else:
                    log.error('Incorrect date format: %s: Correct format is YYYY[-/]MM-DD or YYYY[-/]MM', echogram_date)
                    parsed_dates = False
                    break

        return parsed_dates
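DATE_YYYY_MM_DD_REGEX_MATCHER must expose the year in group(1), the month in group(2) and an optional day in group(3), so that lastindex is 3 only when a day is present. A pattern with that shape, matching the YYYY[-/]MM-DD and YYYY[-/]MM formats named in the error message:

import re

DATE_YYYY_MM_DD_REGEX_MATCHER = re.compile(r'(\d{4})[-/](\d{2})(?:[-/](\d{2}))?$')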
    def get_deployment_dirs(self, subsite):
        """
        This method will determine the deployment directories for the subsite
        passed in.

        :param subsite: The subsite of the ZPLSC instrument.
        :return: deployments: The list of deployment directories.
        """

        # Generate a temporary deployment list to maintain the integrity of the
        # original list for subsequent subsite processing.
        deployments = [deployment for deployment in self.deployments]
        if not deployments:
            # Generate the portion of the URL up to the subsite directory.
            subsite_url = os.path.join(RAW_DATA_URL, subsite.upper())

            # Get the all the deployment sub-directories under the subsite directory.
            deployment_dirs = self.get_dir_contents(subsite_url)

            # Generate the list of deployment numbers from the deployment directory names.
            deployment_list = DEPLOYMENT_DIR_MATCHER.findall(deployment_dirs)
            deployment_list.sort()

            if self.process_mode:
                deployment_list = [deployment_list[-1]]

            for deployment in deployment_list:
                try:
                    deployments.append(int(deployment))

                except ValueError as ex:
                    log.error('Invalid deployment number: %s: %s', deployment, ex.message)
                    break

        return deployments
    def aggregate_raw_data(self, date_dirs_url, data_date):
        """
        This method will retrieve the 24 1-hour files residing at the URL
        passed in for the date passed in.  It will store the files locally,
        concatenate them to one file.  It will return the file name of the
        concatenated file and the file name of the echogram that will be
        generated.

        This method will raise an exception if there is an issue creating
        the local ZPLSC Echogram directory.

        :param date_dirs_url: The URL of the raw data server where the 24 1-hour files reside.
        :param data_date: The date of the raw data for the echogram to be generated.
        :return: zplsc_24_datafile: The 24-hour concatenated raw data file
                 zplsc_echogram_file_path: The file path for the echogram to be generated.
        """

        zplsc_echogram_file_path = None

        filenames_list, raw_data_url, raw_datafile_prefix = self.get_data_filenames(date_dirs_url, data_date)
        if len(filenames_list) != 24:
            return '', None

        # Download the 24 1-hour raw data files to a temporary local directory.
        for raw_data_file in filenames_list:
            remote_raw_data_file = os.path.join(raw_data_url, raw_data_file)
            local_raw_data = os.path.join(self.temp_directory, raw_data_file)
            try:
                urllib.urlretrieve(remote_raw_data_file, local_raw_data)
            except urllib.ContentTooShortError as ex:
                log.error('Error retrieving: %s: %s', remote_raw_data_file, ex.message)
                continue

        # Concatenate the 24 1-hour raw data files to 1 24-hour raw data file and return the filename.
        zplsc_24_datafilename = self.zplsc_24_datafile_prefix + raw_datafile_prefix
        zplsc_24_datafile = os.path.join(self.temp_directory, zplsc_24_datafilename) + RAW_FILE_EXT
        os.system('cat ' + os.path.join(self.temp_directory, raw_datafile_prefix) + '*' + RAW_FILE_EXT + ' > ' +
                  zplsc_24_datafile)

        # Generate the ZPLSC Echogram filename.
        echogram_path_idx = string.find(raw_data_url, RAW_DATA_URL)
        if echogram_path_idx >= 0:
            base_directory = os.path.expanduser(self.base_echogram_directory)
            path_structure = raw_data_url[echogram_path_idx+len(RAW_DATA_URL)+1:]
            zplsc_echogram_file_path = os.path.join(base_directory, path_structure)

            # Create the ZPLSC Echogram directory structure if it doesn't exist.
            try:
                os.makedirs(zplsc_echogram_file_path)
            except OSError as ex:
                if ex.errno == errno.EEXIST and os.path.isdir(zplsc_echogram_file_path):
                    pass
                else:
                    log.error('Error creating local ZPLSC Echogram storage directory: %s', ex.message)
                    raise

        return zplsc_24_datafile, zplsc_echogram_file_path
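Shelling out to cat assumes a POSIX host and silently ignores failures. A portable sketch of the same concatenation step using shutil (not what the generator actually ships with):

import glob
import shutil

def concatenate_raw_files(hourly_glob, output_path):
    # Concatenate the 24 one-hour files in name (i.e. time) order.
    with open(output_path, 'wb') as output_file:
        for hourly_name in sorted(glob.glob(hourly_glob)):
            with open(hourly_name, 'rb') as hourly_file:
                shutil.copyfileobj(hourly_file, output_file)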
Example 15
    def _publish(self, events, headers):
        msg_headers = self._merge_headers(headers)

        now = time.time()
        try:
            publish = self.connection.ensure(self.producer, self.producer.publish, max_retries=4)
            publish(json.dumps(events), headers=msg_headers, user_id=self.username,
                    declare=[self._queue], content_type='text/plain')
            log.info('Published %d messages using KOMBU in %.2f secs with headers %r',
                     len(events), time.time() - now, msg_headers)
        except Exception as e:
            log.error('Exception attempting to publish events: %r', e)
            return events
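connection, producer and _queue are created elsewhere in the publisher. A minimal kombu setup compatible with the publish call above; the queue name and broker URL are assumptions:

from kombu import Connection, Producer, Queue

connection = Connection('amqp://guest:guest@localhost:5672//')
producer = Producer(connection.channel())
queue = Queue('particle_data', durable=True)  # passed via declare=[...] on publish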
    def input_is_valid(self, subsites, deployments, echogram_dates):
        """
        This method validates the command line parameters entered.

        :param subsites: The subsite(s) that the ZPLSC instrument(s) are attached.
        :param deployments: The command line deployment number of interest.
        :param echogram_dates: The command line dates of interest.
        :return valid_input:  Boolean indicating whether all the inputs validated.
        """

        valid_input = True

        # Get the configuration parameters.
        zplsc_config = None
        with open(ZPLSC_CONFIG_FILE, 'r') as config_file:
            try:
                zplsc_config = yaml.load(config_file)
            except yaml.YAMLError as ex:
                log.error('Error loading the configuration file: %s: %s', ZPLSC_CONFIG_FILE, ex.message)
        zplsc_subsites = zplsc_config['zplsc_subsites']

        # If no subsites were passed in, set the list of subsites to all in the config file.
        self.subsites = subsites
        if subsites is None:
            self.subsites = zplsc_subsites
            self.process_mode = True

        # Validate the subsites in the list.
        for subsite in self.subsites:
            if subsite not in zplsc_subsites:
                log.error('Subsite %s is not in the list of subsites with ZPLSC instrumentation.', subsite)
                valid_input = False
                break

        if valid_input and deployments is not None:
            self.deployments = []
            if not isinstance(deployments, types.ListType):
                deployments = [deployments]

            for deployment in deployments:
                try:
                    self.deployments.append(int(deployment))

                except ValueError as ex:
                    log.error('Invalid deployment number: %s: %s', deployment, ex.message)
                    valid_input = False
                    break

        if valid_input:
            self.echogram_dates = self.parse_dates(echogram_dates)
            if self.echogram_dates is False:
                log.error('Invalid start date: %s', echogram_dates)
                valid_input = False

        self.base_echogram_directory = zplsc_config.get('zplsc_echogram_directory', BASE_ECHOGRAM_DIRECTORY)

        return valid_input
Example 17
 def recv_evt_messages():
     """
     A looping function that monitors a ZMQ SUB socket for asynchronous
     driver events. Can be run as a thread or greenlet.
     @param driver_client The client object that launches the thread.
     """
     self.stop_event_thread = False
     while not self.stop_event_thread:
         try:
             evt = self.zmq_evt_socket.recv_pyobj(flags=zmq.NOBLOCK)
             log.debug('got event: %s' % str(evt))
             if self.evt_callback:
                 self.evt_callback(evt)
         except zmq.ZMQError:
             time.sleep(.5)
         except Exception as e:
             log.error('Driver client error reading from zmq event socket: ' + str(e))
             log.error('Driver client error type: ' + str(type(e)))
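The zmq_evt_socket polled above is a SUB socket connected to the driver's event publisher. A typical construction, with the endpoint port assumed:

import zmq

context = zmq.Context()
zmq_evt_socket = context.socket(zmq.SUB)
zmq_evt_socket.connect('tcp://localhost:5563')
zmq_evt_socket.setsockopt(zmq.SUBSCRIBE, '')  # subscribe to all events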
Example 18
 def construct_driver(self):
     """
     Attempt to import and construct the driver object based on
     configuration.
     @retval True if successful, False otherwise.
     """
     try:
         module = importlib.import_module(self.driver_module)
         driver_class = getattr(module, self.driver_class)
         self.driver = driver_class(self.send_event)
         log.info('Imported and created driver from module: %r class: %r driver: %r',
                  module, driver_class, self.driver)
         return True
     except Exception as e:
         log.error('Could not import/construct driver module %s, class %s.',
                   self.driver_module, self.driver_class)
         log.error('%s' % str(e))
         return False
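The import pattern generalizes beyond drivers: importlib.import_module plus getattr resolves a dotted module path and a class name at runtime. A standalone helper showing the same technique:

import importlib

def load_class(module_name, class_name):
    # Same dynamic lookup as construct_driver, but raising ImportError or
    # AttributeError on failure instead of returning False.
    module = importlib.import_module(module_name)
    return getattr(module, class_name)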
    def _build_parsed_values(self):
        """
        Take the velocity data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorVelocityDataParticle: raw data =%r', self.raw_data)

        try:

            unpack_string = '<2s4B2H3h6BH'

            (sync_id, analog_input2_lsb, count, pressure_msb, analog_input2_msb, pressure_lsw,
             analog_input1, velocity_beam1, velocity_beam2, velocity_beam3, amplitude_beam1,
             amplitude_beam2, amplitude_beam3, correlation_beam1, correlation_beam2,
             correlation_beam3, checksum) = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<11H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            analog_input2 = analog_input2_msb * 0x100 + analog_input2_lsb
            pressure = pressure_msb * 0x10000 + pressure_lsw

        except Exception as e:
            log.error('Error creating particle vel3d_cd_velocity_data, raw data: %r', self.raw_data)
            raise SampleException(e)

        result = [{VID: VectorVelocityDataParticleKey.ANALOG_INPUT2, VAL: analog_input2},
                  {VID: VectorVelocityDataParticleKey.COUNT, VAL: count},
                  {VID: VectorVelocityDataParticleKey.PRESSURE, VAL: pressure},
                  {VID: VectorVelocityDataParticleKey.ANALOG_INPUT1, VAL: analog_input1},
                  {VID: VectorVelocityDataParticleKey.VELOCITY_BEAM1, VAL: velocity_beam1},
                  {VID: VectorVelocityDataParticleKey.VELOCITY_BEAM2, VAL: velocity_beam2},
                  {VID: VectorVelocityDataParticleKey.VELOCITY_BEAM3, VAL: velocity_beam3},
                  {VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM1, VAL: amplitude_beam1},
                  {VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM2, VAL: amplitude_beam2},
                  {VID: VectorVelocityDataParticleKey.AMPLITUDE_BEAM3, VAL: amplitude_beam3},
                  {VID: VectorVelocityDataParticleKey.CORRELATION_BEAM1, VAL: correlation_beam1},
                  {VID: VectorVelocityDataParticleKey.CORRELATION_BEAM2, VAL: correlation_beam2},
                  {VID: VectorVelocityDataParticleKey.CORRELATION_BEAM3, VAL: correlation_beam3}]

        log.debug('VectorVelocityDataParticle: particle=%s', result)
        return result
def process_oms_request():
    """
    This is the method that is called when the OMS POSTs OMS Events to
    this registered listener at the "/" path.
    :return:
    """

    if isinstance(request.json, list):
        # Log the list of Alert & Alarm messages from the OMS Event
        for alert_alarm_dict in request.json:
            aa_publisher.enqueue(alert_alarm_dict)
            log.info('oms_alert_alarm_server: OMS_AA_MSG: %r', alert_alarm_dict)

        # Publish the list of Alert & Alarm messages to qpid
        aa_publisher.publish()

    else:
        log.error('No data in the POSTed alert/alarm OMS Event ...')

    return '', httplib.ACCEPTED
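request here is Flask's request proxy, so the handler has to be registered on the application's root path. A hypothetical registration (the application object's name is an assumption):

from flask import Flask, request

app = Flask(__name__)
app.add_url_rule('/', 'process_oms_request', process_oms_request,
                 methods=['POST'])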
Example 23
    def handle_event(self, event_type, val=None):
        """
        Construct and send an asynchronous driver event.
        @param event_type a DriverAsyncEvent type specifier.
        @param val event value for sample and test result events.
        """
        event = {'type': event_type, 'value': val, 'time': time.time()}

        if isinstance(event[EventKeys.VALUE], Exception):
            event[EventKeys.VALUE] = encode_exception(event[EventKeys.VALUE])

        if event[EventKeys.TYPE] == DriverAsyncEvent.ERROR:
            log.error(event)

        if event[EventKeys.TYPE] == DriverAsyncEvent.SAMPLE:
            if event[EventKeys.VALUE].get('stream_name') != 'raw':
                # don't publish raw
                self.particle_publisher.enqueue(event)
        else:
            self.event_publisher.enqueue(event)
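The event dict is built with the literals 'type', 'value' and 'time' but indexed through EventKeys, so those constants must hold the same strings. A sketch of that enum-style class, assumed rather than shown in the source:

class EventKeys(object):
    TYPE = 'type'
    VALUE = 'value'
    TIME = 'time'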
Example 25
 def connect(self):
     delay = 1
     max_delay = 60
     while True:
         try:
             connection = qm.Connection(self.url,
                                        reconnect=False,
                                        username=self.username,
                                        password=self.password)
             connection.open()
             session = connection.session()
             self.sender = session.sender(
                 '%s; {create: always, node: {type: queue, durable: true}}'
                 % self.queue)
             log.info('Shovel connected to QPID')
             return
         except qm.ConnectError:
             log.error('Shovel QPID connection error. Sleep %d seconds',
                       delay)
             time.sleep(delay)
             delay = min(max_delay, delay * 2)
    def _build_parsed_values(self):
        """
        Take the head config data and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            unpack_string = '<4s2s2H12s176s22sHh2s'
            sync, config, head_freq, head_type, head_serial, system_data, _, num_beams, cksum, _ = struct.unpack(
                unpack_string, self.raw_data)

            if not validate_checksum('<111H', self.raw_data, -4):
                log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            config = common.convert_word_to_bit_field(config)
            system_data = base64.b64encode(system_data)
            head_serial = head_serial.split('\x00', 1)[0]

            pressure_sensor = config[-1]
            mag_sensor = config[-2]
            tilt_sensor = config[-3]
            tilt_mount = config[-4]

        except Exception as e:
            log.error('Error creating particle head config, raw data: %r', self.raw_data)
            raise SampleException(e)

        result = [{VID: NortekHeadConfigDataParticleKey.PRESSURE_SENSOR, VAL: pressure_sensor},
                  {VID: NortekHeadConfigDataParticleKey.MAG_SENSOR, VAL: mag_sensor},
                  {VID: NortekHeadConfigDataParticleKey.TILT_SENSOR, VAL: tilt_sensor},
                  {VID: NortekHeadConfigDataParticleKey.TILT_SENSOR_MOUNT, VAL: tilt_mount},
                  {VID: NortekHeadConfigDataParticleKey.HEAD_FREQ, VAL: head_freq},
                  {VID: NortekHeadConfigDataParticleKey.HEAD_TYPE, VAL: head_type},
                  {VID: NortekHeadConfigDataParticleKey.HEAD_SERIAL, VAL: head_serial},
                  {VID: NortekHeadConfigDataParticleKey.SYSTEM_DATA, VAL: system_data, DataParticleKey.BINARY: True},
                  {VID: NortekHeadConfigDataParticleKey.NUM_BEAMS, VAL: num_beams}]

        log.debug('NortekHeadConfigDataParticle: particle=%r', result)
        return result
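Indexing config[-1] through config[-4] reads the low-order bits of the configuration word, so convert_word_to_bit_field presumably expands a 2-byte word into a most-significant-bit-first list. A sketch under that assumption (byte order is a guess):

import struct

def convert_word_to_bit_field(word):
    # Expand a 2-byte word into 16 bits, most significant first, so that
    # bit_field[-1] is bit 0, bit_field[-2] is bit 1, and so on.
    value = struct.unpack('<H', word)[0]
    return [(value >> (15 - i)) & 1 for i in range(16)]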
    def _build_parsed_values(self):
        """
        Take the system data sample format and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        log.debug('VectorSystemDataParticle: raw data =%r', self.raw_data)

        try:

            unpack_string = '<4s6s2H4h2bHH'

            (sync, timestamp, battery, sound_speed, heading, pitch,
             roll, temperature, error, status, analog_input, cksum) = struct.unpack_from(unpack_string, self.raw_data)

            if not validate_checksum('<13H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(timestamp)
            self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())

        except Exception as e:
            log.error('Error creating particle vel3d_cd_system_data, raw data: %r', self.raw_data)
            raise SampleException(e)

        result = [{VID: VectorSystemDataParticleKey.TIMESTAMP, VAL: str(timestamp)},
                  {VID: VectorSystemDataParticleKey.BATTERY, VAL: battery},
                  {VID: VectorSystemDataParticleKey.SOUND_SPEED, VAL: sound_speed},
                  {VID: VectorSystemDataParticleKey.HEADING, VAL: heading},
                  {VID: VectorSystemDataParticleKey.PITCH, VAL: pitch},
                  {VID: VectorSystemDataParticleKey.ROLL, VAL: roll},
                  {VID: VectorSystemDataParticleKey.TEMPERATURE, VAL: temperature},
                  {VID: VectorSystemDataParticleKey.ERROR, VAL: error},
                  {VID: VectorSystemDataParticleKey.STATUS, VAL: status},
                  {VID: VectorSystemDataParticleKey.ANALOG_INPUT, VAL: analog_input}]

        log.debug('VectorSystemDataParticle: particle=%r', result)

        return result
Example 29
    def cmd_dvr(self, cmd, *args, **kwargs):
        """
        Command a driver by request-reply messaging. Package command
        message and send on blocking command socket. Block on same socket
        to receive the reply. Return the driver reply.
        @param cmd The driver command identifier.
        @param args Positional arguments of the command.
        @param kwargs Keyword arguments of the command.
        @retval Command result.
        """
        # Package command dictionary.
        driver_timeout = kwargs.pop('driver_timeout', 600)
        msg = {'cmd': cmd, 'args': args, 'kwargs': kwargs}

        log.debug('Sending command %s.' % str(msg))
        start_send = time.time()
        while True:
            try:
                # Attempt command send. Retry if necessary.
                self.zmq_cmd_socket.send_pyobj(msg, flags=zmq.NOBLOCK)
                if cmd == 'stop_driver_process':
                    return 'driver stopping'

                # Command sent, break out and wait for reply.
                break

            except zmq.ZMQError:
                # Socket not ready to accept send. Sleep and retry later.
                time.sleep(.5)
                delta = time.time() - start_send
                if delta >= driver_timeout:
                    raise InstDriverClientTimeoutError()

            except Exception as e:
                log.error('Driver client error writing to zmq socket: ' +
                          str(e))
                log.error('Driver client error type: ' + str(type(e)))
                raise SystemError('exception writing to zmq socket: ' + str(e))
    def _build_parsed_values(self):
        """
        Take the clock data and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            minutes, seconds, day, hour, year, month, _ = struct.unpack('<6B2s', self.raw_data)
        except Exception as e:
            log.error('Error creating particle clock data raw data: %r', self.raw_data)
            raise SampleException(e)

        minutes = int('%02x' % minutes)
        seconds = int('%02x' % seconds)
        day = int('%02x' % day)
        hour = int('%02x' % hour)
        year = int('%02x' % year)
        month = int('%02x' % month)

        result = [{VID: NortekEngClockDataParticleKey.DATE_TIME_ARRAY,
                   VAL: [minutes, seconds, day, hour, year, month]}]

        log.debug('NortekEngClockDataParticle: particle=%r', result)
        return result
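The clock bytes are binary-coded decimal: each nibble holds one decimal digit, so formatting the byte as two hex digits and reparsing them as decimal recovers the value. For example:

# BCD decode: the byte 0x23 encodes decimal 23, 0x59 encodes 59.
assert int('%02x' % 0x23) == 23
assert int('%02x' % 0x59) == 59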
    def _build_parsed_values(self):
        """
        Take the hardware config data and parse it into
        values with appropriate tags.
        """
        try:
            unpack_string = '<4s14s2s4H2s12s4sh2s'
            (sync, serial_num, config, board_frequency, pic_version, hw_revision,
             recorder_size, status, spare, fw_version, cksum, _) = struct.unpack(unpack_string, self.raw_data)

            if not validate_checksum('<23H', self.raw_data, -4):
                log.warn("_parse_read_hw_config: Bad read hw response from instrument (%r)", self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            config = common.convert_word_to_bit_field(config)
            status = common.convert_word_to_bit_field(status)
            recorder_installed = config[-1]
            compass_installed = config[-2]
            velocity_range = status[-1]

        except Exception as e:
            log.error('Error creating particle hardware config, raw data: %r', self.raw_data)
            raise SampleException(e)

        result = [{VID: NortekHardwareConfigDataParticleKey.SERIAL_NUM, VAL: serial_num},
                  {VID: NortekHardwareConfigDataParticleKey.RECORDER_INSTALLED, VAL: recorder_installed},
                  {VID: NortekHardwareConfigDataParticleKey.COMPASS_INSTALLED, VAL: compass_installed},
                  {VID: NortekHardwareConfigDataParticleKey.BOARD_FREQUENCY, VAL: board_frequency},
                  {VID: NortekHardwareConfigDataParticleKey.PIC_VERSION, VAL: pic_version},
                  {VID: NortekHardwareConfigDataParticleKey.HW_REVISION, VAL: hw_revision},
                  {VID: NortekHardwareConfigDataParticleKey.RECORDER_SIZE, VAL: recorder_size},
                  {VID: NortekHardwareConfigDataParticleKey.VELOCITY_RANGE, VAL: velocity_range},
                  {VID: NortekHardwareConfigDataParticleKey.FW_VERSION, VAL: fw_version}]

        log.debug('NortekHardwareConfigDataParticle: particle=%r', result)
        return result
Example 35
def main():
    """
    This main routine will get the configuration file from the command
    line parameter and set the values for required URIs for the OMS, the
    OMS Alert Alarm Server and the qpid Server.  It will then get the qpid
    publisher for publishing the OMS events.  Finally, it will start the web
    service.
    """

    global aa_publisher

    options = docopt(__doc__)
    server_config_file = options['<server_config>']
    try:
        config = yaml.load(open(server_config_file))
    except IOError:
        log.error('Cannot find configuration file: %r', server_config_file)
        return

    try:
        oms_uri = config.get('oms_uri')
        alert_alarm_server_uri = config.get('alert_alarm_server_uri')
        qpid_uri = config.get('qpid_uri')
    except AttributeError:
        log.error('Configuration file is empty: %r', server_config_file)
        return

    if not all((oms_uri, alert_alarm_server_uri, qpid_uri)):
        log.error('Mandatory configuration values are missing from %r',
                  server_config_file)
    else:
        headers = {'aaServerUri': alert_alarm_server_uri}

        try:
            aa_publisher = Publisher.from_url(qpid_uri, headers=headers)
            start_web_service(oms_uri, alert_alarm_server_uri)

        except Exception as ex:
            log.exception('Error starting OMS Alert and Alarm web service: %r',
                          ex)
            return
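The server_config file is YAML with three mandatory keys. A minimal, hypothetical example with invented URIs, checked the same way main() does:

import yaml

sample_config = yaml.safe_load("""
oms_uri: http://user:password@10.0.0.10:9021/
alert_alarm_server_uri: http://10.0.0.11:5000/
qpid_uri: qpid://guest:guest@localhost:5672
""")
assert all(sample_config.get(key)
           for key in ('oms_uri', 'alert_alarm_server_uri', 'qpid_uri'))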
    def aggregate_raw_data(self, date_dirs_path, data_date):
        """
        This method will aggregate the 24 1-hour files residing at the path
        passed in for the date passed in.  It will copy the 24 files to a
        temporary directory and then aggregate them and store the single file
        in the temporary directory.  It will return the file name of the
        aggregated file and the file name of the echogram file that will be
        generated.

        Exceptions raised This method will raise an exception if there is an
        issue creating the local ZPLSC Echogram directory.

        :param date_dirs_path: The path of the raw data server where the 24 1-hour files reside.
        :param data_date: The date of the raw data for the echogram to be generated.
        :return: zplsc_24_datafile: The 24-hour concatenated raw data file
                 zplsc_echogram_file_path: The file path for the echogram to be generated.
        """

        zplsc_echogram_file_path = None

        try:
            filenames_list, raw_data_path, raw_datafile_prefix = self.get_data_filenames(date_dirs_path, data_date)
        except OSError:
            return '', None

        if len(filenames_list) != 24:
            return '', None

        # Copy the 24 1-hour raw data files to a temporary local directory.
        for raw_data_file in filenames_list:
            remote_raw_data_file = os.path.join(raw_data_path, raw_data_file)
            local_raw_data = os.path.join(self.temp_directory, raw_data_file)
            try:
                shutil.copyfile(remote_raw_data_file, local_raw_data)
            except OSError as ex:
                log.error('Error copying data file to temporary directory: %s: %s', remote_raw_data_file, ex.message)
                continue

        # Concatenate the 24 1-hour raw data files to 1 24-hour raw data file and return the filename.
        zplsc_24_datafilename = self.zplsc_24_datafile_prefix + raw_datafile_prefix
        zplsc_24_datafile = os.path.join(self.temp_directory, zplsc_24_datafilename) + RAW_FILE_EXT
        raw_data_glob = os.path.join(raw_data_path, raw_datafile_prefix) + '*' + RAW_FILE_EXT
        os.system('cat ' + raw_data_glob + ' > ' + zplsc_24_datafile)

        # Generate the ZPLSC Echogram filename.
        echogram_path_idx = string.find(raw_data_path, self.raw_data_dir)
        if echogram_path_idx >= 0:
            base_directory = os.path.expanduser(self.base_echogram_directory)
            path_structure = raw_data_path[echogram_path_idx+len(self.raw_data_dir)+1:]
            zplsc_echogram_file_path = os.path.join(base_directory, path_structure)

            # Create the ZPLSC Echogram directory structure if it doesn't exist.
            try:
                os.makedirs(zplsc_echogram_file_path)
            except OSError as ex:
                if ex.errno == errno.EEXIST and os.path.isdir(zplsc_echogram_file_path):
                    pass
                else:
                    log.error('Error creating local ZPLSC Echogram storage directory: %s', ex.message)
                    raise

        return zplsc_24_datafile, zplsc_echogram_file_path
Example 38
        start_reply = time.time()
        while True:
            try:
                # Attempt reply recv. Retry if necessary.
                reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)
                # Reply received, break and return.
                break
            except zmq.ZMQError:
                # Socket not ready with the reply. Sleep and retry later.
                time.sleep(.5)
                delta = time.time() - start_reply
                if delta >= driver_timeout:
                    raise InstDriverClientTimeoutError()

            except Exception as e:
                log.error('Driver client error reading from zmq socket: ' +
                          str(e))
                log.error('Driver client error type: ' + str(type(e)))
                raise SystemError('exception reading from zmq socket: ' +
                                  str(e))

        log.trace('Reply: %r', reply)

        # Exception information is returned as a tuple (code, message, stacks).
        if isinstance(reply, tuple) and len(reply) == 3:
            log.error('Proceeding to raise exception with these args: ' +
                      str(reply))
            raise EXCEPTION_FACTORY.create_exception(*reply)
        else:
            return reply
    def _build_parsed_values(self):
        """
        Take the velocity data sample and parse it into values with appropriate tags.
        @throws SampleException If there is a problem with sample creation

        typedef struct {
            unsigned char cSync; // sync = 0xa5
            unsigned char cId; // identification (0x01=normal, 0x80=diag)
            unsigned short hSize; // size of structure (words)
            PdClock clock; // date and time
            short hError; // error code:
            unsigned short hAnaIn1; // analog input 1
            unsigned short hBattery; // battery voltage (0.1 V)
            union {
                unsigned short hSoundSpeed; // speed of sound (0.1 m/s)
                unsigned short hAnaIn2; // analog input 2
            } u;
            short hHeading; // compass heading (0.1 deg)
            short hPitch; // compass pitch (0.1 deg)
            short hRoll; // compass roll (0.1 deg)
            unsigned char cPressureMSB; // pressure MSB
            char cStatus; // status:
            unsigned short hPressureLSW; // pressure LSW
            short hTemperature; // temperature (0.01 deg C)
            short hVel[3]; // velocity
            unsigned char cAmp[3]; // amplitude
            char cFill;
            short hChecksum; // checksum
        } PdMeas;
        """
        try:
            unpack_format = (
                ('sync',            '<4s'),  # cSync, cId, hSize
                ('timestamp',       '6s'),   # PdClock
                ('error',           'H'),    # defined as signed short, but represents bitmap, using unsigned
                ('analog1',         'H'),
                ('battery_voltage', 'H'),
                ('sound_speed',     'H'),
                ('heading',         'h'),
                ('pitch',           'h'),
                ('roll',            'h'),
                ('pressure_msb',    'B'),
                ('status',          'B'),    # defined as char, but represents bitmap, using unsigned
                ('pressure_lsw',    'H'),
                ('temperature',     'h'),
                ('velocity_beam1',  'h'),
                ('velocity_beam2',  'h'),
                ('velocity_beam3',  'h'),
                ('amplitude_beam1', 'B'),
                ('amplitude_beam2', 'B'),
                ('amplitude_beam3', 'B'),
            )

            data = unpack_from_format(self._data_particle_type, unpack_format, self.raw_data)

            if not validate_checksum('<20H', self.raw_data):
                log.warn("Failed checksum in %s from instrument (%r)", self._data_particle_type, self.raw_data)
                self.contents[DataParticleKey.QUALITY_FLAG] = DataParticleValue.CHECKSUM_FAILED

            timestamp = common.convert_time(data.timestamp)
            self.set_internal_timestamp((timestamp-datetime(1900, 1, 1)).total_seconds())

            pressure = data.pressure_msb * 0x10000 + data.pressure_lsw

        except Exception as e:
            log.error('Error creating particle velpt_velocity_data, raw data: %r', self.raw_data)
            raise SampleException(e)

        key = AquadoppVelocityDataParticleKey

        result = [{VID: key.TIMESTAMP, VAL: str(timestamp)},
                  {VID: key.ERROR, VAL: data.error},
                  {VID: key.ANALOG1, VAL: data.analog1},
                  {VID: key.BATTERY_VOLTAGE, VAL: data.battery_voltage},
                  {VID: key.SOUND_SPEED_ANALOG2, VAL: data.sound_speed},
                  {VID: key.HEADING, VAL: data.heading},
                  {VID: key.PITCH, VAL: data.pitch},
                  {VID: key.ROLL, VAL: data.roll},
                  {VID: key.STATUS, VAL: data.status},
                  {VID: key.PRESSURE, VAL: pressure},
                  {VID: key.TEMPERATURE, VAL: data.temperature},
                  {VID: key.VELOCITY_BEAM1, VAL: data.velocity_beam1},
                  {VID: key.VELOCITY_BEAM2, VAL: data.velocity_beam2},
                  {VID: key.VELOCITY_BEAM3, VAL: data.velocity_beam3},
                  {VID: key.AMPLITUDE_BEAM1, VAL: data.amplitude_beam1},
                  {VID: key.AMPLITUDE_BEAM2, VAL: data.amplitude_beam2},
                  {VID: key.AMPLITUDE_BEAM3, VAL: data.amplitude_beam3}]

        return result
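unpack_from_format pairs each field name with its struct format string. A plausible implementation (a sketch; the framework's helper may differ) joins the formats, unpacks, and returns a namedtuple so fields read as data.timestamp, data.heading, and so on:

import struct
from collections import namedtuple

def unpack_from_format(name, format_pairs, raw_data):
    # The '<' byte-order marker arrives with the first field's format.
    field_names = [field_name for field_name, _ in format_pairs]
    fmt = ''.join(field_format for _, field_format in format_pairs)
    values = struct.unpack_from(fmt, raw_data)
    return namedtuple(name, field_names)(*values)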
Example 40
 def enqueue(self, event):
     try:
         json.dumps(event)
         self._deque.append(event)
     except Exception as e:
         log.error('Unable to encode event as JSON: %r', e)
    def input_is_valid(self):
        """
        This method validates the command line parameters entered.

        :return valid_input:  Boolean indicating whether all the inputs validated.
        """

        valid_input = True

        # Get the configuration parameters.
        zplsc_config_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), ZPLSC_CONFIG_FILE)
        zplsc_config = None
        try:
            with open(zplsc_config_file, 'r') as config_file:
                try:
                    zplsc_config = yaml.load(config_file)
                except yaml.YAMLError as ex:
                    log.error('Error loading the configuration file: %s: %s',
                              zplsc_config_file, ex.message)
                    valid_input = False
        except IOError as ex:
            log.error('Error opening configuration file: %s: %s',
                      zplsc_config_file, ex.message)
            valid_input = False

        if valid_input:
            if self.zplsc_datafile is not None and not os.path.isfile(
                    self.zplsc_datafile):
                log.error('Invalid data file: %s', self.zplsc_datafile)
                valid_input = False

        if valid_input:
            self.zplsc_subsites = zplsc_config['zplsc_subsites']
            self.raw_data_dir = zplsc_config['raw_data_dir']

            # If this is a process run or we are processing all the subsites
            if self.process_mode or self.all_subsites:
                self.subsites = self.zplsc_subsites

            # If we are not generating a 1-hour echogram, validate the subsites in the list.
            if self.zplsc_datafile is None:
                for subsite in self.subsites:
                    if subsite not in self.zplsc_subsites:
                        log.error(
                            'Subsite is not in the list of subsites with ZPLSC instrumentation: %s',
                            subsite)
                        valid_input = False
                        break

        if valid_input and self.deployments:
            if not isinstance(self.deployments, types.ListType):
                self.deployments = [self.deployments]

            for index in range(len(self.deployments)):
                try:
                    self.deployments[index] = int(self.deployments[index])

                except ValueError as ex:
                    log.error('Invalid deployment number: %s: %s',
                              self.deployments[index], ex.message)
                    valid_input = False
                    break

        if valid_input:
            if self.echogram_dates is False:
                log.error('Invalid echogram date(s)')
                valid_input = False

        if valid_input:
            self.base_echogram_directory = zplsc_config.get(
                'zplsc_echogram_directory', BASE_ECHOGRAM_DIRECTORY)

        return valid_input
    def aggregate_raw_data(self, date_dirs_path, data_date):
        """
        This method will aggregate the 24 1-hour files residing at the path
        passed in for the date passed in.  It will copy the 24 files to a
        temporary directory and then aggregate them and store the single file
        in the temporary directory.  It will return the file name of the
        aggregated file and the file name of the echogram file that will be
        generated.

        This method raises an exception if there is an issue creating the
        local ZPLSC Echogram directory.

        :param date_dirs_path: The path of the raw data server where the 24 1-hour files reside.
        :param data_date: The date of the raw data for the echogram to be generated.
        :return: zplsc_24_datafile: The 24-hour concatenated raw data file
                 zplsc_echogram_file_path: The file path for the echogram to be generated.
        """

        zplsc_echogram_file_path = None

        try:
            filenames_list, raw_data_path, raw_datafile_prefix = self.get_data_filenames(
                date_dirs_path, data_date)
        except OSError:
            return '', None

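        # A complete day requires all 24 hourly files; skip this date otherwise.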
        if len(filenames_list) != 24:
            return '', None

        # Copy the 24 1-hour raw data files to a temporary local directory.
        for raw_data_file in filenames_list:
            remote_raw_data_file = os.path.join(raw_data_path, raw_data_file)
            local_raw_data = os.path.join(self.temp_directory, raw_data_file)
            try:
                shutil.copyfile(remote_raw_data_file, local_raw_data)
            except OSError as ex:
                log.error(
                    'Error copying data file to temporary directory: %s: %s',
                    remote_raw_data_file, ex.message)
                continue

        # Concatenate the 24 1-hour raw data files to 1 24-hour raw data file and return the filename.
        zplsc_24_datafilename = self.zplsc_24_datafile_prefix + raw_datafile_prefix
        zplsc_24_datafile = os.path.join(self.temp_directory,
                                         zplsc_24_datafilename) + RAW_FILE_EXT
        raw_data_glob = os.path.join(raw_data_path,
                                     raw_datafile_prefix) + '*' + RAW_FILE_EXT
        os.system('cat ' + raw_data_glob + ' > ' + zplsc_24_datafile)
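        # The shell expands the glob in sorted order, so the hour-stamped files are concatenated chronologically.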

        # Generate the ZPLSC Echogram filename.
        echogram_path_idx = raw_data_path.find(self.raw_data_dir)
        if echogram_path_idx >= 0:
            base_directory = os.path.expanduser(self.base_echogram_directory)
            path_structure = raw_data_path[echogram_path_idx +
                                           len(self.raw_data_dir) + 1:]
            zplsc_echogram_file_path = os.path.join(base_directory,
                                                    path_structure)

            # Create the ZPLSC Echogram directory structure if it doesn't exist.
            try:
                os.makedirs(zplsc_echogram_file_path)
            except OSError as ex:
                if ex.errno == errno.EEXIST and os.path.isdir(
                        zplsc_echogram_file_path):
                    pass
                else:
                    log.error(
                        'Error creating local ZPLSC Echogram storage directory: %s',
                        ex.message)
                    raise

        return zplsc_24_datafile, zplsc_echogram_file_path
Esempio n. 43
    def parse_file(self):
        """
        Parse the *.raw file.
        """

        # Extract the file time from the file name
        input_file_name = self._stream_handle.name
        (filepath, filename) = os.path.split(input_file_name)

        # tuple contains the string before the '.', the '.', and the 'raw' string
        outfile = filename.rpartition('.')[0]

        match = FILE_NAME_MATCHER.match(input_file_name)
        if match:
            file_time = match.group('Date') + match.group('Time')
        else:
            file_time = ""
            # Files retrieved from the instrument should always match the timestamp naming convention
            self.recov_exception_callback("Unable to extract file time from input file name: %s. "
                                          "Expected format *-DYYYYmmdd-THHMMSS.raw" % input_file_name)

        # Read binary file a block at a time
        raw = self._stream_handle.read(BLOCK_SIZE)

        # Set starting byte
        byte_cnt = 0

        # Read the configuration datagram, output at the beginning of the file
        length1, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
        byte_cnt += LENGTH_SIZE

        # Configuration datagram header
        datagram_header = read_datagram_header(raw[byte_cnt:byte_cnt+DATAGRAM_HEADER_SIZE])
        byte_cnt += DATAGRAM_HEADER_SIZE

        # Configuration: header
        config_header = read_config_header(raw[byte_cnt:byte_cnt+CONFIG_HEADER_SIZE])
        byte_cnt += CONFIG_HEADER_SIZE

        transducer_count = config_header['transducer_count']

        if GET_CONFIG_TRANSDUCER:
            td_gain = {}
            td_gain_table = {}
            td_pulse_length_table = {}
            td_phi_equiv_beam_angle = {}

            # Configuration: transducers (1 to 7 max)
            for i in xrange(1, transducer_count+1):
                config_transducer = read_config_transducer(
                    raw[byte_cnt:byte_cnt+CONFIG_TRANSDUCER_SIZE])

                # Example data that one might need for various calculations later on
                td_gain[i] = config_transducer['gain']
                td_gain_table[i] = config_transducer['gain_table']
                td_pulse_length_table[i] = config_transducer['pulse_length_table']
                td_phi_equiv_beam_angle[i] = config_transducer['equiv_beam_angle']

        byte_cnt += CONFIG_TRANSDUCER_SIZE * transducer_count

        # Compare length1 (from beginning of datagram) to length2 (from the end of datagram) to
        # the actual number of bytes read. A mismatch can indicate an invalid, corrupt, misaligned,
        # or missing configuration datagram or a reverse byte order binary data file.
        # A bad/missing configuration datagram header is a significant error.
        length2, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
        if not (length1 == length2 == byte_cnt-LENGTH_SIZE):
            raise ValueError(
                "Length of configuration datagram and number of bytes read do not match: length1: %s"
                ", length2: %s, bytes read: %s. Possible file corruption or format incompatibility." %
                (length1, length2, byte_cnt-LENGTH_SIZE))

        first_ping_metadata = defaultdict(list)
        trans_keys = range(1, transducer_count+1)
        trans_array = dict((key, []) for key in trans_keys)         # transducer power data
        trans_array_time = dict((key, []) for key in trans_keys)    # transducer time data
        td_f = dict.fromkeys(trans_keys)                            # transducer frequency
        td_dR = dict.fromkeys(trans_keys)                           # transducer range bin size (depth per sample)

        position = 0

        while raw:
            # We only care for the Sample datagrams, skip over all the other datagrams
            match = SAMPLE_MATCHER.search(raw)

            if not match:
                # Read in the next block w/ a token sized overlap
                self._stream_handle.seek(self._stream_handle.tell() - 4)
                position = self._stream_handle.tell()
                raw = self._stream_handle.read(BLOCK_SIZE)

                # The last 4 bytes is just the length2 of the last datagram
                if len(raw) <= 4:
                    break

                # No sample token in this block; search the next one
                continue

            # Offset by size of length value
            match_start = match.start() - LENGTH_SIZE

            # Seek to the position of the length data before the token to read into numpy array
            self._stream_handle.seek(position + match_start)

            # Read and unpack the Sample Datagram into numpy array
            sample_data = np.fromfile(self._stream_handle, dtype=sample_dtype, count=1)
            channel = sample_data['channel_number'][0]

            # Check for a valid channel number that is within the number of transducers config
            # to prevent incorrectly indexing into the dictionaries.
            # An out of bounds channel number can indicate invalid, corrupt,
            # or misaligned datagram or a reverse byte order binary data file.
            # Log warning and continue to try and process the rest of the file.
            if channel < 1 or channel > transducer_count:
                log.warn("Invalid channel: %s for transducer count: %s. "
                         "Possible file corruption or format incompatibility.", channel, transducer_count)

                # Need current position in file to increment for next regex search offset
                position = self._stream_handle.tell()

                # Read the next block for regex search
                raw = self._stream_handle.read(BLOCK_SIZE)
                continue

            # Convert high and low bytes to internal time
            internal_time = (sample_data['high_date_time'][0] << 32) + sample_data['low_date_time'][0]
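            # internal_time is a Windows FILETIME value: 100 ns ticks since 1601-01-01 (converted below).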
            # Note: Strictly sequential time tags are not guaranteed.
            trans_array_time[channel].append(internal_time)

            # Gather metadata once per transducer channel number
            if not trans_array[channel]:
                file_name = self.output_file_path + '/' + outfile + '_' + \
                            str(int(sample_data['frequency'])/1000) + 'k.png'

                first_ping_metadata[ZplscBParticleKey.FILE_TIME] = file_time
                first_ping_metadata[ZplscBParticleKey.FILE_NAME].append(file_name)
                first_ping_metadata[ZplscBParticleKey.CHANNEL].append(channel)
                first_ping_metadata[ZplscBParticleKey.TRANSDUCER_DEPTH].append(sample_data['transducer_depth'][0])
                first_ping_metadata[ZplscBParticleKey.FREQUENCY].append(sample_data['frequency'][0])
                first_ping_metadata[ZplscBParticleKey.TRANSMIT_POWER].append(sample_data['transmit_power'][0])
                first_ping_metadata[ZplscBParticleKey.PULSE_LENGTH].append(sample_data['pulse_length'][0])
                first_ping_metadata[ZplscBParticleKey.BANDWIDTH].append(sample_data['bandwidth'][0])
                first_ping_metadata[ZplscBParticleKey.SAMPLE_INTERVAL].append(sample_data['sample_interval'][0])
                first_ping_metadata[ZplscBParticleKey.SOUND_VELOCITY].append(sample_data['sound_velocity'][0])
                first_ping_metadata[ZplscBParticleKey.ABSORPTION_COEF].append(sample_data['absorption_coefficient'][0])
                first_ping_metadata[ZplscBParticleKey.TEMPERATURE].append(sample_data['temperature'][0])

                # Make only one particle for the first ping series containing data for all channels
                if channel == config_header['transducer_count']:
                    # Convert from Windows time to NTP time.
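                    # FILETIME ticks are 100 ns, so dividing by 10 yields microseconds.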
                    time = datetime(1601, 1, 1) + timedelta(microseconds=internal_time/10.0)
                    year, month, day, hour, min, sec = time.utctimetuple()[:6]
                    unix_time = calendar.timegm((year, month, day, hour, min, sec+(time.microsecond/1e6)))
                    time_stamp = ntplib.system_to_ntp_time(unix_time)

                    # Extract a particle and append it to the record buffer
                    # Note: numpy unpacked values still need to be encoded
                    particle = self._extract_sample(ZplscBInstrumentDataParticle, None,
                                                    first_ping_metadata,
                                                    time_stamp)
                    log.debug('Parsed particle: %s', particle.generate_dict())
                    self._record_buffer.append(particle)

                # Extract various calibration parameters used for generating echogram plot
                # This data doesn't change so extract it once per channel
                td_f[channel] = sample_data['frequency'][0]
                td_dR[channel] = sample_data['sound_velocity'][0] * sample_data['sample_interval'][0] / 2

            count = sample_data['count'][0]

            # Extract array of power data
            power_dtype = np.dtype([('power_data', '<i2')])     # 2 byte int (short)
            power_data = np.fromfile(self._stream_handle, dtype=power_dtype, count=count)

            # Decompress power data to dB
            trans_array[channel].append(power_data['power_data'] * 10. * np.log10(2) / 256.)
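            # One raw count corresponds to 10*log10(2)/256 ≈ 0.0118 dB.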

            # Read the athwartship and alongship angle measurements
            if sample_data['mode'][0] > 1:
                angle_dtype = np.dtype([('athwart', '<i1'), ('along', '<i1')])     # 1 byte ints
                angle_data = np.fromfile(self._stream_handle, dtype=angle_dtype, count=count)
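                # The angle block is read mainly to advance the file position; angle_data is not used below.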

            # Read and compare length1 (from beginning of datagram) to length2
            # (from the end of datagram). A mismatch can indicate an invalid, corrupt,
            # or misaligned datagram or a reverse byte order binary data file.
            # Log warning and continue to try and process the rest of the file.
            len_dtype = np.dtype([('length2', '<i4')])     # 4 byte int (long)
            length2_data = np.fromfile(self._stream_handle, dtype=len_dtype, count=1)
            if not (sample_data['length1'][0] == length2_data['length2'][0]):
                log.warn("Mismatching beginning and end length values in sample datagram: length1"
                         ": %s, length2: %s. Possible file corruption or format incompatibility."
                         , sample_data['length1'][0], length2_data['length2'][0])

            # Need current position in file to increment for next regex search offset
            position = self._stream_handle.tell()

            # Read the next block for regex search
            raw = self._stream_handle.read(BLOCK_SIZE)

        # Driver spends most of the time plotting;
        # this can take longer for more transducers, so let's break out the work
        processes = []
        for channel in td_f.iterkeys():
            try:
                process = Process(target=self.generate_echogram_plot,
                                  args=(trans_array_time[channel], trans_array[channel],
                                        td_f[channel], td_dR[channel], channel,
                                        first_ping_metadata[ZplscBParticleKey.FILE_NAME][channel-1]))
                process.start()
                processes.append(process)

            except Exception as e:
                log.error("Error: Unable to start process: %s", e)
    def generate_zplsc_echograms(self):
        """
        This method will get the subsites, deployments and dates from the
        command line or all of the subsites, deployments and dates for the
        daily process.  It will generate the echograms based on those inputs
        and upload the echograms to the raw data server.

        :return:
        """

        # If we are creating a 1-hour echogram, generate the echogram.
        if self.zplsc_datafile is not None:
            # Send the 1-hour raw data file to the zplsc C Series parser to generate the echogram.
            with open(self.zplsc_datafile) as file_handle:
                base_directory = os.path.expanduser(
                    self.base_echogram_directory)
                path_structure, filename = os.path.split(self.zplsc_datafile)
                zplsc_echogram_file_path = None
                for subsite in self.zplsc_subsites:
                    subsite_index = path_structure.find(subsite)
                    if subsite_index >= 0:
                        zplsc_echogram_file_path = os.path.join(
                            base_directory, path_structure[subsite_index:])
                        # Create the ZPLSC Echogram directory structure if it doesn't exist.
                        try:
                            os.makedirs(zplsc_echogram_file_path)
                        except OSError as ex:
                            if ex.errno == errno.EEXIST and os.path.isdir(
                                    zplsc_echogram_file_path):
                                pass
                            else:
                                log.error(
                                    'Error creating local ZPLSC Echogram storage directory: %s',
                                    ex.message)
                                raise
                        break

                if zplsc_echogram_file_path is not None:
                    # Get the parser for this file and generate the echogram.
                    parser = ZplscCParser(CONFIG, file_handle,
                                          self.rec_exception_callback)
                    parser.create_echogram(zplsc_echogram_file_path)
                else:
                    log.warning(
                        'The subsite is not one of the subsites containing a ZPLSC-C instrument.'
                    )

        else:  # We are creating 24-hour echograms ...
            # Create the temporary data file directory.
            self.temp_directory = os.path.join(os.path.expanduser(USER_HOME),
                                               TEMP_DIR)
            if not os.path.exists(self.temp_directory):
                os.mkdir(self.temp_directory)

            # Create the echograms for the zplsc instruments of each subsite.
            for subsite in self.subsites:
                zplsc_24_subsite_prefix = subsite + '-'

                try:
                    deployments = self.get_deployment_dirs(subsite)
                except OSError:
                    continue

                for deployment in deployments:
                    zplsc_24_deployment_prefix = zplsc_24_subsite_prefix + 'R' + str(
                        deployment) + '-'

                    try:
                        echogram_dates, date_dirs_path = self.get_date_dirs(
                            subsite, deployment)
                    except OSError:
                        continue

                    for date_dir, entire_month in echogram_dates.items():
                        self.zplsc_24_datafile_prefix = zplsc_24_deployment_prefix + 'sn' + self.serial_num + '-'

                        if entire_month:
                            number_of_days_in_the_month = calendar.monthrange(
                                date_dir.year, date_dir.month)[1]
                            for day in range(number_of_days_in_the_month):
                                echogram_date = date_dir + timedelta(days=day)

                                # Aggregate the 24 raw data files for the given instrument to 1 24-hour data file.
                                zplsc_24_datafile, zplsc_echogram_file_path = self.aggregate_raw_data(
                                    date_dirs_path, echogram_date)
                                if not zplsc_24_datafile:
                                    log.warning(
                                        'Unable to aggregate raw data files for %s under %s',
                                        echogram_date, date_dirs_path)
                                    continue

                                # Send the 24-hour raw data file to the zplsc C Series parser to generate the echogram.
                                with open(zplsc_24_datafile) as file_handle:
                                    parser = ZplscCParser(
                                        CONFIG, file_handle,
                                        self.rec_exception_callback)
                                    parser.create_echogram(
                                        zplsc_echogram_file_path)

                                if not self.keep_temp_files:
                                    self.purge_temporary_files()

                        else:
                            # Aggregate the 24 raw data files for the given instrument to 1 24-hour data file.
                            zplsc_24_datafile, zplsc_echogram_file_path = self.aggregate_raw_data(
                                date_dirs_path, date_dir)

                            if not zplsc_24_datafile:
                                log.warning(
                                    'Unable to aggregate raw data files for %s under %s',
                                    date_dir, date_dirs_path)
                                continue

                            # Send the 24-hour raw data file to the zplsc C Series parser to generate the echogram.
                            with open(zplsc_24_datafile) as file_handle:
                                parser = ZplscCParser(
                                    CONFIG, file_handle,
                                    self.rec_exception_callback)
                                parser.create_echogram(
                                    zplsc_echogram_file_path)

                            if not self.keep_temp_files:
                                self.purge_temporary_files()

            # Remove the temporary data file directory and its content.
            if not self.keep_temp_files:
                shutil.rmtree(self.temp_directory)

            # If it's running as a daily process, wait 24 hours and re-run this method
            if self.process_mode:
                threading.Timer(SECONDS_IN_DAY,
                                self.generate_zplsc_echograms).start()
    def _build_parsed_values(self):
        """
        Take the user config data and parse it into
        values with appropriate tags.
        @throws SampleException If there is a problem with sample creation
        """
        try:
            config = UserConfiguration(self.raw_data)
        except Exception as e:
            log.error('Error creating particle user config, raw data: %r', self.raw_data)
            raise SampleException(e)

        fields = [
            UserConfigKey.TX_LENGTH,
            UserConfigKey.BLANK_DIST,
            UserConfigKey.RX_LENGTH,
            UserConfigKey.TIME_BETWEEN_PINGS,
            UserConfigKey.TIME_BETWEEN_BURSTS,
            UserConfigKey.NUM_PINGS,
            UserConfigKey.AVG_INTERVAL,
            UserConfigKey.NUM_BEAMS,
            UserConfigKey.PROFILE_TYPE,
            UserConfigKey.MODE_TYPE,
            UserConfigKey.POWER_TCM1,
            UserConfigKey.POWER_TCM2,
            UserConfigKey.SYNC_OUT_POSITION,
            UserConfigKey.SAMPLE_ON_SYNC,
            UserConfigKey.START_ON_SYNC,
            UserConfigKey.POWER_PCR1,
            UserConfigKey.POWER_PCR2,
            UserConfigKey.COMPASS_UPDATE_RATE,
            UserConfigKey.COORDINATE_SYSTEM,
            UserConfigKey.NUM_CELLS,
            UserConfigKey.CELL_SIZE,
            UserConfigKey.MEASUREMENT_INTERVAL,
            UserConfigKey.DEPLOYMENT_NAME,
            UserConfigKey.WRAP_MODE,
            UserConfigCompositeKey.DEPLOY_START_TIME,
            UserConfigCompositeKey.DIAG_INTERVAL,
            UserConfigKey.USE_SPEC_SOUND_SPEED,
            UserConfigKey.DIAG_MODE_ON,
            UserConfigKey.ANALOG_OUTPUT_ON,
            UserConfigKey.OUTPUT_FORMAT,
            UserConfigKey.SCALING,
            UserConfigKey.SERIAL_OUT_ON,
            UserConfigKey.STAGE_ON,
            UserConfigKey.ANALOG_POWER_OUTPUT,
            UserConfigKey.SOUND_SPEED_ADJUST,
            UserConfigKey.NUM_DIAG_SAMPLES,
            UserConfigKey.NUM_BEAMS_PER_CELL,
            UserConfigKey.NUM_PINGS_DIAG,
            UserConfigKey.USE_DSP_FILTER,
            UserConfigKey.FILTER_DATA_OUTPUT,
            UserConfigKey.ANALOG_INPUT_ADDR,
            UserConfigKey.SW_VER,
            UserConfigCompositeKey.VELOCITY_ADJ_FACTOR,
            UserConfigKey.FILE_COMMENTS,
            UserConfigKey.WAVE_DATA_RATE,
            UserConfigKey.WAVE_CELL_POS,
            UserConfigKey.DYNAMIC_POS_TYPE,
            UserConfigKey.PERCENT_WAVE_CELL_POS,
            UserConfigKey.WAVE_TX_PULSE,
            UserConfigKey.FIX_WAVE_BLANK_DIST,
            UserConfigKey.WAVE_CELL_SIZE,
            UserConfigKey.NUM_DIAG_PER_WAVE,
            UserConfigKey.NUM_SAMPLE_PER_BURST,
            UserConfigKey.ANALOG_SCALE_FACTOR,
            UserConfigKey.CORRELATION_THRS,
            UserConfigKey.TX_PULSE_LEN_2ND,
            UserConfigCompositeKey.FILTER_CONSTANTS,
            ]

        result = [{VID: field, VAL: getattr(config, field)} for field in fields]

        log.debug('NortekUserConfigDataParticle: particle=%r', result)
        return result
Esempio n. 46
    def parse_file(self):
        """
        Parse the *.raw file.
        """

        # Extract the file time from the file name
        input_file_name = self._stream_handle.name
        (filepath, filename) = os.path.split(input_file_name)

        # tuple contains the string before the '.', the '.', and the 'raw' string
        outfile = filename.rpartition('.')[0]

        match = FILE_NAME_MATCHER.match(input_file_name)
        if match:
            file_time = match.group('Date') + match.group('Time')
            rel_file_path = os.path.join(*match.groups()[1:-1])
            full_file_path = os.path.join(self.output_file_path, rel_file_path)
            if not os.path.exists(full_file_path):
                os.makedirs(full_file_path)
        else:
            file_time = ""
            rel_file_path = ""
            # Files retrieved from the instrument should always match the timestamp naming convention
            self.recov_exception_callback("Unable to extract file time from input file name: %s. "
                                          "Expected format *-DYYYYmmdd-THHMMSS.raw" % input_file_name)

        # Read binary file a block at a time
        raw = self._stream_handle.read(BLOCK_SIZE)

        # Set starting byte
        byte_cnt = 0

        # Read the configuration datagram, output at the beginning of the file
        length1, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
        byte_cnt += LENGTH_SIZE

        # Configuration datagram header
        datagram_header = read_datagram_header(raw[byte_cnt:byte_cnt+DATAGRAM_HEADER_SIZE])
        byte_cnt += DATAGRAM_HEADER_SIZE

        # Configuration: header
        config_header = read_config_header(raw[byte_cnt:byte_cnt+CONFIG_HEADER_SIZE])
        byte_cnt += CONFIG_HEADER_SIZE

        transducer_count = config_header['transducer_count']

        if GET_CONFIG_TRANSDUCER:
            td_gain = {}
            td_gain_table = {}
            td_pulse_length_table = {}
            td_phi_equiv_beam_angle = {}

            # Configuration: transducers (1 to 7 max)
            for i in xrange(1, transducer_count+1):
                config_transducer = read_config_transducer(
                    raw[byte_cnt:byte_cnt+CONFIG_TRANSDUCER_SIZE])

                # Example data that one might need for various calculations later on
                td_gain[i] = config_transducer['gain']
                td_gain_table[i] = config_transducer['gain_table']
                td_pulse_length_table[i] = config_transducer['pulse_length_table']
                td_phi_equiv_beam_angle[i] = config_transducer['equiv_beam_angle']

        byte_cnt += CONFIG_TRANSDUCER_SIZE * transducer_count

        # Compare length1 (from beginning of datagram) to length2 (from the end of datagram) to
        # the actual number of bytes read. A mismatch can indicate an invalid, corrupt, misaligned,
        # or missing configuration datagram or a reverse byte order binary data file.
        # A bad/missing configuration datagram header is a significant error.
        length2, = unpack('<l', raw[byte_cnt:byte_cnt+LENGTH_SIZE])
        if not (length1 == length2 == byte_cnt-LENGTH_SIZE):
            raise ValueError(
                "Length of configuration datagram and number of bytes read do not match: length1: %s"
                ", length2: %s, bytes read: %s. Possible file corruption or format incompatibility." %
                (length1, length2, byte_cnt-LENGTH_SIZE))

        first_ping_metadata = defaultdict(list)
        trans_keys = range(1, transducer_count+1)
        trans_array = dict((key, []) for key in trans_keys)         # transducer power data
        trans_array_time = dict((key, []) for key in trans_keys)    # transducer time data
        td_f = dict.fromkeys(trans_keys)                            # transducer frequency
        td_dR = dict.fromkeys(trans_keys)                           # transducer range bin size (depth per sample)

        position = 0

        while raw:
            # We only care for the Sample datagrams, skip over all the other datagrams
            match = SAMPLE_MATCHER.search(raw)

            if not match:
                # Read in the next block w/ a token sized overlap
                self._stream_handle.seek(self._stream_handle.tell() - 4)
                position = self._stream_handle.tell()
                raw = self._stream_handle.read(BLOCK_SIZE)

                # The last 4 bytes is just the length2 of the last datagram
                if len(raw) <= 4:
                    break

                # No sample token in this block; search the next one
                continue

            # Offset by size of length value
            match_start = match.start() - LENGTH_SIZE

            # Seek to the position of the length data before the token to read into numpy array
            self._stream_handle.seek(position + match_start)

            # Read and unpack the Sample Datagram into numpy array
            sample_data = np.fromfile(self._stream_handle, dtype=sample_dtype, count=1)
            channel = sample_data['channel_number'][0]

            # Check for a valid channel number that is within the number of transducers config
            # to prevent incorrectly indexing into the dictionaries.
            # An out of bounds channel number can indicate invalid, corrupt,
            # or misaligned datagram or a reverse byte order binary data file.
            # Log warning and continue to try and process the rest of the file.
            if channel < 1 or channel > transducer_count:
                log.warn("Invalid channel: %s for transducer count: %s. "
                         "Possible file corruption or format incompatibility.", channel, transducer_count)

                # Need current position in file to increment for next regex search offset
                position = self._stream_handle.tell()

                # Read the next block for regex search
                raw = self._stream_handle.read(BLOCK_SIZE)
                continue

            # Convert high and low bytes to internal time
            internal_time = (sample_data['high_date_time'][0] << 32) + sample_data['low_date_time'][0]
            # Note: Strictly sequential time tags are not guaranteed.
            trans_array_time[channel].append(internal_time)

            # Gather metadata once per transducer channel number
            if not trans_array[channel]:
                file_path = os.path.join(
                    rel_file_path, outfile + '_' + str(int(sample_data['frequency'])/1000) + 'k.png')
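                # One echogram PNG per channel, named by the transducer frequency in kHz, relative to the output root.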

                first_ping_metadata[ZplscBParticleKey.FILE_TIME] = file_time
                first_ping_metadata[ZplscBParticleKey.FILE_PATH].append(file_path)
                first_ping_metadata[ZplscBParticleKey.CHANNEL].append(channel)
                first_ping_metadata[ZplscBParticleKey.TRANSDUCER_DEPTH].append(sample_data['transducer_depth'][0])
                first_ping_metadata[ZplscBParticleKey.FREQUENCY].append(sample_data['frequency'][0])
                first_ping_metadata[ZplscBParticleKey.TRANSMIT_POWER].append(sample_data['transmit_power'][0])
                first_ping_metadata[ZplscBParticleKey.PULSE_LENGTH].append(sample_data['pulse_length'][0])
                first_ping_metadata[ZplscBParticleKey.BANDWIDTH].append(sample_data['bandwidth'][0])
                first_ping_metadata[ZplscBParticleKey.SAMPLE_INTERVAL].append(sample_data['sample_interval'][0])
                first_ping_metadata[ZplscBParticleKey.SOUND_VELOCITY].append(sample_data['sound_velocity'][0])
                first_ping_metadata[ZplscBParticleKey.ABSORPTION_COEF].append(sample_data['absorption_coefficient'][0])
                first_ping_metadata[ZplscBParticleKey.TEMPERATURE].append(sample_data['temperature'][0])

                # Make only one particle for the first ping series containing data for all channels
                if channel == config_header['transducer_count']:
                    # Convert from Windows time to NTP time.
                    time = datetime(1601, 1, 1) + timedelta(microseconds=internal_time/10.0)
                    year, month, day, hour, min, sec = time.utctimetuple()[:6]
                    unix_time = calendar.timegm((year, month, day, hour, min, sec+(time.microsecond/1e6)))
                    time_stamp = ntplib.system_to_ntp_time(unix_time)

                    # Extract a particle and append it to the record buffer
                    # Note: numpy unpacked values still need to be encoded
                    particle = self._extract_sample(ZplscBInstrumentDataParticle, None,
                                                    first_ping_metadata,
                                                    time_stamp)
                    log.debug('Parsed particle: %s', particle.generate_dict())
                    self._record_buffer.append(particle)

                # Extract various calibration parameters used for generating echogram plot
                # This data doesn't change so extract it once per channel
                td_f[channel] = sample_data['frequency'][0]
                td_dR[channel] = sample_data['sound_velocity'][0] * sample_data['sample_interval'][0] / 2

            count = sample_data['count'][0]

            # Extract array of power data
            power_dtype = np.dtype([('power_data', '<i2')])     # 2 byte int (short)
            power_data = np.fromfile(self._stream_handle, dtype=power_dtype, count=count)

            # Decompress power data to dB
            trans_array[channel].append(power_data['power_data'] * 10. * np.log10(2) / 256.)

            # Read the athwartship and alongship angle measurements
            if sample_data['mode'][0] > 1:
                angle_dtype = np.dtype([('athwart', '<i1'), ('along', '<i1')])     # 1 byte ints
                angle_data = np.fromfile(self._stream_handle, dtype=angle_dtype, count=count)

            # Read and compare length1 (from beginning of datagram) to length2
            # (from the end of datagram). A mismatch can indicate an invalid, corrupt,
            # or misaligned datagram or a reverse byte order binary data file.
            # Log warning and continue to try and process the rest of the file.
            len_dtype = np.dtype([('length2', '<i4')])     # 4 byte int (long)
            length2_data = np.fromfile(self._stream_handle, dtype=len_dtype, count=1)
            if not (sample_data['length1'][0] == length2_data['length2'][0]):
                log.warn("Mismatching beginning and end length values in sample datagram: length1"
                         ": %s, length2: %s. Possible file corruption or format incompatibility."
                         , sample_data['length1'][0], length2_data['length2'][0])

            # Need current position in file to increment for next regex search offset
            position = self._stream_handle.tell()

            # Read the next block for regex search
            raw = self._stream_handle.read(BLOCK_SIZE)

        # Driver spends most of the time plotting;
        # this can take longer for more transducers, so let's break out the work
        processes = []
        for channel in td_f.iterkeys():
            try:
                process = Process(target=self.generate_echogram_plot,
                                  args=(trans_array_time[channel], trans_array[channel],
                                        td_f[channel], td_dR[channel], channel,
                                        os.path.join(
                                            self.output_file_path,
                                            first_ping_metadata[ZplscBParticleKey.FILE_PATH][channel-1])))
                process.start()
                processes.append(process)

            except Exception as e:
                log.error("Error: Unable to start process: %s", e)
    def generate_zplsc_echograms(self):
        """
        This method will get the subsites, deployments and dates from the
        command line or all of the subsites, deployments and dates for the
        daily process.  It will generate the echograms based on those inputs
        and upload the echograms to the raw data server.

        :return:
        """

        # If we are creating a 1-hour echogram, generate the echogram.
        if self.zplsc_datafile is not None:
            # Send the 1-hour raw data file to the zplsc C Series parser to generate the echogram.
            with open(self.zplsc_datafile) as file_handle:
                base_directory = os.path.expanduser(self.base_echogram_directory)
                path_structure, filename = os.path.split(self.zplsc_datafile)
                zplsc_echogram_file_path = None
                for subsite in self.zplsc_subsites:
                    subsite_index = path_structure.find(subsite)
                    if subsite_index >= 0:
                        zplsc_echogram_file_path = os.path.join(base_directory, path_structure[subsite_index:])
                        # Create the ZPLSC Echogram directory structure if it doesn't exist.
                        try:
                            os.makedirs(zplsc_echogram_file_path)
                        except OSError as ex:
                            if ex.errno == errno.EEXIST and os.path.isdir(zplsc_echogram_file_path):
                                pass
                            else:
                                log.error('Error creating local ZPLSC Echogram storage directory: %s', ex.message)
                                raise
                        break

                if zplsc_echogram_file_path is not None:
                    # Get the parser for this file and generate the echogram.
                    parser = ZplscCParser(CONFIG, file_handle, self.rec_exception_callback)
                    parser.create_echogram(zplsc_echogram_file_path)
                else:
                    log.warning('The subsite is not one of the subsites containing a ZPLSC-C instrument.')

        else:  # We are creating 24-hour echograms ...
            # Create the temporary data file directory.
            self.temp_directory = os.path.join(os.path.expanduser(USER_HOME), TEMP_DIR)
            if not os.path.exists(self.temp_directory):
                os.mkdir(self.temp_directory)

            # Create the echograms for the zplsc instruments of each subsite.
            for subsite in self.subsites:
                zplsc_24_subsite_prefix = subsite + '-'

                try:
                    deployments = self.get_deployment_dirs(subsite)
                except OSError:
                    continue

                for deployment in deployments:
                    zplsc_24_deployment_prefix = zplsc_24_subsite_prefix + 'R' + str(deployment) + '-'

                    try:
                        echogram_dates, date_dirs_path = self.get_date_dirs(subsite, deployment)
                    except OSError:
                        continue

                    for date_dir, entire_month in echogram_dates.items():
                        self.zplsc_24_datafile_prefix = zplsc_24_deployment_prefix + 'sn' + self.serial_num + '-'

                        if entire_month:
                            number_of_days_in_the_month = calendar.monthrange(date_dir.year, date_dir.month)[1]
                            for day in range(number_of_days_in_the_month):
                                echogram_date = date_dir + timedelta(days=day)

                                # Aggregate the 24 raw data files for the given instrument to 1 24-hour data file.
                                zplsc_24_datafile, zplsc_echogram_file_path = self.aggregate_raw_data(date_dirs_path,
                                                                                                      echogram_date)
                                if not zplsc_24_datafile:
                                    log.warning('Unable to aggregate raw data files for %s under %s',
                                                echogram_date, date_dirs_path)
                                    continue

                                # Send the 24-hour raw data file to the zplsc C Series parser to generate the echogram.
                                with open(zplsc_24_datafile) as file_handle:
                                    parser = ZplscCParser(CONFIG, file_handle, self.rec_exception_callback)
                                    parser.create_echogram(zplsc_echogram_file_path)

                                if not self.keep_temp_files:
                                    self.purge_temporary_files()

                        else:
                            # Aggregate the 24 raw data files for the given instrument to 1 24-hour data file.
                            zplsc_24_datafile, zplsc_echogram_file_path = self.aggregate_raw_data(date_dirs_path, date_dir)

                            if not zplsc_24_datafile:
                                log.warning('Unable to aggregate raw data files for %s under %s', date_dir, date_dirs_path)
                                continue

                            # Send the 24-hour raw data file to the zplsc C Series parser to generate the echogram.
                            with open(zplsc_24_datafile) as file_handle:
                                parser = ZplscCParser(CONFIG, file_handle, self.rec_exception_callback)
                                parser.create_echogram(zplsc_echogram_file_path)

                            if not self.keep_temp_files:
                                self.purge_temporary_files()

            # Remove the temporary data file directory and its content.
            if not self.keep_temp_files:
                shutil.rmtree(self.temp_directory)

            # If it's running as a daily process, wait 24 hours and re-run this method
            if self.process_mode:
                threading.Timer(SECONDS_IN_DAY, self.generate_zplsc_echograms).start()
Esempio n. 48
                raise SystemError('exception writing to zmq socket: ' + str(e))
            
        log.trace('Awaiting reply.')
        start_reply = time.time()
        while True:
            try:
                # Attempt reply recv. Retry if necessary.
                reply = self.zmq_cmd_socket.recv_pyobj(flags=zmq.NOBLOCK)
                # Reply received, break and return.
                break
            except zmq.ZMQError:
                # Socket not ready with the reply. Sleep and retry later.
                time.sleep(.5)
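                # Accumulate elapsed wait time; fail once it exceeds driver_timeout.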
                delta = time.time() - start_reply
                if delta >= driver_timeout:
                    raise InstDriverClientTimeoutError()

            except Exception as e:
                log.error('Driver client error reading from zmq socket: ' + str(e))
                log.error('Driver client error type: ' + str(type(e)))
                raise SystemError('exception reading from zmq socket: ' + str(e))
                
        log.trace('Reply: %r', reply)

        # Exception information is returned as a tuple (code, message, stacks)
        if isinstance(reply, tuple) and len(reply) == 3:
            log.error('Proceeding to raise exception with these args: ' + str(reply))
            raise EXCEPTION_FACTORY.create_exception(*reply)
        else:
            return reply
Esempio n. 49
    def enqueue(self, event):
        try:
            json.dumps(event)
            self._deque.append(event)
        except Exception as e:
            log.error('Unable to encode event as JSON: %r', e)
    def input_is_valid(self):
        """
        This method validates the command line parameters entered.

        :return valid_input:  Boolean indicating whether all the inputs are valid.
        """

        valid_input = True

        # Get the configuration parameters.
        zplsc_config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), ZPLSC_CONFIG_FILE)
        zplsc_config = None
        try:
            with open(zplsc_config_file, 'r') as config_file:
                try:
                    zplsc_config = yaml.safe_load(config_file)  # safe_load suffices for plain config data
                except yaml.YAMLError as ex:
                    log.error('Error loading the configuration file: %s: %s', zplsc_config_file, ex.message)
                    valid_input = False
        except IOError as ex:
            log.error('Error opening configuration file: %s: %s', zplsc_config_file, ex.message)
            valid_input = False

        if valid_input:
            if self.zplsc_datafile is not None and not os.path.isfile(self.zplsc_datafile):
                log.error('Invalid data file: %s', self.zplsc_datafile)
                valid_input = False

        if valid_input:
            self.zplsc_subsites = zplsc_config['zplsc_subsites']
            self.raw_data_dir = zplsc_config['raw_data_dir']

            # If this is a process run or we are processing all the subsites
            if self.process_mode or self.all_subsites:
                self.subsites = self.zplsc_subsites

            # If we are not generating a 1-hour echogram, validate the subsites in the list.
            if self.zplsc_datafile is None:
                for subsite in self.subsites:
                    if subsite not in self.zplsc_subsites:
                        log.error('Subsite is not in the list of subsites with ZPLSC instrumentation: %s', subsite)
                        valid_input = False
                        break

        if valid_input and self.deployments:
            if not isinstance(self.deployments, types.ListType):
                self.deployments = [self.deployments]

            for index in range(len(self.deployments)):
                try:
                    self.deployments[index] = int(self.deployments[index])

                except ValueError as ex:
                    log.error('Invalid deployment number: %s: %s', self.deployments[index], ex.message)
                    valid_input = False
                    break

        if valid_input:
            if self.echogram_dates is False:
                log.error('Invalid echogram date(s)')
                valid_input = False

        if valid_input:
            self.base_echogram_directory = zplsc_config.get('zplsc_echogram_directory', BASE_ECHOGRAM_DIRECTORY)

        return valid_input