def we_split_function(self, raw_data):
        """
        Sort through the raw data to identify new blocks of data that need processing.
        This is needed instead of a regex because blocks are identified by position
        in this binary file.
        """
	
        form_list = []
        raw_data_len = len(raw_data)

        """
        Ok, there is a new issue with parsing these records.  The Status messages
        can have an optional 2 bytes on the end, and since the rest of the data
        is relatively unformatted packed binary records, detecting the presence of
        that optional 2 bytes can be difficult.  The only pattern we have to detect
        is the STATUS_START field (4 bytes, FF FF FF F[A-F]).  So we peel this
        apart by parsing backwards, using the end-of-record as an additional anchor
        point.
        """

        # '-1' to remove the '\x03' end-of-record marker
        parse_end_point = raw_data_len - 1
        while parse_end_point > 0:

            # look for a status message at the postulated message header position
            header_start = STATUS_BYTES_AUGMENTED

            # look for an augmented status
            if STATUS_START_MATCHER.match(raw_data[parse_end_point - STATUS_BYTES_AUGMENTED:parse_end_point]):
                # A hit for the status message at the augmented offset.
                # NOTE: we don't need the status messages, so we drop them on the floor here
                # and only deliver a stream of samples to build_parsed_values
                parse_end_point = parse_end_point - STATUS_BYTES_AUGMENTED

            # check if this is an unaugmented status
            elif STATUS_START_MATCHER.match(raw_data[parse_end_point - STATUS_BYTES:parse_end_point]):
                # A hit for the status message at the unaugmented offset
                # NOTE: same as above
                parse_end_point = parse_end_point - STATUS_BYTES

            else:
                # Assume that if no status matched above, we have a sample.  If we mis-parse, we
                # will end up with extra bytes when we finish and raise a sample exception at that point.
                form_list.append((parse_end_point - E_GLOBAL_SAMPLE_BYTES, parse_end_point))
                parse_end_point = parse_end_point - E_GLOBAL_SAMPLE_BYTES

            # if the remaining bytes are too few to hold any known record, the inferred
            # sample/status alignment must be wrong
            if parse_end_point != 0 and parse_end_point < STATUS_BYTES \
                    and parse_end_point < E_GLOBAL_SAMPLE_BYTES \
                    and parse_end_point < STATUS_BYTES_AUGMENTED:
                self._exception_callback(UnexpectedDataException(
                    "Error sieving WE data, inferred sample/status alignment incorrect"))
                return_list = []
                return return_list

        # since we parsed this backwards, we need to reverse the list to deliver the data in the correct order
        return_list = form_list[::-1]
        log.debug("returning we sieve list %s", return_list)
        return return_list
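
The matcher and record-size constants referenced by this sieve are defined at the parser's module scope and are not shown in these listings. A rough sketch of what they presumably look like, based only on the comments above (the byte counts are placeholders, not the real record sizes):

import re

# Sketch only: the status flag is documented above as 4 bytes FF FF FF F[A-F],
# so the matcher is presumably a compiled pattern along these lines.
STATUS_START_MATCHER = re.compile(b'\xff\xff\xff[\xfa-\xff]')

# Placeholder sizes -- the real values come from the instrument's packed
# binary format and are defined elsewhere in the parser module.
STATUS_BYTES = 16                           # plain status record (assumed size)
STATUS_BYTES_AUGMENTED = STATUS_BYTES + 2   # status record with the optional 2 trailing bytes
E_GLOBAL_SAMPLE_BYTES = 30                  # one engineering sample record (assumed size)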
Example #2
    def sieve_function(self, raw_data):
        """
        This method sorts through the raw data to identify new blocks of data that need
        processing.  This is needed instead of a regex because blocks are identified by
        position in this binary file.
        """
        form_list = []
        raw_data_len = len(raw_data)

        # Starting from the end of the buffer and working backwards
        parse_end_point = raw_data_len

        # We are going to go through the file data in reverse order since we have a
        # variable length status indicator field.
        # While we do not hit the beginning of the file contents, continue
        while parse_end_point > 0:

            # Create the different start indices for the three different scenarios
            raw_data_start_index_augmented = parse_end_point - STATUS_BYTES_AUGMENTED
            raw_data_start_index_normal = parse_end_point - STATUS_BYTES
            global_recovered_eng_rec_index = parse_end_point - WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_BYTES

            # Check for an augmented status first
            if raw_data_start_index_augmented >= 0 and \
                    STATUS_START_MATCHER.match(raw_data[raw_data_start_index_augmented:parse_end_point]):
                log.trace("Found OffloadProfileData with decimation factor")
                parse_end_point = raw_data_start_index_augmented

            # Check for a normal status
            elif raw_data_start_index_normal >= 0 and \
                    STATUS_START_MATCHER.match(raw_data[raw_data_start_index_normal:parse_end_point]):
                log.trace("Found OffloadProfileData without decimation factor")
                parse_end_point = raw_data_start_index_normal

            # If neither, we are dealing with a global wfp_sio e recovered engineering data record,
            # so we will save the start and end points
            elif global_recovered_eng_rec_index >= 0:
                log.trace("Found OffloadEngineeringData")
                form_list.append(
                    (global_recovered_eng_rec_index, parse_end_point))
                parse_end_point = global_recovered_eng_rec_index

            # We must not have a good file, log some debug info for now
            else:
                log.debug("raw_data_start_index_augmented %d",
                          raw_data_start_index_augmented)
                log.debug("raw_data_start_index_normal %d",
                          raw_data_start_index_normal)
                log.debug("global_recovered_eng_rec_index %d",
                          global_recovered_eng_rec_index)
                log.debug("bad file or bad position?")
                raise SampleException(
                    "File size is invalid or improper positioning")

        return_list = form_list[::-1]

        return return_list
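
The same backwards-anchored scan is easier to see on a toy format; the sizes and flag below are invented purely for illustration and are not the real WFP record definitions:

import re

# Invented toy format (not the WFP constants): fixed 4-byte samples terminated
# by a status record that is either 6 or 8 bytes long and starts with b'\xff\xff'.
TOY_SAMPLE_BYTES = 4
TOY_STATUS_BYTES = 6
TOY_STATUS_BYTES_AUGMENTED = 8
TOY_STATUS_MATCHER = re.compile(b'\xff\xff')

def toy_sieve(buf):
    spans = []
    end = len(buf)
    while end > 0:
        # try the longer (augmented) status first, anchored on the current end point
        if end >= TOY_STATUS_BYTES_AUGMENTED and TOY_STATUS_MATCHER.match(buf[end - TOY_STATUS_BYTES_AUGMENTED:end]):
            end -= TOY_STATUS_BYTES_AUGMENTED
        elif end >= TOY_STATUS_BYTES and TOY_STATUS_MATCHER.match(buf[end - TOY_STATUS_BYTES:end]):
            end -= TOY_STATUS_BYTES
        else:
            # anything that is not a status record is assumed to be a sample
            spans.append((end - TOY_SAMPLE_BYTES, end))
            end -= TOY_SAMPLE_BYTES
    return spans[::-1]    # reverse so the spans come out in file order

# two samples followed by one augmented (8-byte) status record
print(toy_sieve(b'AAAA' + b'BBBB' + b'\xff\xff\x00\x01\x02\x03\x04\x05'))    # [(0, 4), (4, 8)]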
Example #3
    def we_split_function(self, raw_data):
        """
        Sort through the raw data to identify new blocks of data that need processing.
        """
        form_list = []
        """
        The Status messages can have an optional 2 bytes on the end, and since the
        rest of the data consists of relatively unformatted packed binary records,
        detecting the presence of that optional 2 bytes can be difficult. The only
        pattern we have to detect is the STATUS_START field (4 bytes, FF FF FF F[A-F]).
        We peel this apart by parsing backwards, using the end-of-record as an
        additional anchor point.
        """
        parse_end_point = len(raw_data)
        while parse_end_point > 0:

            # look for a status message at the postulated message header position

            header_start = STATUS_BYTES_AUGMENTED
            # look for an augmented status
            if STATUS_START_MATCHER.match(
                    raw_data[parse_end_point -
                             STATUS_BYTES_AUGMENTED:parse_end_point]):
                # A hit for the status message at the augmented offset
                # NOTE, we don't need the status messages and only deliver a stream of
                # samples to build_parsed_values
                parse_end_point = parse_end_point - STATUS_BYTES_AUGMENTED

            # check if this is an unaugmented status
            elif STATUS_START_MATCHER.match(
                    raw_data[parse_end_point - STATUS_BYTES:parse_end_point]):
                # A hit for the status message at the unaugmented offset
                # NOTE: same as above
                parse_end_point = parse_end_point - STATUS_BYTES
            else:
                # Assume that if no status matched above, we have a sample. Mis-parsing will result
                # in extra bytes at the end and a sample exception.
                form_list.append(
                    (parse_end_point - E_GLOBAL_SAMPLE_BYTES, parse_end_point))
                parse_end_point = parse_end_point - E_GLOBAL_SAMPLE_BYTES

            # if the remaining bytes are too few to hold any known record, the inferred sample/status alignment is wrong
            if parse_end_point != 0 and parse_end_point < STATUS_BYTES \
                    and parse_end_point < E_GLOBAL_SAMPLE_BYTES  \
                    and parse_end_point < STATUS_BYTES_AUGMENTED:
                self._exception_callback(
                    UnexpectedDataException(
                        "Error sieving WE data, inferred sample/status alignment incorrect"
                    ))
                return_list = []
                return return_list

        # Because we parsed this backwards, we need to reverse the list to deliver the data in the correct order
        return_list = form_list[::-1]
        log.debug("returning we sieve/split list %s", return_list)
        return return_list
Example #4
    def we_split_function(self, raw_data):
        """
        Sort through the raw data to identify new blocks of data that need processing.
        """
        form_list = []

        """
        The Status messages can have an optional 2 bytes on the end, and since the
        rest of the data consists of relatively unformatted packed binary records,
        detecting the presence of that optional 2 bytes can be difficult. The only
        pattern we have to detect is the STATUS_START field (4 bytes, FF FF FF F[A-F]).
        We peel this apart by parsing backwards, using the end-of-record as an
        additional anchor point.
        """
        parse_end_point = len(raw_data)
        while parse_end_point > 0:

            # look for a status message at the postulated message header position

            header_start = STATUS_BYTES_AUGMENTED
            # look for an augmented status
            if STATUS_START_MATCHER.match(raw_data[parse_end_point - STATUS_BYTES_AUGMENTED : parse_end_point]):
                # A hit for the status message at the augmented offset
                # NOTE, we don't need the status messages and only deliver a stream of
                # samples to build_parsed_values
                parse_end_point = parse_end_point - STATUS_BYTES_AUGMENTED

            # check if this is an unaugmented status
            elif STATUS_START_MATCHER.match(raw_data[parse_end_point - STATUS_BYTES : parse_end_point]):
                # A hit for the status message at the unaugmented offset
                # NOTE: same as above
                parse_end_point = parse_end_point - STATUS_BYTES
            else:
                # Assume that if no status matched above, we have a sample. Mis-parsing will result
                # in extra bytes at the end and a sample exception.
                form_list.append((parse_end_point - E_GLOBAL_SAMPLE_BYTES, parse_end_point))
                parse_end_point = parse_end_point - E_GLOBAL_SAMPLE_BYTES

            # if the remaining bytes are too few to hold any known record, the inferred sample/status alignment is wrong
            if (
                parse_end_point != 0
                and parse_end_point < STATUS_BYTES
                and parse_end_point < E_GLOBAL_SAMPLE_BYTES
                and parse_end_point < STATUS_BYTES_AUGMENTED
            ):
                self._exception_callback(
                    UnexpectedDataException("Error sieving WE data, inferred sample/status alignment incorrect")
                )
                return_list = []
                return return_list

        # Because we parsed this backwards, we need to reverse the list to deliver the data in the correct order
        return_list = form_list[::-1]
        log.debug("returning we sieve/split list %s", return_list)
        return return_list
Example #5
    def sieve_function(self, raw_data):
        """
        This method sorts through the raw data to identify new blocks of data that need
        processing.  This is needed instead of a regex because blocks are identified by
        position in this binary file.
        """
        form_list = []
        raw_data_len = len(raw_data)

        # Starting from the end of the buffer and working backwards
        parse_end_point = raw_data_len

        # We are going to go through the file data in reverse order since we have a
        # variable length status indicator field.
        # While we do not hit the beginning of the file contents, continue
        while parse_end_point > 0:

            # Create the different start indices for the three different scenarios
            raw_data_start_index_augmented = parse_end_point-STATUS_BYTES_AUGMENTED
            raw_data_start_index_normal = parse_end_point-STATUS_BYTES
            global_recovered_eng_rec_index = parse_end_point-WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_BYTES

            # Check for an augmented status first
            if raw_data_start_index_augmented >= 0 and \
                    STATUS_START_MATCHER.match(raw_data[raw_data_start_index_augmented:parse_end_point]):
                log.trace("Found OffloadProfileData with decimation factor")
                parse_end_point = raw_data_start_index_augmented

            # Check for a normal status
            elif raw_data_start_index_normal >= 0 and \
                    STATUS_START_MATCHER.match(raw_data[raw_data_start_index_normal:parse_end_point]):
                log.trace("Found OffloadProfileData without decimation factor")
                parse_end_point = raw_data_start_index_normal

            # If neither, we are dealing with a global wfp_sio e recovered engineering data record,
            # so we will save the start and end points
            elif global_recovered_eng_rec_index >= 0:
                log.trace("Found OffloadEngineeringData")
                form_list.append((global_recovered_eng_rec_index, parse_end_point))
                parse_end_point = global_recovered_eng_rec_index

            # We must not have a good file, log some debug info for now
            else:
                log.debug("raw_data_start_index_augmented %d", raw_data_start_index_augmented)
                log.debug("raw_data_start_index_normal %d", raw_data_start_index_normal)
                log.debug("global_recovered_eng_rec_index %d", global_recovered_eng_rec_index)
                log.debug("bad file or bad position?")
                raise SampleException("File size is invalid or improper positioning")

        return_list = form_list[::-1]

        return return_list
Example #6
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
        """

        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()
        
        while chunk is not None:
            # Parse/match the SIO header
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)
            end_of_header = sio_header_match.end(0)

            sample_count = 0
            if sio_header_match.group(1) == 'WE':
                log.trace('read_state: %s', self._read_state)

                # Parse/match the E file header
                e_header_match = E_HEADER_MATCHER.search(chunk[end_of_header:end_of_header + HEADER_BYTES])

                if e_header_match:
                    # '-1' to remove the '\x03' end-of-record marker
                    payload = chunk[end_of_header + HEADER_BYTES:-1]
                    data_split = self.we_split_function(payload)
                    if data_split:
                        for ii in range(0, len(data_split)):
                            e_record = payload[data_split[ii][0]:data_split[ii][1]]

                            if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):
                                fields = struct.unpack('>I', e_record[0:4])
                                self._timestamp = ntplib.system_to_ntp_time(float(fields[0]))

                                if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                                    sample = self._extract_sample(FlordLWfpParserDataParticle,
                                                                  None,
                                                                  e_record,
                                                                  self._timestamp)
                                    if sample:
                                        # create particle
                                        result_particles.append(sample)
                                        sample_count += 1
                                else:
                                    self._exception_callback(UnexpectedDataException("Found unexpected data."))

                else:  # no e header match
                    self._exception_callback(UnexpectedDataException("Found unexpected data."))

            self._chunk_sample_count.append(sample_count)
            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
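
The per-record timestamp handling in these parse_chunks loops is the same everywhere: the leading 4 bytes of an E record appear to hold a big-endian unsigned count of seconds since the Unix epoch, which ntplib then shifts onto the NTP epoch. A small worked sketch with an arbitrary example value:

import struct
import ntplib

# arbitrary example value for the leading 4-byte seconds field of an E record
record_prefix = struct.pack('>I', 1400000000)

fields = struct.unpack('>I', record_prefix[0:4])            # (1400000000,)
ntp_timestamp = ntplib.system_to_ntp_time(float(fields[0]))

# ntplib adds the 2,208,988,800-second offset between the Unix (1970) and NTP (1900) epochs
print(ntp_timestamp)    # 3608988800.0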
Example #7
    def _process_we_record(self, payload):

        indices_list = self.we_split_function(payload)
        for indices in indices_list:
            e_record = payload[indices[0] : indices[1]]

            if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):
                fields = struct.unpack(">I", e_record[0:4])
                self._timestamp = ntplib.system_to_ntp_time(float(fields[0]))

                sample = self._extract_sample(FlordLWfpSioDataParticle, None, e_record, self._timestamp)
                if sample:
                    # create particle
                    self._result_particles.append(sample)
Example #8
    def _process_we_record(self, payload):

        indices_list = self.we_split_function(payload)
        for indices in indices_list:
            e_record = payload[indices[0]:indices[1]]

            if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):
                fields = struct.unpack('>I', e_record[0:4])
                self._timestamp = ntplib.system_to_ntp_time(float(fields[0]))

                sample = self._extract_sample(FlordLWfpSioDataParticle, None,
                                              e_record, self._timestamp)
                if sample:
                    # create particle
                    self._result_particles.append(sample)
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """            
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            sample_count = 0
            log.debug('parsing header %s', sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) == 'WE':
                log.trace("********************************matched chunk header %s", chunk[0:SIO_HEADER_BYTES])

                # Parse/match the E file header
                e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                    chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1])

                if e_header_match:

                    log.debug('******************************* HEADER MATCH WAS:')
                    log.debug('%s', ":".join("{:02x}".format(ord(c))
                                             for c in chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1]))
                    payload = chunk[SIO_HEADER_BYTES + HEADER_BYTES + 1:]

                    data_split = self.we_split_function(payload)
                    if data_split:
                        log.debug('Found data match in chunk %s', chunk[1:SIO_HEADER_BYTES])
                        for ii in range(0, len(data_split)):
                            e_record = payload[data_split[ii][0]:data_split[ii][1]]

                            # particle-ize the data block received, return the record
                            if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                                fields = struct.unpack('>I', e_record[0:4])
                                timestampS = float(fields[0])
                                timestamp = ntplib.system_to_ntp_time(timestampS)

                                if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                                    sample = self._extract_sample(DostaLnWfpSioMuleParserDataParticle,
                                                                  None,
                                                                  e_record,
                                                                  timestamp)
                                    if sample:
                                        # create particle
                                        result_particles.append(sample)
                                        sample_count += 1

                else:  # no e header match
                    log.warn("*****************************************************BAD E HEADER 0x%s",
                             ":".join("{:02x}".format(ord(c)) for c in chunk))
                    self._exception_callback(UnexpectedDataException("Found unexpected data."))

            self._chunk_sample_count.append(sample_count)

            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()

        return result_particles
Example #10
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
        """

        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            # Parse/match the SIO header
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)
            end_of_header = sio_header_match.end(0)

            sample_count = 0
            if sio_header_match.group(1) == 'WE':
                log.trace('read_state: %s', self._read_state)

                # Parse/match the E file header
                e_header_match = E_HEADER_MATCHER.search(
                    chunk[end_of_header:end_of_header + HEADER_BYTES])

                if e_header_match:
                    payload = chunk[
                        end_of_header + HEADER_BYTES:
                        -1]  # '-1' to remove the '\x03' end-of-record marker
                    data_split = self.we_split_function(payload)
                    if data_split:
                        for ii in range(0, len(data_split)):
                            e_record = payload[
                                data_split[ii][0]:data_split[ii][1]]

                            if not STATUS_START_MATCHER.match(
                                    e_record[0:STATUS_BYTES]):
                                fields = struct.unpack('>I', e_record[0:4])
                                self._timestamp = ntplib.system_to_ntp_time(
                                    float(fields[0]))

                                if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                                    sample = self._extract_sample(
                                        FlordLWfpSioMuleParserDataParticle,
                                        None, e_record, self._timestamp)
                                    if sample:
                                        # create particle
                                        result_particles.append(sample)
                                        sample_count += 1
                                else:
                                    self._exception_callback(
                                        UnexpectedDataException(
                                            "Found unexpected data."))

                else:  # no e header match
                    self._exception_callback(
                        UnexpectedDataException("Found unexpected data."))

            self._chunk_sample_count.append(sample_count)
            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
Example #11
    def _process_engineering_data(self, profile_eng_data):
        """
        This method processes profile engineering data attempting to create
         the following particle types along the way:
            WfpEngWfpSioParserDataStatusParticle
            WfpEngWfpSioParserDataEngineeringParticle
        """

        # Start from the end of the chunk and working backwards
        parse_end_point = len(profile_eng_data)

        # We are going to go through the file data in reverse order since we have a
        # variable length sample record that could have a decimation factor.
        # While we do not hit the beginning of the file contents, continue
        while parse_end_point > 0:

            # Reset sample each pass so the bad-data branch below cannot re-append
            # a stale particle from a previous iteration
            sample = None

            # Create the different start indices for the three different scenarios
            start_index_augmented = parse_end_point-STATUS_BYTES_AUGMENTED
            start_index_normal = parse_end_point-STATUS_BYTES
            global_recovered_eng_rec_index = parse_end_point-WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_BYTES

            # Check for an augmented status first
            if start_index_augmented >= 0 and \
                    STATUS_START_MATCHER.match(profile_eng_data[start_index_augmented:parse_end_point]):
                log.trace("Found OffloadProfileData with decimation factor")

                fields_prof = struct.unpack_from('>I', profile_eng_data[start_index_augmented+8:])
                timestamp = fields_prof[0]

                sample = self._extract_sample(WfpEngWfpSioParserDataStatusParticle, None,
                                              self._current_controller_timestamp +
                                              profile_eng_data[start_index_augmented:parse_end_point],
                                              float(ntplib.system_to_ntp_time(timestamp)))

                # Set the new end point
                parse_end_point = start_index_augmented

            # Check for a normal status
            elif start_index_normal >= 0 and \
                    STATUS_START_MATCHER.match(profile_eng_data[start_index_normal:parse_end_point]):
                log.trace("Found OffloadProfileData without decimation factor")

                fields_prof = struct.unpack_from('>I', profile_eng_data[start_index_normal+8:])
                timestamp = fields_prof[0]

                sample = self._extract_sample(WfpEngWfpSioParserDataStatusParticle, None,
                                              self._current_controller_timestamp +
                                              profile_eng_data[start_index_normal:parse_end_point],
                                              float(ntplib.system_to_ntp_time(timestamp)))

                parse_end_point = start_index_normal

            # If neither, we are dealing with a global wfp_sio e recovered engineering data record,
            # so we will save the start and end points
            elif global_recovered_eng_rec_index >= 0:
                log.trace("Found OffloadEngineeringData")

                fields_prof = struct.unpack_from('>I', profile_eng_data[global_recovered_eng_rec_index:])
                timestamp = fields_prof[0]

                sample = self._extract_sample(WfpEngWfpSioParserDataEngineeringParticle, None,
                                              profile_eng_data[
                                              global_recovered_eng_rec_index:parse_end_point],
                                              float(ntplib.system_to_ntp_time(timestamp)))

                # Set the new end point
                parse_end_point = global_recovered_eng_rec_index

            # We must not have a good file, log some debug info for now
            else:
                log.debug("start_index_augmented %d", start_index_augmented)
                log.debug("start_index_normal %d", start_index_normal)
                log.debug("global_recovered_eng_rec_index %d", global_recovered_eng_rec_index)
                self._exception_callback(SampleException("Data invalid"))

            if sample:
                log.trace("Sample found: %s", sample.generate())
                # create particle
                self._result_particles.append(sample)
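
Both status branches above pull the timestamp with struct.unpack_from('>I', profile_eng_data[start_index + 8:]): unpack_from reads just the first 4 bytes of whatever slice it is given, so the +8 skips eight bytes of status header ahead of the timestamp field. A minimal sketch with a hypothetical record layout (only the leading FF FF FF F[A-F] flag is documented in these listings):

import struct

# Hypothetical status record: 4-byte STATUS_START flag, 4 unspecified bytes,
# a big-endian uint32 timestamp, then the optional 2 augmented bytes.
status_record = b'\xff\xff\xff\xfa' + b'\x00' * 4 + struct.pack('>I', 1400000000) + b'\x00\x00'

# unpack_from reads one '>I' (4 bytes) from the start of the slice and ignores
# anything after it, unlike struct.unpack, which requires an exact-length buffer.
fields_prof = struct.unpack_from('>I', status_record[8:])
print(fields_prof[0])    # 1400000000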
Example #12
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            log.debug('parsing header %s',
                      sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) != 'WE':
                log.warn(" chunk did not match header WE %s",
                         chunk[0:SIO_HEADER_BYTES])
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            # Parse/match the E file header
            e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1])

            if not e_header_match:
                # no e header match
                log.warn("*BAD E HEADER 0x%s",
                         ":".join("{:02x}".format(ord(c)) for c in chunk))
                self._exception_callback(
                    UnexpectedDataException("Found unexpected data."))
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            payload = chunk[SIO_HEADER_BYTES + HEADER_BYTES + 1:]

            data_split = self.we_split_function(payload)

            log.debug('Found data match in chunk %s',
                      chunk[1:SIO_HEADER_BYTES])
            for ii in range(0, len(data_split)):
                e_record = payload[data_split[ii][0]:data_split[ii][1]]
                log.debug('Extracted E Record to store in particle %s',
                          hexlify(e_record))

                # particle-ize the data block received, return the record
                if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                    fields = struct.unpack('>I', e_record[0:4])
                    timestamp_s = float(fields[0])
                    timestamp = ntplib.system_to_ntp_time(timestamp_s)

                    if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                        # create particle
                        log.debug('Particle created with raw data %s',
                                  hexlify(e_record))
                        log.debug('Particle timestamp = %f', timestamp)
                        sample = self._extract_sample(
                            DostaLnWfpSioDataParticle, None, e_record,
                            timestamp)
                        result_particles.append(sample)

            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
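
hexlify in the debug logging above presumably comes from binascii; it just renders the packed binary record as readable hex:

from binascii import hexlify

record = b'\x4f\x9b\x2c\x00\x01\x02'    # arbitrary example bytes
print(hexlify(record))                  # b'4f9b2c000102'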
Example #13
    def we_split_function(self, raw_data):
        """
        Sort through the raw data to identify new blocks of data that need processing.
        This is needed instead of a regex because blocks are identified by position
        in this binary file.
        """

        form_list = []
        raw_data_len = len(raw_data)
        """
        The Status messages
        can have an optional 2 bytes on the end, and since the rest of the data
        is relatively un-formatted packed binary records, detecting the presence of
        that optional 2 bytes can be difficult.  The only pattern we have to detect
        is the STATUS_START field ( 4 bytes FF FF FF F[A-F] ).  Peel this
        apart by parsing backwards, using the end-of-record as an additional anchor
        point.
        """

        # '-1' to remove the '\x03' end-of-record marker
        parse_end_point = raw_data_len - 1
        while parse_end_point > 0:

            # look for a status message at the postulated message header position

            # look for an augmented status
            if STATUS_START_MATCHER.match(
                    raw_data[parse_end_point -
                             STATUS_BYTES_AUGMENTED:parse_end_point]):
                # A hit for the status message at the augmented offset
                # NOTE, we don't need the status messages, so we drop them on the floor here
                # and only deliver a stream of samples to build_parse_values
                parse_end_point = parse_end_point - STATUS_BYTES_AUGMENTED

            # check if this is an un-augmented status
            elif STATUS_START_MATCHER.match(
                    raw_data[parse_end_point - STATUS_BYTES:parse_end_point]):
                # A hit for the status message at the un-augmented offset
                # NOTE: same as above
                parse_end_point = parse_end_point - STATUS_BYTES

            else:
                # Assume that if no status matched above, we have a sample.  If we mis-parse, we
                # will end up with extra bytes when we finish and raise a sample exception at that point.
                form_list.append(
                    (parse_end_point - E_GLOBAL_SAMPLE_BYTES, parse_end_point))
                parse_end_point = parse_end_point - E_GLOBAL_SAMPLE_BYTES

            # if the remaining bytes are too few to hold any known record, the
            # inferred sample/status alignment must be wrong
            if parse_end_point != 0 and \
               parse_end_point < STATUS_BYTES and \
               parse_end_point < E_GLOBAL_SAMPLE_BYTES and \
               parse_end_point < STATUS_BYTES_AUGMENTED:

                self._exception_callback(
                    UnexpectedDataException(
                        "Error sieving WE data, inferred sample/status alignment incorrect"
                    ))
                return_list = []
                return return_list

        # since we parsed this backwards, we need to reverse the list to deliver the data in the correct order
        return_list = form_list[::-1]
        log.debug("returning we sieve list %s", return_list)
        return return_list
Example #14
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)

        while chunk is not None:

            header_match = SIO_HEADER_MATCHER.match(chunk)
            sample_count = 0

            # Check to see if we are dealing with a wfp_eng SIO chunk
            if header_match.group(1) == 'WE':

                data_match = DATA_MATCHER.match(chunk[len(header_match.group(0)):])

                controller_timestamp = header_match.group(3)

                if data_match:

                    sensor_profile_start_time_data = data_match.group(2)

                    if sensor_profile_start_time_data:

                        # Need to unpack the sensor profile start time timestamp
                        fields_prof = struct.unpack_from('>I', sensor_profile_start_time_data[4:])
                        timestamp = fields_prof[0]

                        sample = self._extract_sample(WfpEngWfpSioMuleParserDataStartTimeParticle, None,
                                                      controller_timestamp +
                                                      sensor_profile_start_time_data,
                                                      float(ntplib.system_to_ntp_time(timestamp)))
                        if sample:
                            log.debug("Sample found: %s", sample)
                            # create particle
                            result_particles.append(sample)
                            sample_count += 1

                    profile_eng_data = data_match.group(3)

                    # Start from the end of the chunk and working backwards
                    parse_end_point = len(profile_eng_data)

                    # We are going to go through the file data in reverse order since we have a
                    # variable length sample record that could have a decimation factor.
                    # While we do not hit the beginning of the file contents, continue
                    while parse_end_point > 0:

                        # Reset sample each pass so the bad-data branch below cannot
                        # re-append a stale particle from a previous iteration
                        sample = None

                        # Create the different start indices for the three different scenarios
                        start_index_augmented = parse_end_point-STATUS_BYTES_AUGMENTED
                        start_index_normal = parse_end_point-STATUS_BYTES
                        global_recovered_eng_rec_index = parse_end_point-WFP_E_GLOBAL_RECOVERED_ENG_DATA_SAMPLE_BYTES

                        # Check for an augmented status first
                        if start_index_augmented >= 0 and \
                                STATUS_START_MATCHER.match(profile_eng_data[start_index_augmented:parse_end_point]):
                            log.debug("Found OffloadProfileData with decimation factor")

                            fields_prof = struct.unpack_from('>I', profile_eng_data[start_index_augmented+8:])
                            timestamp = fields_prof[0]

                            sample = self._extract_sample(WfpEngWfpSioMuleParserDataStatusParticle, None,
                                                          controller_timestamp +
                                                          profile_eng_data[start_index_augmented:parse_end_point],
                                                          float(ntplib.system_to_ntp_time(timestamp)))

                            # Set the new end point
                            parse_end_point = start_index_augmented

                        # Check for a normal status
                        elif start_index_normal >= 0 and \
                                STATUS_START_MATCHER.match(profile_eng_data[start_index_normal:parse_end_point]):
                            log.debug("Found OffloadProfileData without decimation factor")

                            fields_prof = struct.unpack_from('>I', profile_eng_data[start_index_normal+8:])
                            timestamp = fields_prof[0]

                            sample = self._extract_sample(WfpEngWfpSioMuleParserDataStatusParticle, None,
                                                          controller_timestamp +
                                                          profile_eng_data[start_index_normal:parse_end_point],
                                                          float(ntplib.system_to_ntp_time(timestamp)))

                            parse_end_point = start_index_normal

                        # If neither, we are dealing with a global wfp e recovered engineering data record,
                        # so we will save the start and end points
                        elif global_recovered_eng_rec_index >= 0:
                            log.debug("Found OffloadEngineeringData")

                            fields_prof = struct.unpack_from('>I', profile_eng_data[global_recovered_eng_rec_index:])
                            timestamp = fields_prof[0]

                            sample = self._extract_sample(WfpEngWfpSioMuleParserDataEngineeringParticle, None,
                                                          profile_eng_data[
                                                          global_recovered_eng_rec_index:parse_end_point],
                                                          float(ntplib.system_to_ntp_time(timestamp)))

                            # Set the new end point
                            parse_end_point = global_recovered_eng_rec_index

                        # We must not have a good file, log some debug info for now
                        else:
                            log.debug("start_index_augmented %d", start_index_augmented)
                            log.debug("start_index_normal %d", start_index_normal)
                            log.debug("global_recovered_eng_rec_index %d", global_recovered_eng_rec_index)
                            self._exception_callback(SampleException("Data invalid"))

                        if sample:
                            log.debug("Sample found: %s", sample)
                            # create particle
                            result_particles.append(sample)
                            sample_count += 1

                else:
                    self._exception_callback(SampleException("Unexpected data"))

            # Important:  We must set the sample count for every chunk
            self._chunk_sample_count.append(sample_count)

            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()

        return result_particles
Example #15
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            log.debug('parsing header %s', sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) != 'WE':
                log.warn(" chunk did not match header WE %s", chunk[0:SIO_HEADER_BYTES])
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            # Parse/match the E file header
            e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES+HEADER_BYTES+1])

            if not e_header_match:
                # no e header match
                log.warn("*BAD E HEADER 0x%s",
                         ":".join("{:02x}".format(ord(c)) for c in chunk))
                self._exception_callback(UnexpectedDataException("Found unexpected data."))
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            payload = chunk[SIO_HEADER_BYTES+HEADER_BYTES+1:]

            data_split = self.we_split_function(payload)

            log.debug('Found data match in chunk %s', chunk[1:SIO_HEADER_BYTES])
            for ii in range(0, len(data_split)):
                e_record = payload[data_split[ii][0]:data_split[ii][1]]
                log.debug('Extracted E Record to store in particle %s', hexlify(e_record))

                # particle-ize the data block received, return the record
                if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                    fields = struct.unpack('>I', e_record[0:4])
                    timestamp_s = float(fields[0])
                    timestamp = ntplib.system_to_ntp_time(timestamp_s)

                    if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                        # create particle
                        log.debug('Particle created with raw data %s', hexlify(e_record))
                        log.debug('Particle timestamp = %f', timestamp)
                        sample = self._extract_sample(DostaLnWfpSioDataParticle,
                                                      None,
                                                      e_record,
                                                      timestamp)
                        result_particles.append(sample)

            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
Example #16
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            sample_count = 0
            log.debug('parsing header %s',
                      sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) == 'WE':
                log.trace(
                    "********************************matched chunk header %s",
                    chunk[0:SIO_HEADER_BYTES])

                # Parse/match the E file header
                e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                    chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES +
                          1])

                if e_header_match:

                    log.debug(
                        '******************************* HEADER MATCH WAS:')
                    log.debug(
                        '%s', ":".join(
                            "{:02x}".format(ord(c))
                            for c in chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES +
                                           HEADER_BYTES + 1]))
                    payload = chunk[SIO_HEADER_BYTES + HEADER_BYTES + 1:]

                    data_split = self.we_split_function(payload)
                    if data_split:
                        log.debug('Found data match in chunk %s',
                                  chunk[1:SIO_HEADER_BYTES])
                        for ii in range(0, len(data_split)):
                            e_record = payload[
                                data_split[ii][0]:data_split[ii][1]]

                            # particle-ize the data block received, return the record
                            if not STATUS_START_MATCHER.match(
                                    e_record[0:STATUS_BYTES]):

                                fields = struct.unpack('>I', e_record[0:4])
                                timestampS = float(fields[0])
                                timestamp = ntplib.system_to_ntp_time(
                                    timestampS)

                                if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                                    sample = self._extract_sample(
                                        DostaLnWfpSioMuleParserDataParticle,
                                        None, e_record, timestamp)
                                    if sample:
                                        # create particle
                                        result_particles.append(sample)
                                        sample_count += 1

                else:  # no e header match
                    log.warn(
                        "*****************************************************BAD E HEADER 0x%s",
                        ":".join("{:02x}".format(ord(c)) for c in chunk))
                    self._exception_callback(
                        UnexpectedDataException("Found unexpected data."))

            self._chunk_sample_count.append(sample_count)

            (timestamp, chunk, start,
             end) = self._chunker.get_next_data_with_index()

        return result_particles