Code example #1
    def _parse_header(self):
        """
        This method checks that the header data matches the WFP E global flags header.
        """
        # read the first bytes from the file
        header = self._stream_handle.read(HEADER_BYTES)
        match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.match(header)
        if not match:
            raise SampleException("File header does not match the header regex")

        self._saved_header = header

        # update the state to show we have read the header
        self._increment_state(HEADER_BYTES)
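For reference, _parse_header depends on module-level definitions the snippet does not show: the HEADER_BYTES constant, the compiled WFP_E_GLOBAL_FLAGS_HEADER_MATCHER regex, and a SampleException class. A minimal sketch of what those could look like; the size and pattern below are illustrative assumptions, not the project's real values:

    import re

    # Hypothetical header length -- the real parser defines its own value.
    HEADER_BYTES = 24

    # Hypothetical header pattern -- illustrative only.
    WFP_E_GLOBAL_FLAGS_HEADER_MATCHER = re.compile(b'\x00\x01\x00')

    class SampleException(Exception):
        """Raised when file contents do not match the expected format."""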
Code example #2
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """            
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while (chunk != None):
	    sio_header_match = SIO_HEADER_MATCHER.match(chunk)
	    
            sample_count = 0
            log.debug('parsing header %s', sio_header_match.group(0)[1:SIO_HEADER_BYTES])
	    
            if sio_header_match.group(1) == 'WE':
                log.trace("********************************matched chunk header %s", chunk[0:SIO_HEADER_BYTES])
	    
                # Parse/match the E file header
                e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES+HEADER_BYTES+1])
		
                if e_header_match:
		    
		    log.debug('******************************* HEADER MATCH WAS:')
		    log.debug('%s', ":".join("{:02x}".format(ord(c)) for c in chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES+HEADER_BYTES+1]))				   
		    payload = chunk[SIO_HEADER_BYTES+HEADER_BYTES+1:]
		     
                    data_split = self.we_split_function(payload)
                    if data_split:
			log.debug('Found data match in chunk %s', chunk[1:SIO_HEADER_BYTES])
			for ii in range(0,len(data_split)):    
			    e_record = payload[data_split[ii][0]:data_split[ii][1]]

			    # particle-ize the data block received, return the record		    			    
			    if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):
				
				fields = struct.unpack('>I', e_record[0:4])
				timestampS = float(fields[0])
				timestamp = ntplib.system_to_ntp_time(timestampS)
				
				if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
				    sample = self._extract_sample(DostaLnWfpSioMuleParserDataParticle,
								  None,
								  e_record,
								  timestamp)
				    if sample:
					# create particle
					result_particles.append(sample)
					sample_count += 1
		                		
		                
		else: # no e header match
		    log.warn("*****************************************************BAD E HEADER 0x%s",
			       ":".join("{:02x}".format(ord(c)) for c in chunk))
		    self._exception_callback(UnexpectedDataException("Found unexpected data."))
		
            self._chunk_sample_count.append(sample_count)

            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()

        return result_particles
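The timestamp handling in this parser can be exercised on its own: the first four bytes of an e-record hold a big-endian unsigned integer of seconds since the Unix epoch, and ntplib.system_to_ntp_time() shifts that to the NTP epoch. A self-contained sketch; the sample bytes and record length are made up for illustration:

    import struct
    import ntplib

    # Hypothetical e-record: 2014-01-01T00:00:00Z as a big-endian uint32,
    # padded out to an arbitrary record length.
    e_record = struct.pack('>I', 1388534400) + b'\x00' * 26

    fields = struct.unpack('>I', e_record[0:4])
    timestamp = ntplib.system_to_ntp_time(float(fields[0]))

    # system_to_ntp_time adds the 2208988800-second offset between the
    # NTP epoch (1900) and the Unix epoch (1970).
    print(timestamp)  # 3597523200.0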
Code example #3
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            log.debug('parsing header %s',
                      sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) != 'WE':
                log.warn(" chunk did not match header WE %s",
                         chunk[0:SIO_HEADER_BYTES])
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            # Parse/match the E file header
            e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1])

            if not e_header_match:
                # no e header match
                log.warn("*BAD E HEADER 0x%s",
                         ":".join("{:02x}".format(ord(c)) for c in chunk))
                self._exception_callback(
                    UnexpectedDataException("Found unexpected data."))
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            payload = chunk[SIO_HEADER_BYTES + HEADER_BYTES + 1:]

            data_split = self.we_split_function(payload)

            log.debug('Found data match in chunk %s',
                      chunk[1:SIO_HEADER_BYTES])
            for ii in range(0, len(data_split)):
                e_record = payload[data_split[ii][0]:data_split[ii][1]]
                log.debug('Extracted E Record to store in particle %s',
                          hexlify(e_record))

                # particle-ize the data block received, return the record
                if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                    fields = struct.unpack('>I', e_record[0:4])
                    timestamp_s = float(fields[0])
                    timestamp = ntplib.system_to_ntp_time(timestamp_s)

                    if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                        # create particle
                        log.debug('Particle created with raw data %s',
                                  hexlify(e_record))
                        log.debug('Particle timestamp = %f', timestamp)
                        sample = self._extract_sample(
                            DostaLnWfpSioDataParticle, None, e_record,
                            timestamp)
                        result_particles.append(sample)

            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
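The refactored loop leans on the contract of self.we_split_function(payload): the result is indexed as data_split[ii][0] and data_split[ii][1], so it must be a list of (start, end) index pairs delimiting each e-record inside the payload. A standalone sketch of a splitter with that contract; the fixed record size is an assumption for illustration, and the real splitter also has to cope with embedded status records:

    E_GLOBAL_SAMPLE_BYTES = 30  # hypothetical fixed record size

    def we_split_function(payload):
        """Return (start, end) pairs for consecutive fixed-size records."""
        size = E_GLOBAL_SAMPLE_BYTES
        return [(start, start + size)
                for start in range(0, len(payload) - size + 1, size)]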
Code example #4
File: dosta_ln_wfp_sio.py  Project: GrimJ/mi-dataset
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            log.debug('parsing header %s', sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) != 'WE':
                log.warn(" chunk did not match header WE %s", chunk[0:SIO_HEADER_BYTES])
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            # Parse/match the E file header
            e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES+HEADER_BYTES+1])

            if not e_header_match:
                # no e header match
                log.warn("*BAD E HEADER 0x%s",
                         ":".join("{:02x}".format(ord(c)) for c in chunk))
                self._exception_callback(UnexpectedDataException("Found unexpected data."))
                # get the next chunk
                (timestamp, chunk) = self._chunker.get_next_data()
                continue  # jump to next iteration of the chunk loop

            payload = chunk[SIO_HEADER_BYTES+HEADER_BYTES+1:]

            data_split = self.we_split_function(payload)

            log.debug('Found data match in chunk %s', chunk[1:SIO_HEADER_BYTES])
            for ii in range(0, len(data_split)):
                e_record = payload[data_split[ii][0]:data_split[ii][1]]
                log.debug('Extracted E Record to store in particle %s', hexlify(e_record))

                # particle-ize the data block received, return the record
                if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                    fields = struct.unpack('>I', e_record[0:4])
                    timestamp_s = float(fields[0])
                    timestamp = ntplib.system_to_ntp_time(timestamp_s)

                    if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                        # create particle
                        log.debug('Particle created with raw data %s', hexlify(e_record))
                        log.debug('Particle timestamp = %f', timestamp)
                        sample = self._extract_sample(DostaLnWfpSioDataParticle,
                                                      None,
                                                      e_record,
                                                      timestamp)
                        result_particles.append(sample)

            (timestamp, chunk) = self._chunker.get_next_data()

        return result_particles
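Before unpacking a record, every version of parse_chunks guards with STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]), skipping records that begin with a status marker so that only data records become particles. A sketch of that guard in isolation; the marker bytes and prefix length are illustrative assumptions:

    import re

    STATUS_BYTES = 16  # hypothetical status-prefix length
    # Hypothetical status marker -- the real project defines its own pattern.
    STATUS_START_MATCHER = re.compile(b'\xff\xff\xff\xff')

    def is_status_record(e_record):
        """True when the record opens with the (assumed) status marker."""
        return STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]) is not None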
Code example #5
    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk) = self._chunker.get_next_data()

        while chunk is not None:
            sio_header_match = SIO_HEADER_MATCHER.match(chunk)

            sample_count = 0
            log.debug('parsing header %s',
                      sio_header_match.group(0)[1:SIO_HEADER_BYTES])

            if sio_header_match.group(1) == 'WE':
                log.trace(
                    "********************************matched chunk header %s",
                    chunk[0:SIO_HEADER_BYTES])

                # Parse/match the E file header
                e_header_match = WFP_E_GLOBAL_FLAGS_HEADER_MATCHER.search(
                    chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1])

                if e_header_match:

                    log.debug('******************************* HEADER MATCH WAS:')
                    log.debug('%s', ":".join("{:02x}".format(ord(c)) for c in
                                             chunk[SIO_HEADER_BYTES:SIO_HEADER_BYTES + HEADER_BYTES + 1]))
                    payload = chunk[SIO_HEADER_BYTES + HEADER_BYTES + 1:]

                    data_split = self.we_split_function(payload)
                    if data_split:
                        log.debug('Found data match in chunk %s',
                                  chunk[1:SIO_HEADER_BYTES])
                        for ii in range(0, len(data_split)):
                            e_record = payload[data_split[ii][0]:data_split[ii][1]]

                            # particle-ize the data block received, return the record
                            if not STATUS_START_MATCHER.match(e_record[0:STATUS_BYTES]):

                                fields = struct.unpack('>I', e_record[0:4])
                                timestampS = float(fields[0])
                                timestamp = ntplib.system_to_ntp_time(timestampS)

                                if len(e_record) == E_GLOBAL_SAMPLE_BYTES:
                                    sample = self._extract_sample(
                                        DostaLnWfpSioMuleParserDataParticle,
                                        None, e_record, timestamp)
                                    if sample:
                                        # create particle
                                        result_particles.append(sample)
                                        sample_count += 1

                else:  # no e header match
                    log.warn(
                        "*****************************************************BAD E HEADER 0x%s",
                        ":".join("{:02x}".format(ord(c)) for c in chunk))
                    self._exception_callback(
                        UnexpectedDataException("Found unexpected data."))

            self._chunk_sample_count.append(sample_count)

            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()

        return result_particles
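The hex-dump idiom in the warning logs, ":".join("{:02x}".format(ord(c)) for c in chunk), assumes Python 2, where iterating a byte string yields one-character strings. A small sketch of the same dump written for Python 3, alongside the hexlify() form the newer examples use:

    from binascii import hexlify

    chunk = b'\x01WE\xff'  # hypothetical chunk bytes

    # Python 3: iterating bytes yields ints, so format them directly.
    dump = ":".join("{:02x}".format(b) for b in chunk)

    # Compact alternative used by the newer examples above.
    compact = hexlify(chunk).decode('ascii')

    print(dump)     # 01:57:45:ff
    print(compact)  # 015745ff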