Code example #1
File: datatimes.py  Project: abcrew/FIREL0-L1
 def major_data(self, inval):
     """
     read in and add major data to the class
     """
     inval = FIREdata.hex2int(inval)
     dt = FIREdata.dat2time(inval[0:8])
     d1 = FIREdata.dat2time(inval[majorTimelen:])
     self.dat['Epoch'] = dm.dmarray.append(self.dat['Epoch'], dt)
     self.dat['Time'] = dm.dmarray.append(self.dat['Time'], d1)
Code example #2
File: datatimes.py  Project: matthewlh/FIREL0-L1
 def major_data(self, inval):
     """
     read in and add major data to the class
     """
     if (np.asarray(inval) == 0).all(): # is this line fill?
         return
     dt = FIREdata.dat2time(inval[0:8])
     d1 = FIREdata.dat2time(inval[self._majorTimelen:])
     self.append([dt, d1])
Code example #3
File: hires.py  Project: abcrew/FIREL0-L1
 def majorStamps(self):
     """
     return the major time stamps
     """
     major = []
     for v in self:
         if v.pktnum == '01': # this is a major stamp
             major.append(v)
     stamps = []
     for v in major:
         stamps.append(FIREdata.dat2time(FIREdata.hex2int(v.data[:8])))
     return stamps
Code example #4
File: test_FIREdata.py  Project: abcrew/FIREL0-L1
 def test_dat2time(self):
     """dat2time has known behaviour"""
     self.assertEqual(
         FIREdata.dat2time("0D 04 12 12 28 38 00 55"), datetime.datetime(2013, 4, 18, 18, 40, 56, 85000)
     )
     self.assertEqual(
         FIREdata.dat2time("0D 04 12 12 28 38 00 55".split(" ")), datetime.datetime(2013, 4, 18, 18, 40, 56, 85000)
     )
     self.assertEqual(
         FIREdata.dat2time([int(v, 16) for v in "0D 04 12 12 28 38 00 55".split(" ")]),
         datetime.datetime(2013, 4, 18, 18, 40, 56, 85000),
     )
     self.assertIsNone(FIREdata.dat2time([int(v, 16) for v in "0D 15 12 12 28 38 00 55".split(" ")]))
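The test vectors above pin down the byte layout: year (offset from 2000), month, day, hour, minute, and second take one byte each, followed by a two-byte millisecond count, and an out-of-range field makes the decoder return None. A minimal sketch consistent with those vectors (hypothetical; the real FIREdata.dat2time may differ in its details):

    import datetime

    def dat2time_sketch(inval):
        # accept a hex string, a list of hex strings, or a list of ints,
        # mirroring the three call styles exercised by the test above
        if isinstance(inval, str):
            inval = inval.split(' ')
        if inval and isinstance(inval[0], str):
            inval = [int(v, 16) for v in inval]
        yr, mo, dy, hh, mm, ss, ms_hi, ms_lo = inval[:8]
        try:
            return datetime.datetime(2000 + yr, mo, dy, hh, mm, ss,
                                     ((ms_hi << 8) + ms_lo) * 1000)
        except ValueError:  # e.g. month 0x15 = 21 is out of range
            return None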
Code example #5
File: burst.py  Project: matthewlh/FIREL0-L1
    def __init__(self, inpage, h):
        len1 = datalen+majorTimelen
        len2 = datalen+minorTimelen
        dat = FIREdata.hex2int(inpage)

        if len(inpage) == len1: # major
            try:
                self.t0 = FIREdata.dat2time(dat[0:8])
                self.major_data(dat)
            except ValueError:
                return
            print("\tData at time {0} decoded".format(self[-1][0].isoformat()))
        elif len(inpage) == len2: # minor
            try:
                self.minor_data(dat, h)
            except ValueError:
                return
Code example #6
File: context.py  Project: matthewlh/FIREL0-L1
    def read(self, filename):
        h = []
        pages = super(context, self).read(filename)
        # use a sliding window to find valid packets
        for p in pages:
            start_ind = 0
            stop_ind  = start_ind + datalen + majorTimelen
            while stop_ind < (len(p)-majorTimelen-datalen):
                skipped = 0
                cp = None # set when this window yields a context page
                if None not in p[start_ind:stop_ind]:
                    # the data is all there in a row just make a context object
                    cp = contextPage(p[start_ind:stop_ind])
                else:
                    # print("Encountered a missing packet")
                    missing_ind = p[start_ind:stop_ind].index(None)
                    if missing_ind < (majorTimelen-1):
                        # no whole time stamp; skip this context, there is no useful info
                        print("\tSkipped data: no time stamp")
                        skipped = 1
                    elif missing_ind >= (majorTimelen+datalen/2)-1:
                        # this means we have a valid time stamp and 1 valid measurement
                        #    so fill in the missing bytes with 00 and then set it to None
                        #    the context() class then needs to catch the None and set to fill
                        fill = ['00'] * ((majorTimelen+datalen) - missing_ind)
                        cp = contextPage(p[start_ind:stop_ind][0:missing_ind] + fill)
                        cp[0][1][1] = [None]
                        print("\t{0} Filled some data".format(cp[0][0].isoformat()))
                        stop_ind -= (len(p[start_ind:stop_ind])-missing_ind-1)
                        skipped=1
                    else:
                        # this means no valid data so fill in the missing bytes with 00 and then set it to None
                        #    the context() class then needs to catch the None and set to fill
                        #    we are keeping this since there was a valid time stamp
                        fill = ['00'] * ((majorTimelen+datalen) - missing_ind)
                        cp = contextPage(p[start_ind:stop_ind][0:missing_ind] + fill)
                        if cp:
                            cp[0][1][:] = [None, None]
                            print("\t{0} Filled all data".format(cp[0][0].isoformat()))
                        stop_ind -= (len(p[start_ind:stop_ind])-missing_ind-1)
                        skipped=1

                start_ind = stop_ind
                if skipped:
                    # we need to get back in sync; for these data that means finding a
                    #   valid date in the data
                    skip_num = 0
                    while start_ind < len(p) and \
                              len(p[start_ind:]) > majorTimelen+datalen and \
                              not FIREdata.validDate(p[start_ind:start_ind+majorTimelen+datalen]):
                        start_ind += 1
                        skip_num += 1
                    print("\t\tSkipped {0} bytes at the start of the next packet".format(skip_num))

                stop_ind = start_ind + (datalen + majorTimelen)
                if cp is not None:
                    h.extend(cp)
        print("Decoded {0} context measurements".format(len(h)))
        return context(h)
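A compact illustration of the fill-padding step above: when a record is cut short at missing_ind but still carries a full time stamp, it is padded back to full length with '00' bytes so it still parses, and the payload is flagged as fill downstream (record_len here is a stand-in for majorTimelen + datalen):

    record_len = 16                     # stand-in for majorTimelen + datalen
    partial = ['0d', '04', '12', '12']  # bytes received before the gap
    padded = partial + ['00'] * (record_len - len(partial))
    print(len(padded) == record_len)    # True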
Code example #7
File: context.py  Project: abcrew/FIREL0-L1
 def __init__(self, inpage):
     len1 = datalen+majorTimelen
     len2 = datalen+minorTimelen
     dat = FIREdata.hex2int(inpage)
     try:
         self.t0 = FIREdata.dat2time(dat[0:8])
     except ValueError:
         return
     self.major_data(dat[0:datalen+majorTimelen])
     start = datalen+majorTimelen
     # the index of the start of each FIRE data
     for ii in range(start, len(dat), datalen+minorTimelen):
         stop = ii+datalen+majorTimelen
         try:
             self.minor_data(dat[ii:stop])
         except IndexError: # malformed data for some reason, skip it
             print("Skipping malformed context: {0}".format(dat[ii:stop]))
     # sort the data in place (rebinding self has no effect)
     self.sort(key=lambda x: x[0])
Code example #8
File: context.py  Project: abcrew/FIREL0-L1
 def major_data(self, inval):
     """
     read in and add major data to the class
     """
     if (np.asarray(inval) == 0).all(): # is this line fill?
         return
     dt = FIREdata.dat2time(inval[0:8])
     d1 = np.asarray(inval[majorTimelen:])
     # format as two-digit hex so the bytes can be reassembled below;
     #   plain '{0}' would yield decimal strings and corrupt the value
     d1 = np.asanyarray(['{0:02x}'.format(v) for v in d1])
     d2 = int(d1[2] + d1[1] + d1[0], 16)  # 3 bytes, little-endian
     d3 = int(d1[5] + d1[4] + d1[3], 16)
     dout = [d2, d3]
     self.append( (dt, dout) )
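The reversed concatenation above reassembles each counter from three bytes stored least-significant byte first. A minimal illustration of the same little-endian reassembly:

    raw = [0x01, 0x02, 0x03]  # counter bytes as received, LSB first
    value = int(''.join('{0:02x}'.format(b) for b in reversed(raw)), 16)
    print(value == 0x030201)  # True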
Code example #9
File: datatimes.py  Project: matthewlh/FIREL0-L1
    def __init__(self, inpage):
        self._datalen = 8
        self._majorTimelen = 8
        dat = inpage.split(' ')
        dat = [int(v, 16) for v in dat]

        self.t0 = FIREdata.dat2time(inpage[0:25])

        # now the data length is 8
        for ii in range(0, len(dat), self._datalen): # the index of the start of each FIRE data
            stop = ii+self._datalen+self._majorTimelen  # an 8-byte time stamp plus 8 bytes of data
            self.major_data(dat[ii:stop])
        # cull any bad data (entries that have None in place of data), in place
        self[:] = [v for v in self if None not in v]
        # sort the data in place (rebinding self has no effect)
        self.sort(key=lambda x: x[0])
Code example #10
File: hires.py  Project: matthewlh/FIREL0-L1
    def __init__(self, inpage):
        self._datalen = 24
        self._majorTimelen = 8
        self._minorTimelen = 2

        dat = inpage.split(' ')
        dat = [int(v, 16) for v in dat]

        self.t0 = FIREdata.dat2time(inpage[0:25])
        # now the data length is 24
        self.major_data(dat[0:self._datalen+self._majorTimelen])
        start = self._datalen+self._majorTimelen
        for ii in range(start, len(dat), self._datalen+self._minorTimelen): # the index of the start of each FIRE data
            stop = ii+self._datalen+self._minorTimelen  # 24 bytes of data and 2 for a minor time stamp
            self.minor_data(dat[ii:stop])
        # sort the data in place (rebinding self has no effect)
        self.sort(key=lambda x: x[0])
Code example #11
File: burst.py  Project: matthewlh/FIREL0-L1
    def major_data(self, inval):
        """
        read in and add major data to the class
        """
        dt = FIREdata.dat2time(inval[0:8])
        # dt is the time carried in the stamp; the ten samples that
        #   follow are each 100 ms apart, starting at dt
        dt2 = [dt + datetime.timedelta(microseconds=100e3)*i for i in range(10)]
        # get the data bytes from inval
        d1 = np.asarray(inval[majorTimelen:])
        # convert the values back to two-digit hex strings so that they
        #    can be split on the nibble
        d1 = np.asanyarray(['{:02x}'.format(v) for v in d1])
        # split them on the nibble
        d2 = [int(v[0], 16) for v in d1]
        d3 = [int(v[1], 16) for v in d1]
        dout = zip(d2, d3)
        for v1, v2 in zip(dt2, dout):
            self.append( (v1, v2) )
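The hex round-trip above splits each byte into its two nibbles. A sketch of the equivalent split using bit operations, which avoids formatting through strings (behavior matches for byte values 0-255):

    data = [0xA5, 0x3C]             # example raw bytes
    high = [v >> 4 for v in data]   # high nibbles: [10, 3]
    low = [v & 0x0F for v in data]  # low nibbles:  [5, 12]
    print(list(zip(high, low)))     # [(10, 5), (3, 12)]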
Code example #12
File: datatimes.py  Project: abcrew/FIREL0-L1
    def read(self, filename):
        # need to have pages and packet information
        packets = packet.BIRDpackets(filename)

        """
        data times is at most one page
        """

        previous_packet = None # holds the last packet
        dataBuffer = [] # holds the data from a packet, as a measurement may roll onto
                        #   the next packet
        firstPacket = False
        for packet_ in packets:
            """
            options in here:
            1) new page starting with packet 01
            2) new page with missing packet 01
            3) current page with next packet
            4) current page with missing packet
            5) last packet of page at 13
            6) last packet of a page with missing 13
            """
            if packet_.pktnum == '01':
                firstPacket = True

            ### option 2 ###
            ### option 1 ###
            if previous_packet is None: # new page starting
                dataBuffer = [] # clear the dataBuffer as we are starting a new page
                previous_packet = packet_ # hang on to the last packet
                print(packet_)
                # this is a decodable page, start now
                dataBuffer.extend(packet_.data) # grab the data out
                # since p.pktnum == 01 this is a major time stamp, decode it.
            else:
                # drop leading bytes until the buffer starts with a valid date
                while len(dataBuffer) > 0:
                    if FIREdata.validDate(FIREdata.hex2int(dataBuffer[:majorTimelen])):
                        break # back in sync; resume decoding from here
                    else:
                        dataBuffer.pop(0)
            ### option 3 ###
            ### option 4 ###

            """
            regardless of the packet if there is more data in the buffer we should
            decode it and add it to the arrays
            """
            while len(dataBuffer) >= minorLen:
                tmp = [dataBuffer.pop(0) for v in range(majorLen)]
                self.major_data(tmp)

        # go through and remove duplicate times and data
        print("Looking for duplicate measurements")

        arr, dt_ind, return_inverse = np.unique(self.dat['Epoch'], return_index=True, return_inverse=True) # unique and sorted
        print("Found {0} duplicates of {1}".format(len(return_inverse)-len(dt_ind), len(return_inverse)))

        self.dat['Epoch'] = arr
        self.dat['Time'] = self.dat['Time'][dt_ind]
        # populate Duration and Mode
        self.dat['Mode'] = dm.dmarray.append(self.dat['Mode'], np.zeros(len(self.dat['Epoch']), dtype=int))
        if firstPacket:
            self.dat['Mode'][::2] = 1
        dur = [FIREdata.total_seconds(v2 - v1) for v1, v2 in zip(self.dat['Epoch'], self.dat['Time'])]
        self.dat['Duration'] = dm.dmarray.append(self.dat['Duration'], dur)


        return self
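The de-duplication above relies on np.unique returning the sorted unique values together with the index of each value's first occurrence; the inverse array has the original sample count, so the difference gives the number of duplicates. A small demonstration:

    import numpy as np

    t = np.array([3, 1, 2, 1, 3])
    uniq, first_ind, inverse = np.unique(t, return_index=True, return_inverse=True)
    print(uniq)       # [1 2 3] -- unique values, sorted
    print(first_ind)  # [1 2 0] -- first occurrence of each unique value
    print(len(inverse) - len(first_ind))  # 2 duplicates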
Code example #13
File: hires.py  Project: abcrew/FIREL0-L1
    def read(self, filename):
        # need to have pages and packet information
        tm = time.time()
        packets = packet.BIRDpackets(filename)
        tm = timeDuration("Packet read", tm)
        
        cp = contigousPackets.fromPackets(packets)
        # TODO for now drop all contigousPackets() that don't have a majorStamp
        cp = [v for v in cp if v.majorStamps() and len(v) > 30]
        # need at least 30 contiguous packets to bother decoding

        print("Decoding data from {0} contiguous segments".format(len(cp)))

        # now cp is a list of contigousPackets() so a lot of checking is unneeded

        while len(cp) > 0:
            packets_ = cp.pop(0)
            time0 = time.time()
            self.start_ind = len(self.Timestamp)
            majors = packets_.majorStamps() # get the major timestamps
            dataBuffer = [] # clear any remaining data
            dataInfo = [] # clear any remaining info
            for packet_ in packets_:  # loop over each packet in the contigousPackets()
                if packet_.pktnum == '13': # last packet of a page
                    packet_.data = packet_.data[:-8] # throw away the fill bytes
                # here combine all the data together and decode         
                dataBuffer.extend(packet_.data) # grab the data out
                dataInfo.extend([packet_]*len(packet_.data))
            # search through the data for the major stamps and then sync up the time
            # TODO is there a chance that the data could look like major stamps?
            #   probably, but unlikely; if there is more than one, sync to them all
            #   and the probability drops further
            findme = [hires.stampToData(v) for v in majors]
            ind = [FIREdata.sublistExists(dataBuffer, v) for v in findme]
            # check for missing stamps before dividing; None//2 would raise
            if None in ind:
                raise ValueError("Did not find the timestamp in the data!!")
            # the //2 is since each element has 2 bytes (characters)
            ind = np.asarray(ind)//2
            # figure out what time the first major makes the first minor entry
            #   1) also if the first data is not a major, some data will have to be thrown away
            #   2) think on the 15, 15, 15, 30 pattern and how to propagate that backwards

            # loop over all the values of ind changing majors to minors and removing fill
            for jj, ind_n in enumerate(ind):
                ##  print(dataBuffer[ind[jj]:ind[jj]+50])
                ##  print len(dataBuffer), len(dataInfo)
                dataBuffer = FIREdata.majorToMinor(dataBuffer, ind[jj])
                dataInfo = FIREdata.majorToMinor(dataInfo, ind[jj])
                ## print(dataBuffer[ind[jj]:ind[jj]+50])
                ## print len(dataBuffer), len(dataInfo)
                # now the indices of the rest are 6 too high, fix them
                ind[jj+1:] = ind[jj+1:]-6
                ##  print(dataBuffer[ind[jj]:ind[jj]+50])
                if jj == 0:
                    # 1) to remove them look for matching minors at the start that
                    #      is the number of extra bytes
                    ##  print(dataBuffer[:40])
                    # find the start of the data in the stream
                    extra = FIREdata.findMinors(dataBuffer, minorLen)
                    ind -= extra # all of the inds here
                    for ii in range(extra):
                        dataBuffer.pop(0)
                        dataInfo.pop(0)
                    ##  print len(dataBuffer), len(dataInfo)
                    ##  print(dataBuffer[ind[jj]:ind[jj]+50])
                # there could well be fill inside the data before the end of a page
                #   loop over all the minors looking for all zeros after
                fill = FIREdata.findFill(dataBuffer, minorLen, ind[jj])
                if ind[jj] > fill:
                    for ii in range(ind[jj]-fill):
                        dataBuffer.pop(fill)
                        dataInfo.pop(fill)
                    ## if ind[jj] % minorLen != 0:
                    ##     raise(ValueError("Fill removal failed"))
                    ind[jj:] -= ind[jj]-fill
            # 2) Now that we have the right starting place
            #    and the data all prepared for decoding
            # figure out the time for the first timestamp
            n_minors = ind[0] // minorLen

            # the time back to the start is
            avg_t = np.average([15, 15, 15, 30])*1e3
            start_t = majors[0] - datetime.timedelta(microseconds=avg_t*(n_minors+3))
            while len(dataBuffer) > minorLen:
                try:
                    firstTime = self.inc_minor_time( start_t, dataBuffer[0:2] )
                    break
                except ValueError:
                    dataBuffer = dataBuffer[minorLen:]
                    dataInfo = dataInfo[minorLen:]
                    ind += minorLen
            ##  print majors[0], start_t, majors[0]-start_t

            self.decodeWhile(dataBuffer, dataInfo, firstTime)
            # now that all the data are decoded create the epoch variable
            #   from the timestamps
            self.stop_ind = len(self.Timestamp)-1
            self.timestampToEpoch()
            print('{3} to go: Decoded data from {0} to {1}  --  {2}  ({4:0.2f}s)'.format(self.Epoch[self.start_ind].isoformat(),
                                                                self.Epoch[self.stop_ind].isoformat(),
                                                                self.Epoch[self.stop_ind]-self.Epoch[self.start_ind], len(cp), time.time()-time0))

        self.dat['Epoch'] = dm.dmarray.append(self.dat['Epoch'], self.Epoch)
        self.dat['Timestamp'] = dm.dmarray.append(self.dat['Timestamp'], self.Timestamp)
        hr0 = np.asarray(self.hr0)
        hr1 = np.asarray(self.hr1)
        self.dat['hr0'] = dm.dmarray.append(self.dat['hr0'], hr0).reshape(-1, 6)
        self.dat['hr1'] = dm.dmarray.append(self.dat['hr1'], hr1).reshape(-1, 6)
        self.dat['Flag'] = dm.dmarray.append(self.dat['Flag'], self.Flag)
        self.dat['seqnum'] = dm.dmarray.append(self.dat['seqnum'], self.seqnum)
        self.dat['seqidx'] = dm.dmarray.append(self.dat['seqidx'], self.seqidx)
        self.dat['pktnum'] = dm.dmarray.append(self.dat['pktnum'], self.pktnum)
        ## self.dat['pktnum'] = self.dat['pktnum'][len(self.dat['pktnum'])-len(self.dat['Epoch']):]
        del self.dat['Flag']

        # go through and remove duplicate times and data
        print("Looking for duplicate measurements")

        arr, dt_ind, return_inverse = np.unique(self.dat['Epoch'], return_index=True, return_inverse=True) # unique and sorted
        print("Found {0} duplicates of {1}".format(len(return_inverse)-len(dt_ind), len(return_inverse)))

        self.dat['Epoch'] = arr
        self.dat['Timestamp'] = self.dat['Timestamp'][dt_ind]
        self.dat['hr0'] = self.dat['hr0'][dt_ind]
        self.dat['hr1'] = self.dat['hr1'][dt_ind]
        self.dat['seqnum'] = self.dat['seqnum'][dt_ind]
        self.dat['seqidx'] = self.dat['seqidx'][dt_ind]
        self.dat['pktnum'] = self.dat['pktnum'][dt_ind]
        ##  self.dat['Flag'] = self.dat['Flag'][dt_ind]

        
        return self
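The timestamp sync above hinges on locating each encoded major stamp inside the byte stream. A sketch of what a helper like FIREdata.sublistExists might do (hypothetical; the real implementation may differ): return the start index of a sublist in a list, or None when it is absent:

    def sublist_index(haystack, needle):
        # naive scan; fine for the short stamp patterns used here
        n = len(needle)
        for i in range(len(haystack) - n + 1):
            if haystack[i:i + n] == needle:
                return i
        return None

    print(sublist_index(list('abcdef'), list('cd')))  # 2
    print(sublist_index(list('abcdef'), list('xy')))  # None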