Code Example #1
	def correlateData(self,frameLimit):
		sample = []
		self.fh.seek((self.startFrame)*4128,0)
		steps = max(1, frameLimit/10)  # guard against a modulo-by-zero below when frameLimit < 10
		totalTime = datetime.now()
		print 'Correlating [          ]',
		print '\b'*12,
		sys.stdout.flush()
		for p in xrange(frameLimit):
			startTime = datetime.now()
			frame = drx.readFrame(self.fh)		
			
			if frame.parseID()[1] == 1:
				self.realTune1 = self.realTune1 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune1 = self.imagTune1 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			else:
				self.realTune2 = self.realTune2 + numpy.correlate(frame.data.iq.real,self.template).tolist()
				self.imagTune2 = self.imagTune2 + numpy.correlate(frame.data.iq.imag,self.template).tolist()
			if p%steps == 0:
				print '\b=',
				sys.stdout.flush()
		print '\b] Done'
		self.startFrame += frameLimit	
		#self.fh.close()
		print 'Read time: ' + str(datetime.now() - totalTime)
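For reference, numpy.correlate defaults to 'valid' mode, sliding the template across the frame payload and returning len(signal) - len(template) + 1 points. A minimal self-contained sketch of the matched-filter step above, with synthetic stand-ins for the frame payload and self.template (both stand-ins are assumptions):

import numpy

template = numpy.array([1.0, -1.0, 1.0])   # stand-in for self.template
iq_real = numpy.random.randn(4096)         # stand-in for frame.data.iq.real
corr = numpy.correlate(iq_real, template)  # 'valid' mode by default
print len(corr)                            # 4096 - 3 + 1 = 4094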
Code Example #2
 def simpleRMS(self):
     self.timer = datetime.now()
     #Note: Every time readframe is used, the open file is incremented by the frame size
     i = 0
     self.chunkArray=[]
     while i < self.chunkSizeinFrames:  # '<' (not '<='), so the frame count matches the RMS denominator below
         self.thisFrame = drx.readFrame(self.input)
         self.chunkArray.extend(self.thisFrame.data.iq.real[:])
         i += 1
     print 'Processing this many frames as a chunk: ' + str(len(self.chunkArray)/4096)
     numpyArr = numpy.array(self.chunkArray)
     print numpyArr
     self.returnData.append(math.sqrt(numpy.dot(numpyArr,numpyArr)/(self.chunkSizeinFrames*4096)))
     print 'Chunk processing time: ' + str(datetime.now()-self.timer)
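The value appended above is the chunk's root-mean-square amplitude: sqrt(dot(x, x) / N) is the same quantity as sqrt(mean(x**2)) when N is the sample count. A quick self-contained check (the array contents are arbitrary):

import math
import numpy

x = numpy.random.randn(3 * 4096)
rms_dot = math.sqrt(numpy.dot(x, x) / len(x))
rms_mean = math.sqrt(numpy.mean(x**2))
assert abs(rms_dot - rms_mean) < 1e-9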
Code Example #3
File: rmsIterator.py Project: qwofford/data_munger
 def simpleRMS(self):
     self.timer = datetime.now()
     #Note: Every time readframe is used, the open file is incremented by the frame size
     i = 0
     self.chunkArrayT1P0=[]
     self.chunkArrayT1P1=[]
     self.chunkArrayT2P0=[]
     self.chunkArrayT2P1=[]
     #Make sure analysis starts with T1P0
     self.tmpFrame = drx.readFrame(self.input)
     self.beamTmp, self.tuneTmp, self.polTmp = self.tmpFrame.parseID()
     while self.tuneTmp != 1 or self.polTmp != 0:  # loop until T1P0; the original '!= 1 & ... != 0' parsed as a bitwise AND and exited on any P0 frame
         print "<br>T" + str(self.tuneTmp) + ":P" + str(self.polTmp) + " isn't a good place to start."
         self.tmpFrame = drx.readFrame(self.input)
         self.beamTmp, self.tuneTmp, self.polTmp = self.tmpFrame.parseID()
         print "<br>Seeking to T" + str(self.tuneTmp) + ":P" + str(self.polTmp)
     #Found it! Go back one frame and start analysis
     #print "<br>Beginning with Tuning " + str(self.tuneTmp) + " @ " + str(self.tmpFrame.getCentralFreq()) + "MHz. Polarity " + str(self.polTmp) +".\n" 
     self.input.seek(-4128,1)
     
     while i < self.chunkSizeinFrames:  # '<' so the frame count matches the RMS denominators below
         #T1P0 read
         self.thisFrame = drx.readFrame(self.input)
         #T1P1 read
         self.thatFrame = drx.readFrame(self.input)
         #T2P0 read
         self.anotherFrame = drx.readFrame(self.input)
         #T2P1 read
         self.theOtherFrame = drx.readFrame(self.input)
         #Store the relevant data
         self.chunkArrayT1P0.extend(self.thisFrame.data.iq.real[:])
         #print "<br>Current T1P0 timestamp: " + str("%.9f" % self.thisFrame.getTime())
         self.chunkArrayT1P1.extend(self.thatFrame.data.iq.real[:])
         #print "<br>Current T1P1 timestamp: " + str("%.9f" % self.thatFrame.getTime())
         self.chunkArrayT2P0.extend(self.anotherFrame.data.iq.real[:])
         #print "<br>Current T2P0 timestamp: " + str("%.9f" % self.anotherFrame.getTime())
         self.chunkArrayT2P1.extend(self.theOtherFrame.data.iq.real[:])
         #print "<br>Current T2P1 timestamp: " + str("%.9f" % self.theOtherFrame.getTime())
         i += 1
     #Store header info for labeling plots
     self.beam, self.dontCare, self.irrelevant= self.thisFrame.parseID()
     self.tune1=self.thisFrame.getCentralFreq()
     self.tune2=self.anotherFrame.getCentralFreq()
     #print 'Processing this many frames as a chunk: ' + str(-1+len(self.chunkArrayT1P0)/4096)
     numpyArrT1P0 = numpy.array(self.chunkArrayT1P0)
     numpyArrT1P1 = numpy.array(self.chunkArrayT1P1)
     numpyArrT2P0 = numpy.array(self.chunkArrayT2P0)
     numpyArrT2P1 = numpy.array(self.chunkArrayT2P1)
     #Store RMS for chunk in the relevant returnDataXXYY format
     self.returnDataT1P0.append(math.sqrt(numpy.dot(numpyArrT1P0,numpyArrT1P0)/(self.chunkSizeinFrames*4096)))
     self.returnDataT1P1.append(math.sqrt(numpy.dot(numpyArrT1P1,numpyArrT1P1)/(self.chunkSizeinFrames*4096)))
     self.returnDataT2P0.append(math.sqrt(numpy.dot(numpyArrT2P0,numpyArrT2P0)/(self.chunkSizeinFrames*4096)))
     self.returnDataT2P1.append(math.sqrt(numpy.dot(numpyArrT2P1,numpyArrT2P1)/(self.chunkSizeinFrames*4096)))
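Note that the four readFrame calls per iteration assume the file interleaves frames strictly as T1P0, T1P1, T2P0, T2P1. A more defensive variant keys each frame on its parseID() instead of its position in the stream; a sketch, assuming an open DRX file handle fh and a per-pol frame count nFrames (both names are placeholders):

chunks = {(1, 0): [], (1, 1): [], (2, 0): [], (2, 1): []}
for _ in xrange(4 * nFrames):
    frame = drx.readFrame(fh)
    beam, tune, pol = frame.parseID()
    chunks[(tune, pol)].extend(frame.data.iq.real)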
Code Example #4
temp = [t / 40.0 for t in temp]  # scale the template; rebinding the loop variable, as the original did, left temp unchanged

# Variable declaration
realdata = np.zeros((100, 100), dtype=float)  # presumably intended; np.array((100, 100)) builds a 2-vector, not a 100x100 array
imagdata = np.zeros((100, 100), dtype=float)
realmincorr = 0.0
imagmincorr = 0.0
fh = open(file, "rb")  # binary mode for raw DRX data
startTime = datetime.now()

# Main file loop
for i in xrange(numFrames):
    # File reading/eof catching
    try:
        frame = drx.readFrame(fh)
    except errors.eofError:
        errorfile = open("/u/home/kkirchhoff/Top_Correlations_HDF/" + file[:26] + ".log", "w")
        errorfile.write("File ended at " + str(datetime.now()) + " on frame " + str(i))
        errorfile.close()
        break

    # Count sample variables
    realcount = 0
    imagcount = 0
    # Correlations
    realcorr = np.correlate(frame.data.iq.real, temp, "same")
    imagcorr = np.correlate(frame.data.iq.imag, temp, "same")
    # Maximum correlation loops
    """
	#This is horrible. Why did I do this?
Code Example #5
File: drxCsvMaker.py Project: kkirchhoff01/lslext
#Optional frame iteration limit
frameLimit = 0
if len(sys.argv) > 2:
  frameLimit = int(sys.argv[2])  # parse to int; as a bare string the '!= 0' check below was always true

#Input file
file = open(argument,'rb')

# How many frames total?
file.seek(0, os.SEEK_END)
size = file.tell()
totalFrames = size/4128
#Go back to the beginning.
file.seek(0, os.SEEK_SET)

#Is file size plausible?
#print "File size (bytes) is " + str(size) + ". This is " + str(totalFrames) + " frames."


#Don't iterate over everything if the user doesn't want to.
if frameLimit != 0:
  totalFrames = frameLimit

#Create an IO stream. This will print 4096 lines per iteration. One line for each sample.
for i in xrange(totalFrames):
  frame = drx.readFrame(file)
  b1, t1, p1 = frame.parseID()
  # index by position; the original iterated over the sample values themselves and reused the outer loop variable i
  for j in xrange(len(frame.data.iq)):
    print str(b1) + "," + str(t1) + "," + str(frame.getCentralFreq()) + "," + str(p1) + "," + str(frame.data.iq.real[j]) + "," + str(frame.data.iq.imag[j])
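Joining the fields by hand with '+' works, but the standard csv module emits the same one-line-per-sample output with less string plumbing. A sketch of the inner loop, reusing frame, b1, t1 and p1 from the loop above:

import csv
import sys

writer = csv.writer(sys.stdout)
for j in xrange(len(frame.data.iq)):
    writer.writerow([b1, t1, frame.getCentralFreq(), p1,
                     frame.data.iq.real[j], frame.data.iq.imag[j]])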

Code Example #6
# (the matching hf_on/hf_off opens are elided from this snippet)
lf_on = open("/u/data/leap/observations/056777_000085151", "rb")
lf_off = open("/u/data/leap/observations/056777_000085152", "rb")

hf_on.seek((startAtFrame - 1) * 4128, 0)
hf_off.seek((startAtFrame - 1) * 4128, 0)
lf_on.seek((startAtFrame - 1) * 4128, 0)
lf_off.seek((startAtFrame - 1) * 4128, 0)

hf_onArr = []
hf_offArr = []
lf_onArr = []
lf_offArr = []
i = 0
while i < framesToIterate:
    i += 1
    hf_onTemp = drx.readFrame(hf_on)
    hf_onArr.extend(hf_onTemp.data.iq.real)
    hf_offTemp = drx.readFrame(hf_off)
    hf_offArr.extend(hf_offTemp.data.iq.real)
    lf_onTemp = drx.readFrame(lf_on)
    lf_onArr.extend(lf_onTemp.data.iq.real)
    lf_offTemp = drx.readFrame(lf_off)
    lf_offArr.extend(lf_offTemp.data.iq.real)


fig = plt.figure()
ax1 = fig.add_subplot(121)
hf_onx = ax1.plot(hf_onArr[:], label="HF / On Moon")
hf_offx = ax1.plot(hf_offArr[:], label="HF / Off Moon")
ax1.set_xlabel("Step")
ax1.set_ylabel("Count")
Code Example #7
File: rmsIterator.py Project: kkirchhoff01/lslext
    def simpleRMS(self):
        self.timer = datetime.now()
        #Note: Every time readframe is used, the open file is incremented by the frame size
        i = 0
        self.chunkArrayT1P0 = []
        self.chunkArrayT1P1 = []
        self.chunkArrayT2P0 = []
        self.chunkArrayT2P1 = []
        #Make sure analysis starts with T1P0
        self.tmpFrame = drx.readFrame(self.input)
        self.beamTmp, self.tuneTmp, self.polTmp = self.tmpFrame.parseID()
        while self.tuneTmp != 1 or self.polTmp != 0:  # loop until T1P0; the original '!= 1 & ... != 0' parsed as a bitwise AND and exited on any P0 frame
            print "T" + str(self.tuneTmp) + ":P" + str(
                self.polTmp) + " isn't a good place to start."
            self.tmpFrame = drx.readFrame(self.input)
            self.beamTmp, self.tuneTmp, self.polTmp = self.tmpFrame.parseID()
            print "Seeking to T" + str(self.tuneTmp) + ":P" + str(self.polTmp)
        #Found it! Go back one frame and start analysis
        #print "Beginning with Tuning " + str(self.tuneTmp) + " @ " + str(self.tmpFrame.getCentralFreq()) + "MHz. Polarity " + str(self.polTmp) +".\n"
        self.input.seek(-4128, 1)

        while i < self.chunkSizeinFrames:  # '<' so the frame count matches the RMS denominators below
            #T1P0 read
            self.thisFrame = drx.readFrame(self.input)
            #T1P1 read
            self.thatFrame = drx.readFrame(self.input)
            #T2P0 read
            self.anotherFrame = drx.readFrame(self.input)
            #T2P1 read
            self.theOtherFrame = drx.readFrame(self.input)
            #Store the relevant data
            self.chunkArrayT1P0.extend(self.thisFrame.data.iq.real[:])
            #print "Current T1P0 timestamp: " + str("%.9f" % self.thisFrame.getTime())
            self.chunkArrayT1P1.extend(self.thatFrame.data.iq.real[:])
            #print "Current T1P1 timestamp: " + str("%.9f" % self.thatFrame.getTime())
            self.chunkArrayT2P0.extend(self.anotherFrame.data.iq.real[:])
            #print "Current T2P0 timestamp: " + str("%.9f" % self.anotherFrame.getTime())
            self.chunkArrayT2P1.extend(self.theOtherFrame.data.iq.real[:])
            #print "Current T2P1 timestamp: " + str("%.9f" % self.theOtherFrame.getTime())
            i += 1
        #Store header info for labeling plots
        self.beam, self.dontCare, self.irrelevant = self.thisFrame.parseID()
        self.tune1 = self.thisFrame.getCentralFreq()
        self.tune2 = self.anotherFrame.getCentralFreq()
        #print 'Processing this many frames as a chunk: ' + str(-1+len(self.chunkArrayT1P0)/4096)
        numpyArrT1P0 = numpy.array(self.chunkArrayT1P0)
        numpyArrT1P1 = numpy.array(self.chunkArrayT1P1)
        numpyArrT2P0 = numpy.array(self.chunkArrayT2P0)
        numpyArrT2P1 = numpy.array(self.chunkArrayT2P1)
        #Store RMS for chunk in the relevant returnDataXXYY format
        self.returnDataT1P0.append(
            math.sqrt(
                numpy.dot(numpyArrT1P0, numpyArrT1P0) /
                (self.chunkSizeinFrames * 4096)))
        self.returnDataT1P1.append(
            math.sqrt(
                numpy.dot(numpyArrT1P1, numpyArrT1P1) /
                (self.chunkSizeinFrames * 4096)))
        self.returnDataT2P0.append(
            math.sqrt(
                numpy.dot(numpyArrT2P0, numpyArrT2P0) /
                (self.chunkSizeinFrames * 4096)))
        self.returnDataT2P1.append(
            math.sqrt(
                numpy.dot(numpyArrT2P1, numpyArrT2P1) /
                (self.chunkSizeinFrames * 4096)))
Code Example #8
File: drxHist.py Project: kkirchhoff01/lslext
ds = f['dataset_1'][:]
f.close()
ds = ds.tolist()[:]
maxbin = max(ds)[0]
minbin = min(ds)[0]

fh = open(file, 'rb')  # binary mode for raw DRX data
histdata = np.zeros(50, dtype='int32')

startTime = datetime.now()
print 'Correlating [                         ]',
print '\b' * 27,
sys.stdout.flush()
for i in xrange(numFrames):
    try:
        frame = drx.readFrame(fh)
    except errors.eofError:  # the original 'errors.baseReaderError().eofError' is not a valid exception reference
        errorfile = open('/u/home/kkirchhoff/Top_Correlations_HDF/errors.log',
                         'w')
        errorfile.write('File ended at ' + str(datetime.now()) + ' on frame ' +
                        str(i))
        errorfile.close()
        break

    corr = np.correlate(frame.data.iq.real, temp, 'same')
    hist, edges = np.histogram(corr, bins=50, range=(minbin, maxbin))
    histdata += hist
    if i % (numFrames / 25) == 0:
        print '\b=',
        sys.stdout.flush()
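Because every frame is binned over the same fixed (minbin, maxbin) range, the per-frame counts from numpy.histogram line up bin-for-bin and can simply be summed. A self-contained sketch of that accumulation pattern on synthetic data:

import numpy as np

histdata = np.zeros(50, dtype='int32')
for _ in xrange(100):
    corr = np.random.randn(4096)  # stand-in for the per-frame correlation
    hist, edges = np.histogram(corr, bins=50, range=(-5.0, 5.0))
    histdata += hist
print histdata.sum()  # 100 * 4096 minus any samples outside the range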
Code Example #9
File: drxVoltHisto.py Project: kkirchhoff01/lslext
    def compute(self, framesToIterate, startAtFrame):
        self.framesToIterate = 4*framesToIterate
        #Remember there are four frames associated with a single sample in time, one for T1P0, T1P1, T2P0 and T2P1
        self.startAtFrame = 4*startAtFrame 
        try:
            self.histoFileA.seek((self.startAtFrame-1)*4128,0)
        except IOError:  # a seek before the start of the file
            print "Frame start bounds may be incorrect. Must start at frame >=1"
        self.histoArrT1P0A = []
        self.histoArrT1P1A = []
        self.histoArrT2P0A = []
        self.histoArrT2P1A = []
        try:
            self.histoFileB.seek((self.startAtFrame-1)*4128,0)
        except IOError:  # a seek before the start of the file
            print "Frame start bounds may be incorrect. Must start at frame >=1"
        self.histoArrT1P0B = []
        self.histoArrT1P1B = []
        self.histoArrT2P0B = []
        self.histoArrT2P1B = []
        self.beamA=-1
        self.beamB=-1
        self.tune1A=-1
        self.tune1B=-1
        self.tune2A=-1
        self.tune2B=-1
        i = 0
        while i<self.framesToIterate:
            i += 1
            self.histoT1P0TempA = drx.readFrame(self.histoFileA)
            self.histoArrT1P0A.extend(self.histoT1P0TempA.data.iq.real)
            self.histoT1P1TempA = drx.readFrame(self.histoFileA)
            self.histoArrT1P1A.extend(self.histoT1P1TempA.data.iq.real)
            self.histoT2P0TempA = drx.readFrame(self.histoFileA)
            self.histoArrT2P0A.extend(self.histoT2P0TempA.data.iq.real)
            self.histoT2P1TempA = drx.readFrame(self.histoFileA)
            self.histoArrT2P1A.extend(self.histoT2P1TempA.data.iq.real)
            self.histoT1P0TempB = drx.readFrame(self.histoFileB)
            self.histoArrT1P0B.extend(self.histoT1P0TempB.data.iq.real)
            self.histoT1P1TempB = drx.readFrame(self.histoFileB)
            self.histoArrT1P1B.extend(self.histoT1P1TempB.data.iq.real)
            self.histoT2P0TempB = drx.readFrame(self.histoFileB)
            self.histoArrT2P0B.extend(self.histoT2P0TempB.data.iq.real)
            self.histoT2P1TempB = drx.readFrame(self.histoFileB)
            self.histoArrT2P1B.extend(self.histoT2P1TempB.data.iq.real)
            self.tune1A = (self.histoT1P0TempA.getCentralFreq()/1e6)
            self.tune2A = (self.histoT2P0TempA.getCentralFreq()/1e6)
            self.tune1B = (self.histoT1P0TempB.getCentralFreq()/1e6)
            self.tune2B = (self.histoT2P0TempB.getCentralFreq()/1e6)
            self.beamA, self.dontCare, self.irrelevant = self.histoT1P0TempA.parseID()
            self.beamB, self.dontCare, self.irrelevant = self.histoT1P0TempB.parseID()
        fig = plt.figure()
        ax1 = fig.add_subplot(221)
        fileAx1 = plt.hist(self.histoArrT1P0A[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamA)+":T"+str("%.1f" % self.tune1A)+":P0")
        fileBx1 = plt.hist(self.histoArrT1P0B[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamB)+":T"+str("%.1f" % self.tune1B)+":P0")
        ax1.set_xlabel('Step')
        ax1.set_ylabel('Count')
        ax1.legend(loc='best')

        ax2 = fig.add_subplot(222)
        fileAx2 = plt.hist(self.histoArrT1P1A[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamA)+":T"+str("%.1f" % self.tune1A)+":P1")
        fileBx2 = plt.hist(self.histoArrT1P1B[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamB)+":T"+str("%.1f" % self.tune1B)+":P1")
        ax2.set_xlabel('Step')
        ax2.set_ylabel('Count')
        ax2.legend(loc='best')

        ax3 = fig.add_subplot(223)
        fileAx3 = plt.hist(self.histoArrT2P0A[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamA)+":T"+str("%.1f" % self.tune2A)+":P0")
        fileBx3 = plt.hist(self.histoArrT2P0B[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamB)+":T"+str("%.1f" % self.tune2B)+":P0")
        ax3.set_xlabel('Step')
        ax3.set_ylabel('Count')
        ax3.legend(loc='best')

        ax4 = fig.add_subplot(224)
        fileAx4 = plt.hist(self.histoArrT2P1A[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamA)+":T"+str("%.1f" % self.tune2A)+":P1")
        fileBx4 = plt.hist(self.histoArrT2P1B[:],bins=8,histtype='step',normed=True,label = "B"+str(self.beamB)+":T"+str("%.1f" % self.tune2B)+":P1")
        ax4.set_xlabel('Step')
        ax4.set_ylabel('Count')
        ax4.legend(loc='best')

        plt.show()
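One portability note on the hist calls above: the normed keyword was deprecated and later removed from matplotlib in favor of density, so on current releases the equivalent call is (data and the label string are placeholders):

plt.hist(data, bins=8, histtype='step', density=True, label='B1:T38.0:P0')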
Code Example #10
File: hdfWaterfallSW.py Project: kkirchhoff01/lslext
def main(args):
	# Parse command line options
	config = parseOptions(args)

	# Length of the FFT
	LFFT = config['LFFT']

	# Open the file and find good data (not spectrometer data)
	filename = config['args'][0]
	fh = open(filename, "rb")
	nFramesFile = os.path.getsize(filename) / drx.FrameSize
	
	try:
		for i in xrange(5):
			junkFrame = drspec.readFrame(fh)
		raise RuntimeError("ERROR: '%s' appears to be a DR spectrometer file, not a raw DRX file" % filename)
	except errors.syncError:
		fh.seek(0)
		
	while True:
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				t0 = junkFrame.getTime()
				break
			except ZeroDivisionError:
				pass
		except errors.syncError:
			fh.seek(-drx.FrameSize+1, 1)
			
	fh.seek(-drx.FrameSize, 1)
	
	beam,tune,pol = junkFrame.parseID()
	beams = drx.getBeamCount(fh)
	tunepols = drx.getFramesPerObs(fh)
	tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
	beampols = tunepol

	# Offset in frames for beampols beam/tuning/pol. sets
	inoffset = config['offset']
	offset = int(config['offset'] * srate / 4096 * beampols)
	offset = int(1.0 * offset / beampols) * beampols
	fh.seek(offset*drx.FrameSize, 1)
	
	# Iterate on the offsets until we reach the right point in the file.  This
	# is needed to deal with files that start with only one tuning and/or a 
	# different sample rate.  
	while True:
		## Figure out where in the file we are and what the current tuning/sample 
		## rate is
		junkFrame = drx.readFrame(fh)
		srate = junkFrame.getSampleRate()
		t1 = junkFrame.getTime()
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		fh.seek(-drx.FrameSize, 1)
		
		## See how far off the current frame is from the target
		tDiff = t1 - (t0 + config['offset'])
		
		## Half that to come up with a new seek parameter
		tCorr = -tDiff / 2.0
		cOffset = int(tCorr * srate / 4096 * beampols)
		cOffset = int(1.0 * cOffset / beampols) * beampols
		offset += cOffset
		
		## If the offset is zero, we are done.  Otherwise, apply the offset
		## and check the location in the file again/
		if cOffset == 0:
			break
		fh.seek(cOffset*drx.FrameSize, 1)
	
	# Update the offset actually used
	config['offset'] = t1 - t0
	offset = int(round(config['offset'] * srate / 4096 * beampols))
	offset = int(1.0 * offset / beampols) * beampols

	# Make sure that the file chunk size contains is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
	maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols

	# Number of frames to integrate over
	print "Line 673: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols
	nFramesAvg = int(config['average'] * srate / 4096 * beampols)
	if( nFramesAvg == 0):
		nFramesAvg = 1 * beampols
	else:
		nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
	config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
	maxFrames = nFramesAvg
	print "Line 678: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
	if config['metadata'] is not None:
		config['duration'] = 0
	if config['duration'] == 0:
		config['duration'] = 1.0 * nFramesFile / beampols * 4096 / srate
	else:
		config['duration'] = int(round(config['duration'] * srate * beampols / 4096) / beampols * 4096 / srate)
	
	nChunks = int(round(config['duration'] / config['average']))
	if nChunks == 0:
		nChunks = 1
	nFrames = nFramesAvg*nChunks
	print "Line 693: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

	# Date & Central Frequency
	t1  = junkFrame.getTime()
	beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
	centralFreq1 = 0.0
	centralFreq2 = 0.0
	for i in xrange(4):
		junkFrame = drx.readFrame(fh)
		b,t,p = junkFrame.parseID()
		if p == 0 and t == 1:
			try:
				centralFreq1 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		elif p == 0 and t == 2:
			try:
				centralFreq2 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		else:
			pass
	fh.seek(-4*drx.FrameSize, 1)
	
	config['freq1'] = centralFreq1
	config['freq2'] = centralFreq2

	# File summary
	print "Filename: %s" % filename
	print "Date of First Frame: %s" % str(beginDate)
	print "Beams: %i" % beams
	print "Tune/Pols: %i %i %i %i" % tunepols
	print "Sample Rate: %i Hz" % srate
	print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
	print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
	print "---"
	print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
	print "Integration: %.6f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
	print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
	print "Chunks: %i" % nChunks
	print " "
	
	# Estimate clip level (if needed)
	if config['estimate']:
		clip1, clip2 = estimateClipLevel(fh, beampols)
	else:
		clip1 = config['clip']
		clip2 = config['clip']
		
	# Make the pseudo-antennas for Stokes calculation
	antennas = []
	for i in xrange(4):
		if i / 2 == 0:
			newAnt = stations.Antenna(1)
		else:
			newAnt = stations.Antenna(2)
			
		if i % 2 == 0:
			newAnt.pol = 0
		else:
			newAnt.pol = 1
			
		antennas.append(newAnt)
		
	# Setup the output file
	outname = os.path.split(filename)[1]
	outname = os.path.splitext(outname)[0]
	if config['return'] == 'FFT':
		outname = '%s-%d-waterfall-complex.hdf5' % (outname, inoffset)
	else:
		outname = '%s-waterfall.hdf5' % outname
	
	if os.path.exists(outname):
		#yn = raw_input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
		#if yn not in ('n', 'N'):
		#	os.unlink(outname)
		#else:
		raise RuntimeError("Output file '%s' already exists" % outname)
			
	f = hdfData.createNewFile(outname)
	
	# Look at the metadata and come up with a list of observations.  If 
	# there are no metadata, create a single "observation" that covers the
	# whole file.
	obsList = {}
	if config['metadata'] is not None:
		sdf = metabundle.getSessionDefinition(config['metadata'])
		
		sdfBeam  = sdf.sessions[0].drxBeam
		spcSetup = sdf.sessions[0].spcSetup
		if sdfBeam != beam:
			raise RuntimeError("Metadata is for beam #%i, but data is from beam #%i" % (sdfBeam, beam))
			
		for i,obs in enumerate(sdf.sessions[0].observations):
			sdfStart = mcs.mjdmpm2datetime(obs.mjd, obs.mpm)
			sdfStop  = mcs.mjdmpm2datetime(obs.mjd, obs.mpm + obs.dur)
			obsDur   = obs.dur/1000.0
			obsSR    = drx.filterCodes[obs.filter]
			
			obsList[i+1] = (sdfStart, sdfStop, obsDur, obsSR)
			
		print "Observations:"
		for i in sorted(obsList.keys()):
			obs = obsList[i]
			print " #%i: %s to %s (%.3f s) at %.3f MHz" % (i, obs[0], obs[1], obs[2], obs[3]/1e6)
		print " "
			
		hdfData.fillFromMetabundle(f, config['metadata'])
	else:
		obsList[1] = (datetime.utcfromtimestamp(t1), datetime(2222,12,31,23,59,59), config['duration'], srate)
		
		hdfData.fillMinimum(f, 1, beam, srate)
		
	if config['linear']:
		dataProducts = ['XX', 'YY']
	else:
		dataProducts = ['I', 'Q', 'U', 'V']
		
	for o in sorted(obsList.keys()):
		for t in (1,2):
			hdfData.createDataSets(f, o, t, numpy.arange(LFFT-1 if float(fxc.__version__) < 0.8 else LFFT, dtype=numpy.float32), int(round(obsList[o][2]/config['average'])), dataProducts, dataOut=config['return'])
	f.attrs['FileGenerator'] = 'hdfWaterfall.py'
	f.attrs['InputData'] = os.path.basename(filename)
	
	# Create the various HDF group holders
	ds = {}
	for o in sorted(obsList.keys()):
		obs = hdfData.getObservationSet(f, o)
		
		ds['obs%i' % o] = obs
		ds['obs%i-time' % o] = obs.create_dataset('time', (int(round(obsList[o][2]/config['average'])),), 'f8')
		
		for t in (1,2):
			ds['obs%i-freq%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'freq')
			for p in dataProducts:
				if( config['return']=='PSD'):
					ds["obs%i-%s%i" % (o, p, t)] = hdfData.getDataSet(f, o, t, p)
				else:
					ds["obs%i-%s%imag" % (o, p, t)] = hdfData.getDataSet(f, o, t, p+'mag')
					ds["obs%i-%s%iphase" % (o, p, t)] = hdfData.getDataSet(f, o, t, p+'phase')
			ds['obs%i-Saturation%i' % (o, t)] = hdfData.getDataSet(f, o, t, 'Saturation')
	# Load in the correct analysis function
	if config['linear']:
		processDataBatch = processDataBatchLinear
	else:
		processDataBatch = processDataBatchStokes
		
	# Go!
	for o in sorted(obsList.keys()):
		try:
			processDataBatch(fh, antennas, obsList[o][0], obsList[o][2], obsList[o][3], config, ds, obsID=o, clip1=clip1, clip2=clip2)
		except RuntimeError, e:
			print "Observation #%i: %s, abandoning this observation" % (o, str(e))
Code Example #11
File: hdfWaterfallSW.py Project: kkirchhoff01/lslext
def processDataBatchStokes(fh, antennas, tStart, duration, sampleRate, config, dataSets, obsID=1, clip1=0, clip2=0):
	"""
	Process a chunk of data in a raw DRX file into Stokes parameters and 
	add the contents to an HDF5 file.
	"""
	
	# Length of the FFT
	LFFT = config['LFFT']
	
	# Find the start of the observation
	junkFrame = drx.readFrame(fh)
	srate = junkFrame.getSampleRate()
	t0 = junkFrame.getTime()
	fh.seek(-drx.FrameSize, 1)
	
	print 'Looking for #%i at %s with sample rate %.1f Hz...' % (obsID, tStart, sampleRate)
	while datetime.utcfromtimestamp(t0) < tStart or srate != sampleRate:
		junkFrame = drx.readFrame(fh)
		srate = junkFrame.getSampleRate()
		t0 = junkFrame.getTime()
	print '... Found #%i at %s with sample rate %.1f Hz' % (obsID, datetime.utcfromtimestamp(t0), srate)
	tDiff = datetime.utcfromtimestamp(t0) - tStart
	try:
		duration = duration - tDiff.total_seconds()
	except AttributeError:  # timedelta.total_seconds() requires Python 2.7+
		duration = duration - (tDiff.seconds + tDiff.microseconds/1e6)
	
	beam,tune,pol = junkFrame.parseID()
	beams = drx.getBeamCount(fh)
	tunepols = drx.getFramesPerObs(fh)
	tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
	beampols = tunepol
	
	# Make sure that the file chunk size contains is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
	maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols
	
	# Number of frames to integrate over
	print "Line 455: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols
	nFramesAvg = int(round(config['average'] * srate / 4096 * beampols))
	nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
	config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
	maxFrames = nFramesAvg
	print "Line 460: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
	nChunks = int(round(duration / config['average']))
	if nChunks == 0:
		nChunks = 1
	nFrames = nFramesAvg*nChunks
	print "Line 468: config['average']", config['average'], ' sample rate ', srate, ' beampols ', beampols, " nFramesAvg ", nFramesAvg, " nChunks ", nChunks

	# Date & Central Frequency
	beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
	centralFreq1 = 0.0
	centralFreq2 = 0.0
	for i in xrange(4):
		junkFrame = drx.readFrame(fh)
		b,t,p = junkFrame.parseID()
		if p == 0 and t == 1:
			try:
				centralFreq1 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		elif p == 0 and t == 2:
			try:
				centralFreq2 = junkFrame.getCentralFreq()
			except AttributeError:
				from lsl.common.dp import fS
				centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
		else:
			pass
	fh.seek(-4*drx.FrameSize, 1)
	freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1/srate))
	if float(fxc.__version__) < 0.8:
		freq = freq[1:]
		
	dataSets['obs%i-freq1' % obsID][:] = freq + centralFreq1
	dataSets['obs%i-freq2' % obsID][:] = freq + centralFreq2
	
	obs = dataSets['obs%i' % obsID]
	obs.attrs['tInt'] = config['average']
	obs.attrs['tInt_Unit'] = 's'
	obs.attrs['LFFT'] = LFFT
	obs.attrs['nChan'] = LFFT-1 if float(fxc.__version__) < 0.8 else LFFT
	obs.attrs['RBW'] = freq[1]-freq[0]
	obs.attrs['RBW_Units'] = 'Hz'
	
	dataProducts = ['I', 'Q', 'U', 'V']
	done = False
	for i in xrange(nChunks):
		# Find out how many frames remain in the file.  If this number is larger
		# than the maximum of frames we can work with at a time (maxFrames),
		# only deal with that chunk
		framesRemaining = nFrames - i*maxFrames
		if framesRemaining > maxFrames:
			framesWork = maxFrames
		else:
			framesWork = framesRemaining
		print "Working on chunk %i, %i frames remaining" % (i+1, framesRemaining)
		
		count = {0:0, 1:0, 2:0, 3:0}
		data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
		# If there are fewer frames than we need to fill an FFT, skip this chunk
		if data.shape[1] < LFFT:
			break
			
		# Inner loop that actually reads the frames into the data array
		print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)
		
		for j in xrange(framesWork):
			# Read in the next frame and anticipate any problems that could occur
			try:
				cFrame = drx.readFrame(fh, Verbose=False)
			except errors.eofError:
				done = True
				break
			except errors.syncError:
				continue

			beam,tune,pol = cFrame.parseID()
			aStand = 2*(tune-1) + pol
			if j == 0:
				cTime = cFrame.getTime()
			
			try:
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
			except ValueError:
				raise RuntimeError("Invalid Shape")

		# Save out some easy stuff
		dataSets['obs%i-time' % obsID][i] = cTime
		
		if config['countSats']:
			sats = ((data.real**2 + data.imag**2) >= 49).sum(axis=1)
			dataSets['obs%i-Saturation1' % obsID][i,:] = sats[0:2]
			dataSets['obs%i-Saturation2' % obsID][i,:] = sats[2:4]
		else:
			dataSets['obs%i-Saturation1' % obsID][i,:] = -1
			dataSets['obs%i-Saturation2' % obsID][i,:] = -1
			
		# Calculate the spectra for this block of data and then weight the results by 
		# the total number of frames read.  This is needed to keep the averages correct.
		if clip1 == clip2:
			freq, tempSpec1 = fxc.StokesMaster(data, antennas, LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			
			for t in (1,2):
				for l,p in enumerate(dataProducts):
					dataSets['obs%i-%s%i' % (obsID, p, t)][i,:] = tempSpec1[l,t-1,:]
					
		else:
			freq, tempSpec1 = fxc.StokesMaster(data[:2,:], antennas[:2], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			freq, tempSpec2 = fxc.StokesMaster(data[2:,:], antennas[2:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip2)
			
			for l,p in enumerate(dataProducts):
				dataSets['obs%i-%s%i' % (obsID, p, 1)][i,:] = tempSpec1[l,0,:]
				dataSets['obs%i-%s%i' % (obsID, p, 2)][i,:] = tempSpec2[l,0,:]
				
		# We don't really need the data array anymore, so delete it
		del(data)
		
		# Are we done yet?
		if done:
			break
			
	return True
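The repeated expression int(1.0 * n / beampols * 4096 / float(LFFT)) * LFFT / 4096 * beampols rounds a frame count down so that each beam/tuning/pol's share of samples is a whole number of FFT lengths. A worked Python 2 example (the numbers are illustrative only):

beampols, LFFT = 4, 1024
n = 28001
rounded = int(1.0 * n / beampols * 4096 / float(LFFT)) * LFFT / 4096 * beampols
print rounded  # 28000: 28000/4 frames * 4096 samples is exactly 28000 FFTs of 1024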
Code Example #12
File: hdfWaterfallSW.py Project: kkirchhoff01/lslext
def estimateClipLevel(fh, beampols):
	"""
	Read in a set of 100 frames and come up with the clip levels (the mean
	level plus five times the robust sigma) for each tuning.  These clip
	levels are returned as a two-element tuple.
	"""
	
	filePos = fh.tell()
		
	# Read in the first 100 frames for each tuning/polarization
	count = {0:0, 1:0, 2:0, 3:0}
	data = numpy.zeros((4, 4096*100), dtype=numpy.csingle)
	for i in xrange(beampols*100):
		try:
			cFrame = drx.readFrame(fh, Verbose=False)
		except errors.eofError:
			break
		except errors.syncError:
			continue
		
		beam,tune,pol = cFrame.parseID()
		aStand = 2*(tune-1) + pol
		
		data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
		count[aStand] +=  1
	
	# Go back to where we started
	fh.seek(filePos)
	
	# Compute the robust mean and standard deviation for I and Q for each
	# tuning/polarization
	meanI = []
	meanQ = []
	stdsI = []
	stdsQ = []
	for i in xrange(4):
		meanI.append( robust.mean(data[i,:].real) )
		meanQ.append( robust.mean(data[i,:].imag) )
		
		stdsI.append( robust.std(data[i,:].real) )
		stdsQ.append( robust.std(data[i,:].imag) )
	
	# Report
	print "Statistics:"
	for i in xrange(4):
		print " Mean %i: %.3f + %.3f j" % (i+1, meanI[i], meanQ[i])
		print " Std  %i: %.3f + %.3f j" % (i+1, stdsI[i], stdsQ[i])
	
	# Come up with the clip levels: robust mean plus five times the robust std
	clip1 = (meanI[0] + meanI[1] + meanQ[0] + meanQ[1]) / 4.0
	clip2 = (meanI[2] + meanI[3] + meanQ[2] + meanQ[3]) / 4.0
	
	clip1 += 5*(stdsI[0] + stdsI[1] + stdsQ[0] + stdsQ[1]) / 4.0
	clip2 += 5*(stdsI[2] + stdsI[3] + stdsQ[2] + stdsQ[3]) / 4.0
	
	clip1 = int(round(clip1))
	clip2 = int(round(clip2))
	
	# Report again
	print "Clip Levels:"
	print " Tuning 1: %i" % clip1
	print " Tuning 2: %i" % clip2
	
	return clip1, clip2
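
A hedged usage sketch for the function above; the path and the surrounding setup are assumptions for illustration, not from the original project:

# Sketch only: estimateClipLevel is assumed to be in scope along with
# its dependencies (numpy, lsl.reader.drx, lsl.reader.errors,
# lsl.statistics.robust).
fh = open('/path/to/drx_capture.dat', 'rb')  # hypothetical DRX capture
beampols = 4  # 2 tunings x 2 polarizations
clip1, clip2 = estimateClipLevel(fh, beampols)
print("Using clip levels %i (tuning 1) and %i (tuning 2)" % (clip1, clip2))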
Code example #18
File: drxVoltHisto.py Project: kkirchhoff01/lslext
    def compute(self, framesToIterate, startAtFrame):
        self.framesToIterate = 4 * framesToIterate
        #Remember there are four frames associated with a single sample in time: one each for T1P0, T1P1, T2P0 and T2P1
        self.startAtFrame = 4 * startAtFrame
        try:
            self.histoFileA.seek((self.startAtFrame - 1) * 4128, 0)
        except IOError:
            print "Frame start bounds may be incorrect. Must start at frame >= 1"
        self.histoArrT1P0A = []
        self.histoArrT1P1A = []
        self.histoArrT2P0A = []
        self.histoArrT2P1A = []
        try:
            self.histoFileB.seek((self.startAtFrame - 1) * 4128, 0)
        except IOError:
            print "Frame start bounds may be incorrect. Must start at frame >= 1"
        self.histoArrT1P0B = []
        self.histoArrT1P1B = []
        self.histoArrT2P0B = []
        self.histoArrT2P1B = []
        self.beamA = -1
        self.beamB = -1
        self.tune1A = -1
        self.tune1B = -1
        self.tune2A = -1
        self.tune2B = -1
        i = 0
        while i < self.framesToIterate:
            i += 1
            self.histoT1P0TempA = drx.readFrame(self.histoFileA)
            self.histoArrT1P0A.extend(self.histoT1P0TempA.data.iq.real)
            self.histoT1P1TempA = drx.readFrame(self.histoFileA)
            self.histoArrT1P1A.extend(self.histoT1P1TempA.data.iq.real)
            self.histoT2P0TempA = drx.readFrame(self.histoFileA)
            self.histoArrT2P0A.extend(self.histoT2P0TempA.data.iq.real)
            self.histoT2P1TempA = drx.readFrame(self.histoFileA)
            self.histoArrT2P1A.extend(self.histoT2P1TempA.data.iq.real)
            self.histoT1P0TempB = drx.readFrame(self.histoFileB)
            self.histoArrT1P0B.extend(self.histoT1P0TempB.data.iq.real)
            self.histoT1P1TempB = drx.readFrame(self.histoFileB)
            self.histoArrT1P1B.extend(self.histoT1P1TempB.data.iq.real)
            self.histoT2P0TempB = drx.readFrame(self.histoFileB)
            self.histoArrT2P0B.extend(self.histoT2P0TempB.data.iq.real)
            self.histoT2P1TempB = drx.readFrame(self.histoFileB)
            self.histoArrT2P1B.extend(self.histoT2P1TempB.data.iq.real)
            self.tune1A = (self.histoT1P0TempA.getCentralFreq() / 1e6)
            self.tune2A = (self.histoT2P0TempA.getCentralFreq() / 1e6)
            self.tune1B = (self.histoT1P0TempB.getCentralFreq() / 1e6)
            self.tune2B = (self.histoT2P0TempB.getCentralFreq() / 1e6)
            self.beamA, self.dontCare, self.irrelevant = self.histoT1P0TempA.parseID()
            self.beamB, self.dontCare, self.irrelevant = self.histoT1P0TempB.parseID()
        fig = plt.figure()
        ax1 = fig.add_subplot(221)
        fileAx1 = plt.hist(self.histoArrT1P0A[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamA) + ":T" +
                           str("%.1f" % self.tune1A) + ":P0")
        fileBx1 = plt.hist(self.histoArrT1P0B[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamB) + ":T" +
                           str("%.1f" % self.tune1B) + ":P0")
        ax1.set_xlabel('Step')
        ax1.set_ylabel('Count')
        ax1.legend(loc='best')

        ax2 = fig.add_subplot(222)
        fileAx2 = plt.hist(self.histoArrT1P1A[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamA) + ":T" +
                           str("%.1f" % self.tune1A) + ":P1")
        fileBx2 = plt.hist(self.histoArrT1P1B[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamB) + ":T" +
                           str("%.1f" % self.tune1B) + ":P1")
        ax2.set_xlabel('Step')
        ax2.set_ylabel('Count')
        ax2.legend(loc='best')

        ax3 = fig.add_subplot(223)
        fileAx3 = plt.hist(self.histoArrT2P0A[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamA) + ":T" +
                           str("%.1f" % self.tune2A) + ":P0")
        fileBx3 = plt.hist(self.histoArrT2P0B[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamB) + ":T" +
                           str("%.1f" % self.tune2B) + ":P0")
        ax3.set_xlabel('Step')
        ax3.set_ylabel('Count')
        ax3.legend(loc='best')

        ax4 = fig.add_subplot(224)
        fileAx4 = plt.hist(self.histoArrT2P1A[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamA) + ":T" +
                           str("%.1f" % self.tune2A) + ":P1")
        fileBx4 = plt.hist(self.histoArrT2P1B[:],
                           bins=8,
                           histtype='step',
                           normed=True,
                           label="B" + str(self.beamB) + ":T" +
                           str("%.1f" % self.tune2B) + ":P1")
        ax4.set_xlabel('Step')
        ax4.set_ylabel('Count')
        ax4.legend(loc='best')

        plt.show()
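
compute() assumes DRX frames arrive in repeating groups of four, T1P0, T1P1, T2P0, T2P1, and reads them blindly in that order.  A minimal sketch (hypothetical path; 4128 bytes per DRX frame, as used for the seeks above) that verifies one group before trusting the ordering:

# Sketch only: check the frame-ordering assumption once, then rewind.
from lsl.reader import drx

fh = open('/path/to/drx_capture.dat', 'rb')  # hypothetical path
for expectedTune, expectedPol in [(1, 0), (1, 1), (2, 0), (2, 1)]:
    frame = drx.readFrame(fh)
    beam, tune, pol = frame.parseID()
    assert (tune, pol) == (expectedTune, expectedPol), "unexpected frame order"
fh.seek(-4 * 4128, 1)  # rewind the group: 4128 bytes per frame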
Code example #19
import numpy
import math
from lsl.reader import drx
from lsl.correlator import fx as fxc
from lsl.misc.mathutil import to_dB
from matplotlib import pyplot as plt
from datetime import datetime

fh = open('/u/data/leap/observations/056777_000085152', 'rb')

frame1 = drx.readFrame(fh)
frame2 = drx.readFrame(fh)
frame3 = drx.readFrame(fh)
frame4 = drx.readFrame(fh)

b1, t1, p1 = frame1.parseID()
b2, t2, p2 = frame2.parseID()
b3, t3, p3 = frame3.parseID()
b4, t4, p4 = frame4.parseID()

print "THIS IS HF ON MOON DATA"

print "frame 1 stuff: " + str(b1) + ":" + str(t1) + ":" + str(p1)
print "Tuning is: " + str(frame1.getCentralFreq())
print str(frame1.data.iq.real[:])
print "frame 2 stuff: " + str(b2) + ":" + str(t2) + ":" + str(p2)
print "Tuning is: " + str(frame2.getCentralFreq())
print str(frame2.data.iq.real[:])
print "frame 3 stuff: " + str(b3) + ":" + str(t3) + ":" + str(p3)
print "Tuning is: " + str(frame3.getCentralFreq())
print str(frame3.data.iq.real[:])
print "frame 4 stuff: " + str(b4) + ":" + str(t4) + ":" + str(p4)
print "Tuning is: " + str(frame4.getCentralFreq())
print str(frame4.data.iq.real[:])
Code example #20
File: t01.py Project: ilikeit813/Project-Backup
def main(args):
	t0=time.time()
	#nChunks = 10000#10000, the size of a file.
	#nFramesAvg = 1*4#200, 50 frames per pol, the subintegration time.
	nChunks = 3000 #10000 #the size of a file.
	nFramesAvg = 4*16 #the integration time.
	fcl = 6000+7000
	fch = fcl + 3343
	for offset_i in range(0, 1409):# one offset = nChunks*nFramesAvg frames skipped
	#for offset_i in range(1500*2, 1500*3):# one offset = nChunks*nFramesAvg frames skipped
	#for offset_i in range(1500*4, 1500*5):# one offset = nChunks*nFramesAvg frames skipped
		offset = nChunks*nFramesAvg*offset_i
	# Parse command line options
		config = parseOptions(args)

	# Length of the FFT
		#LFFT = config['LFFT']
		LFFT = 4096*16
	# Build the DRX file
		try:
			#drxFile = drsu.getFileByName(config['args'][0], config['args'][1])
			fh = open(config['args'][0], "rb")
			nFramesFile = os.path.getsize(config['args'][0]) / drx.FrameSize
		except:
			print config['args']
			sys.exit(1)

		#drxFile.open()
		#nFramesFile = drxFile.size / drx.FrameSize
	
		while True:
			try:
				junkFrame = drx.readFrame(fh)
				try:
					srate = junkFrame.getSampleRate()
					#t0 = junkFrame.getTime()
					break
				except ZeroDivisionError:
					pass
			except errors.syncError:
				fh.seek(-drx.FrameSize+1, 1)
			
		fh.seek(-drx.FrameSize, 1)
	
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		config['offset'] = offset/srate/beampols*4096
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)

	# Make sure that the file chunk size contains is an integer multiple
	# of the FFT length so that no data gets dropped.  This needs to
	# take into account the number of beampols in the data, the FFT length,
	# and the number of samples per frame.
		maxFrames = int(1.0*config['maxFrames']/beampols*4096/float(LFFT))*LFFT/4096*beampols

	# Number of frames to integrate over
#	nFramesAvg = int(config['average'] * srate / 4096 * beampols)
#	nFramesAvg = int(1.0 * nFramesAvg / beampols*4096/float(LFFT))*LFFT/4096*beampols
		config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
		maxFrames = nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
#	nChunks = int(round(config['duration'] / config['average']))
		config['duration']=nChunks*config['average']
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
	
	# Date & Central Frequency
		beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 1:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)
	
		config['freq1'] = centralFreq1
		config['freq2'] = centralFreq2

	# File summary
		print "Filename: %s" % config['args'][0]
		print "Date of First Frame: %s" % str(beginDate)
		print "Beams: %i" % beams
		print "Tune/Pols: %i %i %i %i" % tunepols
		print "Sample Rate: %i Hz" % srate
		print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
		print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
		print "---"
		print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
		print "Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
		print "Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
		print "Chunks: %i" % nChunks
		
		#sys.exit()
		
	# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time+offset is greater than file length")

	# Estimate clip level (if needed)
		if config['estimate']:
			filePos = fh.tell()
		
		# Read in the first 100 frames for each tuning/polarization
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4, 4096*100), dtype=numpy.csingle)
			for i in xrange(4*100):
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					break
				except errors.syncError:
					continue
			
				beam,tune,pol = cFrame.parseID()
				aStand = 2*(tune-1) + pol
			
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
		
		# Go back to where we started
			fh.seek(filePos)
		
		# Compute the robust mean and standard deviation for I and Q for each
		# tuning/polarization
			meanI = []
			meanQ = []
			stdsI = []
			stdsQ = []
			#for i in xrange(4):
			for i in xrange(2):
				meanI.append( robust.mean(data[i,:].real) )
				meanQ.append( robust.mean(data[i,:].imag) )
				
				stdsI.append( robust.std(data[i,:].real) )
				stdsQ.append( robust.std(data[i,:].imag) )
			
			# Come up with the clip levels: robust mean plus five times the robust std
			clip1 = (meanI[0] + meanI[1] + meanQ[0] + meanQ[1]) / 4.0
			#clip2 = (meanI[2] + meanI[3] + meanQ[2] + meanQ[3]) / 4.0
			clip2 = 0
			
			clip1 += 5*(stdsI[0] + stdsI[1] + stdsQ[0] + stdsQ[1]) / 4.0
			#clip2 += 5*(stdsI[2] + stdsI[3] + stdsQ[2] + stdsQ[3]) / 4.0
			clip2 += 0
			
			clip1 = int(round(clip1))
			clip2 = int(round(clip2))
			
		else:
			clip1 = config['clip']
			clip2 = config['clip']
	
		# Master loop over all of the file chunks
		#masterSpectra = numpy.zeros((nChunks, 4, LFFT-1))
		masterSpectra = numpy.zeros((nChunks, 4, fch-fcl))
		masterTimes = numpy.zeros(nChunks)
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (maxFrames),
			# only deal with that chunk
			framesRemaining = nFrames - i*maxFrames
			if framesRemaining > maxFrames:
				framesWork = maxFrames
			else:
				framesWork = framesRemaining
			
			if framesRemaining%(nFrames/10)==0:
				print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
	
	
			
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				break
	
			# Inner loop that actually reads the frames into the data array
			if framesRemaining%(nFrames/10)==0:
				print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)
	
			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
	
				beam,tune,pol = cFrame.parseID()
				aStand = 2*(tune-1) + pol
				if j == 0:
					cTime = cFrame.getTime()
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
	
			# Calculate the spectra for this block of data and then weight the results by 
			# the total number of frames read.  This is needed to keep the averages correct.
			#freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip1)
			freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			
			#freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate, ClipLevel=clip2)
			freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			
			# Save the results to the various master arrays
			masterTimes[i] = cTime
			
			masterSpectra[i,0,:] = tempSpec1[0,fcl:fch]
			masterSpectra[i,1,:] = tempSpec1[1,fcl:fch]
			masterSpectra[i,2,:] = tempSpec2[0,fcl:fch]
			masterSpectra[i,3,:] = tempSpec2[1,fcl:fch]
			
	
			# We don't really need the data array anymore, so delete it
			del(data)
	
		#drxFile.close()
	
		# Now that we have read through all of the chunks, perform the final averaging by
		# dividing by all of the chunks
		outname = "%s_%i_fft_offset_%.9i_frames" % (config['args'][0], beam,offset)

#		numpy.savez(outname, freq=freq, freq1=freq+config['freq1'], freq2=freq+config['freq2'], times=masterTimes, spec=masterSpectra, tInt=(maxFrames*4096/beampols/srate), srate=srate,  standMapper=[4*(beam-1) + i for i in xrange(masterSpectra.shape[1])])

		#print 'fInt = ',(freq+config['freq1'])[1]-(freq+config['freq1'])[0]
		#print 'tInt = ',maxFrames*4096/beampols/srate
		
		masterSpectra[:,0,:] = masterSpectra[:,0:2,:].mean(1)
		masterSpectra[:,1,:] = masterSpectra[:,2:4,:].mean(1)

		numpy.save(outname[-46:], masterSpectra[:,1,:])
		#numpy.save(outname[-46:], masterSpectra[:,0:2,:])
		#numpy.save(outname[-46:], masterSpectra)
#		spec = numpy.squeeze( (masterWeight*masterSpectra).sum(axis=0) / masterWeight.sum(axis=0) )
	
#		offset_i = offset_i + 1
	
	print time.time()-t0
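
The maxFrames expression near the top of main() rounds the frame budget so that each chunk holds a whole number of LFFT-sample FFTs per beam/tuning/polarization.  A worked check with assumed numbers (maxFramesConfig is a stand-in for config['maxFrames']; all other values mirror the script above):

# Sketch only: verify the chunk-size rounding with illustrative values.
beampols = 4
LFFT = 4096 * 16
maxFramesConfig = 10000  # hypothetical config['maxFrames']
maxFrames = int(1.0 * maxFramesConfig / beampols * 4096 / LFFT) * LFFT // 4096 * beampols
samplesPerStream = maxFrames * 4096 // beampols
# 9984 frames -> 10223616 samples per stream -> 156 whole FFTs, no remainder
print("maxFrames = %i, whole FFTs per stream = %i" % (maxFrames, samplesPerStream // LFFT))
assert maxFrames % beampols == 0 and samplesPerStream % LFFT == 0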