Code example #1
def main(args):
	nodes = 4 #total blades used
	pps = 6   #process per blade

	windownumber = 4 # The length of FFT = windownumber * 4096

	#Low tuning frequency range
	Lfcl = 1700 * windownumber
	Lfch = 2100 * windownumber
	#High tuning frequency range
	Hfcl =  670 * windownumber
	Hfch = 1070 * windownumber

	totalrank = nodes*pps
	comm  = MPI.COMM_WORLD
	rank  = comm.Get_rank()
	t0 = time.time()
	nChunks = 3000 #the temporal shape of a file.
	LFFT = 4096 * windownumber #Length of the FFT. 4096 is the size of a frame read. The minimum quantized window length is 4096
	nFramesAvg = 1*4* windownumber # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
	
	#for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
	for offset_i in range(0, 1000 ):# one offset = nChunks*nFramesAvg*worker_rank skipped
		offset_i = totalrank*offset_i + rank # keep the offset an integer frame count
		offset = nChunks*nFramesAvg*offset_i
		# Build the DRX file
		try:
			fh = open(getopt.getopt(args,':')[1][0], "rb")
			nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
		except:
			print getopt.getopt(args,':')[1][0],' not found'
			sys.exit(1)
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				pass
			except ZeroDivisionError:
				print 'zero division error'
				break
		except errors.syncError:
			print 'assuming the srate is 19.6 MHz'
			fh.seek(-drx.FrameSize+1, 1)
		fh.seek(-drx.FrameSize, 1)
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 0:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)
		# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time + offset is greater than file length")
		# Master loop over all of the file chunks
		#freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
		#tInt = 1.0*LFFT/srate
		#print 'Temporal resl = ',tInt
		#print 'Channel width = ',1./tInt
		#freq1 = freq+centralFreq1
		#freq2 = freq+centralFreq2
		#print tInt,freq1.mean(),freq2.mean()
		masterSpectra = numpy.zeros((nChunks, 2, Lfch-Lfcl))
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (nFramesAvg),
			# only deal with that chunk
			framesRemaining = nFrames - i*nFramesAvg
			if framesRemaining > nFramesAvg:
				framesWork = nFramesAvg
			else:
				framesWork = framesRemaining
			#if framesRemaining%(nFrames/10)==0:
			#	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				print 'data.shape[1]< LFFT, break'
				break
			# Inner loop that actually reads the frames into the data array
			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
				beam,tune,pol = cFrame.parseID()
				if tune == 0:
					tune += 1
				aStand = 2*(tune-1) + pol
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
			# Calculate the spectra for this block of data, in the unit of intensity
			masterSpectra[i,0,:] = ((numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:])[:,Lfcl:Lfch])**2.).mean(0)/LFFT/2.
			masterSpectra[i,1,:] = ((numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:])[:,Hfcl:Hfch])**2.).mean(0)/LFFT/2.
		# Save the results to the various master arrays
                outname = "%s_%i_fft_offset_%.9i_frames" % (getopt.getopt(args,':')[1][0], beam,offset)
		numpy.save(outname,masterSpectra)
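The loop above is how every MPI worker in these examples claims its own slice of the DRX file: chunk indices are striped across ranks, and each chunk covers nChunks*nFramesAvg frames. A minimal sketch of that striping follows, with the rank faked as a plain integer so the arithmetic can be checked without mpi4py; the helper name and iteration count are made up for illustration, the numeric values are the ones used in this example.

nodes, pps = 4, 6                # blades and processes per blade, as above
totalrank = nodes * pps          # number of MPI workers
nChunks, nFramesAvg = 3000, 16   # one saved file covers nChunks*nFramesAvg frames

def frame_offsets(rank, n_passes=3):
    # First few frame offsets handled by one worker: rank r takes chunk
    # indices r, r+totalrank, r+2*totalrank, ... so no two workers overlap.
    offsets = []
    for i in range(n_passes):
        offset_i = totalrank * i + rank
        offsets.append(nChunks * nFramesAvg * offset_i)
    return offsets

for r in (0, 1, 23):
    print("rank %2d -> %s" % (r, frame_offsets(r)))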
Code example #2
def main(args):
    totalrank = 12
    nodes = 2
    pps = 6
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    t0 = time.time()
    nChunks = 10000  #the temporal shape of a file.
    LFFT = 4096  #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1 * 4 * LFFT / 4096  # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    fn = sorted(glob.glob('waterfall05*.npy'))
    j = numpy.zeros((len(fn)))
    for i in range(len(fn)):
        j[i] = fn[i][39:48]

    #x = total perfect offset
    x = numpy.arange(j[-1] / nChunks / nFramesAvg) * nChunks * nFramesAvg
    # k = the difference between perfect and real
    k = numpy.setdiff1d(x, j)

    for m in xrange(len(k) / totalrank):
        #print 'offset = ',k[m*totalrank + rank]

        #for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
        #for offset_i in range(100, 1000 ):# one offset = nChunks*nFramesAvg skipped
        #offset_i = 1.*totalrank*offset_i + rank
        #offset = nChunks*nFramesAvg*offset_i
        offset = k[m * totalrank + rank]
        # Build the DRX file
        try:
            fh = open(getopt.getopt(args, ':')[1][0], "rb")
            nFramesFile = os.path.getsize(getopt.getopt(
                args, ':')[1][0]) / drx.FrameSize  #drx.FrameSize = 4128
        except:
            print getopt.getopt(args, ':')[1][0], ' not found'
            sys.exit(1)
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                pass
            except ZeroDivisionError:
                print 'zero division error'
                break
        except errors.syncError:
            print 'assuming the srate is 19.6 MHz'
            srate = 19600000.0
            fh.seek(-drx.FrameSize + 1, 1)
        fh.seek(-drx.FrameSize, 1)
        beam, tune, pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        if offset != 0:
            fh.seek(offset * drx.FrameSize, 1)
        if nChunks == 0:
            nChunks = 1
        nFrames = nFramesAvg * nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b, t, p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            else:
                pass
        fh.seek(-4 * drx.FrameSize, 1)
        # Sanity check
        if nFrames > (nFramesFile - offset):
            raise RuntimeError(
                "Requested integration time + offset is greater than file length"
            )
        # Master loop over all of the file chunks
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1.0 / srate))
        tInt = 1.0 * LFFT / srate
        print 'Temporal resl = ', tInt
        print 'Channel width = ', 1. / tInt
        freq1 = freq + centralFreq1
        freq2 = freq + centralFreq2
        #print tInt,freq1.mean(),freq2.mean()
        masterSpectra = numpy.zeros((nChunks, 2, LFFT - 1))
        for i in xrange(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum of frames we can work with at a time (nFramesAvg),
            # only deal with that chunk
            framesRemaining = nFrames - i * nFramesAvg
            if framesRemaining > nFramesAvg:
                framesWork = nFramesAvg
            else:
                framesWork = framesRemaining
            #if framesRemaining%(nFrames/10)==0:
            #	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
            count = {0: 0, 1: 0, 2: 0, 3: 0}
            data = numpy.zeros((4, framesWork * 4096 / beampols),
                               dtype=numpy.csingle)
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < LFFT:
                print 'data.shape[1]< LFFT, break'
                break
            # Inner loop that actually reads the frames into the data array
            for j in xrange(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = drx.readFrame(fh, Verbose=False)
                except errors.eofError:
                    print "EOF Error"
                    break
                except errors.syncError:
                    print "Sync Error"
                    continue
                beam, tune, pol = cFrame.parseID()
                if tune == 0:
                    tune += 1
                aStand = 2 * (tune - 1) + pol
                data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                     4096] = cFrame.data.iq
                count[aStand] += 1
            # Calculate the spectra for this block of data
            masterSpectra[i,
                          0, :] = ((numpy.fft.fftshift(
                              numpy.abs(numpy.fft.fft2(data[:2, :]))[:, 1:]))**
                                   2.).mean(0) / LFFT / 2.  #in unit of energy
            masterSpectra[i,
                          1, :] = ((numpy.fft.fftshift(
                              numpy.abs(numpy.fft.fft2(data[2:, :]))[:, 1:]))**
                                   2.).mean(0) / LFFT / 2.  #in unit of energy
            # Save the results to the various master arrays
            #print masterSpectra.shape
            #numpy.save('data',data)
            #sys.exit()
            #if i % 100 ==1 :
            #	print i, ' / ', nChunks
        outname = "%s_%i_fft_offset_%.9i_frames" % (getopt.getopt(
            args, ':')[1][0], beam, offset)
        numpy.save('waterfall' + outname, masterSpectra.mean(0))
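Example #2 re-runs only the work that is missing: it recovers the offsets already processed from the names of the existing waterfall*.npy files (the [39:48] slice is specific to the author's filenames), builds the full grid of offsets that should exist, and uses numpy.setdiff1d to find the gaps, which are then striped across the MPI ranks. A small sketch of that bookkeeping, with made-up offsets:

import numpy

nChunks, nFramesAvg = 10000, 4
step = nChunks * nFramesAvg                         # frames per saved file
done = numpy.array([0, 1, 3, 4]) * step             # offsets recovered from filenames (made up)
perfect = numpy.arange(done.max() // step) * step   # every offset that should exist
missing = numpy.setdiff1d(perfect, done)            # the gaps left to re-process
print(missing)                                      # -> [80000], i.e. 2*step
# With totalrank workers, rank r would then handle missing[r::totalrank],
# which is what k[m*totalrank + rank] walks through above.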
Code example #3
def worker(offset):
    log("Working on offset %d" % offset)
    # Build the DRX file
    try:
        fh = open(filename, "rb")
    except:
        log("File not fonud: %s" % filename)
        sys.exit(1)
    try:
        junkFrame = drx.readFrame(fh)
        try:
            srate = junkFrame.getSampleRate()
            pass
        except ZeroDivisionError:
            log('zero division error')
            return
    except errors.syncError:
        log('assuming the srate is 19.6 MHz')
        srate = 19600000.0
        fh.seek(-drx.FrameSize + 1, 1)
    fh.seek(-drx.FrameSize, 1)
    beam, tune, pol = junkFrame.parseID()
    beams = drx.getBeamCount(fh)
    tunepols = drx.getFramesPerObs(fh)
    tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
    beampols = tunepol
    if offset != 0:
        fh.seek(offset * drx.FrameSize, 1)
    #if nChunks == 0:
    #    nChunks = 1
    nFrames = nFramesAvg * nChunks
    centralFreq1 = 0.0
    centralFreq2 = 0.0
    for i in xrange(4):
        junkFrame = drx.readFrame(fh)
        b, t, p = junkFrame.parseID()
        if p == 0 and t == 0:
            try:
                centralFreq1 = junkFrame.getCentralFreq()
            except AttributeError:
                from dp import fS
                centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                     (2**32 - 1)) / 2**32
        elif p == 0 and t == 2:
            try:
                centralFreq2 = junkFrame.getCentralFreq()
            except AttributeError:
                from dp import fS
                centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                     (2**32 - 1)) / 2**32
        else:
            pass
    fh.seek(-4 * drx.FrameSize, 1)
    # Sanity check
    if nFrames > (nFramesFile - offset):
        raise RuntimeError(
            "Requested integration time + offset is greater than file length")
    # Master loop over all of the file chunks
    #freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
    #tInt = 1.0*LFFT/srate
    #print 'Temporal resl = ',tInt
    #print 'Channel width = ',1./tInt
    #freq1 = freq+centralFreq1
    #freq2 = freq+centralFreq2
    #print tInt,freq1.mean(),freq2.mean()
    masterSpectra = numpy.zeros(
        (nChunks, 2, Lfch - Lfcl))  # Add 2 out front for two pols
    for i in xrange(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum of frames we can work with at a time (nFramesAvg),
        # only deal with that chunk
        framesRemaining = nFrames - i * nFramesAvg
        if framesRemaining > nFramesAvg:
            framesWork = nFramesAvg
        else:
            framesWork = framesRemaining
        #if framesRemaining%(nFrames/10)==0:
        #   print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
        count = {0: 0, 1: 0, 2: 0, 3: 0}
        data = numpy.zeros((4, framesWork * 4096 / beampols),
                           dtype=numpy.csingle)
        # If there are fewer frames than we need to fill an FFT, skip this chunk
        if data.shape[1] < LFFT:
            log('data.shape[1]< LFFT, break')
            return
        # Inner loop that actually reads the frames into the data array
        for j in xrange(framesWork):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = drx.readFrame(fh, Verbose=False)
            except errors.eofError:
                log("EOF Error")
                return
            except errors.syncError:
                log("Sync Error")
                return
            beam, tune, pol = cFrame.parseID()
            if tune == 0:
                tune += 1
            aStand = 2 * (tune - 1) + pol
            data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                 4096] = cFrame.data.iq
            count[aStand] += 1
        # Calculate the spectra for this block of data, in the unit of intensity
        masterSpectra[i, 0, :] = (
            (numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(
                data[:2, :]))[:, 1:])[:, Lfcl:Lfch])**2.).mean(0) / LFFT / 2.
        masterSpectra[i, 1, :] = (
            (numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(
                data[2:, :]))[:, 1:])[:, Hfcl:Hfch])**2.).mean(0) / LFFT / 2.
    # Save the results to the various master arrays
    outname = "%s_%i_fft_offset_%.9i_frames" % (filename, beam, offset)
    log("Writing %s" % outname)
    numpy.save(outname, masterSpectra)
Code example #4
File: dva0.py Project: ilikeit813/Project-Backup
def main(args):
    #comm = MPI.COMM_WORLD
    comm = 0
    #rank = comm.Get_rank()
    rank = 0
    tunes = int(getopt.getopt(args,':')[1][1]) # 0: low, 1: high, read in from your command line
    nodes =  1 #89 #the number of nodes requested in sh
    pps   =  1 #processors per node requested in sh

    fcl =  6000+7000 #low frequency cut off
    fch =  fcl+3343 #high frequency cut off

    nChunks = 3000 #the temporal shape of a file.
    LFFT = 4096*16 #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    offset_i = rank # range to 4309

    if rank == 0:
        fh = open(getopt.getopt(args,':')[1][0], "rb")
        nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        fh.seek(-drx.FrameSize, 1)
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b,t,p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq1 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq2 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            else:
                pass
            print i,centralFreq1,centralFreq2
        fh.seek(-4*drx.FrameSize, 1)
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
        freq1 = (freq+centralFreq1)[fcl:fch]
        freq2 = (freq+centralFreq2)[fcl:fch]
        np.save('tInt', 1.*LFFT/srate)
        np.save('freq1',freq1)
        np.save('freq2',freq2)

    for j in range(44):

        offset = nChunks*nFramesAvg*offset_i + j*(nChunks*nFramesAvg*nodes*pps)
        # Build the DRX file
        fh = open(getopt.getopt(args,':')[1][0], "rb")
        nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        fh.seek(-drx.FrameSize, 1)
        beam,tune,pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        fh.seek(offset*drx.FrameSize, 1)
        nFrames = nFramesAvg*nChunks

        '''
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b,t,p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq1 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq2 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            else:
                pass
        fh.seek(-4*drx.FrameSize, 1)
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
        tInt = 1.0*LFFT/srate
        freq1 = (freq+centralFreq1)[fcl:fch]
        freq2 = (freq+centralFreq2)[fcl:fch]
        #print tInt,freq1.mean(),freq2.mean()
        '''

        masterSpectra = numpy.zeros((nChunks, 2, fch-fcl))
        for i in xrange(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum of frames we can work with at a time (nFramesAvg),
            # only deal with that chunk
            framesRemaining = nFrames - i*nFramesAvg
            if framesRemaining > nFramesAvg:
                framesWork = nFramesAvg
            else:
                framesWork = framesRemaining
            #if framesRemaining%(nFrames/10)==0:
            #   print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
            count = {0:0, 1:0, 2:0, 3:0}
            data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < LFFT:
                print 'data.shape[1]< LFFT, break'
                break
            # Inner loop that actually reads the frames into the data array
            for j in xrange(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = drx.readFrame(fh, Verbose=False)
                except errors.eofError:
                    print "EOF Error"
                    break
                except errors.syncError:
                    print "Sync Error"
                    continue
                beam,tune,pol = cFrame.parseID()
                if tune == 0:
                    tune += 1
                aStand = 2*(tune-1) + pol
                if j == 0:
                    cTime = cFrame.getTime()
                data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
                count[aStand] +=  1
            #for k in range(2):
                #masterSpectra[i,k,:] = (np.abs(np.fft.fftshift(np.fft.fft(data[k+2*tunes,:]))[1:])**2/LFFT)[fcl:fch]
            del(data)
            print fh.tell()
        outname = "%s_%i_fft_offset_%.9i_frames" % (getopt.getopt(args,':')[1][0], beam,offset)
Code example #5
File: t03.py Project: ilikeit813/Project-Backup
def main(args):
	t0 = time.time()
	nChunks = 1000 #the temporal shape of a file.
	LFFT = 4096*16 #Length of the FFT. 4096 is the size of a frame read.
	nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
	fcl = 6000+7000
	fch = fcl + 3343 #60000
	#for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
	for offset_i in range(0, 1):# one offset = nChunks*nFramesAvg skipped
		offset = nChunks*nFramesAvg*offset_i
		# Build the DRX file
		try:
			fh = open(getopt.getopt(args,':')[1][0], "rb")
			nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
		except:
			print getopt.getopt(args,':')[1][0],' not found'
			sys.exit(1)
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				pass
			except ZeroDivisionError:
				print 'zero division error'
				break
		except errors.syncError:
			print 'assuming the srate is 19.6 MHz'
			srate = 19600000.0
			fh.seek(-drx.FrameSize+1, 1)
		fh.seek(-drx.FrameSize, 1)
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 0:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)
		# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time + offset is greater than file length")
		# Master loop over all of the file chunks
		freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
		tInt = 1.0*LFFT/srate, nFramesAvg*4096/beampols/srate
		freq1 = freq+centralFreq1
		freq2 = freq+centralFreq2
		#print tInt,freq1.mean(),freq2.mean()
		masterSpectra = numpy.zeros((nChunks, 2, fch-fcl))
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (nFramesAvg),
			# only deal with that chunk
			framesRemaining = nFrames - i*nFramesAvg
			if framesRemaining > nFramesAvg:
				framesWork = nFramesAvg
			else:
				framesWork = framesRemaining
			#if framesRemaining%(nFrames/10)==0:
			#	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				print 'data.shape[1]< LFFT, break'
				break
			# Inner loop that actually reads the frames into the data array
			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
				beam,tune,pol = cFrame.parseID()
				if tune == 0:
					tune += 1
				aStand = 2*(tune-1) + pol
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
			# Calculate the spectra for this block of data
			#tempSpec1 = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:]/2.)[:,fcl:fch].mean(0)**2./LFFT*2. #in unit of energy
			masterSpectra[i,0,:] = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:])[:,fcl:fch].mean(0)**2./LFFT/2. #in unit of energy
			#tempSpec2 = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:]/2.)[:,fcl:fch].mean(0)**2./LFFT*2. #in unit of energy
			masterSpectra[i,1,:] = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:])[:,fcl:fch].mean(0)**2./LFFT/2. #in unit of energy
			# Save the results to the various master arrays
			print data.shape
			print masterSpectra.shape
			numpy.save('data',data)
			sys.exit()
		numpy.save('jamie',masterSpectra)
	print time.time()-t0
	print masterSpectra.shape
Code example #6
def main(args):
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    tunes = int(getopt.getopt(args,':')[1][1]) # 0: low, 1: high, read in from your command line
    nodes =  1 #89 #the number of nodes requested in sh
    pps   =  16 #processors per node requested in sh

    fcl =  6000+7000 #low frequency cut off
    fch =  fcl+3343 #high frequency cut off

    nChunks = 3000 #the temporal shape of a file.
    LFFT = 4096*16 #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    offset_i = rank # range to 4309


    fn = sorted(glob.glob('05*.npy'))
    j = np.zeros((len(fn)))
    for i in range(len(fn)):
        j[i] = fn[i][30:39]

    k=[]
    for i in range(len(j)-1):
        if j[i+1] != j[i] + nChunks*nFramesAvg:
            k.append(j[i]+nChunks*nFramesAvg)

    l = len(k)/(nodes*pps)+1

    if rank == 0 :
        fh = open(getopt.getopt(args,':')[1][0], "rb")
        nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
        junkFrame = drx.readFrame(fh)
        srate = junkFrame.getSampleRate()
        fh.seek(-drx.FrameSize, 1)
        beam,tune,pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        nFrames = nFramesAvg*nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b,t,p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq1 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    centralFreq2 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
            else:
                pass
        fh.seek(-4*drx.FrameSize, 1)
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
        tInt = 1.0*LFFT/srate
        freq1 = (freq+centralFreq1)[fcl:fch]
        freq2 = (freq+centralFreq2)[fcl:fch]
        np.save('freq1',freq1)
        np.save('freq2',freq2)
        np.save('tInt',tInt)
Code example #7
def main(args):

	windownumber = 2
	nodes = 1
	pps = 6
	nChunks = 1000 #the temporal shape of a file.

	#Low tuning frequency range
	Lfcl =  360 * windownumber
	Lfch = 3700 * windownumber
	#High tuning frequency range
	Hfcl =  360 * windownumber
	Hfch = 3700 * windownumber

	LFFT = 4096 * windownumber #Length of the FFT. 4096 is the size of a frame read.
	nFramesAvg = 1*4*windownumber # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
	totalrank = nodes*pps
	comm  = MPI.COMM_WORLD
	rank  = comm.Get_rank()
	t0 = time.time()

	fn = sorted(glob.glob('05*.npy'))
	j = numpy.zeros((len(fn)))
	for i in range(len(fn)):
		j[i] = fn[i][30:39]

	#x = total perfect offset
	x = numpy.arange(j[-1]/nChunks/nFramesAvg)*nChunks*nFramesAvg
	# k = the difference between perfect and real
	k = numpy.setdiff1d(x, j)

	for m in xrange(len(k)/totalrank):
		offset = k[m*totalrank + rank]
		# Build the DRX file
		try:
			fh = open(getopt.getopt(args,':')[1][0], "rb")
			nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
		except:
			print getopt.getopt(args,':')[1][0],' not found'
			sys.exit(1)
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				pass
			except ZeroDivisionError:
				print 'zero division error'
				break
		except errors.syncError:
			print 'assuming the srate is 19.6 MHz'
			fh.seek(-drx.FrameSize+1, 1)
		fh.seek(-drx.FrameSize, 1)
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 0:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)
		# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time + offset is greater than file length")
		masterSpectra = numpy.zeros((nChunks, 2, Lfch-Lfcl))
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (nFramesAvg),
			# only deal with that chunk
			framesRemaining = nFrames - i*nFramesAvg
			if framesRemaining > nFramesAvg:
				framesWork = nFramesAvg
			else:
				framesWork = framesRemaining
			#if framesRemaining%(nFrames/10)==0:
			#	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				print 'data.shape[1]< LFFT, break'
				break
			# Inner loop that actually reads the frames into the data array
			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
				beam,tune,pol = cFrame.parseID()
				if tune == 0:
					tune += 1
				aStand = 2*(tune-1) + pol
				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
			# Calculate the spectra for this block of data, in the unit of intensity
			masterSpectra[i,0,:] = ((numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:])[:,Lfcl:Lfch])**2.).mean(0)/LFFT/2.
			masterSpectra[i,1,:] = ((numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:])[:,Hfcl:Hfch])**2.).mean(0)/LFFT/2.
		# Save the results to the various master arrays
                outname = "%s_%i_fft_offset_%.9i_frames" % (getopt.getopt(args,':')[1][0], beam,offset)
		numpy.save(outname,masterSpectra)
Code example #8
def main(args):
    windownumber = 4

    #Low tuning frequency range
    Lfcl = 2700 * windownumber
    Lfch = 2800 * windownumber
    #High tuning frequency range
    Hfcl = 1500 * windownumber
    Hfch = 1600 * windownumber

    nChunks = 3000  #the temporal shape of a file.
    LFFT = 4096 * windownumber  #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1 * 4 * windownumber  # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    #for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
    for offset_i in range(0, 1):  # one offset = nChunks*nFramesAvg skipped
        offset = 0
        # Build the DRX file
        try:
            fh = open(getopt.getopt(args, ':')[1][0], "rb")
            nFramesFile = os.path.getsize(getopt.getopt(
                args, ':')[1][0]) / drx.FrameSize  #drx.FrameSize = 4128
        except:
            print getopt.getopt(args, ':')[1][0], ' not found'
            sys.exit(1)
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                pass
            except ZeroDivisionError:
                print 'zero division error'
                break
        except errors.syncError:
            print 'assuming the srate is 19.6 MHz'
            srate = 19600000.0
            fh.seek(-drx.FrameSize + 1, 1)
        fh.seek(-drx.FrameSize, 1)
        beam, tune, pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        if offset != 0:
            fh.seek(offset * drx.FrameSize, 1)
        if nChunks == 0:
            nChunks = 1
        nFrames = nFramesAvg * nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b, t, p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            else:
                pass
        fh.seek(-4 * drx.FrameSize, 1)
        # Sanity check
        if nFrames > (nFramesFile - offset):
            raise RuntimeError(
                "Requested integration time + offset is greater than file length"
            )
        # Master loop over all of the file chunks
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1.0 / srate))
        tInt = 1.0 * LFFT / srate
        print 'Temporal resl = ', tInt
        print 'Channel width = ', 1. / tInt
        freq1 = freq + centralFreq1
        freq2 = freq + centralFreq2
        print 'Low  freq  = ', freq1[Lfcl], freq1[
            Lfch], ' at', freq1[Lfcl] / 2 + freq1[Lfch] / 2
        print 'High freq  = ', freq2[Hfcl], freq2[
            Hfch], ' at', freq2[Hfcl] / 2 + freq2[Hfch] / 2
        numpy.save('tInt', tInt)
        numpy.save('freq1', freq1[Lfcl:Lfch])
        numpy.save('freq2', freq2[Hfcl:Hfch])
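Example #8 only works out and saves the frequency axes and the integration time; the quantities it prints follow directly from the FFT length and the sample rate (channel width = srate/LFFT = 1/tInt). A short worked sketch, assuming the 19.6 MHz sample rate that these scripts fall back to when the first frame cannot be parsed:

import numpy

srate = 19.6e6                   # samples/s (assumed; the fallback rate used in these scripts)
windownumber = 4
LFFT = 4096 * windownumber       # FFT length = windownumber frames of 4096 samples
tInt = 1.0 * LFFT / srate        # temporal resolution of one FFT, ~0.84 ms
chan_width = srate / LFFT        # = 1/tInt, ~1.2 kHz per channel
freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1.0 / srate))
# Adding a tuning's central frequency turns this baseband axis into sky frequency,
# and slicing [Lfcl:Lfch] (or [Hfcl:Hfch]) keeps only the channels of interest.
print("tInt = %g s, channel width = %g Hz" % (tInt, chan_width))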
Code example #9
def main(args):
    log("Hello, world.")  # Mic check
    nChunks = 10000  #the temporal shape of a file.
    LFFT = 4096  #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1 * 4 * LFFT / 4096  # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
    filename = args[0]
    nFramesFile = os.path.getsize(
        filename) / drx.FrameSize  #drx.FrameSize = 4128
    lastOffset = int(nFramesFile / (totalrank * nChunks * nFramesAvg))
    log("fileSize %d" % os.path.getsize(filename))
    log("nFramesFile %d" % nFramesFile)
    log("lastOffset %d" % lastOffset)
    #for offset in [19320000,  19520000,  19720000,  19920000]:
    for offset_i in xrange(
            lastOffset):  # one offset = nChunks*nFramesAvg skipped
        offset_i = totalrank * offset_i + rank  # keep the offset an integer frame count
        offset = nChunks * nFramesAvg * offset_i
        log("Working on offset %d" % offset)
        # Build the DRX file
        try:
            fh = open(filename, "rb")
        except:
            log("File not found: %s" % filename)
            sys.exit(1)
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                pass
            except ZeroDivisionError:
                log('zero division error')
                break
        except errors.syncError:
            log('assuming the srate is 19.6 MHz')
            srate = 19600000.0
            fh.seek(-drx.FrameSize + 1, 1)
        fh.seek(-drx.FrameSize, 1)
        beam, tune, pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        if offset != 0:
            fh.seek(offset * drx.FrameSize, 1)
        if nChunks == 0:
            nChunks = 1
        nFrames = nFramesAvg * nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b, t, p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            else:
                pass
        fh.seek(-4 * drx.FrameSize, 1)
        # Sanity check
        if nFrames > (nFramesFile - offset):
            raise RuntimeError(
                "Requested integration time + offset is greater than file length"
            )
        # Master loop over all of the file chunks
        freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=1.0 / srate))
        tInt = 1.0 * LFFT / srate
        log('Temporal resl = %f' % tInt)
        log('Channel width = %f' % (1. / tInt))
        freq1 = freq + centralFreq1
        freq2 = freq + centralFreq2
        #print tInt,freq1.mean(),freq2.mean()
        masterSpectra = numpy.zeros((nChunks, 2, LFFT - 1))
        for i in xrange(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum of frames we can work with at a time (nFramesAvg),
            # only deal with that chunk
            framesRemaining = nFrames - i * nFramesAvg
            if framesRemaining > nFramesAvg:
                framesWork = nFramesAvg
            else:
                framesWork = framesRemaining
            #if framesRemaining%(nFrames/10)==0:
            #   print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
            count = {0: 0, 1: 0, 2: 0, 3: 0}
            data = numpy.zeros((4, framesWork * 4096 / beampols),
                               dtype=numpy.csingle)
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < LFFT:
                log('data.shape[1]< LFFT, break')
                break
            # Inner loop that actually reads the frames into the data array
            for j in xrange(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = drx.readFrame(fh, Verbose=False)
                except errors.eofError:
                    log("EOF Error")
                    break
                except errors.syncError:
                    log("Sync Error")
                    continue
                beam, tune, pol = cFrame.parseID()
                if tune == 0:
                    tune += 1
                aStand = 2 * (tune - 1) + pol
                data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                     4096] = cFrame.data.iq
                count[aStand] += 1
            # Calculate the spectra for this block of data
            masterSpectra[i,
                          0, :] = ((numpy.fft.fftshift(
                              numpy.abs(numpy.fft.fft2(data[:2, :]))[:, 1:]))**
                                   2.).mean(0) / LFFT / 2.  #in unit of energy
            masterSpectra[i,
                          1, :] = ((numpy.fft.fftshift(
                              numpy.abs(numpy.fft.fft2(data[2:, :]))[:, 1:]))**
                                   2.).mean(0) / LFFT / 2.  #in unit of energy
            # Save the results to the various master arrays
            #print masterSpectra.shape
            #numpy.save('data',data)
            #sys.exit()
            #if i % 100 ==1 :
            #   print i, ' / ', nChunks
        outname = "%s_%i_fft_offset_%.9i_frames" % (filename, beam, offset)
        log("Writing %s" % outname)
        numpy.save('waterfall' + outname, masterSpectra.mean(0))
Code example #10
File: dva1.py Project: ilikeit813/Project-Backup
def main(args):
    comm  = MPI.COMM_WORLD
    rank  = comm.Get_rank()
    tuneSel = 0 # 0 : low, 1: high (tuning selector; `tune` is overwritten by parseID below)

    nodes =  2 #89 #the number of nodes requested in sh
    pps   =  16 #processors per node requested in sh
    numberofFiles = nodes*pps #totalnumberofspec = 6895.

    maxpw = 10 #Maximum pulse width to search in seconds. default = 1 s.
    #dDM   = 1.0/(3700-360)*10 #1.0/len(freq)
    thresh= 5.0 #SNR cut off

    DMstart= 9.15 #1.0 #initial DM trial
    DMend  = 9.20 #90.0 #final DM trial

    fcl =  6000 #low frequency cut off
    fch = 60000 #high frequency cut off

    #fn   = sorted(glob.glob('05*.npy')) 
    #tInt = np.load('tInt.npy')
    #std = np.zeros((fpp, nodes*pps, 2))
    std = np.zeros((nodes*pps, 2))

    nChunks = 3000 #the temporal shape of a file.
    LFFT = 4096*16 #Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
    
    offset_i = rank # range to 4309
    offset = nChunks*nFramesAvg*offset_i
    # Build the DRX file
    fh = open(getopt.getopt(args,':')[1][0], "rb")
    nFramesFile = os.path.getsize(getopt.getopt(args,':')[1][0]) / drx.FrameSize #drx.FrameSize = 4128
    junkFrame = drx.readFrame(fh)
    srate = junkFrame.getSampleRate()
    fh.seek(-drx.FrameSize, 1)
    beam,tune,pol = junkFrame.parseID()
    beams = drx.getBeamCount(fh)
    tunepols = drx.getFramesPerObs(fh)
    tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
    beampols = tunepol
    fh.seek(offset*drx.FrameSize, 1)
    nFrames = nFramesAvg*nChunks
    centralFreq1 = 0.0
    centralFreq2 = 0.0
    for i in xrange(4):
        junkFrame = drx.readFrame(fh)
        b,t,p = junkFrame.parseID()
        if p == 0 and t == 0:
            try:
                centralFreq1 = junkFrame.getCentralFreq()
            except AttributeError:
                centralFreq1 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
        elif p == 0 and t == 2:
            try:
                centralFreq2 = junkFrame.getCentralFreq()
            except AttributeError:
                centralFreq2 = dp.fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
        else:
            pass
    fh.seek(-4*drx.FrameSize, 1)
    # Master loop over all of the file chunks
    freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
    tInt = 1.0*LFFT/srate
    npws = int(np.round(np.log2(maxpw/tInt)))+1 # +1 Due to in range(y) it goes to y-1 only
    freq1 = (freq+centralFreq1)[fcl:fch]
    freq2 = (freq+centralFreq2)[fcl:fch]
    #print tInt,freq1.mean(),freq2.mean()
    masterSpectra = numpy.zeros((nChunks, 2, fch-fcl))
    for i in xrange(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum of frames we can work with at a time (nFramesAvg),
        # only deal with that chunk
        framesRemaining = nFrames - i*nFramesAvg
        if framesRemaining > nFramesAvg:
            framesWork = nFramesAvg
        else:
            framesWork = framesRemaining
        #if framesRemaining%(nFrames/10)==0:
        #   print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
        count = {0:0, 1:0, 2:0, 3:0}
        data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
        # If there are fewer frames than we need to fill an FFT, skip this chunk
        if data.shape[1] < LFFT:
            print 'data.shape[1]< LFFT, break'
            break
        # Inner loop that actually reads the frames into the data array
        for j in xrange(framesWork):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = drx.readFrame(fh, Verbose=False)
            except errors.eofError:
                print "EOF Error"
                break
            except errors.syncError:
                print "Sync Error"
                continue
            beam,tune,pol = cFrame.parseID()
            if tune == 0:
                tune += 1
            aStand = 2*(tune-1) + pol
            if j == 0:
                cTime = cFrame.getTime()
            data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
            count[aStand] +=  1
        # Calculate the spectra for this block of data
        #masterSpectra[i,0,:] = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:])[:,fcl:fch].mean(0)**2./LFFT/2. #in unit of energy
        #masterSpectra[i,1,:] = numpy.fft.fftshift(numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:])[:,fcl:fch].mean(0)**2./LFFT/2. #in unit of energy

        for k in range(2):
            masterSpectra[i,k,:] = (np.abs(np.fft.fftshift(np.fft.fft(data[k+2*tuneSel,:]))[1:])**2/LFFT)[fcl:fch]
        del(data)
#=================================================================

    spectarray = masterSpectra.mean(1) # (x+y) /2
    del(masterSpectra)

    #remove baseline
    spectarray /= np.median(spectarray)
    #remove bandpass
    bp = np.zeros((nodes*pps, fch-fcl))
    bp[rank,:]= np.median(spectarray, 0)
    bpall=bp*0. #initiate a 4 hour blank std
    comm.Allreduce(bp,bpall,op=MPI.SUM) #merge the 4 hour std from all processor
    if rank == 0:
        np.save('bpall',bpall)
    bp = np.median(bpall,0)
    bp = savitzky_golay(bp,151,2)
    spectarray /= bp

    #RFI removal in frequency domain
    std=np.zeros((nodes*pps))
    std[rank]=np.median(spectarray, 0).std()
    stdall=std*0. #initiate a 4 hour blank std
    comm.Allreduce(std,stdall,op=MPI.SUM) #merge the 4 hour std from all processor
    if rank ==0:
        np.save('stdall',stdall)
    spectarray[:,spectarray.mean(0) > np.median(spectarray) + 3*stdall.min()] = np.median(spectarray)

    if tuneSel == 0:
        freq=freq1
    else: 
        freq=freq2
    freq /= 10**6 # convert Hz to MHz
    cent_freq = np.median(freq)
    BW   = freq.max()-freq.min()
    DM=DMstart

    txtsize=np.zeros((npws,2),dtype=np.int32) #fileno = txtsize[ranki,0], pulse number = txtsize[ranki,1],ranki is the decimated order of 2
    txtsize[:,0]=1 #fileno star from 1

    tbmax=0 #repeat control, if dedispersion time series are identical, skip dedispersion calculation
    while DM < DMend:
        if DM >=1000.: dDM = 1.
        else: dDM = 0.02
        tb=np.round((delay2(freq,DM)/tInt)).astype(np.int32)
        if tb.max()-tbmax==0:#identical dedispersion time series checker
            tbmax=tb.max()
            #DM+=dDM*DM
            if DM > 0.:
                DM+=dDM
            #if rank ==0:
            #   print 'DM',DM,'skipped'
            continue
        tbmax=tb.max()
        ts=np.zeros((tb.max()+numberofFiles*spectarray.shape[0]))
        for freqbin in range(len(freq)): 
            ts[tb.max()-tb[freqbin] + rank*spectarray.shape[0] :tb.max()-tb[freqbin] + (rank+1)*spectarray.shape[0] ] += spectarray[:,freqbin]

        tstotal=ts*0#initiate a 4 hour blank time series
        comm.Allreduce(ts,tstotal,op=MPI.SUM)#merge the 4 hour timeseries from all processor
        tstotal = tstotal[tb.max():len(tstotal)-tb.max()]#cut the dispersed time lag

        #'''
        # save the time series around the Pulsar's DM
        if rank == 0:
                #if (DM - 9.1950)**2 <= dDM**2:
                #print '1',tb.max()*tInt
                #print '2',tstotal.shape, tstotal.max(), tstotal.min(), tstotal.std()
                #print 'DM=',DM
                np.save('ts_pol%.1i_DMx100_%.6i' % (pol,DM*100),tstotal)
        #'''
        #"""#search for signal with decimated timeseries
        if rank<npws:#timeseries is ready for signal search
            ranki=rank
            filename = "pp_SNR_pol_%.1i_td_%.2i_no_%.05i.txt" % (pol,ranki,txtsize[ranki,0])
            outfile = open(filename,'a')
            ndown = 2**ranki #decimate the time series
            sn,mean,rms = Threshold(Decimate_ts(tstotal,ndown),thresh,niter=0)
            ones = np.where(sn!=-1)[0]
            for one in ones:# Now record all pulses above threshold
                pulse = OutputSource()
                txtsize[ranki,1] += 1
                pulse.pulse = txtsize[ranki,1]
                pulse.SNR = sn[one]
                pulse.DM = DM
                pulse.time = one*tInt*ndown
                pulse.dtau = tInt*ndown
                pulse.dnu = freq[1]-freq[0]
                pulse.nu = cent_freq
                pulse.mean = mean
                pulse.rms = rms
                outfile.write(pulse.formatter.format(pulse)[:-1]) 
                if txtsize[ranki,1] >200000*txtsize[ranki,0]:
                    outfile.close()
                    txtsize[ranki,0]+=1
                    filename = "pp_SNR_pol_%.1i_td_%.2i_no_%.05d.txt" % (pol,ranki,txtsize[ranki,0])
                    outfile = open(filename,'a')

        #DM+=dDM*DM # End of DM loop
        DM+=dDM # End of DM loop
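The DM loop in example #10 performs incoherent dedispersion: each frequency channel is shifted back by its dispersion delay, quantized to an integer number of tInt samples (tb), and the shifted channels are summed into the time series ts before the MPI Allreduce merges the ranks. delay2 is the author's helper and is not shown, so the sketch below assumes the standard cold-plasma delay of 4.148808e3 s * DM * (nu/MHz)**-2, referenced to the highest channel, and works on a single rank's (time, frequency) dynamic spectrum:

import numpy as np

def dispersion_delay(freq_mhz, DM):
    # Assumed standard dispersion delay in seconds, relative to the highest channel.
    return 4.148808e3 * DM * (freq_mhz**-2 - freq_mhz.max()**-2)

def dedisperse(spectarray, freq_mhz, DM, tInt):
    # spectarray: (time, frequency) dynamic spectrum for one rank.
    tb = np.round(dispersion_delay(freq_mhz, DM) / tInt).astype(np.int32)
    nt = spectarray.shape[0]
    ts = np.zeros(nt + tb.max())
    for fb in range(len(freq_mhz)):
        # shift each channel back by its delay before summing over frequency
        ts[tb.max() - tb[fb]:tb.max() - tb[fb] + nt] += spectarray[:, fb]
    return ts[tb.max():len(ts) - tb.max()]   # trim the dispersed edges, as above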
Code example #11
def main(args):
    nodes = 4  #total blades used
    pps = 6  #process per blade

    windownumber = 4  # The length of FFT = windownumber * 4096

    #Low tuning frequency range
    Lfcl = 1700 * windownumber
    Lfch = 2100 * windownumber
    #High tuning frequency range
    Hfcl = 670 * windownumber
    Hfch = 1070 * windownumber

    totalrank = nodes * pps
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    t0 = time.time()
    nChunks = 3000  #the temporal shape of a file.
    LFFT = 4096 * windownumber  #Length of the FFT. 4096 is the size of a frame read. The minimum quantized window length is 4096
    nFramesAvg = 1 * 4 * windownumber  # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    #for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
    for offset_i in range(
            0, 1000):  # one offset = nChunks*nFramesAvg*worker_rank skipped
        offset_i = totalrank * offset_i + rank  # keep the offset an integer frame count
        offset = nChunks * nFramesAvg * offset_i
        # Build the DRX file
        try:
            fh = open(getopt.getopt(args, ':')[1][0], "rb")
            nFramesFile = os.path.getsize(getopt.getopt(
                args, ':')[1][0]) / drx.FrameSize  #drx.FrameSize = 4128
        except:
            print getopt.getopt(args, ':')[1][0], ' not found'
            sys.exit(1)
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                pass
            except ZeroDivisionError:
                print 'zero division error'
                break
        except errors.syncError:
            print 'assuming the srate is 19.6 MHz'
            fh.seek(-drx.FrameSize + 1, 1)
        fh.seek(-drx.FrameSize, 1)
        beam, tune, pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        if offset != 0:
            fh.seek(offset * drx.FrameSize, 1)
        if nChunks == 0:
            nChunks = 1
        nFrames = nFramesAvg * nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b, t, p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    from dp import fS
                    centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            else:
                pass
        fh.seek(-4 * drx.FrameSize, 1)
        # Sanity check
        if nFrames > (nFramesFile - offset):
            raise RuntimeError(
                "Requested integration time + offset is greater than file length"
            )
        # Master loop over all of the file chunks
        #freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d = 1.0/srate))
        #tInt = 1.0*LFFT/srate
        #print 'Temporal resl = ',tInt
        #print 'Channel width = ',1./tInt
        #freq1 = freq+centralFreq1
        #freq2 = freq+centralFreq2
        #print tInt,freq1.mean(),freq2.mean()
        masterSpectra = numpy.zeros((nChunks, 2, Lfch - Lfcl))
        for i in xrange(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum of frames we can work with at a time (nFramesAvg),
            # only deal with that chunk
            framesRemaining = nFrames - i * nFramesAvg
            if framesRemaining > nFramesAvg:
                framesWork = nFramesAvg
            else:
                framesWork = framesRemaining
            #if framesRemaining%(nFrames/10)==0:
            #	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
            count = {0: 0, 1: 0, 2: 0, 3: 0}
            data = numpy.zeros((4, framesWork * 4096 / beampols),
                               dtype=numpy.csingle)
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < LFFT:
                print 'data.shape[1]< LFFT, break'
                break
            # Inner loop that actually reads the frames into the data array
            for j in xrange(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = drx.readFrame(fh, Verbose=False)
                except errors.eofError:
                    print "EOF Error"
                    break
                except errors.syncError:
                    print "Sync Error"
                    continue
                beam, tune, pol = cFrame.parseID()
                if tune == 0:
                    tune += 1
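                # Map (tune, pol) onto a row index 0-3: rows 0 and 1 hold the two
                # polarisations of tuning 1, rows 2 and 3 those of tuning 2.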
                aStand = 2 * (tune - 1) + pol
                data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                     4096] = cFrame.data.iq
                count[aStand] += 1
            # Calculate the spectra for this block of data, in the unit of intensity
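            # The shifted magnitude spectrum is cut down to channels Lfcl:Lfch
            # (low tuning) or Hfcl:Hfch (high tuning), squared to power, averaged
            # over the first axis and divided by LFFT and a factor of two.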
            masterSpectra[i, 0, :] = ((numpy.fft.fftshift(
                numpy.abs(numpy.fft.fft2(data[:2, :]))[:, 1:])[:, Lfcl:Lfch])**
                                      2.).mean(0) / LFFT / 2.
            masterSpectra[i, 1, :] = ((numpy.fft.fftshift(
                numpy.abs(numpy.fft.fft2(data[2:, :]))[:, 1:])[:, Hfcl:Hfch])**
                                      2.).mean(0) / LFFT / 2.
        # Save the results to the various master arrays
        outname = "%s_%i_fft_offset_%.9i_frames" % (getopt.getopt(
            args, ':')[1][0], beam, offset)
        numpy.save(outname, masterSpectra)
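
Each pass of the loop above writes one NumPy array of shape (nChunks, 2, Lfch - Lfcl) per offset. As a quick check that the outputs are usable, a minimal sketch for reading the saved files back and collapsing the chunk axis could look like the following; the glob pattern and the chunk-averaging step are assumptions for illustration, not part of the original script.

import glob
import numpy

# Assumed pattern for the files written by the loop above; the exact names
# depend on the input file, beam and offset.
for specfile in sorted(glob.glob('*_fft_offset_*_frames.npy')):
    spec = numpy.load(specfile)       # shape: (nChunks, 2, Lfch - Lfcl)
    meanSpec = spec.mean(axis=0)      # assumption: average over chunks for a quick look
    print specfile, meanSpec.shape
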
Code example #12
File: t02.py  Project: ilikeit813/Project-Backup
def main(args):
	nChunks = 1000 #the temporal shape of a file.
	LFFT = 4096*16 #Length of the FFT.
	nFramesAvg = 1*4*LFFT/4096 # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)
	saveftint = 1 # if =1 save the frequency channel and tInt info
	config = parseOptions(args)#Parse command line options
	#for offset_i in range(4306, 4309):# one offset = nChunks*nFramesAvg skipped
	for offset_i in range(0, 4):# one offset = nChunks*nFramesAvg skipped
		offset = nChunks*nFramesAvg*offset_i

	# Build the DRX file
		try:
			#drxFile = drsu.getFileByName(config['args'][0], config['args'][1])
			fh = open(config['args'][0], "rb")
			nFramesFile = os.path.getsize(config['args'][0]) / drx.FrameSize #drx.FrameSize = 4128
		except:
			print config['args'],' not found'
			sys.exit(1)
		try:
			junkFrame = drx.readFrame(fh)
			try:
				srate = junkFrame.getSampleRate()
				t0 = junkFrame.getTime()
				pass
			except ZeroDivisionError:
				print 'zero division error'
				break
		except errors.syncError:
			fh.seek(-drx.FrameSize+1, 1)
		fh.seek(-drx.FrameSize, 1)
		beam,tune,pol = junkFrame.parseID()
		beams = drx.getBeamCount(fh)
		tunepols = drx.getFramesPerObs(fh)
		tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
		beampols = tunepol
		config['offset'] = offset/srate/beampols*4096
		if offset != 0:
			fh.seek(offset*drx.FrameSize, 1)

		config['average'] = 1.0 * nFramesAvg / beampols * 4096 / srate
		maxFrames = nFramesAvg

	# Number of remaining chunks (and the correction to the number of
	# frames to read in).
#	nChunks = int(round(config['duration'] / config['average']))
		config['duration']=nChunks*config['average']
		if nChunks == 0:
			nChunks = 1
		nFrames = nFramesAvg*nChunks
	
	# Date & Central Frequency
		beginDate = ephem.Date(unix_to_utcjd(junkFrame.getTime()) - DJD_OFFSET)
		centralFreq1 = 0.0
		centralFreq2 = 0.0
		for i in xrange(4):
			junkFrame = drx.readFrame(fh)
			b,t,p = junkFrame.parseID()
			if p == 0 and t == 0:
				try:
					centralFreq1 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq1 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			elif p == 0 and t == 2:
				try:
					centralFreq2 = junkFrame.getCentralFreq()
				except AttributeError:
					from lsl.common.dp import fS
					centralFreq2 = fS * ((junkFrame.data.flags[0]>>32) & (2**32-1)) / 2**32
			else:
				pass
		fh.seek(-4*drx.FrameSize, 1)

		config['freq1'] = centralFreq1
		config['freq2'] = centralFreq2

	# File summary
		print "Filename: %s" % config['args']
		print "Date of First Frame: %s" % str(beginDate)
		print "Beams: %i" % beams
		print "Tune/Pols: %i %i %i %i" % tunepols
		print "Sample Rate: %i Hz" % srate
		print "Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (centralFreq1, centralFreq2)
		print "Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * 4096 / srate)
		print "---"
		print "Offset: %.3f s (%i frames)" % (config['offset'], offset)
		print "Integration: %.4f s (%i frames; %i frames per beam/tune/pol)" % (config['average'], nFramesAvg, nFramesAvg / beampols)
		print 'beampols', beampols
		print "Duration: %.4f s (%i frames; %i frames per beam/tune/pol)" % (config['average']*nChunks, nFrames, nFrames / beampols)
		#break

	# Sanity check
		if nFrames > (nFramesFile - offset):
			raise RuntimeError("Requested integration time + offset is greater than file length")

		# Master loop over all of the file chunks
		#masterSpectra = numpy.zeros((nChunks, 2, LFFT-1))
		masterSpectra = numpy.zeros((nChunks, 4, LFFT-1))
		masterTimes = numpy.zeros(nChunks)
		for i in xrange(nChunks):
			# Find out how many frames remain in the file.  If this number is larger
			# than the maximum of frames we can work with at a time (maxFrames),
			# only deal with that chunk
			framesRemaining = nFrames - i*maxFrames
			if framesRemaining > maxFrames:
				framesWork = maxFrames
			else:
				framesWork = framesRemaining
			if framesRemaining%(nFrames/10)==0:
				print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)

			count = {0:0, 1:0, 2:0, 3:0}
			data = numpy.zeros((4,framesWork*4096/beampols), dtype=numpy.csingle)
			#print 'data.shape', data.shape

			# If there are fewer frames than we need to fill an FFT, skip this chunk
			if data.shape[1] < LFFT:
				print 'data.shape[1]< LFFT, break'
				break
	
			# Inner loop that actually reads the frames into the data array
			#if framesRemaining%(nFrames/10)==0:
			#	print "Working on %.1f ms of data" % ((framesWork*4096/beampols/srate)*1000.0)

			for j in xrange(framesWork):
				# Read in the next frame and anticipate any problems that could occur
				try:
					cFrame = drx.readFrame(fh, Verbose=False)
				except errors.eofError:
					print "EOF Error"
					break
				except errors.syncError:
					print "Sync Error"
					continue
	
				beam,tune,pol = cFrame.parseID()

				if tune == 0:
					tune += 1
				aStand = 2*(tune-1) + pol
				if j == 0:
					cTime = cFrame.getTime()

				data[aStand, count[aStand]*4096:(count[aStand]+1)*4096] = cFrame.data.iq
				count[aStand] +=  1
	
			# Calculate the spectra for this block of data and then weight the results by 
			# the total number of frames read.  This is needed to keep the averages correct.

			#print 'data.shape',data.shape


			#continue
			#freq, tempSpec1 = fxc.SpecMaster(data[:2,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			tempSpec1 = numpy.abs(numpy.fft.fft2(data[:2,:]))[:,1:]/2.
			tempSpec1 = numpy.fft.fftshift(tempSpec1)**2./LFFT*2
			freq = numpy.fft.fftfreq(LFFT, d = 1.0/srate)
			freq = numpy.fft.fftshift(freq)
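			# fftfreq(LFFT, d=1.0/srate) gives the FFT bin frequencies in Hz and
			# fftshift reorders them to run monotonically from -srate/2 upward.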

			#freq, tempSpec2 = fxc.SpecMaster(data[2:,:], LFFT=LFFT, window=config['window'], verbose=config['verbose'], SampleRate=srate)
			tempSpec2 = numpy.abs(numpy.fft.fft2(data[2:,:]))[:,1:]/2.
			tempSpec2 = numpy.fft.fftshift(tempSpec2)**2./LFFT*2

			#print 'tempSpec.shape', tempSpec1.shape	
			# Save the results to the various master arrays
			masterTimes[i] = cTime

			masterSpectra[i,0,:] = tempSpec1[0,:]
			masterSpectra[i,1,:] = tempSpec1[1,:]
			masterSpectra[i,2,:] = tempSpec2[0,:]
			masterSpectra[i,3,:] = tempSpec2[1,:]
			
	
			# We don't really need the data array anymore, so delete it
			del(data)

		#continue

		#drxFile.close()
	
		# Now that we have read through all of the chunks, perform the final averaging by
		# dividing by all of the chunks
		outname = "%s_%i_fft_offset_%.9i_LFFT_%.6i_frames" % (config['args'][0][-16:], beam,offset,LFFT)

#		numpy.savez(outname, freq=freq, freq1=freq+config['freq1'], freq2=freq+config['freq2'], times=masterTimes, spec=masterSpectra, tInt=(maxFrames*4096/beampols/srate), srate=srate,  standMapper=[4*(beam-1) + i for i in xrange(masterSpectra.shape[1])])
		if saveftint == 1:
			numpy.save('freq1', freq+config['freq1'])
			numpy.save('freq2', freq+config['freq2'])
			numpy.save('tInt',  maxFrames*4096/beampols/srate)
			saveftint = 0

		#print 'fInt = ',(freq+config['freq1'])[1]-(freq+config['freq1'])[0]
		#print 'tInt = ',maxFrames*4096/beampols/srate
		#print 'tInt = ', 1.0*LFFT/srate

		masterSpectra[:,0,:] = masterSpectra[:,0:2,:].mean(1)
		masterSpectra[:,1,:] = masterSpectra[:,2:4,:].mean(1)

		#numpy.save(outname[-46:], masterSpectra[:,0:2,:])
		#print 'spectrogram shape', masterSpectra[:,0:2,:].shape
		numpy.save(outname, masterSpectra[:,0:2,:])
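
Alongside the per-offset spectrogram, this example writes the frequency and integration-time axes once ('freq1.npy', 'freq2.npy', 'tInt.npy'). A minimal sketch of loading them back for inspection; the spectrogram filename below is a placeholder, and treating the chunks as contiguous in time is an assumption.

import numpy

freq1 = numpy.load('freq1.npy')   # absolute frequencies for tuning 1 (Hz)
freq2 = numpy.load('freq2.npy')   # absolute frequencies for tuning 2 (Hz)
tInt = numpy.load('tInt.npy')     # integration time of one chunk (s)

spec = numpy.load('example_spectrogram.npy')         # placeholder filename
times = numpy.arange(spec.shape[0]) * float(tInt)    # assumes contiguous chunks
print spec.shape, freq1.shape, freq2.shape, times.shape
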
Code example #13
def main(args):
    log("Hello, world.")

    windownumber = 4
    nChunks = 3000  #the temporal shape of a file.

    #Low tuning frequency range
    Lfcl = 1000 * windownumber
    Lfch = 1350 * windownumber
    #High tuning frequency range
    Hfcl = 1825 * windownumber
    Hfch = 2175 * windownumber

    LFFT = 4096 * windownumber  # Length of the FFT. 4096 is the size of a frame read.
    nFramesAvg = 1 * 4 * windownumber  # the integration time under LFFT, 4 = beampols = 2X + 2Y (high and low tunes)

    filename = args[0]
    nFramesFile = os.path.getsize(
        filename) / drx.FrameSize  #drx.FrameSize = 4128
    log("fileSize %d" % os.path.getsize(filename))
    log("nFramesFile %d" % nFramesFile)

    fn = sorted(glob.glob('05*.npy'))
    j = numpy.zeros((len(fn)))
    for i in range(len(fn)):
        j[i] = fn[i][30:39]

    # x = the full grid of expected offsets
    x = numpy.arange(j[-1] / nChunks / nFramesAvg) * nChunks * nFramesAvg
    # k = the expected offsets that have not been processed yet
    k = numpy.setdiff1d(x, j)

    for m in xrange(len(k) / totalrank):
        offset = k[m * totalrank + rank]
        # Build the DRX file
        try:
            fh = open(filename, "rb")
        except:
            log("File not found: %s" % filename)
            sys.exit(1)
        try:
            junkFrame = drx.readFrame(fh)
            try:
                srate = junkFrame.getSampleRate()
                pass
            except ZeroDivisionError:
                log('zero division error')
                break
        except errors.syncError:
            log('assuming the srate is 19.6 MHz')
            srate = 19600000.0
            fh.seek(-drx.FrameSize + 1, 1)
        fh.seek(-drx.FrameSize, 1)
        beam, tune, pol = junkFrame.parseID()
        beams = drx.getBeamCount(fh)
        tunepols = drx.getFramesPerObs(fh)
        tunepol = tunepols[0] + tunepols[1] + tunepols[2] + tunepols[3]
        beampols = tunepol
        if offset != 0:
            fh.seek(offset * drx.FrameSize, 1)
        if nChunks == 0:
            nChunks = 1
        nFrames = nFramesAvg * nChunks
        centralFreq1 = 0.0
        centralFreq2 = 0.0
        for i in xrange(4):
            junkFrame = drx.readFrame(fh)
            b, t, p = junkFrame.parseID()
            if p == 0 and t == 0:
                try:
                    centralFreq1 = junkFrame.getCentralFreq()
                except AttributeError:
                    from lsl.common.dp import fS
                    centralFreq1 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            elif p == 0 and t == 2:
                try:
                    centralFreq2 = junkFrame.getCentralFreq()
                except AttributeError:
                    from lsl.common.dp import fS
                    centralFreq2 = fS * ((junkFrame.data.flags[0] >> 32) &
                                         (2**32 - 1)) / 2**32
            else:
                pass
        fh.seek(-4 * drx.FrameSize, 1)
        # Sanity check
        if nFrames > (nFramesFile - offset):
            raise RuntimeError(
                "Requested integration time + offset is greater than file length"
            )
        masterSpectra = numpy.zeros((nChunks, 2, Lfch - Lfcl))
        for i in xrange(nChunks):
            # Find out how many frames remain in the file.  If this number is larger
            # than the maximum of frames we can work with at a time (nFramesAvg),
            # only deal with that chunk
            framesRemaining = nFrames - i * nFramesAvg
            if framesRemaining > nFramesAvg:
                framesWork = nFramesAvg
            else:
                framesWork = framesRemaining
            #if framesRemaining%(nFrames/10)==0:
            #	print "Working on chunk %i, %i frames remaining" % (i, framesRemaining)
            count = {0: 0, 1: 0, 2: 0, 3: 0}
            data = numpy.zeros((4, framesWork * 4096 / beampols),
                               dtype=numpy.csingle)
            # If there are fewer frames than we need to fill an FFT, skip this chunk
            if data.shape[1] < LFFT:
                log('data.shape[1]< LFFT, break')
                break
            # Inner loop that actually reads the frames into the data array
            for j in xrange(framesWork):
                # Read in the next frame and anticipate any problems that could occur
                try:
                    cFrame = drx.readFrame(fh, Verbose=False)
                except errors.eofError:
                    log("EOF Error")
                    break
                except errors.syncError:
                    log("Sync Error")
                    continue
                beam, tune, pol = cFrame.parseID()
                if tune == 0:
                    tune += 1
                aStand = 2 * (tune - 1) + pol
                data[aStand, count[aStand] * 4096:(count[aStand] + 1) *
                     4096] = cFrame.data.iq
                count[aStand] += 1
            # Calculate the spectra for this block of data, in the unit of intensity
            masterSpectra[i, 0, :] = ((numpy.fft.fftshift(
                numpy.abs(numpy.fft.fft2(data[:2, :]))[:, 1:])[:, Lfcl:Lfch])**
                                      2.).mean(0) / LFFT / 2.
            masterSpectra[i, 1, :] = ((numpy.fft.fftshift(
                numpy.abs(numpy.fft.fft2(data[2:, :]))[:, 1:])[:, Hfcl:Hfch])**
                                      2.).mean(0) / LFFT / 2.

        # Save the results to the various master arrays
        outname = "%s_%i_fft_offset_%.9i_frames" % (filename, beam, offset)
        log("Writing %s" % outname)
        numpy.save(outname, masterSpectra)
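
This last example is a clean-up pass: it slices the nine-digit offset out of each already-saved '05*.npy' filename, builds the grid of offsets it expects to exist, and reprocesses only the ones that are missing (`totalrank` and `rank` are not defined in the snippet shown; they are presumably the MPI worker count and rank used to stripe the remaining work). A small standalone illustration of that bookkeeping, with invented example offsets:

import numpy

nChunks, nFramesAvg = 3000, 16      # values used in the script above
step = nChunks * nFramesAvg         # frames skipped per offset

# Offsets already covered by saved files (normally parsed from the filenames).
done = numpy.array([0, 1 * step, 3 * step])

# Grid of offsets expected up to the largest one seen, as in the script.
expected = numpy.arange(done[-1] / step) * step

# Offsets that still need to be processed.
missing = numpy.setdiff1d(expected, done)
print missing   # -> [96000], i.e. the one offset that was skipped
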