def get_thread_count(filehandle):
    """
    Determine how many threads are present by inspecting the first 1024
    records.  Returns the number of distinct threads found.
    """
    
    # Frame size is needed so we can hop over frames with bad sync words
    frame_size = get_frame_size(filehandle)
    
    with FilePositionSaver(filehandle):
        # Accumulate distinct thread IDs over 1024 successfully-read frames
        thread_ids = set()
        n_read = 0
        while n_read < 1024:
            try:
                frame = read_frame(filehandle)
            except SyncError:
                # Bad sync word - skip this frame and try the next one
                filehandle.seek(frame_size, 1)
                continue
            except EOFError:
                break
            thread_ids.add(frame.header.thread_id)
            n_read += 1
            
    # Return the number of threads found
    return len(thread_ids)
def get_frames_per_obs(filehandle):
    """
    Determine how many frames are present per time stamp by inspecting the
    first 1000 TBF records.  Returns the number of frames per observation.
    """
    
    with FilePositionSaver(filehandle):
        # Collect the distinct first-channel indices across up to 1000 frames;
        # each distinct first channel corresponds to one frame per time step
        first_chans = set()
        for _ in range(1000):
            try:
                frame = read_frame(filehandle)
                if not frame.is_tbf:
                    continue
            except EOFError:
                break
            except SyncError:
                continue
            first_chans.add(frame.header.first_chan)
            
    # Return the number of channels
    return len(first_chans)
def get_integration_time(filehandle):
    """
    Return the integration time of the frame at the current file location.
    """
    
    with FilePositionSaver(filehandle):
        # A single frame carries the integration time
        return read_frame(filehandle).integration_time
def get_frame_size(filehandle, nframes=None):
    """
    Return the frame size in bytes from a single observation.  The `nframes`
    keyword is accepted for API compatibility but is not used.
    """
    
    with FilePositionSaver(filehandle):
        # Read in one frame
        frame = read_frame(filehandle)
    # The header stores the length in 8-byte words
    return frame.header.frame_length * 8
def get_transform_size(filehandle):
    """
    Return the transform size of the file at the current file location.
    """
    
    with FilePositionSaver(filehandle):
        # A single frame carries the transform size
        return read_frame(filehandle).transform_size
def get_frames_per_obs(filehandle):
    """
    Determine how many frames are present per observation by examining the
    first 2,080 TBN frames.  Returns the number of frames per observation as
    a two-element tuple, one for each polarization.
    
    So many TBN frames are read in order to try to compensate for the inter-
    leaving of the packets from the various DP1 boards during the recording.
    
    .. note::
        Post-IOC it is probably simpler to adopt a value of the number of
        frames per observation of 520 rather than try to find it from the
        file.
    """
    
    with FilePositionSaver(filehandle):
        # For each polarization track both the distinct stand IDs seen and the
        # largest stand ID encountered - the latter helps when frames arrive
        # out of order and some stands are never actually read.
        stands = [[], []]
        highest = [0, 0]
        for _ in range(4 * 520):
            try:
                frame = read_frame(filehandle)
            except EOFError:
                break
            except SyncError:
                continue
                
            stand, pol = frame.header.id
            if stand not in stands[pol]:
                stands[pol].append(stand)
            # Largest ID is an alternate estimate of the stand count
            if stand > highest[pol]:
                highest[pol] = stand
                
    # For each polarization report the larger of the two estimates
    return (max(highest[0], len(stands[0])), max(highest[1], len(stands[1])))
def is_linear(filehandle):
    """
    Determine whether the file contains linear polarization products.
    """
    
    with FilePositionSaver(filehandle):
        # One frame is enough to decide
        return read_frame(filehandle).header.is_linear
def is_stokes(filehandle):
    """
    Determine whether the file contains Stokes parameters.
    """
    
    with FilePositionSaver(filehandle):
        # One frame is enough to decide
        return read_frame(filehandle).header.is_stokes
def get_data_products(filehandle):
    """
    Return the data products contained in the file, determined by looking
    at a single frame.
    """
    
    with FilePositionSaver(filehandle):
        # One frame is enough to decide
        return read_frame(filehandle).header.data_products
def get_ffts_per_integration(filehandle):
    """
    Return the number of FFT windows per integration at the current file
    location.
    
    .. versionadded:: 1.0.1
    """
    
    with FilePositionSaver(filehandle):
        # A single frame carries the FFT count
        return read_frame(filehandle).ffts_per_integration
def get_frame_size(filehandle):
    """
    Return the frame size in bytes at the current file location, measured as
    how far the file pointer advances when one frame is read.
    """
    
    with FilePositionSaver(filehandle):
        start = filehandle.tell()
        read_frame(filehandle)
        return filehandle.tell() - start
def get_data_bits(filehandle):
    """
    Return the number of data bits used in the file, determined from the
    first frame read.
    """
    
    with FilePositionSaver(filehandle):
        # A single frame carries the bit depth
        return read_frame(filehandle).data_bits
def get_sample_rate(filehandle, nframes=None, filter_code=False):
    """
    Determine the sampling rate/filter code from a single observation.
    
    The rate in Hz is returned by default; set the `filter_code` keyword to
    True to get the corresponding filter code instead.
    """
    
    with FilePositionSaver(filehandle):
        # Read in one frame
        frame = read_frame(filehandle)
        
    return frame.filter_code if filter_code else frame.sample_rate
def get_sample_rate(filehandle):
    """
    Return the sample rate in Hz, computed from how many frames there are
    per second and how many samples each frame holds.
    """
    
    # Get the number of frames per second
    nframes_sec = get_frames_per_second(filehandle)
    
    with FilePositionSaver(filehandle):
        # Read in a frame to get the samples-per-frame count
        frame = read_frame(filehandle)
        
    # samples/frame * frames/second = samples/second
    return float(frame.payload.data.shape[-1] * nframes_sec)
def get_sample_rate(filehandle, nframes=None, filter_code=False):
    """
    Determine the sampling rate/filter code from a single observation.
    
    The rate in Hz is returned by default; set the `filter_code` keyword to
    True to get the corresponding filter code instead.  This function is
    included to make it easier to write code for TBN analysis and modify it
    for DRX data.
    """
    
    with FilePositionSaver(filehandle):
        # Read in one frame
        frame = read_frame(filehandle)
        
    return frame.filter_code if filter_code else frame.sample_rate
def get_first_frame_count(filehandle):
    """
    Return the lowest frame count encountered in a TBF file.
    """
    
    # Find out how many frames there are per observation
    nframes = get_frames_per_obs(filehandle)
    
    with FilePositionSaver(filehandle):
        lowest = 2**64 - 1
        seen_chans = []
        # Keep reading until every distinct first channel has been seen once,
        # i.e. until one full time step's worth of frames has been examined
        while len(seen_chans) < nframes:
            frame = read_frame(filehandle)
            chan = frame.header.first_chan
            if chan not in seen_chans:
                seen_chans.append(chan)
            if frame.header.frame_count < lowest:
                lowest = frame.header.frame_count
                
    # Return the lowest frame number found
    return lowest
def get_channel_count(filehandle):
    """
    Find out the total number of channels that are present by examining the
    first several COR records.  Return the number of channels found.
    """
    
    with FilePositionSaver(filehandle):
        # Build up the list that stores the index of the first frequency
        # channel in each frame.
        channels = []
        for i in range(64):
            try:
                cFrame = read_frame(filehandle)
            except (EOFError, SyncError):
                # Stop early if we run out of readable frames.  Previously a
                # bare `except:` which also swallowed KeyboardInterrupt and
                # genuine programming errors.
                break
            chan = cFrame.header.first_chan
            if chan not in channels:
                channels.append(chan)
                
    # Each frame covers NCHAN_COR channels, so the total is the number of
    # distinct first channels times that width
    return len(channels) * NCHAN_COR
def has_guppi_header(filehandle):
    """
    Determine if a VDIF file has a GUPPI header or not.
    
    .. versionadded:: 2.0.0
    """
    
    with FilePositionSaver(filehandle):
        # Inspect the first 16 kB of the file
        block = filehandle.read(16384)
        try:
            block = block.decode(encoding='ascii', errors='ignore')
        except AttributeError:
            # Already a str (file opened in text mode)
            pass
            
    # A GUPPI header contains FITS-style keyword cards; finding any of these
    # markers is taken as evidence of a header
    markers = ('TELESCOP', 'END', 'CONTINUE')
    return any(block.find(marker) != -1 for marker in markers)
def get_beam_count(filehandle):
    """
    Determine how many beams are present by examining the first 32 DRX
    records.  Returns the number of beams found.
    """
    
    with FilePositionSaver(filehandle):
        # The low three bits of each DRX ID give the beam number; collect the
        # distinct beam numbers over 32 frames
        beams = set()
        for _ in range(32):
            frame = read_frame(filehandle)
            beams.add(frame.header.drx_id & 7)
            
    # Return the number of beams found
    return len(beams)
def get_frames_per_obs(filehandle):
    """
    Determine how many frames are present per beam by examining the first
    32 DRX records.  Returns the number of frames per observation as a
    four-element tuple, one per beam.
    """
    
    with FilePositionSaver(filehandle):
        # Distinct DRX IDs seen, bucketed by beam number (the low three bits
        # of the ID, with beams numbered from 1)
        ids_by_beam = [[], [], [], []]
        for _ in range(32):
            frame = read_frame(filehandle)
            drx_id = frame.header.drx_id
            bucket = ids_by_beam[(drx_id & 7) - 1]
            if drx_id not in bucket:
                bucket.append(drx_id)
                
    # Get the length of each beam list and return them as a tuple
    return tuple(len(bucket) for bucket in ids_by_beam)
def get_frames_per_obs(filehandle):
    """
    Determine how many frames are present per observation by examining the
    first frames for what would be 260 stands.  This is done by reading two
    frames and then skipping the next 30,000.
    
    .. note::
        Post-IOC it is probably simpler to adopt a value of the number of
        frames per observation of 260 rather than try to find it from the
        file.
    """
    
    with FilePositionSaver(filehandle):
        stands = []
        for _ in range(260):
            mark = filehandle.tell()
            try:
                pair = (read_frame(filehandle), read_frame(filehandle))
            except EOFError:
                break
            except SyncError:
                continue
                
            for frame in pair:
                if frame.id not in stands:
                    stands.append(frame.id)
                    
            # Junk 30,000 frames since that is how many frames there are per
            # stand
            filehandle.seek(mark + 30000 * FRAME_SIZE)
            
    # Get the length of the stand list and return
    return len(stands)
def get_first_channel(filehandle, frequency=False):
    """
    Return the lowest frequency channel in a TBF file.  When the `frequency`
    keyword is True the returned value is in Hz rather than a channel index.
    """
    
    # Find out how many frames there are per observation
    nframes = get_frames_per_obs(filehandle)
    
    with FilePositionSaver(filehandle):
        found = []
        # Read until one full time step's worth of distinct channels is seen
        while len(found) < nframes:
            frame = read_frame(filehandle)
            if frequency:
                value = frame.channel_freqs[0]
            else:
                value = frame.header.first_chan
            if value not in found:
                found.append(value)
                
    # Return the lowest frequency channel
    return min(found)
def get_frames_per_second(filehandle): """ Find out the number of frames per second in a file by watching how the headers change. Returns the number of frames in a second. """ # Get the frame size frame_size = get_frame_size(filehandle) # Get the number of threads nThreads = get_thread_count(filehandle) with FilePositionSaver(filehandle): # Get the current second counts for all threads ref = {} i = 0 while i < nThreads: try: cFrame = read_frame(filehandle) except SyncError: filehandle.seek(frame_size, 1) continue except EOFError: break cID = cFrame.header.thread_id cSC = cFrame.header.seconds_from_epoch ref[cID] = cSC i += 1 # Read frames until we see a change in the second counter cur = {} fnd = [] while True: ## Get a frame try: cFrame = read_frame(filehandle) except SyncError: filehandle.seek(frame_size, 1) continue except EOFError: break ## Pull out the relevant metadata cID = cFrame.header.thread_id cSC = cFrame.header.seconds_from_epoch cFC = cFrame.header.frame_in_second ## Figure out what to do with it if cSC == ref[cID]: ### Same second as the reference, save the frame number cur[cID] = cFC else: ### Different second than the reference, we've found something ref[cID] = cSC if cID not in fnd: fnd.append(cID) if len(fnd) == nThreads: break # Pull out the mode mode = {} for key, value in cur.items(): try: mode[value] += 1 except KeyError: mode[value] = 1 best, bestValue = 0, 0 for key, value in mode.items(): if value > bestValue: best = key bestValue = value # Correct for a zero-based counter and return best += 1 return best
def get_sample_rate(filehandle, nframe=None, filter_code=False):
    """
    Determine the sampling rate/filter code from consecutive sets of
    observations.
    
    By default, the rate in Hz is returned.  However, the corresponding
    filter code can be returned instead by setting the `filter_code`
    keyword to True.
    """
    
    if nframe is None:
        nframe = 520
    # Read several times the per-observation count to get at least two
    # frames from the same stand/polarization despite interleaving
    nframe = 4 * nframe
    
    with FilePositionSaver(filehandle):
        # Bucket the frames by stand/polarization so we can find two
        # consecutive frames from the same source
        frames = {}
        for i in range(nframe):
            try:
                cFrame = read_frame(filehandle)
            except EOFError:
                break
            except SyncError:
                continue
                
            stand, pol = cFrame.id
            key = 2 * stand + pol
            try:
                frames[key].append(cFrame)
            except KeyError:
                # First frame for this stand/pol.  (Was a bare `except:`,
                # which also hid genuine errors - narrowed to KeyError.)
                frames[key] = [cFrame, ]
                
    # Any key with complete data will work for this, so pick the first key
    # with two valid frames
    keyCount = 0
    frame1 = None
    frame2 = None
    frameKeys = list(frames.keys())
    # NOTE: this previously looped `while frame1 is None and frame2 is None`,
    # which exited as soon as *either* frame was found; a key holding a
    # single frame then left frame2 as None and crashed below.  Keep
    # searching until BOTH frames come from the same key.
    while frame1 is None or frame2 is None:
        validKey = frameKeys[keyCount]
        try:
            frame1 = frames[validKey][0]
        except IndexError:
            frame1 = None
        try:
            frame2 = frames[validKey][1]
        except IndexError:
            frame2 = None
        keyCount = keyCount + 1
        
    # Now that we have two valid frames that follow one another in time, load
    # in their time tags and calculate the sampling rate.  Since the time
    # tags are based off f_S @ 196 MSPS, and each frame contains 512 samples,
    # the sampling rate is:
    #  f_S / <difference in time tags per 512 samples>
    time1 = frame1.payload.timetag
    time2 = frame2.payload.timetag
    rate = dp_common.fS / (abs(time2 - time1) / 512)
    
    if not filter_code:
        return rate
    else:
        # Invert the filter-code -> rate mapping to look up the code
        sampleCodes = {value: key for key, value in FILTER_CODES.items()}
        return sampleCodes[rate]