def test_vdif_threads(self):
    """Test finding out how many threads are in a VDIF file."""
    
    fh = open(vdifFile, 'rb')
    nt = vdif.get_thread_count(fh)
    self.assertEqual(nt, 1)
    fh.close()
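# A minimal standalone sketch of the same check outside the test harness.
# The import path follows LSL's reader module layout, and 'example.vdif' is
# a placeholder file name, not a file shipped with the tests.
from lsl.reader import vdif

with open('example.vdif', 'rb') as fh:
    # A single-thread VDIF file should report exactly one thread
    print(vdif.get_thread_count(fh))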
def main(args):
    # Parse the command line
    filename = args.filename
    
    fh = open(filename, 'rb')
    header = vdif.read_guppi_header(fh)
    vdif.FRAME_SIZE = vdif.get_frame_size(fh)
    nFramesFile = os.path.getsize(filename) // vdif.FRAME_SIZE
    
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    srate = junkFrame.sample_rate
    vdif.DATA_LENGTH = junkFrame.payload.data.size
    beam, pol = junkFrame.id
    tunepols = vdif.get_thread_count(fh)
    beampols = tunepols
    
    if args.skip != 0:
        print("Skipping forward %.3f s" % args.skip)
        print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))
        
        offset = int(args.skip * srate / vdif.DATA_LENGTH)
        fh.seek(beampols * vdif.FRAME_SIZE * offset, 1)
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        fh.seek(-vdif.FRAME_SIZE, 1)
        
        print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))
    tStart = junkFrame.time
    
    # Get the frequencies
    cFreq = 0.0
    for j in xrange(4):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        s, p = junkFrame.id
        if p == 0:
            cFreq = junkFrame.central_freq
            
    # Set integration time
    tInt = args.avg_time
    nFrames = int(round(tInt * srate / vdif.DATA_LENGTH))
    tInt = nFrames * vdif.DATA_LENGTH / srate
    nFrames = int(round(tInt * srate / vdif.DATA_LENGTH))
    
    # Read in some data
    tFile = nFramesFile / beampols * vdif.DATA_LENGTH / srate
    
    # Date
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    fh.seek(-vdif.FRAME_SIZE, 1)
    beginDate = junkFrame.time.datetime
    
    # Report
    print("Filename: %s" % os.path.basename(filename))
    print("  Date of First Frame: %s" % beginDate)
    print("  Station: %i" % beam)
    print("  Sample Rate: %i Hz" % srate)
    print("  Tuning 1: %.1f Hz" % cFreq)
    print("  Bit Depth: %i" % junkFrame.header.bits_per_sample)
    print("  Integration Time: %.3f s" % tInt)
    print("  Integrations in File: %i" % int(tFile / tInt))
    print(" ")
    
    # Go!
    data = numpy.zeros((beampols, vdif.DATA_LENGTH * nFrames), dtype=numpy.complex64)
    count = [0 for i in xrange(data.shape[0])]
    for i in xrange(beampols * nFrames):
        try:
            cFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        except errors.SyncError:
            print("Error @ %i" % i)
            fh.seek(vdif.FRAME_SIZE, 1)
            continue
            
        std, pol = cFrame.id
        sid = pol
        
        data[sid, count[sid] * vdif.DATA_LENGTH:(count[sid] + 1) * vdif.DATA_LENGTH] = cFrame.payload.data
        count[sid] += 1
        
    # Plot
    nBins = 2**junkFrame.header.bits_per_sample
    weights = numpy.ones(data.shape[1], dtype=numpy.float32) * 100.0 / data.shape[1]
    
    fig = plt.figure()
    ## X
    ax = fig.add_subplot(2, 1, 1)
    c, b, o = ax.hist(data[0, :].real, bins=nBins, weights=weights)
    ax.set_xlim((b[0], b[-1]))
    ax.set_xlabel('Value - X')
    ax.set_ylabel('Fraction [%]')
    ## Y
    ax = fig.add_subplot(2, 1, 2)
    c, b, o = ax.hist(data[1, :].real, bins=nBins, weights=weights)
    ax.set_xlim((b[0], b[-1]))
    ax.set_xlabel('Value - Y')
    ax.set_ylabel('Fraction [%]')
    plt.show()
    
    # Done
    fh.close()
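# A hedged entry-point sketch for running main() from the command line.  The
# positional argument and flag destinations mirror the attributes main() reads
# (filename, skip, avg_time); the exact option spellings and defaults here are
# assumptions, not the script's canonical interface.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Histogram the raw samples in a VDIF file')
    parser.add_argument('filename', type=str, help='VDIF file to examine')
    parser.add_argument('-s', '--skip', type=float, default=0.0, help='seconds to skip into the file')
    parser.add_argument('-t', '--avg-time', dest='avg_time', type=float, default=1.0, help='integration time in seconds')
    main(parser.parse_args())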
def main(args):
    # Parse the command line
    filenames = args.filename
    
    # Check if the first argument on the command line is a directory.  If so,
    # use what is in that directory
    if os.path.isdir(filenames[0]):
        filenames = [os.path.join(filenames[0], filename) for filename in os.listdir(filenames[0])]
        filenames.sort()
        
    # Convert the filenames to absolute paths
    filenames = [os.path.abspath(filename) for filename in filenames]
    
    # Open the database connection to NRAO to find the antenna locations
    try:
        db = database('params')
    except Exception as e:
        sys.stderr.write("WARNING: %s\n" % str(e))
        sys.stderr.flush()
        db = None
        
    # Pass 1 - Get the LWA metadata so we know where we are pointed
    context = {'observer': 'Unknown', 'project': 'Unknown', 'session': None, 'vlaref': None}
    setup = None
    sources = []
    metadata = {}
    lwasite = {}
    for filename in filenames:
        # Figure out what to do with the file
        ext = os.path.splitext(filename)[1]
        if ext == '.tgz':
            ## LWA Metadata
            try:
                ## Extract the SDF
                if len(sources) == 0:
                    try:
                        sdf = metabundle.get_sdf(filename)
                    except Exception as e:
                        sdf = metabundleADP.get_sdf(filename)
                        
                    context['observer'] = sdf.observer.name
                    context['project'] = sdf.id
                    context['session'] = sdf.sessions[0].id
                    
                    comments = sdf.project_office.sessions[0]
                    mtch = CORR_CHANNELS.search(comments)
                    if mtch is not None:
                        corr_channels = int(mtch.group('channels'), 10)
                    else:
                        corr_channels = None
                    mtch = CORR_INTTIME.search(comments)
                    if mtch is not None:
                        corr_inttime = float(mtch.group('inttime'))
                    else:
                        corr_inttime = None
                    mtch = CORR_BASIS.search(comments)
                    if mtch is not None:
                        corr_basis = mtch.group('basis')
                    else:
                        sys.stderr.write("WARNING: No output correlation polarization basis defined, assuming 'linear'.\n")
                        corr_basis = 'linear'
                    if corr_channels is not None and corr_inttime is not None:
                        setup = {'channels': corr_channels, 'inttime': corr_inttime, 'basis': corr_basis}
                    else:
                        sys.stderr.write("WARNING: No or incomplete correlation configuration defined, setting to be defined at correlation time.\n")
                        
                    for o, obs in enumerate(sdf.sessions[0].observations):
                        if type(obs).__name__ == 'Solar':
                            name = 'Sun'
                            intent = 'target'
                            ra = None
                            dec = None
                        elif type(obs).__name__ == 'Jovian':
                            name = 'Jupiter'
                            intent = 'target'
                            ra = None
                            dec = None
                        else:
                            name = obs.target
                            intent = obs.name
                            ra = ephem.hours(str(obs.ra))
                            dec = ephem.degrees(str(obs.dec))
                        tStart = mjdmpm_to_datetime(obs.mjd, obs.mpm)
                        tStop = mjdmpm_to_datetime(obs.mjd, obs.mpm + obs.dur)
                        sources.append({'name': name, 'intent': intent, 'ra2000': ra, 'dec2000': dec,
                                        'start': tStart, 'stop': tStop})
                        
                        ### Alternate phase centers
                        comments = sdf.project_office.observations[0][o]
                        
                        alts = {}
                        for mtch in ALT_TARGET.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_name = mtch.group('target')
                            try:
                                alts[alt_id]['name'] = alt_name
                            except KeyError:
                                alts[alt_id] = {'name': alt_name, 'intent': 'dummy', 'ra': None, 'dec': None}
                        for mtch in ALT_INTENT.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_intent = mtch.group('intent')
                            try:
                                alts[alt_id]['intent'] = alt_intent
                            except KeyError:
                                alts[alt_id] = {'name': None, 'intent': alt_intent, 'ra': None, 'dec': None}
                        for mtch in ALT_RA.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_ra = ephem.hours(mtch.group('ra'))
                            try:
                                alts[alt_id]['ra'] = alt_ra
                            except KeyError:
                                alts[alt_id] = {'name': None, 'intent': 'dummy', 'ra': alt_ra, 'dec': None}
                        for mtch in ALT_DEC.finditer(comments):
                            alt_id = int(mtch.group('id'), 10)
                            alt_dec = ephem.degrees(mtch.group('dec'))
                            try:
                                alts[alt_id]['dec'] = alt_dec
                            except KeyError:
                                alts[alt_id] = {'name': None, 'intent': 'dummy', 'ra': None, 'dec': alt_dec}
                        for alt_id in sorted(alts.keys()):
                            alt_name = alts[alt_id]['name']
                            alt_intent = alts[alt_id]['intent']
                            alt_ra = alts[alt_id]['ra']
                            alt_dec = alts[alt_id]['dec']
                            if alt_name is None or alt_ra is None or alt_dec is None:
                                sys.stderr.write("WARNING: Incomplete alternate phase center %i, skipping.\n" % alt_id)
                            else:
                                sources.append({'name': alt_name, 'intent': alt_intent,
                                                'ra2000': alt_ra, 'dec2000': alt_dec,
                                                'start': tStart, 'stop': tStop})
                                
                ## Extract the file information so that we can pair things together
                fileInfo = metabundle.get_session_metadata(filename)
                for obsID in fileInfo.keys():
                    metadata[fileInfo[obsID]['tag']] = filename
                    
                ## Figure out LWA1 vs LWA-SV
                try:
                    cs = metabundle.get_command_script(filename)
                    for c in cs:
                        if c['subsystem_id'] == 'DP':
                            site = 'LWA1'
                            break
                        elif c['subsystem_id'] == 'ADP':
                            site = 'LWA-SV'
                            break
                except (RuntimeError, ValueError):
                    site = 'LWA-SV'
                for obsID in fileInfo.keys():
                    lwasite[fileInfo[obsID]['tag']] = site
                    
            except Exception as e:
                sys.stderr.write("ERROR reading metadata file: %s\n" % str(e))
                sys.stderr.flush()
                
    # Setup what we need to write out a configuration file
    corrConfig = {'context': context, 'setup': setup,
                  'source': {'name': '', 'ra2000': '', 'dec2000': ''},
                  'inputs': []}
    
    metadata = {}
    for filename in filenames:
        #print("%s:" % os.path.basename(filename))
        
        # Skip over empty files
        if os.path.getsize(filename) == 0:
            continue
            
        # Open the file
        fh = open(filename, 'rb')
        
        # Figure out what to do with the file
        ext = os.path.splitext(filename)[1]
        if ext == '':
            ## DRX
            try:
                ## Get the site
                try:
                    sitename = lwasite[os.path.basename(filename)]
                except KeyError:
                    sitename = 'LWA1'
                    
                ## Get the location so that we can set site-specific parameters
                if sitename == 'LWA1':
                    xyz = LWA1_ECEF
                    off = args.lwa1_offset
                elif sitename == 'LWA-SV':
                    xyz = LWASV_ECEF
                    off = args.lwasv_offset
                else:
                    raise RuntimeError("Unknown LWA site '%s'" % sitename)
                    
                ## Move into the LWA1 coordinate system
                ### ECEF to LWA1
                rho = xyz - LWA1_ECEF
                sez = numpy.dot(LWA1_ROT, rho)
                enz = sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                enz[1] *= -1
                
                ## Read in the first few frames to get the start time
                frames = [drx.read_frame(fh) for i in xrange(1024)]
                
                streams = []
                freq1, freq2 = 0.0, 0.0
                for frame in frames:
                    beam, tune, pol = frame.id
                    if tune == 1:
                        freq1 = frame.central_freq
                    else:
                        freq2 = frame.central_freq
                    if (beam, tune, pol) not in streams:
                        streams.append((beam, tune, pol))
                        
                tStart = frames[0].time.datetime
                tStartAlt = (frames[-1].time - 1023 // len(streams) * 4096 / frames[-1].sample_rate).datetime
                tStartDiff = tStart - tStartAlt
                if abs(tStartDiff) > timedelta(microseconds=10000):
                    sys.stderr.write("WARNING: Stale data found at the start of '%s', ignoring\n" % os.path.basename(filename))
                    sys.stderr.flush()
                    tStart = tStartAlt
                ### ^ Adjustment to the start time to deal with occasional problems
                ###   with stale data in the DR buffers at LWA-SV
                
                ## Read in the last few frames to find the end time
                fh.seek(os.path.getsize(filename) - 1024 * drx.FRAME_SIZE)
                backed = 0
                while backed < 2 * drx.FRAME_SIZE:
                    try:
                        drx.read_frame(fh)
                        fh.seek(-drx.FRAME_SIZE, 1)
                        break
                    except errors.SyncError:
                        backed += 1
                        fh.seek(-drx.FRAME_SIZE - 1, 1)
                for i in xrange(32):
                    try:
                        frame = drx.read_frame(fh)
                        beam, tune, _ = frame.id
                        if tune == 1:
                            freq1 = frame.central_freq
                        else:
                            freq2 = frame.central_freq
                    except errors.SyncError:
                        continue
                tStop = frame.time.datetime
                
                ## Save
                corrConfig['inputs'].append({'file': filename, 'type': 'DRX',
                                             'antenna': sitename, 'pols': 'X, Y',
                                             'location': (enz[0], enz[1], enz[2]),
                                             'clockoffset': (off, off), 'fileoffset': 0,
                                             'beam': beam, 'tstart': tStart, 'tstop': tStop,
                                             'freq': (freq1, freq2)})
                                             
            except Exception as e:
                sys.stderr.write("ERROR reading DRX file: %s\n" % str(e))
                sys.stderr.flush()
                
        elif ext == '.vdif':
            ## VDIF
            try:
                ## Read in the GUPPI header
                header = vdif.read_guppi_header(fh)
                
                ## Read in the first frame
                vdif.FRAME_SIZE = vdif.get_frame_size(fh)
                frame = vdif.read_frame(fh)
                antID = frame.id[0] - 12300
                tStart = frame.time.datetime
                nThread = vdif.get_thread_count(fh)
                
                ## Read in the last frame
                nJump = int(os.path.getsize(filename) / vdif.FRAME_SIZE)
                nJump -= 30
                fh.seek(nJump * vdif.FRAME_SIZE, 1)
                mark = fh.tell()
                while True:
                    try:
                        frame = vdif.read_frame(fh)
                        tStop = frame.time.datetime
                    except Exception as e:
                        break
                        
                ## Find the antenna location
                pad, edate = db.get_pad('EA%02i' % antID, tStart)
                x, y, z = db.get_xyz(pad, tStart)
                #print("  Pad: %s" % pad)
                #print("  VLA relative XYZ: %.3f, %.3f, %.3f" % (x, y, z))
                
                ## Move into the LWA1 coordinate system
                ### relative to ECEF
                xyz = numpy.array([x, y, z])
                xyz += VLA_ECEF
                ### ECEF to LWA1
                rho = xyz - LWA1_ECEF
                sez = numpy.dot(LWA1_ROT, rho)
                enz = sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                enz[1] *= -1
                
                ## Set an apparent position if WiDAR is already applying a delay model
                apparent_enz = (None, None, None)
                if args.no_vla_delay_model:
                    apparent_xyz = VLA_ECEF
                    apparent_rho = apparent_xyz - LWA1_ECEF
                    apparent_sez = numpy.dot(LWA1_ROT, apparent_rho)
                    apparent_enz = apparent_sez[[1, 0, 2]]  # pylint: disable=invalid-sequence-index
                    apparent_enz[1] *= -1
                    
                ## VLA time offset
                off = args.vla_offset
                
                ## Save
                corrConfig['context']['observer'] = header['OBSERVER']
                try:
                    corrConfig['context']['project'] = header['BASENAME'].split('_')[0]
                    corrConfig['context']['session'] = header['BASENAME'].split('_')[1].replace('sb', '')
                except IndexError:
                    corrConfig['context']['project'] = header['BASENAME'].split('.')[0]
                    corrConfig['context']['session'] = header['BASENAME'].split('.')[1].replace('sb', '')
                corrConfig['context']['vlaref'] = re.sub(r'\.[0-9]+\.[0-9]+\.[AB][CD]-.*', '', header['BASENAME'])
                corrConfig['source']['name'] = header['SRC_NAME']
                corrConfig['source']['intent'] = 'target'
                corrConfig['source']['ra2000'] = header['RA_STR']
                corrConfig['source']['dec2000'] = header['DEC_STR']
                corrConfig['inputs'].append({'file': filename, 'type': 'VDIF',
                                             'antenna': 'EA%02i' % antID, 'pols': 'Y, X',
                                             'location': (enz[0], enz[1], enz[2]),
                                             'apparent_location': (apparent_enz[0], apparent_enz[1], apparent_enz[2]),
                                             'clockoffset': (off, off), 'fileoffset': 0,
                                             'pad': pad, 'tstart': tStart, 'tstop': tStop,
                                             'freq': header['OBSFREQ']})
                                             
            except Exception as e:
                sys.stderr.write("ERROR reading VDIF file: %s\n" % str(e))
                sys.stderr.flush()
                
        elif ext == '.tgz':
            ## LWA Metadata
            try:
                ## Extract the file information so that we can pair things together
                fileInfo = metabundle.get_session_metadata(filename)
                for obsID in fileInfo.keys():
                    metadata[fileInfo[obsID]['tag']] = filename
            except Exception as e:
                sys.stderr.write("ERROR reading metadata file: %s\n" % str(e))
                sys.stderr.flush()
                
        # Done
        fh.close()
        
    # Close out the connection to NRAO
    try:
        db.close()
    except AttributeError:
        pass
        
    # Choose a VDIF reference file, if there is one, and mark whether or
    # not DRX files were found
    vdifRefFile = None
    isDRX = False
    for cinp in corrConfig['inputs']:
        if cinp['type'] == 'VDIF':
            if vdifRefFile is None:
                vdifRefFile = cinp
        elif cinp['type'] == 'DRX':
            isDRX = True
            
    # Set a state variable so that we can generate a warning about missing
    # DRX files
    drxFound = False
    
    # Purge DRX files that don't make sense
    toPurge = []
    drxFound = False
    lwasvFound = False
    for cinp in corrConfig['inputs']:
        ### Sort out multiple DRX files - this only works if we have only one LWA station
        if cinp['type'] == 'DRX':
            if vdifRefFile is not None:
                l0, l1 = cinp['tstart'], cinp['tstop']
                v0, v1 = vdifRefFile['tstart'], vdifRefFile['tstop']
                ve = (v1 - v0).total_seconds()
                overlapWithVDIF = (v0 >= l0 and v0 < l1) or (l0 >= v0 and l0 < v1)
                lvo = (min([v1, l1]) - max([v0, l0])).total_seconds()
                if not overlapWithVDIF or lvo < 0.25 * ve:
                    toPurge.append(cinp)
                drxFound = True
            if cinp['antenna'] == 'LWA-SV':
                lwasvFound = True
    for cinp in toPurge:
        del corrConfig['inputs'][corrConfig['inputs'].index(cinp)]
        
    # Sort the inputs based on the antenna name - this puts LWA1 first,
    # LWA-SV second, and the VLA at the end in 'EA' antenna order, i.e.,
    # EA01, EA02, etc.
    corrConfig['inputs'].sort(key=lambda x: 0 if x['antenna'] == 'LWA1' else (1 if x['antenna'] == 'LWA-SV' else int(x['antenna'][2:], 10)))
    
    # VDIF/DRX warning check/report
    if vdifRefFile is not None and isDRX and not drxFound:
        sys.stderr.write("WARNING: DRX files provided but none overlapped with VDIF data\n")
        
    # Duplicate antenna check
    antCounts = {}
    for cinp in corrConfig['inputs']:
        try:
            antCounts[cinp['antenna']] += 1
        except KeyError:
            antCounts[cinp['antenna']] = 1
    for ant in antCounts.keys():
        if antCounts[ant] != 1:
            sys.stderr.write("WARNING: Antenna '%s' is defined %i times\n" % (ant, antCounts[ant]))
            
    # Update the file offsets to get things lined up better
    tMax = max([cinp['tstart'] for cinp in corrConfig['inputs']])
    for cinp in corrConfig['inputs']:
        diff = tMax - cinp['tstart']
        offset = diff.days * 86400 + diff.seconds + diff.microseconds / 1e6
        cinp['fileoffset'] = max([0, offset])
        
    # Reconcile the source lists for when we have eLWA data.  This is needed so
    # that we use the source information contained in the VDIF files rather than
    # the stub information contained in the SDFs
    if len(sources) <= 1:
        if corrConfig['source']['name'] != '':
            ## Update the source information with what comes from the VLA
            try:
                sources[0] = corrConfig['source']
            except IndexError:
                sources.append(corrConfig['source'])
    # Update the dwell time using the minimum on-source time for all inputs if
    # there is only one source, i.e., for full eLWA runs
    if len(sources) == 1:
        sources[0]['start'] = max([cinp['tstart'] for cinp in corrConfig['inputs']])
        sources[0]['stop'] = min([cinp['tstop'] for cinp in corrConfig['inputs']])
        
    # Render the configuration
    startRef = sources[0]['start']
    s = 0
    for source in sources:
        startOffset = source['start'] - startRef
        startOffset = startOffset.total_seconds()
        
        dur = source['stop'] - source['start']
        dur = dur.total_seconds()
        
        ## Skip over dummy scans and scans that start after the files end
        if source['intent'] in (None, 'dummy'):
            continue
        if source['start'] > max([cinp['tstop'] for cinp in corrConfig['inputs']]):
            print("Skipping scan of %s which starts at %s, %.3f s after the data end" % (source['name'], source['start'], (source['start'] - max([cinp['tstop'] for cinp in corrConfig['inputs']])).total_seconds()))
            continue
            
        ## Small correction for the first scan to compensate for stale data at LWA-SV
        if lwasvFound and s == 0:
            startOffset += 10.0
            dur -= 10.0
            
        ## Skip over scans that are too short
        if dur < args.minimum_scan_length:
            continue
            
        ## Setup
        if args.output is None:
            fh = sys.stdout
        else:
            outname = args.output
            if len(sources) > 1:
                outname += str(s + 1)
            fh = open(outname, 'w')
            
        try:
            repo = git.Repo(os.path.dirname(os.path.abspath(__file__)))
            try:
                branch = repo.active_branch.name
                hexsha = repo.active_branch.commit.hexsha
            except TypeError:
                branch = '<detached>'
                hexsha = repo.head.commit.hexsha
            shortsha = hexsha[-7:]
            dirty = ' (dirty)' if repo.is_dirty() else ''
        except git.exc.GitError:
            branch = 'unknown'
            hexsha = 'unknown'
            shortsha = 'unknown'
            dirty = ''
            
        ## Preamble
        fh.write("# Created\n")
        fh.write("#  on %s\n" % datetime.now())
        fh.write("#  using %s, revision %s.%s%s\n" % (os.path.basename(__file__), branch, shortsha, dirty))
        fh.write("\n")
        ## Observation context
        fh.write("Context\n")
        fh.write("  Observer  %s\n" % corrConfig['context']['observer'])
        fh.write("  Project   %s\n" % corrConfig['context']['project'])
        if corrConfig['context']['session'] is not None:
            fh.write("  Session   %s\n" % corrConfig['context']['session'])
        if corrConfig['context']['vlaref'] is not None:
            fh.write("  VLARef    %s\n" % corrConfig['context']['vlaref'])
        fh.write("EndContext\n")
        fh.write("\n")
        ## Configuration, if present
        if corrConfig['setup'] is not None:
            fh.write("Configuration\n")
            fh.write("  Channels  %i\n" % corrConfig['setup']['channels'])
            fh.write("  IntTime   %.3f\n" % corrConfig['setup']['inttime'])
            fh.write("  PolBasis  %s\n" % corrConfig['setup']['basis'])
            fh.write("EndConfiguration\n")
            fh.write("\n")
        ## Source
        fh.write("Source\n")
        fh.write("# Observation start is %s\n" % source['start'])
        fh.write("# Duration is %s\n" % (source['stop'] - source['start'],))
        fh.write("  Name      %s\n" % source['name'])
        fh.write("  Intent    %s\n" % source['intent'].lower())
        if source['name'] not in ('Sun', 'Jupiter'):
            fh.write("  RA2000    %s\n" % source['ra2000'])
            fh.write("  Dec2000   %s\n" % source['dec2000'])
        fh.write("  Duration  %.3f\n" % dur)
        fh.write("SourceDone\n")
        fh.write("\n")
        ## Input files
        for cinp in corrConfig['inputs']:
            fh.write("Input\n")
            fh.write("# Start time is %s\n" % cinp['tstart'])
            fh.write("# Stop time is %s\n" % cinp['tstop'])
            try:
                fh.write("# Beam is %i\n" % cinp['beam'])
            except KeyError:
                pass
            try:
                fh.write("# VLA pad is %s\n" % cinp['pad'])
            except KeyError:
                pass
            try:
                fh.write("# Frequency tuning 1 is %.3f Hz\n" % cinp['freq'][0])
                fh.write("# Frequency tuning 2 is %.3f Hz\n" % cinp['freq'][1])
            except TypeError:
                fh.write("# Frequency tuning is %.3f Hz\n" % cinp['freq'])
            fh.write("  File         %s\n" % cinp['file'])
            try:
                metaname = metadata[os.path.basename(cinp['file'])]
                fh.write("  MetaData     %s\n" % metaname)
            except KeyError:
                if cinp['type'] == 'DRX':
                    sys.stderr.write("WARNING: No metadata found for '%s', source %i\n" % (os.path.basename(cinp['file']), s + 1))
                    sys.stderr.flush()
            fh.write("  Type         %s\n" % cinp['type'])
            fh.write("  Antenna      %s\n" % cinp['antenna'])
            fh.write("  Pols         %s\n" % cinp['pols'])
            fh.write("  Location     %.6f, %.6f, %.6f\n" % cinp['location'])
            try:
                if cinp['apparent_location'][0] is not None:
                    fh.write("  ApparentLocation %.6f, %.6f, %.6f\n" % cinp['apparent_location'])
            except KeyError:
                pass
            fh.write("  ClockOffset  %s, %s\n" % cinp['clockoffset'])
            fh.write("  FileOffset   %.3f\n" % (startOffset + cinp['fileoffset'],))
            fh.write("InputDone\n")
            fh.write("\n")
        if fh != sys.stdout:
            fh.close()
            
        # Increment the source/file counter
        s += 1
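# For orientation, a configuration rendered by main() has roughly the shape
# below, reconstructed from the fh.write() calls above.  All values are
# placeholders, not real data:
#
#   Context
#     Observer  Unknown
#     Project   Unknown
#   EndContext
#
#   Source
#     Name      <source name>
#     Intent    target
#     RA2000    <hh:mm:ss.ss>
#     Dec2000   <dd:mm:ss.s>
#     Duration  <seconds>
#   SourceDone
#
#   Input
#     File         <path to VDIF or DRX file>
#     Type         VDIF
#     Antenna      EA01
#     Pols         Y, X
#     Location     <east>, <north>, <up>
#     ClockOffset  <s>, <s>
#     FileOffset   <s>
#   InputDone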
def main(args):
    # Parse the command line
    filename = args.filename
    
    fh = open(filename, 'rb')
    header = vdif.read_guppi_header(fh)
    vdif.FRAME_SIZE = vdif.get_frame_size(fh)
    nFramesFile = os.path.getsize(filename) // vdif.FRAME_SIZE
    
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    srate = junkFrame.sample_rate
    vdif.DATA_LENGTH = junkFrame.payload.data.size
    beam, pol = junkFrame.id
    tunepols = vdif.get_thread_count(fh)
    beampols = tunepols
    
    # Get the frequencies
    cFreq = 0.0
    for j in xrange(4):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        s, p = junkFrame.id
        if p == 0:
            cFreq = junkFrame.central_freq
            
    # Date
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    fh.seek(-vdif.FRAME_SIZE, 1)
    beginDate = junkFrame.time.datetime
    
    # Report
    print("Filename: %s" % os.path.basename(filename))
    print("  Date of First Frame: %s" % beginDate)
    print("  Station: %i" % beam)
    print("  Sample Rate: %i Hz" % srate)
    print("  Bit Depth: %i" % junkFrame.header.bits_per_sample)
    print("  Tuning 1: %.1f Hz" % cFreq)
    print(" ")
    
    # Determine the clip level
    if args.trim_level is None:
        if junkFrame.header.bits_per_sample == 1:
            args.trim_level = abs(1.0)**2
        elif junkFrame.header.bits_per_sample == 2:
            args.trim_level = abs(3.3359)**2
        elif junkFrame.header.bits_per_sample == 4:
            args.trim_level = abs(7 / 2.95)**2
        elif junkFrame.header.bits_per_sample == 8:
            args.trim_level = abs(255 / 256.)**2
        else:
            args.trim_level = 1.0
        print("Setting clip level to %.3f" % args.trim_level)
        print(" ")
        
    # Convert chunk length to total frame count
    chunkLength = int(args.length * srate / vdif.DATA_LENGTH * tunepols)
    chunkLength = int(1.0 * chunkLength / tunepols) * tunepols
    
    # Convert chunk skip to total frame count
    chunkSkip = int(args.skip * srate / vdif.DATA_LENGTH * tunepols)
    chunkSkip = int(1.0 * chunkSkip / tunepols) * tunepols
    
    # Output arrays
    clipFraction = []
    meanPower = []
    meanRMS = []
    
    # Go!
    i = 1
    done = False
    print("    |    Clipping     |     Power     |      RMS      |")
    print("    |   1X      1Y    |   1X     1Y   |   1X     1Y   |")
    print("----+-----------------+---------------+---------------+")
    
    while True:
        count = {0: 0, 1: 0}
        data = numpy.empty((2, chunkLength * vdif.DATA_LENGTH // tunepols), dtype=numpy.float32)
        for j in xrange(chunkLength):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0, verbose=False)
            except errors.EOFError:
                done = True
                break
            except errors.SyncError:
                continue
                
            beam, pol = cFrame.id
            aStand = pol
            
            try:
                data[aStand, count[aStand] * vdif.DATA_LENGTH:(count[aStand] + 1) * vdif.DATA_LENGTH] = cFrame.payload.data
                
                # Update the counters so that we can average properly later on
                count[aStand] += 1
            except ValueError:
                pass
                
        if done:
            break
        else:
            rms = numpy.sqrt((data**2).mean(axis=1))
            data = numpy.abs(data)**2
            
            clipFraction.append(numpy.zeros(2))
            meanPower.append(data.mean(axis=1))
            meanRMS.append(rms)
            for j in xrange(2):
                bad = numpy.nonzero(data[j, :] > args.trim_level)[0]
                clipFraction[-1][j] = 1.0 * len(bad) / data.shape[1]
                
            clip = clipFraction[-1]
            power = meanPower[-1]
            print("%3i | %6.2f%% %6.2f%% | %6.3f %6.3f | %6.3f %6.3f |" % (i, clip[0] * 100.0, clip[1] * 100.0, power[0], power[1], rms[0], rms[1]))
            
            i += 1
            fh.seek(vdif.FRAME_SIZE * chunkSkip, 1)
            
    clipFraction = numpy.array(clipFraction)
    meanPower = numpy.array(meanPower)
    meanRMS = numpy.array(meanRMS)
    
    clip = clipFraction.mean(axis=0)
    power = meanPower.mean(axis=0)
    rms = meanRMS.mean(axis=0)
    
    print("----+-----------------+---------------+---------------+")
    print("%3s | %6.2f%% %6.2f%% | %6.3f %6.3f | %6.3f %6.3f |" % ('M', clip[0] * 100.0, clip[1] * 100.0, power[0], power[1], rms[0], rms[1]))
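# The two-step rounding above forces each analysis chunk to span a whole
# number of frame groups (one frame per thread).  A sketch of the same
# arithmetic with illustrative numbers (a 128 MHz sample rate, 1024 samples
# per frame, and two VDIF threads are assumptions, not values from any
# particular file):
srate = 128e6
DATA_LENGTH = 1024
tunepols = 2
length = 0.01  # seconds per analysis chunk

chunkLength = int(length * srate / DATA_LENGTH * tunepols)   # 2500 total frames
chunkLength = int(1.0 * chunkLength / tunepols) * tunepols   # still 2500, already a multiple of 2
print(chunkLength)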
def main(args):
    # Parse the command line
    filename = args.filename
    
    # Length of the FFT
    LFFT = args.fft_length
    
    fh = open(filename, 'rb')
    header = vdif.read_guppi_header(fh)
    vdif.FRAME_SIZE = vdif.get_frame_size(fh)
    nFramesFile = os.path.getsize(filename) // vdif.FRAME_SIZE
    
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    srate = junkFrame.sample_rate
    vdif.DATA_LENGTH = junkFrame.payload.data.size
    beam, pol = junkFrame.id
    tunepols = vdif.get_thread_count(fh)
    beampols = tunepols
    
    if args.skip != 0:
        print("Skipping forward %.3f s" % args.skip)
        print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))
        
        offset = int(args.skip * srate / vdif.DATA_LENGTH)
        fh.seek(beampols * vdif.FRAME_SIZE * offset, 1)
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        fh.seek(-vdif.FRAME_SIZE, 1)
        
        print("-> %.6f (%s)" % (junkFrame.time, junkFrame.time.datetime))
    tStart = junkFrame.time
    
    # Get the frequencies
    cFreq = 0.0
    for j in xrange(4):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        s, p = junkFrame.id
        if p == 0:
            cFreq = junkFrame.central_freq
            
    # Set integration time
    tInt = args.avg_time
    nFrames = int(round(tInt * srate / vdif.DATA_LENGTH))
    tInt = nFrames * vdif.DATA_LENGTH / srate
    nFrames = int(round(tInt * srate / vdif.DATA_LENGTH))
    
    # Read in some data
    tFile = nFramesFile / beampols * vdif.DATA_LENGTH / srate
    
    # Date
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    fh.seek(-vdif.FRAME_SIZE, 1)
    beginDate = junkFrame.time.datetime
    
    # Report
    print("Filename: %s" % os.path.basename(filename))
    print("  Date of First Frame: %s" % beginDate)
    print("  Station: %i" % beam)
    print("  Sample Rate: %i Hz" % srate)
    print("  Tuning 1: %.1f Hz" % cFreq)
    print("  Bit Depth: %i" % junkFrame.header.bits_per_sample)
    print("  Integration Time: %.3f s" % tInt)
    print("  Integrations in File: %i" % int(tFile / tInt))
    print(" ")
    
    # Go!
    data = numpy.zeros((beampols, vdif.DATA_LENGTH * nFrames), dtype=numpy.complex64)
    count = [0 for i in xrange(data.shape[0])]
    for i in xrange(beampols * nFrames):
        try:
            cFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        except errors.SyncError:
            print("Error @ %i" % i)
            fh.seek(vdif.FRAME_SIZE, 1)
            continue
            
        std, pol = cFrame.id
        sid = pol
        
        data[sid, count[sid] * vdif.DATA_LENGTH:(count[sid] + 1) * vdif.DATA_LENGTH] = cFrame.payload.data
        count[sid] += 1
        
    # Transform and trim off the negative frequencies
    freq, psd = fxc.SpecMaster(data, LFFT=2 * LFFT, sample_rate=srate, central_freq=header['OBSFREQ'] - srate / 4)
    freq, psd = freq[LFFT:], psd[:, LFFT:]
    
    # Plot
    fig = plt.figure()
    ax = fig.gca()
    for i in xrange(psd.shape[0]):
        ax.plot(freq / 1e6, numpy.log10(psd[i, :]) * 10, label='%i' % i)
    ax.set_title('%i' % beam)
    ax.set_xlabel('Frequency [MHz]')
    ax.set_ylabel('PSD [arb. dB]')
    ax.legend(loc=0)
    plt.show()
    
    # Done
    fh.close()
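# The compute-2*LFFT-then-slice pattern above keeps only the upper half of
# the spectrum, since an FFT of real-sampled data is conjugate-symmetric and
# only LFFT of the 2*LFFT channels are independent.  A minimal numpy
# illustration of the same idea (the sizes and random data are arbitrary):
import numpy

LFFT = 4
x = numpy.random.randn(2 * LFFT)  # real-valued samples, as from a VDIF payload

# After an fftshift the channels run from -LFFT to LFFT-1, so slicing [LFFT:]
# keeps the non-negative-frequency half, exactly as done with SpecMaster above.
spec = numpy.fft.fftshift(numpy.fft.fft(x))
upper = spec[LFFT:]
print(upper.shape)  # (LFFT,)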
def processDataBatchLinear(fh, header, antennas, tStart, duration, sample_rate, args, dataSets, obsID=1, clip1=0, clip2=0):
    """
    Process a chunk of data in a raw vdif file into linear polarization
    products and add the contents to an HDF5 file.
    """
    
    # Length of the FFT
    LFFT = args.fft_length
    
    # Find the start of the observation
    junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
    srate = junkFrame.sample_rate
    t0 = junkFrame.time
    fh.seek(-vdif.FRAME_SIZE, 1)
    
    print('Looking for #%i at %s with sample rate %.1f Hz...' % (obsID, tStart, sample_rate))
    while t0.datetime < tStart or srate != sample_rate:
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        srate = junkFrame.sample_rate
        t0 = junkFrame.time
    print('... Found #%i at %s with sample rate %.1f Hz' % (obsID, junkFrame.time.datetime, srate))
    
    tDiff = t0.datetime - tStart
    try:
        duration = duration - tDiff.total_seconds()
    except AttributeError:
        duration = duration - (tDiff.seconds + tDiff.microseconds / 1e6)
        
    beam, pol = junkFrame.id
    beams = vdif.get_thread_count(fh)
    tunepols = vdif.get_thread_count(fh)
    tunepol = tunepols
    beampols = tunepol
    
    # Make sure that the file chunk size is an integer multiple of the FFT
    # length so that no data gets dropped.  This needs to take into account
    # the number of beampols in the data, the FFT length, and the number of
    # samples per frame.
    maxFrames = int(1.0 * 28000 / beampols * vdif.DATA_LENGTH / float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    
    # Number of frames per second
    nFramesSecond = int(srate) // vdif.DATA_LENGTH
    
    # Number of frames to integrate over
    nFramesAvg = int(round(args.average * srate / vdif.DATA_LENGTH * beampols))
    nFramesAvg = int(1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    args.average = 1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / srate
    maxFrames = nFramesAvg
    
    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    nChunks = int(round(duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks
    
    # Line up the time tags for the various tunings/polarizations
    timetags = []
    for i in xrange(16):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        timetags.append(junkFrame.header.seconds_from_epoch * nFramesSecond + junkFrame.header.frame_in_second)
    fh.seek(-16 * vdif.FRAME_SIZE, 1)
    
    i = 0
    if beampols == 4:
        while (timetags[i + 0] != timetags[i + 1]) or (timetags[i + 0] != timetags[i + 2]) or (timetags[i + 0] != timetags[i + 3]):
            i += 1
            fh.seek(vdif.FRAME_SIZE, 1)
    elif beampols == 2:
        while timetags[i + 0] != timetags[i + 1]:
            i += 1
            fh.seek(vdif.FRAME_SIZE, 1)
            
    # Date & Central Frequency
    beginDate = junkFrame.time.datetime
    central_freq1 = 0.0
    central_freq2 = 0.0
    for i in xrange(4):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        b, p = junkFrame.id
        if p == 0:
            central_freq1 = junkFrame.central_freq
        elif p == 1:
            central_freq2 = junkFrame.central_freq
        else:
            pass
    fh.seek(-4 * vdif.FRAME_SIZE, 1)
    
    freq = numpy.fft.fftshift(numpy.fft.fftfreq(LFFT, d=2 / srate))
    if float(fxc.__version__) < 0.8:
        freq = freq[1:]
    dataSets['obs%i-freq1' % obsID][:] = freq + central_freq1
    dataSets['obs%i-freq2' % obsID][:] = freq + central_freq2
    
    obs = dataSets['obs%i' % obsID]
    obs.attrs['tInt'] = args.average
    obs.attrs['tInt_Unit'] = 's'
    obs.attrs['LFFT'] = LFFT
    obs.attrs['nchan'] = LFFT - 1 if float(fxc.__version__) < 0.8 else LFFT
    obs.attrs['RBW'] = freq[1] - freq[0]
    obs.attrs['RBW_Units'] = 'Hz'
    
    # Create the progress bar so that we can keep up with the conversion.
    pbar = progress.ProgressBarPlus(max=nChunks)
    
    data_products = ['XX', 'YY']
    done = False
    for i in xrange(nChunks):
        # Find out how many frames remain in the file.  If this number is larger
        # than the maximum of frames we can work with at a time (maxFrames),
        # only deal with that chunk
        framesRemaining = nFrames - i * maxFrames
        if framesRemaining > maxFrames:
            framesWork = maxFrames
        else:
            framesWork = framesRemaining
            
        count = {0: 0, 1: 0, 2: 0, 3: 0}
        data = numpy.zeros((4, framesWork * vdif.DATA_LENGTH // beampols), dtype=numpy.csingle)
        # If there are fewer frames than we need to fill an FFT, skip this chunk
        if data.shape[1] < LFFT:
            break
            
        # Inner loop that actually reads the frames into the data array
        for j in xrange(framesWork):
            # Read in the next frame and anticipate any problems that could occur
            try:
                cFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0, verbose=False)
            except errors.EOFError:
                done = True
                break
            except errors.SyncError:
                continue
                
            beam, pol = cFrame.id
            aStand = pol
            if j == 0:
                cTime = cFrame.time
                
            try:
                data[aStand, count[aStand] * vdif.DATA_LENGTH:(count[aStand] + 1) * vdif.DATA_LENGTH] = cFrame.payload.data
                count[aStand] += 1
            except ValueError:
                raise RuntimeError("Invalid Shape")
                
        # Save out some easy stuff
        dataSets['obs%i-time' % obsID][i] = float(cTime)
        
        if not args.without_sats:
            sats = ((data.real**2 + data.imag**2) >= 49).sum(axis=1)
            dataSets['obs%i-Saturation1' % obsID][i, :] = sats[0:2]
            dataSets['obs%i-Saturation2' % obsID][i, :] = sats[2:4]
        else:
            dataSets['obs%i-Saturation1' % obsID][i, :] = -1
            dataSets['obs%i-Saturation2' % obsID][i, :] = -1
            
        # Calculate the spectra for this block of data and then weight the results by
        # the total number of frames read.  This is needed to keep the averages correct.
        if clip1 == clip2:
            freq, tempSpec1 = fxc.SpecMaster(data, LFFT=2 * LFFT, window=args.window, verbose=args.verbose, sample_rate=srate, clip_level=clip1)
            freq, tempSpec1 = freq[LFFT:], tempSpec1[:, LFFT:]
            
            l = 0
            for t in (1, 2):
                for p in data_products:
                    dataSets['obs%i-%s%i' % (obsID, p, t)][i, :] = tempSpec1[l, :]
                    l += 1
                    
        else:
            freq, tempSpec1 = fxc.SpecMaster(data[:2, :], LFFT=2 * LFFT, window=args.window, verbose=args.verbose, sample_rate=srate, clip_level=clip1)
            freq, tempSpec2 = fxc.SpecMaster(data[2:, :], LFFT=2 * LFFT, window=args.window, verbose=args.verbose, sample_rate=srate, clip_level=clip2)
            freq, tempSpec1, tempSpec2 = freq[LFFT:], tempSpec1[:, LFFT:], tempSpec2[:, LFFT:]
            
            for l, p in enumerate(data_products):
                dataSets['obs%i-%s%i' % (obsID, p, 1)][i, :] = tempSpec1[l, :]
                dataSets['obs%i-%s%i' % (obsID, p, 2)][i, :] = tempSpec2[l, :]
                
        # We don't really need the data array anymore, so delete it
        del data
        
        # Are we done yet?
        if done:
            break
            
        ## Update the progress bar and remaining time estimate
        pbar.inc()
        sys.stdout.write('%s\r' % pbar.show())
        sys.stdout.flush()
        
    pbar.amount = pbar.max
    sys.stdout.write('%s\n' % pbar.show())
    sys.stdout.flush()
    
    return True
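# The time tag used for thread alignment above folds the epoch-second and
# frame-in-second counters into a single frame index, so frames captured at
# the same instant on different threads compare equal.  A toy example with
# assumed numbers (25000 frames per second is illustrative only):
nFramesSecond = 25000

seconds_from_epoch = 123456
frame_in_second = 789
timetag = seconds_from_epoch * nFramesSecond + frame_in_second
print(timetag)  # 3086400789 -> identical for simultaneous threads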
def main(args):
    # Length of the FFT
    LFFT = args.fft_length
    if args.bartlett:
        window = numpy.bartlett
    elif args.blackman:
        window = numpy.blackman
    elif args.hanning:
        window = numpy.hanning
    else:
        window = fxc.null_window
    args.window = window
    
    # Open the file and find good data (not spectrometer data)
    filename = args.filename
    fh = open(filename, "rb")
    header = vdif.read_guppi_header(fh)
    vdif.FRAME_SIZE = vdif.get_frame_size(fh)
    nFramesFile = os.path.getsize(filename) // vdif.FRAME_SIZE
    
    while True:
        try:
            junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
            try:
                srate = junkFrame.sample_rate
                t0 = junkFrame.time
                vdif.DATA_LENGTH = junkFrame.payload.data.size
                break
            except ZeroDivisionError:
                pass
        except errors.SyncError:
            fh.seek(-vdif.FRAME_SIZE + 1, 1)
            
    fh.seek(-vdif.FRAME_SIZE, 1)
    
    beam, pol = junkFrame.id
    beams = 1
    tunepols = vdif.get_thread_count(fh)
    tunepol = tunepols
    beampols = tunepol
    
    # Offset in frames for beampols beam/tuning/pol. sets
    offset = int(args.skip * srate / vdif.DATA_LENGTH * beampols)
    offset = int(1.0 * offset / beampols) * beampols
    fh.seek(offset * vdif.FRAME_SIZE, 1)
    
    # Iterate on the offsets until we reach the right point in the file.  This
    # is needed to deal with files that start with only one tuning and/or a
    # different sample rate.
    while True:
        ## Figure out where in the file we are and what the current tuning/sample
        ## rate is
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        srate = junkFrame.sample_rate
        t1 = junkFrame.time
        tunepols = (vdif.get_thread_count(fh),)
        tunepol = tunepols[0]
        beampols = tunepol
        fh.seek(-vdif.FRAME_SIZE, 1)
        
        ## See how far off the current frame is from the target
        tDiff = t1 - (t0 + args.skip)
        
        ## Half that to come up with a new seek parameter
        tCorr = -tDiff / 2.0
        cOffset = int(tCorr * srate / vdif.DATA_LENGTH * beampols)
        cOffset = int(1.0 * cOffset / beampols) * beampols
        offset += cOffset
        
        ## If the offset is zero, we are done.  Otherwise, apply the offset
        ## and check the location in the file again.
        if cOffset == 0:
            break
        fh.seek(cOffset * vdif.FRAME_SIZE, 1)
        
    # Update the offset actually used
    args.skip = t1 - t0
    offset = int(round(args.skip * srate / vdif.DATA_LENGTH * beampols))
    offset = int(1.0 * offset / beampols) * beampols
    
    # Make sure that the file chunk size is an integer multiple of the FFT
    # length so that no data gets dropped.  This needs to take into account
    # the number of beampols in the data, the FFT length, and the number of
    # samples per frame.
    maxFrames = int(1.0 * 28000 / beampols * vdif.DATA_LENGTH / float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    
    # Number of frames to integrate over
    nFramesAvg = int(args.average * srate / vdif.DATA_LENGTH * beampols)
    nFramesAvg = int(1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / float(2 * LFFT)) * 2 * LFFT // vdif.DATA_LENGTH * beampols
    args.average = 1.0 * nFramesAvg / beampols * vdif.DATA_LENGTH / srate
    maxFrames = nFramesAvg
    
    # Number of remaining chunks (and the correction to the number of
    # frames to read in).
    if args.duration == 0:
        args.duration = 1.0 * nFramesFile / beampols * vdif.DATA_LENGTH / srate
        args.duration -= args.skip
    else:
        args.duration = int(round(args.duration * srate * beampols / vdif.DATA_LENGTH) / beampols * vdif.DATA_LENGTH / srate)
    nChunks = int(round(args.duration / args.average))
    if nChunks == 0:
        nChunks = 1
    nFrames = nFramesAvg * nChunks
    
    # Date & Central Frequency
    t1 = junkFrame.time
    beginDate = junkFrame.time.datetime
    central_freq1 = 0.0
    central_freq2 = 0.0
    for i in xrange(4):
        junkFrame = vdif.read_frame(fh, central_freq=header['OBSFREQ'], sample_rate=header['OBSBW'] * 2.0)
        b, p = junkFrame.id
        if p == 0:
            central_freq1 = junkFrame.central_freq
        elif p == 1:
            central_freq2 = junkFrame.central_freq
        else:
            pass
    fh.seek(-4 * vdif.FRAME_SIZE, 1)
    
    # File summary
    print("Filename: %s" % filename)
    print("Date of First Frame: %s" % str(beginDate))
    print("Beams: %i" % beams)
    print("Tune/Pols: %i" % tunepols)
    print("Sample Rate: %i Hz" % srate)
    print("Bit Depth: %i" % junkFrame.header.bits_per_sample)
    print("Tuning Frequency: %.3f Hz (1); %.3f Hz (2)" % (central_freq1, central_freq2))
    print("Frames: %i (%.3f s)" % (nFramesFile, 1.0 * nFramesFile / beampols * vdif.DATA_LENGTH / srate))
    print("---")
    print("Offset: %.3f s (%i frames)" % (args.skip, offset))
    print("Integration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (args.average, nFramesAvg, nFramesAvg // beampols))
    print("Duration: %.3f s (%i frames; %i frames per beam/tune/pol)" % (args.average * nChunks, nFrames, nFrames // beampols))
    print("Chunks: %i" % nChunks)
    print(" ")
    
    # Get the clip levels
    clip1 = args.clip_level
    clip2 = args.clip_level
    
    # Make the pseudo-antennas for Stokes calculation
    antennas = []
    for i in xrange(4):
        if i // 2 == 0:
            newAnt = stations.Antenna(1)
        else:
            newAnt = stations.Antenna(2)
            
        if i % 2 == 0:
            newAnt.pol = 0
        else:
            newAnt.pol = 1
            
        antennas.append(newAnt)
        
    # Setup the output file
    outname = os.path.split(filename)[1]
    outname = os.path.splitext(outname)[0]
    outname = '%s-waterfall.hdf5' % outname
    
    if os.path.exists(outname):
        if not args.force:
            yn = raw_input("WARNING: '%s' exists, overwrite? [Y/n] " % outname)
        else:
            yn = 'y'
            
        if yn not in ('n', 'N'):
            os.unlink(outname)
        else:
            raise RuntimeError("Output file '%s' already exists" % outname)
            
    f = hdfData.createNewFile(outname)
    
    # Look at the metadata and come up with a list of observations.  If
    # there are no metadata, create a single "observation" that covers the
    # whole file.
    obsList = {}
    obsList[1] = (datetime.utcfromtimestamp(t1), datetime(2222, 12, 31, 23, 59, 59), args.duration, srate)
    
    hdfData.fillMinimum(f, 1, beam, srate)
    
    if not args.stokes:
        data_products = ['XX', 'YY']
    else:
        data_products = ['I', 'Q', 'U', 'V']
        
    for o in sorted(obsList.keys()):
        for t in (1, 2):
            hdfData.createDataSets(f, o, t,
                                   numpy.arange(LFFT - 1 if float(fxc.__version__) < 0.8 else LFFT, dtype=numpy.float32),
                                   int(round(obsList[o][2] / args.average)), data_products)
            
    f.attrs['FileGenerator'] = 'hdfWaterfall.py'
    f.attrs['InputData'] = os.path.basename(filename)
    
    # Create the various HDF group holders
    ds = {}
    for o in sorted(obsList.keys()):
        obs = hdfData.getObservationSet(f, o)
        
        ds['obs%i' % o] = obs
        ds['obs%i-time' % o] = obs.create_dataset('time', (int(round(obsList[o][2] / args.average)),), 'f8')
        
        for t in (1, 2):
            ds['obs%i-freq%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'freq')
            for p in data_products:
                ds["obs%i-%s%i" % (o, p, t)] = hdfData.get_data_set(f, o, t, p)
            ds['obs%i-Saturation%i' % (o, t)] = hdfData.get_data_set(f, o, t, 'Saturation')
            
    # Load in the correct analysis function
    if not args.stokes:
        processDataBatch = processDataBatchLinear
    else:
        processDataBatch = processDataBatchStokes
        
    # Go!
    for o in sorted(obsList.keys()):
        try:
            processDataBatch(fh, header, antennas, obsList[o][0], obsList[o][2], obsList[o][3], args, ds,
                             obsID=o, clip1=clip1, clip2=clip2)
        except RuntimeError as e:
            print("Observation #%i: %s, abandoning this observation" % (o, str(e)))
            
    # Save the output to a HDF5 file
    f.close()
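# A quick way to inspect the resulting waterfall file with h5py.  The group
# and dataset names below follow the usual LSL waterfall layout
# ('Observation1/Tuning1/...'); treat them as assumptions and verify against
# an actual file (e.g. with h5ls) before relying on them.
import h5py

with h5py.File('example-waterfall.hdf5', 'r') as f:
    tuning = f['/Observation1/Tuning1']
    freq = tuning['freq'][:]        # channel frequencies in Hz
    spec = tuning['XX'][:, :]       # integrations x channels
    times = f['/Observation1/time'][:]
    print(freq.shape, spec.shape, times.shape)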