Example #1
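Opens a .lofasm data file chosen from a Tkinter file dialog, steps a LoFASMFileCrawler through 63 integrations of the selected auto-correlation, and stacks each 2048-bin spectrum into a filter bank array.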
    def _open(self):
        results.file_name = askopenfilename(filetypes=[
            ("allfiles", "*"), ("binary files", "*.dat"),
            ("FITS files", "*.fits"), ("LoFASM Data Files", "*.lofasm")
        ])  # open the data file (currently .lofasm)
        tk.Label(master=self,
                 text=os.path.basename(results.file_name)[:20] + '...').grid(
                     row=0, column=1)

        self.crawler_mid = pdat.LoFASMFileCrawler(results.file_name)
        self.crawler_front = pdat.LoFASMFileCrawler(results.file_name)
        self.crawler_mid.open()
        self.crawler_front.open()

        # read 63 integrations of the selected auto-correlation into the filter bank
        for i in range(63):
            data = 10 * np.log10(
                self.crawler_front.autos[results.current_correlation])
            self.filter_bank_data = np.hstack(
                (self.filter_bank_data, data.reshape((2048, 1))))
            self.crawler_front.forward()
        print(np.shape(self.filter_bank_data))

        self.create_figure()

        self.openedfile = True
Example #2
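Reads the MJD start time of a file from two header fields returned by the crawler's getFileHeader().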
    def _getMjdStartTimeFromFileHeader(self, f):
        lfobj = pdat.LoFASMFileCrawler(f)
        lfobj.open()
        hdr = lfobj.getFileHeader()
        # hdr[8] and hdr[9] together encode the start time; msec is a module-level scale factor
        mjd_timestamp = float(hdr[8][1])
        mjd_timestamp += float(hdr[9][1]) * msec
        del lfobj
        return mjd_timestamp
Example #3
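Converts an old .lofasm file to CSV format, writing one output file per polarization and skipping files the crawler flags as corrupt.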
def convert_lofasm_file(lofasm_file, pols=pols):
    """
    convert old .lofasm file to csv format.

    :param lofasm_file: str
        path to .lofasm file
    :param pols: list, optional
        list of polarizations from lofasm_file to process. default is all pols.
    """
    import lofasm.parse_data as pdat

    crawler = pdat.LoFASMFileCrawler(lofasm_file)
    crawler.open()

    if not crawler.corrupt:

        for pol in pols:
            print "Writing ", pol
            polstart = time()

            csvname = get_csv_filename(
                lofasm_file.split('.')[0] + '_' + pol + '.lofasm')
            if not os.path.isdir(os.path.dirname(csvname)):
                try:
                    os.makedirs(os.path.dirname(csvname))
                except OSError:
                    print("unable to create directory: {}".format(
                        os.path.dirname(csvname)))
                    print("please create the directory manually and try again")
                    sys.stdout.flush()

            with open(csvname, 'wb') as polfile:
                writePol(pol, polfile, crawler)
            polend = time()
            print "Wrote ", pol, " in ", polend - polstart
            sys.stdout.flush()
    else:
        print "ignoring corrupt file: {}".format(lofasm_file)
Example #4
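Builds date and time strings from the file name, selects a 15-80 MHz band and its bin numbers, then opens a crawler and sizes a buffer from the number of integrations in the file.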
    file_day = file_time_info[6:8]
    file_date_string = file_month_name + ' ' + file_day + ', ' + file_year  # month, day, and year of the observation as a string
    file_hour = file_time_info[9:11]
    file_minute = file_time_info[11:13]
    file_second = file_time_info[13:15]
    file_time_string = file_hour + ':' + file_minute + ':' + file_second  # hour:min:sec of the beginning of the observation

    # set the frequency band (MHz) and find the corresponding bin numbers
    lower_freq = 15
    higher_freq = 80
    lower_bin_number = int(lower_freq * (2048.0 / 200.0))
    higher_bin_number = int(higher_freq * (2048.0 / 200.0))
    freqs = np.linspace(0, 200, 2048)  # 2048 bins across 0-200 MHz

    #open crawler
    crawler = pdat.LoFASMFileCrawler(f)
    crawler.open()
    polarizations = [
        'AA', 'BB', 'CC', 'DD', 'AB', 'AC', 'AD', 'BC', 'BD', 'CD', 'BE', 'BN'
    ]

    length = (crawler.getNumberOfIntegrationsInFile() - 1)

    # initialize memory
    bufferShape = (2048, length + 1)
    logging.debug("buffersize: ({},{})".format(bufferShape[0], bufferShape[1]))
Example #5
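Command-line extraction script: parses arguments, opens a crawler (optionally scanning the file first), and in verbose mode prints the file header and starting integration number.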
                        '--verbose',
                        dest='v',
                        action='store_true',
                        help='print header and progress information')
    parser.add_argument(
        '-s',
        '--scanfirst',
        dest='scanfirst',
        action='store_true',
        help='scan .lofasm file prior to extraction. (not recommended)')

    args = parser.parse_args()

    basename = os.path.basename(args.lofasm_file).split('.', 1)[0]

    crawler = pdat.LoFASMFileCrawler(args.lofasm_file,
                                     scan_file=args.scanfirst)
    crawler.open()

    hdr = crawler.getFileHeader()

    if args.v:
        pdat.print_hdr(hdr)
        print('starting integration number:', crawler.getAccReference())
        input('Press enter to continue.')

    TARGET = 'NULL' if hdr[12][1] == '' else hdr[12][1]

    NUM_INTEGRATIONS = crawler.getNumberOfIntegrationsInFile()

    #open output files and write file headers
    if args.v:
Example #6
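Loops over a list of files, opening each with a crawler, selecting a polarization, and median-filtering one spectrum per file into a preallocated complex array.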
    data = np.zeros((Nbins, MAXSIZE), dtype=complex)
    timestamps = []

    print "shape of initialized array: {}".format(np.shape(data))

    #loop over files and extract necessary samples
    i=0
    enterDataset = time()

    for f in flist:
        try:
            enterLoop = time()
            print "{}/{} processing {}".format(flist.index(f), len(flist), f),
            sys.stdout.flush()

            crawler = pdat.LoFASMFileCrawler(os.path.join(os.path.dirname(args.filelist), f))        
            crawler.open()
            crawler.setPol(args.polarization.upper())
            n = crawler.getNumberOfIntegrationsInFile() // cadence
            timestamps.append(crawler.time.datetime)

            #apply median filter
            dfilt = filter.medfilt(crawler.get()[LBIN:HBIN], medfilt_stride)

            data[:, i] = dfilt  # complex array
            
Example #7
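Batch conversion loop: skips inputs that are not .lofasm files or whose outputs already exist, then reads each file's metadata through a crawler.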
    splitname = os.path.splitext(inname)
    if splitname[1] != ".lofasm":
        print "Skipping " + inname + " (not a .lofasm file)"
        continue
    if not args.force:
        alldone = True
        for pol in filepols:
            outname = splitname[0] + "_" + pol + ".abx"
            alldone &= os.path.isfile(outname)
        if alldone:
            print "Skipping " + inname + " (all output files exist)"
            continue

    # Open input file and read metadata.
    try:
        crawler = pdat.LoFASMFileCrawler(inname)
        crawler.open()
        header = crawler.getFileHeader()
        nint = crawler.getNumberOfIntegrationsInFile()
        tstart = crawler.time_start.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
    except Exception:
        print("Skipping " + inname + " (unable to read metadata)")
        continue
    nin += 1

    # Store header metadata in local variables.
    for key in header.keys():
        field = header[key]
        if field[0] == "station":
            station = field[1]
        elif field[0] == "Nbins":
Example #8
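The same per-file extraction pattern as Example #6, but resolving input paths against args.dataDir.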
    data = np.zeros((Nbins, MAXSIZE), dtype=complex)
    timestamps = []

    print "shape of initialized array: {}".format(np.shape(data))

    #loop over files and extract necessary samples
    i = 0
    enterDataset = time()

    for f in flist:
        try:
            enterLoop = time()
            print "{}/{} processing {}".format(flist.index(f), len(flist), f),
            sys.stdout.flush()

            crawler = pdat.LoFASMFileCrawler(os.path.join(args.dataDir, f))
            crawler.open()
            crawler.setPol(args.polarization.upper())
            n = crawler.getNumberOfIntegrationsInFile() // cadence
            timestamps.append(crawler.time.datetime)

            #apply median filter
            dfilt = filter.medfilt(crawler.get()[LBIN:HBIN], medfilt_stride)

            data[:, i] = dfilt  # complex array
Example #9
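Opens a possibly gzip-compressed .lofasm file, parses its header directly, and positions a crawler either at the start of the data or at a caller-specified byte offset.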
        exit()
    else:
        input_filename = opts.input_filename
        if input_filename.endswith(".gz") or opts.gzip:
            lofasm_input_file = gzip.open(input_filename, 'r')
        else:
            lofasm_input_file = open(input_filename, 'rb')

    hdr_dict = pdat.parse_file_header(lofasm_input_file)

    lofasm_station = hdr_dict[4][1]

    #get starting location (beginning of data)
    if opts.start_position < 0:
        print "Starting from location 0."
        crawler = pdat.LoFASMFileCrawler(opts.input_filename)
        crawler.open()
    else:
        print "Skipping to specified location: %i" % (opts.start_position)
        lofasm_input_file.seek(opts.start_position)  #is this still necessary?
        crawler = pdat.LoFASMFileCrawler(opts.input_filename,
                                         start_loc=opts.start_position)
        crawler.open()
        print crawler.getFileHeader()

    burst_size_bytes = opts.packet_size_bytes * 17
    filesize_bytes = pdat.get_filesize(lofasm_input_file)
    num_frames = int(
        (filesize_bytes - opts.start_position) / burst_size_bytes) - 1

    #get filesize and exit
Example #10
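Splits each input into per-polarization output files, reading the header and start timestamp through a crawler before processing.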
    nout = 0  # files written

    for inname in args.files:
        # Check input file name
        filepols = list(pols)
        splitname = os.path.splitext(inname)
        if splitname[1] != ".lofasm" and splitname[1] != '.gz':
            print "Skipping " + inname + " (not a .lofasm file)"
            continue

        EOF_REACHED = False
        subfile_id = 0

        # Open file and read metadata.
        try:
            crawler = pdat.LoFASMFileCrawler(inname, unpack_binary=args.ascii)
            crawler.open()
            header = crawler.getFileHeader()
            tstart = crawler.time_start.datetime.strftime(
                "%Y-%m-%dT%H:%M:%S.%fZ")
        except Exception:
            print("Skipping " + inname + " (unable to read metadata)")
            continue
        nin += 1

        # Store header metadata in local variables.
        for key in header.keys():
            field = header[key]
            if field[0] == "station":
                station = field[1]
            elif field[0] == "Nbins":
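
All of the examples above follow the same access pattern: construct a LoFASMFileCrawler, call open(), read metadata with getFileHeader() and getNumberOfIntegrationsInFile(), then step through integrations with get() and forward(). The following is a minimal sketch of that pattern, assuming the lofasm package is installed; 'example.lofasm' is a hypothetical file name.

import numpy as np
import lofasm.parse_data as pdat

# open the data file (hypothetical path)
crawler = pdat.LoFASMFileCrawler('example.lofasm')
crawler.open()

# read metadata from the file header
header = crawler.getFileHeader()
nint = crawler.getNumberOfIntegrationsInFile()
print('integrations in file:', nint)

# select a polarization; 'AA' is one of the auto-correlations
crawler.setPol('AA')

# step through every integration, collecting one 2048-bin spectrum each
data = np.zeros((2048, nint))
for i in range(nint):
    data[:, i] = crawler.get()
    if i < nint - 1:
        crawler.forward()  # advance to the next integration

print('filter bank shape:', np.shape(data))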