Example 1
    def __init__(self, filname, mask=None, gulp=1024):
        """
        Creates a filterbank instance interfacing with `sigpyproc`.

        :param filname: path to the filterbank (.fil) file
        :param mask: optional channel mask
        :param gulp: number of samples to read per block
        """
        self.filname = filname
        self.fil = spp.FilReader(filname)
        self.h = self.fil.header
        self.gulp = gulp
        self.mask = mask  # was hard-coded to None, silently dropping the argument
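A minimal usage sketch for the wrapper above (the class name `Filterbank` and the file path are hypothetical; only `__init__` appears in the source):

fb = Filterbank('observation.fil', gulp=2048)  # hypothetical class name and path
print(fb.h.nchans, fb.h.tsamp)  # header fields exposed by sigpyproc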
Example 2
def worker(packed):
    """
    Generates h5 file of candidate with resized frequency-time and DM-time arrays
    :param cand_val: List of candidate parameters (fil_name, snr, width, dm, label, tcand(s))
    :type cand_val: Candidate
    :return: None

    TODO: Add option to use cand.resize for reshaping FT and DMT

    QQQ: is h5 really needed?

    """
    #####
    fil_name, cands = packed

    # read filterbank
    fil = spp.FilReader(fil_name)
    fh = fil.header

    tstart = fh['tstart']
    tsamp = fh['tsamp']
    fch1, foff, nchans = [fh[x] for x in ['fch1', 'foff', 'nchans']]

    tcands = tqdm.tqdm(cands.index, desc='Candidates', unit='cand')
    for idx in tcands:
        gc.collect()
        ret = dict()

        SIGMA = cands.sigma[idx]
        DM = cands.dm[idx]
        TIME = cands.time[idx]
        WIDTH = cands.width_units[idx]

        wfac = max(1, WIDTH // 2)
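        # wfac is the time-decimation factor: matched to the boxcar width so
        # the pulse spans ~2 decimated samples (never below native resolution)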

        ret['tcand'] = TIME
        ret['dm'] = DM
        ret['snr'] = SIGMA
        ret['width'] = WIDTH
        ret['cand_id'] = f'cand_tstart_{tstart:.12f}_tcand_{TIME:.7f}_dm_{DM:.5f}_snr_{SIGMA:.5f}'
        ret['label'] = None

        btdd = fetch_btdd.BTDD(fch1=fch1,
                               foff=foff,
                               nchans=nchans,
                               tsamp=tsamp * wfac,
                               ndm=DSIZE)

        btdd(DM)
        """
        NB: `FETCH:candmaker.py` which makes use of `pysigproc` to do the candidate computations
            dedisperses to the top-of-the-band and performs a center crop. 

        We take care here to match whatever we get here with whatever that gives.
        That having being said, we take a different approach, 
        we slice asymmetrically. 

        We need decimated 128 samples before the trigger => `128*wfac` samples before the trigger
        we need decimated 128 samples after the trigger and after de-dispersion/bowtie
        ==> thankfully, since we are taking care with start_slice, we just blindly take 256 samples and it will match up.
        """
        pre_take = wfac * TSIZE // 2
        post_take = wfac * (btdd.max_d + WIDTH + TSIZE // 2)

        start_sample = int((TIME / tsamp) - pre_take)
        width_sample = int(pre_take + post_take)

        ## check if start_sample is before the start of obs
        if start_sample < 0:
            width_sample = width_sample - start_sample
            start_sample = 0

        # print (f"takes pre={pre_take} post={post_take} width_sample={width_sample} delays={btdd.bt_delays[-1]} wfac={wfac}")
        # print (f" half_take={half_take} disp_sample={disp_sample} chunk_sample={chunk_sample} delays={btdd.bt_delays[-1]}")
        # print (f"delays={btdd.bt_delays[-1]} WIDTH={WIDTH} wfac={wfac} width_sample={width_sample}")

        fb = decimated_read(fil,
                            start_sample,
                            width_sample,
                            wfac,
                            ffac=1,
                            mask=MASK)
        # fb           = fil.readBlock (start_sample, width_sample)

        bt, dd = btdd.work(fb)

        ## normalize
        bt = normalize(bt)
        dd = normalize(dd)

        ## crop
        ## XXX We no longer use center_crop: the slicing above already places
        #  the trigger at the center of the first TSIZE decimated samples,
        #  so a plain head-crop is equivalent.
        # ret['bt'] = center_crop (bt, axis=1, take=TSIZE)
        # ret['dd'] = center_crop (dd, axis=1, take=TSIZE)
        ret['bt'] = bt[:, :TSIZE]
        ## XXX we have only done time crunching until now.
        #  we do frequency crunching+cropping at the same time here
        ret['dd'] = block_reduce(dd[:, :TSIZE], (int(nchans // FSIZE), 1),
                                 func=np.mean,
                                 cval=np.median(dd))

        if ret['bt'].shape != (DSIZE, TSIZE) or ret['dd'].shape != (FSIZE,
                                                                    TSIZE):
            print(cands.loc[idx])
            print(
                f"payload.shapes bt={ret['bt'].shape} dd={ret['dd'].shape} width={WIDTH} wfac={wfac} fb={fb.shape}"
            )
            raise ValueError("Output shapes do not match the expected sizes")

        ## save
        fetch_cands.save_cand_h5(ret, out_dir=ODIR, fil_header=fh)
        ## call the `h5plotter` if you want to plot

    return None
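`worker` expects a `(fil_name, cands)` pair, where `cands` behaves like a pandas DataFrame with columns `sigma`, `dm`, `time` and `width_units`. A hedged sketch of driving it with `multiprocessing` (the `cands_by_fil` mapping is hypothetical):

import multiprocessing as mp

# hypothetical: cands_by_fil maps each .fil path to its candidate DataFrame
with mp.Pool(processes=4) as pool:
    pool.map(worker, list(cands_by_fil.items()))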
Example 3
    parser.add_argument("-o",
                        dest='prefix',
                        default='plot',
                        help="Plot file prefix")

    args = parser.parse_args()

    if args.overlap > 1:
        raise RuntimeError(f"Invalid overlap fraction {args.overlap} > 1")
    elif args.overlap < 0:
        raise RuntimeError(f"Invalid overlap fraction {args.overlap} < 0")

    if not (args.plot_raw or args.plot_deci or args.plot_deci_sec):
        raise RuntimeError("No task to perform was provided. Exiting.")

    filReader = spp.FilReader(args.input)

    startingSamples = args.init
    baseTime = args.init

    samplesPerBlock = int(args.samples * (1. + args.overlap))
    # round up so the block length is a multiple of the decimation factor
    samplesPerBlock += (-samplesPerBlock) % args.deci
    print(f"We will be reading {samplesPerBlock} samples per block.")

    dataBlock = filReader.readBlock(startingSamples, samplesPerBlock)

    if args.plot_deci:
        deciBlock = dataBlock.downsample(tfactor=args.deci)

    if args.plot_raw:
        normalInit = initFigure(dataBlock)
def handleHeimdall(folderName,
                   filLoc,
                   h5Name='output_pulses.h5',
                   dump=True,
                   dm0=55,
                   dm1=62,
                   snr0=6,
                   maxlen=512,
                   fitDM=57.61):
    filObj = spp.FilReader(filLoc)
    f0 = filObj.header.fch1
    df = filObj.header.foff
    tsamp = filObj.header.tsamp
    maxSamples = filObj.header.nsamples

    filName = filLoc.split('/')[-1]

    count = 0
    dm = []
    snr = []
    boxcar = []
    length = []

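    # Heimdall .cand rows are tab-separated; the column indices used below
    # assume the standard layout (worth verifying for your Heimdall version):
    #   col 0 = S/N, col 3 = log2(boxcar width), col 5 = DM,
    #   col 7 = start sample, col 8 = end sample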
    for file in os.listdir(folderName):
        if file.split('.')[-1] == 'cand':
            with open(f'{folderName}/{file}', 'r') as currentFile:
                fileData = currentFile.readlines()
                print(file, fileData)
                for currLine in fileData:
                    if len(currLine) < 10: continue
                    lineData = list(filter(None, currLine.split('\t')))
                    print(lineData)

                    #if float(lineData[0]) < 9 and int(lineData[3]) > 8: continue
                    if float(lineData[5]) < dm0 or float(lineData[5]) > dm1:
                        continue
                    if float(lineData[0]) < snr0: continue
                    if int(lineData[8]) - int(lineData[7]) > maxlen: continue

                    count += 1
                    snr.append(float(lineData[0]))
                    dm.append(float(lineData[5]))
                    boxcar.append(2**float(lineData[3]))
                    length.append(int(lineData[8]) - int(lineData[7]))

    if not dump:
        return count, snr, dm, boxcar, length

    count = 0

    lengths, counts = np.unique(length, return_counts=True)
    combinedData = np.vstack([lengths, counts]).T

    with h5py.File(h5Name, 'w') as h5ref:
        headGroup = h5ref.create_group(f"{filObj.header.source_name}_pulses")
        #headGroup.attrs.create('sigproc_hdr', filObj.header.SPPHeader(True))
        headGroup.attrs.create('source_fil', filLoc)
        headGroup.attrs.create('source_cands', folderName)
        headGroup.attrs.create('dm0', dm0)
        headGroup.attrs.create('dm1', dm1)
        headGroup.attrs.create('snr0', snr0)
        headGroup.attrs.create('maxlen', maxlen)
        headGroup.attrs.create('fitDM', fitDM)

        for key, val in filObj.header.items():
            headGroup.attrs.create(key, val)

        for lengthVal, countVal in combinedData:
            print(f"{lengthVal} samples: {countVal}x")
            headGroup.create_dataset(
                f'{lengthVal}_sample_pulse',
                (filObj.header.nchans, lengthVal * 2, countVal),
                compression='lzf',
                dtype=np.uint8)
            headGroup.create_dataset(f'{lengthVal}_sample_pulse_snr',
                                     (countVal, ),
                                     compression='lzf',
                                     dtype=np.float16)

        lengthCounts = {length: 0 for length in lengths}
        for file in os.listdir(folderName):
            if file.split('.')[-1] == 'cand':
                with open(f'{folderName}/{file}', 'r') as currentFile:
                    fileData = currentFile.readlines()
                    print(file, fileData)
                    for currLine in fileData:
                        if len(currLine) < 10: continue
                        lineData = list(filter(None, currLine.split('\t')))
                        print(lineData)

                        #if float(lineData[0]) < 9 and int(lineData[3]) > 8: continue
                        if float(lineData[5]) < dm0 or float(
                                lineData[5]) > dm1:
                            continue
                        if float(lineData[0]) < snr0: continue
                        if int(lineData[8]) - int(lineData[7]) > maxlen:
                            continue

                        snrVal = snr[count]
                        dmVal = dm[count]
                        lengthVal = length[count]
                        count += 1

                        endSample = int(lineData[8])
                        startSample = int(lineData[7])

                        dmDelays = filObj.header.getDMdelays(fitDM)
                        # extend by the full DM sweep, plus the observed
                        # length again for tail investigations
                        endSample += dmDelays[-1] + lengthVal
                        if endSample > maxSamples:
                            endSample = maxSamples

                        idx = lengthCounts[lengthVal]
                        lengthCounts[lengthVal] += 1
                        saveData(headGroup, idx, filObj, lengthVal, snrVal,
                                 fitDM, startSample, endSample)

    return count, snr, dm, boxcar, length
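A minimal invocation sketch for `handleHeimdall` (paths hypothetical), collecting candidate statistics without writing the HDF5 file:

count, snr, dm, boxcar, length = handleHeimdall('./heimdall_cands', './obs.fil', dump=False)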
Example 5
import numpy as np
import sigpyproc as spp
from astropy.time import Time
from scipy.signal import stft, hann


def rollingAverage(data, step=8):
    # prepend a zero so the first window starts at data[0]
    rollingSum = np.concatenate(([0.], np.cumsum(data)))
    # mean over each sliding window of `step` samples
    return (rollingSum[step:] - rollingSum[:-step]) / step


def decimate(data, step=64):
    # sum over each consecutive, non-overlapping block of `step` samples
    rollingSum = np.concatenate(([0.], np.cumsum(data)))
    return rollingSum[step::step] - rollingSum[:-step:step]
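
# Sanity checks for the zero-padded cumulative sums above (illustrative):
#   decimate(np.ones(256), step=64)        -> array([64., 64., 64., 64.])
#   rollingAverage(np.arange(8.), step=2)  -> array([0.5, 1.5, ..., 6.5])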


filename = '/mnt/ucc4_data1/data/Uranus/2021_11_30/Uranus_IE613_2021-11-30T23:00:00.000_stokesI.fil'

filReader = spp.FilReader(filename)

time = time_filReader = Time(filReader.header.tstart, format="mjd")
samplesPerBlock = filReader.header.nsamples
readTimestamp = 0
dataBlock_all = filReader.readBlock(readTimestamp, samplesPerBlock)

# P_AA = np.zeros([filReader.header.nchans, int(samplesPerBlock / 4)])
# P_BB = np.zeros([filReader.header.nchans, int(samplesPerBlock / 4)])
# P_AB = np.zeros([filReader.header.nchans, int(samplesPerBlock / 4)])
# P_BA = np.zeros([filReader.header.nchans, int(samplesPerBlock / 4)])

# de-interleave the four polarisation products stored in consecutive
# time bins (AA, BB, AB, BA):
# for j in range(int(samplesPerBlock / 4)):
#     P_AA[:, j] = dataBlock_all[:, j * 4 + 0]
#     P_BB[:, j] = dataBlock_all[:, j * 4 + 1]
#     P_AB[:, j] = dataBlock_all[:, j * 4 + 2]
#     P_BA[:, j] = dataBlock_all[:, j * 4 + 3]
Example 6
    # Integrate over time by reshaping and averaging over an axis (efficient)
    x_psd = x_psd.reshape(x_psd.shape[0] // n_int, n_int, x_psd.shape[1])
    x_psd = x_psd.mean(axis=1)

    return x_psd
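
# For reference (illustrative, not part of the function): the reshape-and-mean
# above averages n_int consecutive spectra without a Python loop, e.g.
#   x = np.arange(12, dtype=float).reshape(6, 2)  # 6 spectra, 2 channels
#   x.reshape(2, 3, 2).mean(axis=1)               # -> 2 integrated spectra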


if __name__ == "__main__":
    import pylab as plt
    import seaborn as sns
    import sigpyproc as spp
    nTaps = 4
    nBranch = 16
    nWindowPerGulp = 1024
    nInt = 1
    rawReader = spp.FilReader("./path/to/my.fil")
    for idx, dataBlock in enumerate(
            rawReader.readPlan(start=0,
                               nsamps=nTaps * nBranch * nWindowPerGulp)):
        for channel in range(dataBlock.shape[0]):
            dataBlock[channel, :] = pfb_spectrometer(dataBlock[channel, :],
                                                     n_taps=nTaps,
                                                     n_chan=nBranch,
                                                     n_int=nInt,
                                                     window_fn="hamming")
        # Validate, does ntaps modify this or not?
        dataBlock.header.tsamp *= nTaps * nBranch
        dataBlock.header.nchans *= nTaps * nBranch
        dataBlock.header.foff /= nTaps * nBranch
        #dataBlock.header.fch1 = <needs updating>
        dataBlock.toFile(f"output_{idx:03d}.fil")
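# NB (assumption, tied to the open question above): for a critically sampled
# PFB the hop between output spectra is nBranch input samples (nTaps only
# sets the FIR length), so tsamp would scale by nBranch * nInt rather than
# nTaps * nBranch.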