Example #1
0
def get_fs(waterfall):
    """
    Gets frequency values from filterbank file.

    Parameters
    ----------
    waterfall : str or Waterfall
        Name of filterbank file or Waterfall object

    Returns
    -------
    fs : ndarray
        Frequency values (exactly ``nchans`` of them, starting at ``fch1``
        and stepping by ``foff``)
    """
    # Resolve the header once; the original re-parsed the file per field.
    if isinstance(waterfall, str):
        header = read_header(waterfall)
    elif isinstance(waterfall, Waterfall):
        header = waterfall.header
    else:
        sys.exit('Invalid data file!')

    fch1 = header[b'fch1']
    df = header[b'foff']
    fchans = header[b'nchans']

    # fch1 + df * arange(fchans) guarantees exactly fchans values; the
    # stop-based np.arange(fch1, fch1 + fchans * df, df) can gain or lose
    # an element to float rounding of the endpoint.
    return fch1 + df * np.arange(fchans)
Example #2
0
def test_voyager_fix_header():
    """Round-trip test: patch header fields with fix_header and verify reads."""
    source = '/workdata/bl/data/voyager_f1032192_t300_v2.fil'
    new_filename = 'voyager_ext.fil'

    # Cut a narrow band out of the Voyager observation, write it to a new
    # filterbank file, and reopen the copy.
    fb = Filterbank(source, f_start=8420.1, f_stop=8420.3)
    fb.write_to_filterbank(new_filename)
    fb = Filterbank(new_filename)

    filename = new_filename
    assert read_header(filename)['ibeam'] == 1

    # Rewrite the integer 'ibeam' field several times, checking each write.
    for beam in (7, 1, 13):
        fix_header(filename, 'ibeam', beam)
        assert read_header(filename)['ibeam'] == beam

    pprint(read_header(filename))

    # Rewrite the string 'rawdatafile' field, checking each write.
    for raw in ('./blc3_9bit_guppi_57386_VOYAGER1_0004.0000.raw',
                './blc3_2bit_guppi_57386_VOYAGER1_0004.0000.raw'):
        fix_header(filename, 'rawdatafile', raw)
        assert read_header(filename)['rawdatafile'] == raw

    os.remove(new_filename)
Example #3
0
def get_ts(waterfall):
    """
    Gets time values from filterbank file.

    Parameters
    ----------
    waterfall : str or Waterfall
        Name of filterbank file or Waterfall object

    Returns
    -------
    ts : ndarray
        Time values (``tchans`` samples spaced by ``tsamp``, starting at 0)
    """
    # Resolve the header once; the original dispatched on type twice and
    # re-read the file once per field.
    if isinstance(waterfall, str):
        header = read_header(waterfall)
    elif isinstance(waterfall, Waterfall):
        header = waterfall.header
    else:
        sys.exit('Invalid fil file!')

    tsamp = header[b'tsamp']
    fch1 = header[b'fch1']
    df = header[b'foff']

    # Load a single frequency channel just to learn the number of time bins.
    waterfall0 = Waterfall(waterfall, f_start=fch1, f_stop=fch1 + df)

    try:
        tchans = get_data(waterfall0).shape[0]
    except Exception:
        sys.exit('No data in filterbank file!')

    # arange(tchans) * tsamp guarantees exactly tchans values; the
    # stop-based np.arange(0, tchans * tsamp, tsamp) can gain or lose an
    # element to float rounding of the endpoint.
    return np.arange(tchans) * tsamp
Example #4
0
def split_waterfall_generator(waterfall_fn, fchans, tchans=None, f_shift=None):
    """
    Creates a generator that returns smaller Waterfall objects by 'splitting'
    an input filterbank file according to the number of frequency samples.

    Since this function only loads in data in chunks according to fchans,
    it handles very large observations well. Specifically, it will not attempt
    to load all the data into memory before splitting, which won't work when
    the data is very large anyway.

    Parameters
    ----------
    waterfall_fn : str
        Filterbank filename with .fil extension
    fchans : int
        Number of frequency samples per new filterbank file
    tchans : int, optional
        Number of time samples to select - will default from start of observation.
        If None, just uses the entire integration time
    f_shift : int, optional
        Number of samples to shift when splitting filterbank. If
        None, defaults to `f_shift=fchans` so that there is no
        overlap between new filterbank files

    Yields
    ------
    split : Waterfall
        A blimpy Waterfall object containing a smaller section of the data

    Raises
    ------
    ValueError
        If tchans exceeds the number of time samples in the observation.
    """
    # Read the header a single time rather than once per field.
    header = read_header(waterfall_fn)
    fch1 = header[b'fch1']
    nchans = header[b'nchans']
    # abs() makes the stepping below sign-agnostic w.r.t. foff.
    df = abs(header[b'foff'])
    tchans_tot = Waterfall(waterfall_fn,
                           load_data=False).container.selection_shape[0]

    if f_shift is None:
        f_shift = fchans

    if tchans is None:
        tchans = tchans_tot
    elif tchans > tchans_tot:
        # Original message embedded a run of line-continuation whitespace
        # and claimed strictly "less than" although equality is accepted.
        raise ValueError('tchans value must not exceed the total number '
                         'of time samples in the observation')

    # df is positive here (abs above), so subtracting steps down from fch1.
    f_start = fch1 - fchans * df
    f_stop = fch1

    # Iterates down frequencies, starting from highest
    while f_start >= fch1 - nchans * df:
        waterfall = Waterfall(waterfall_fn,
                              f_start=f_start,
                              f_stop=f_stop,
                              t_start=0,
                              t_stop=tchans)

        yield waterfall

        f_start -= f_shift * df
        f_stop -= f_shift * df
Example #5
0
# Filterbank observation to be searched.
fn = '/datax/scratch/bbrzycki/data/blc00_guppi_58331_12383_DIAG_SGR_B2_0014.gpuspec.0000.fil'

##### Search! #####

# Classification buckets for hits; serialized to JSON below so the search
# results persist on disk.
notable_hits = {
    'choppy_rfi': [],
    'constant': [],
    'noise': [],
    'scintillated': [],
    'unclear': []
}

# Initialize the on-disk record with the empty buckets.
with open('notable_hits.json', 'w') as f:
    json.dump(notable_hits, f)

# Header fields: frequency of the first channel and total channel count.
fch1 = read_header(fn)[b'fch1']
nchans = read_header(fn)[b'nchans']

# Frame indices flagged as notable during the search loop below.
notable_indices = []

for index in range(0, int(nchans / fchans)):
    f_stop = fch1 + index * fchans * df
    f_start = fch1 + (index + 1) * fchans * df
    frame = stg.get_data(Waterfall(fn, f_start=f_start, f_stop=f_stop))
    normalized = stg.normalize(frame, cols = 128, exclude = 0.2, use_median=False)
    
    # Predict each frame
    plt.imsave('temp_normalized.png', normalized)

    img = load_img('temp_normalized.png',False,target_size=(32, 1024))
    x = img_to_array(img)