Example 1
    def prepare_file( self, file ):

        """
        Prepares the PSRFITS file in the correct format for the program.
        """

        try:
            hdul = fits.open( file )
        except OSError:
            return -1

        name = hdul[0].header[ 'SRC_NAME' ]
        fe = hdul[0].header[ 'FRONTEND' ]
        if hdul[0].header[ 'OBS_MODE' ] != "PSR" or name != self.psr_name or fe != self.frontend:
            hdul.close()
            return -1
        hdul.close()

        ar = Archive( file, verbose = self.verbose )
        ar.tscrunch( nsubint = self.subbands )
        ar.fscrunch( nchan = 1 )
        nsubint = ar.getNsubint()
        nbin = ar.getNbin()
        data = ar.getData()

        return np.copy( data ), nsubint, nbin
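
A minimal usage sketch for prepare_file, assuming an instance (called proc here) of the enclosing class, which must define psr_name, frontend, subbands and verbose; the filename is hypothetical:

    result = proc.prepare_file( "J1909-3744_0001.fits" )
    if result == -1:
        # fits.open() failed or the header checks rejected the file
        print( "Skipping: not a matching PSR-mode observation." )
    else:
        data, nsubint, nbin = result
        print( "Loaded {} subints x {} bins".format( nsubint, nbin ) )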
Example 2
    def __init__(self,
                 filename,
                 template,
                 directory=None,
                 SNLim=3000,
                 verbose=False):
        '''
        Initializes all archives and parameters in the data cube for a given file.
        A template must also be supplied.
        A custom signal-to-noise lower bound can be set on initialization
        (default 3000); archives whose S/N falls below it are flagged for
        rejection via the SNError attribute.
        The verbose flag controls whether long arrays and other console output
        are printed in full or in shorthand.
        '''

        if verbose:
            print("Initializing DataCull object...")

        self.SNError = False

        # Parse directory in string or choose CWD if no directory given
        if directory is None:
            self.directory = str(os.getcwd())
        else:
            self.directory = str(directory)

        # Parse filename
        if os.path.isfile(self.directory + filename):
            self.filename = str(filename)
        else:
            raise FileNotFoundError(
                "File {} not found in this directory...".format(filename))

        # Load the template
        self.template = self._loadTemplate(template)

        # Parse verbose option
        self.verbose = verbose

        # Parse SNLim
        self.SNLim = SNLim

        # Load the file in the archive
        self.ar = Archive(self.__str__(), verbose=self.verbose)

        # Togglable print options
        if self.verbose:
            np.set_printoptions(threshold=np.inf)

        # Check if Signal / Noise is too low
        if self.ar.getSN() < SNLim:
            if self.verbose:
                print("Signal / Noise ratio is way too low. (Below {})".format(
                    SNLim))
                print("Data set to be thrown out...")
            self.SNError = True

        # Load the data cube for the file
        self.data = self.ar.getData()
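
A sketch of how this initializer might be driven; the filename, template name and directory are hypothetical. Note the trailing slash on the directory, since the class concatenates directory + filename directly:

    cull = DataCull("J1713+0747.fits", "J1713_template",
                    directory="/data/psr/", SNLim=3000, verbose=True)
    if cull.SNError:
        print("S/N below limit; archive flagged for rejection.")
    else:
        print(cull.data.shape)   # the loaded data cube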
Example 3
    def __init__(self, file, cont_name, cont_fits_dir, verbose=False):
        self.file = file
        self.cont_name = cont_name
        self.cont_fits_dir = cont_fits_dir
        self.verbose = verbose
        # Change these to match filename format...
        self.obs_num = self.file[-14:-10]
        self.num = self.file[-9:-5]

        self.ar = Archive(self.file, prepare=True, verbose=self.verbose)
        self.mjd = self.ar.getMJD()
        self.fe = self.ar.getFrontend()
Example 4
def getTemplate(PSR, rcvr):
    if rcvr == "Rcvr1_2" or rcvr == "Rcvr_800":
        dirname = "guppi2"
        be = "GUPPI"
    elif rcvr == "L-wide" or rcvr == "S0wide" or rcvr == "430" or rcvr == "327":
        dirname = "puppi"
        be = "PUPPI"
    tempfilename = "/nanograv/releases/11y/%s/%s/%s.%s.%s.11y.x.sum.sm" % (
        dirname, PSR, PSR, rcvr, be)
    ar = Archive(tempfilename, verbose=False)
    sptemp = SinglePulse(u.normalize(ar.getData(), simple=True),
                         windowsize=256,
                         period=ar.getPeriod())
    return sptemp
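
A hedged usage sketch for getTemplate; it assumes the NANOGrav 11-year release tree hard-coded above is present on disk, and the pulsar name is hypothetical:

    sptemp = getTemplate("J1909-3744", "Rcvr1_2")   # GUPPI / L-band template
    print(sptemp.getFWHM())   # e.g. inspect the template width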
Example 5
    def _loadArchive(self):
        '''
        Loads the archive from the PyPulse Archive class and initializes the main parameters.
        '''

        loadedArchive = Archive(self.directory + self.file, verbose=False)

        return loadedArchive
Example 6
def apply_cal_factor_to_prof(dir,
                             psr_file,
                             cal_factor,
                             rfi_mitigation=False,
                             threshold=3,
                             setdata=True):

    file = dir + psr_file
    ar = Archive(file, verbose=False)
    if rfi_mitigation:
        rfi_mit = RFIMitigator(ar)
        rfi_mit.zap_minmax(threshold=threshold)

    nchan = ar.getNchan()
    nsub = ar.getNsubint()

    data = ar.getData()
    converted_data_aa, converted_data_bb = IQUV_to_AABB(data,
                                                        basis="cartesian")

    new_converted_aa = np.empty(nsub, dtype=object)
    new_converted_bb = np.empty(nsub, dtype=object)
    new_converted = np.empty(nsub, dtype=object)
    new_data = np.empty(nsub, dtype=object)
    for i in range(nsub):
        new_converted_aa[i] = row_multiply(converted_data_aa[i], cal_factor[0])
        new_converted_bb[i] = row_multiply(converted_data_bb[i], cal_factor[1])
        new_converted[i] = np.array((new_converted_aa[i], new_converted_bb[i]))
        new_data[i] = AABB_to_IQUV(new_converted[i], basis="cartesian")

    if setdata:
        ar.setData(new_data)

    return new_data, ar
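
A hedged usage sketch; the directory and filename are hypothetical, and cal_factor is assumed to be a pair of per-channel scale arrays (AA and BB), such as a Jy-per-count calculation elsewhere in this file would produce:

    new_data, ar = apply_cal_factor_to_prof("/data/cal/", "J1909_0001.fits",
                                            cal_factor, rfi_mitigation=True,
                                            threshold=3)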
Example 7
    def prepare_file(self, file, do_fit=False):
        """
        Prepares the PSRFITS file in the correct format for the program.
        """

        try:
            hdul = fits.open(file)
        except OSError:
            return -1

        name = hdul[0].header['SRC_NAME']
        fe = hdul[0].header['FRONTEND']
        mjd = hdul[0].header['STT_IMJD']
        if hdul[0].header['OBS_MODE'] != "PSR" or name != self.psr_name:
            hdul.close()
            return -1
        hdul.close()

        tmp_fn = "{0}_{1}_nchan1_template.npy".format(self.psr_name, fe)
        try:
            template = self.load_template(self.temp_dir, tmp_fn)
        except TemplateLoadError:
            print("Template not found")
            reply = str(
                input("Would you like to make a suitable one? ('y' for yes)")
            ).lower().strip()
            if reply.startswith('y'):
                temp = FD_Template(self.psr_name,
                                   fe,
                                   1,
                                   *self.dirs,
                                   template_dir="templates",
                                   verbose=self.verbose)
                template = temp.make_template(gaussian_fit=do_fit)
            else:
                raise TemplateLoadError(
                    "You can make a suitable template via the following command: python template_builder.py psr_name -b [frontend] -d [dirs]"
                )

        ar = Archive(file, verbose=self.verbose)
        if self.epoch_average:
            ar.tscrunch(nsubint=1)

        return ar, template, fe, mjd
Example 8
 def __init__(self,
              file,
              template,
              method='chauvenet',
              nn_params=None,
              verbose=False,
              **kwargs):
     self.file = file
     if "cal" in self.file:
         raise ValueError(f"File {self.file} is not in PSR format.")
     elif "59071" in self.file:
         raise ValueError(f"Not doing 59071...")
     self.method = method
     self.verbose = verbose
     self.ar = Archive(file, verbose=False)
     if method != 'NN':
         _, self.template = u.get_data_from_asc(template)
         self.opw = u.get_1D_OPW_mask(self.template, windowsize=128)
         self.omit, self.rms_mu, self.rms_sigma = self.get_omission_matrix(
             **kwargs)
         unique, counts = np.unique(self.omit, return_counts=True)
         print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
         print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
     elif nn_params is not None:
         df = pd.DataFrame(
             np.reshape(self.ar.getData(),
                        (self.ar.getNsubint() * self.ar.getNchan(),
                         self.ar.getNbin())))
         scaler = MinMaxScaler()
         scaled_df = scaler.fit_transform(df.iloc[:, :])
         scaled_df = pd.DataFrame(scaled_df)
         self.x = scaled_df.iloc[:, :].values.transpose()
         self.nn = NeuralNet(self.x, np.array([[0], [0]]))
         self.nn.dims = [self.ar.getNbin(), 512, 10, 13, 8, 6, 6, 4, 4, 1]
         self.nn.threshold = 0.5
         self.nn.load_params(root=nn_params)
         self.omit = self.nn_get_omission()
         np.set_printoptions(threshold=sys.maxsize)
         unique, counts = np.unique(self.omit, return_counts=True)
         print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
         print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
     else:
         sys.exit()
Example 9
    def calculate_Jy_per_count( self, cal_file_list ):

        """
        Input list: [ PSR_CAL, ON_CAL, OFF_CAL, CAL_MJD ]

        Returns:
        conversion_factor  :  np.ndarray
        """

        G = 11.0

        if not isinstance( cal_file_list, np.ndarray ):
            cal_file_list = np.array( cal_file_list )

        if cal_file_list.ndim != 1:
            raise ValueError( "Should be a vector" )

        archives = []
        freqs = []
        for f in cal_file_list[:-1]:
            hdul = fits.open( f )
            freqs.append( hdul[3].data[ 'DAT_FREQ' ][0] )
            hdul.close()
            archives.append( Archive( f, verbose = self.verbose ) )


        aabb_list = []

        for i, arc in enumerate( archives ):
            arc.tscrunch()
            A, B, C, D = self.convert_subint_pol_state( arc.getData(),
                                                        arc.subintheader[ 'POL_TYPE' ],
                                                        "AABBCRCI",
                                                        linear = arc.header[ 'FD_POLN' ] )
            l = { 'ARC' : arc,
                  'DATA' : [ A, B ],
                  'FREQS' : freqs[i],
                  'S_DUTY' : arc.getValue( 'CAL_PHS' ),
                  'DUTY' : arc.getValue( 'CAL_DCYC' ),
                  'BW' : arc.getBandwidth() }
            aabb_list.append( l )


        H, L, T0 = self._prepare_calibration( aabb_list )
        F_ON = ( H[1]/L[1] ) - 1
        F_OFF = ( H[2]/L[2] ) - 1

        C0 = T0[1:] / ( ( 1 / F_ON ) - ( 1 / F_OFF ) )
        T_sys = C0 / F_OFF
        F_cal = ( T_sys * F_OFF ) / G


        Fa = interp1d( aabb_list[1][ 'FREQS' ], F_cal[0][0],
                       kind = 'cubic', fill_value = 'extrapolate' )
        Fb = interp1d( aabb_list[2][ 'FREQS' ], F_cal[0][1],
                       kind = 'cubic', fill_value = 'extrapolate' )

        conversion_factor = np.array(
            [ Fa( aabb_list[0][ 'FREQS' ] ) / ( H[0][0] - L[0][0] ),
              Fb( aabb_list[0][ 'FREQS' ] ) / ( H[0][1] - L[0][1] ) ] )

        return conversion_factor
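
The noise-diode algebra above is compact, so here is a standalone sanity check of the same equations with made-up numbers (H and L are the high and low cal levels for the ON- and OFF-source continuum pointings, G the gain in K/Jy):

    import numpy as np

    G = 11.0
    T0 = np.array( [ 5.0, 5.0 ] )                 # made-up source temperatures
    H_on, L_on = np.array( [ 3.0, 3.2 ] ), np.array( [ 1.0, 1.1 ] )
    H_off, L_off = np.array( [ 2.0, 2.1 ] ), np.array( [ 1.0, 1.05 ] )

    F_ON = ( H_on / L_on ) - 1                    # fractional cal deflection on source
    F_OFF = ( H_off / L_off ) - 1                 # fractional cal deflection off source
    C0 = T0 / ( ( 1 / F_ON ) - ( 1 / F_OFF ) )
    T_sys = C0 / F_OFF
    F_cal = ( T_sys * F_OFF ) / G                 # cal flux in Jy
    print( F_cal )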
Example 10
def apply_cal_factor_to_prof(dir,
                             psr_file,
                             cal_factor,
                             rfi_mitigation=False,
                             threshold=3,
                             setdata=True):

    cal_fa = vector_to_diagonal(cal_factor[0])
    cal_fb = vector_to_diagonal(cal_factor[1])

    file = dir + psr_file
    ar = Archive(file, verbose=False)
    if rfi_mitigation:
        rfi_mit = RFIMitigator(ar)
        rfi_mit.zap_minmax(threshold=threshold)

    nchan = ar.getNchan()
    nsub = ar.getNsubint()

    data = ar.getData(squeeze=False)

    converted_data = IQUV_to_AABB(data, basis="cartesian")

    # Apply the diagonal calibration matrices to each subintegration
    # (assumes converted_data is indexed as [subint][pol], matching the
    # per-subint loop in the row_multiply variant of this function).
    new_converted = []
    new_data = []
    for i in range(nsub):
        aa = np.dot(converted_data[i][0], cal_fa)
        bb = np.dot(converted_data[i][1], cal_fb)
        new_converted.append((aa, bb))
        new_data.append(AABB_to_IQUV(np.array(new_converted[i]),
                                     basis="cartesian"))

    new_data = np.array(new_data)

    if setdata:
        ar.setData(new_data)

    return new_data, ar
Example 11
#!/usr/local/bin/python3
# Plots pulsar time series

import sys
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from pypulse.archive import Archive
from physics import calculate_rms_matrix

if __name__ == "__main__":

    file = sys.argv[1]
    ar = Archive(file, prepare=True)
    ch = ar.getNsubint()    # number of subintegrations
    chan = ar.getNchan()    # number of channels

    def on_key(event):
        print(event.key, math.floor(event.xdata), math.floor(event.ydata))
        if event.key == 'z':
            with open(f'../Zap/{file[6:20]}_lbw_{file[-9:-5]}_2048.zap',
                      'a+') as t:
                t.write(
                    f'{math.floor(event.xdata)} {ar.freq[math.floor(event.xdata)][math.floor(event.ydata)]}\n'
                )
        elif event.key == 'r':
            with open(f'../Zap/{file[6:20]}_lbw_{file[-9:-5]}_2048.zap',
                      'a+') as t:
                for n in range(ch):
                    t.write(f'{n} {ar.freq[n][math.floor(event.ydata)]}\n')

    # The original script ended after defining the handler, so nothing was
    # drawn; this minimal sketch shows the band-averaged power and connects
    # the handler so the 'z' and 'r' keys actually fire.
    fig, ax = plt.subplots()
    ax.imshow(np.mean(ar.getData(), axis=2).T,
              aspect='auto',
              origin='lower')
    fig.canvas.mpl_connect('key_press_event', on_key)
    plt.show()
Example 12
def load_profile(filename):
    ar = Archive(filename, verbose=False)
    return SinglePulse(ar.getData(), windowsize=256)
Example 13
def get_AABB_Fcal(dir, continuum_on, continuum_off, args, G=10.0, T0=1.0):

    ON, OFF = dir + continuum_on, dir + continuum_off

    if args.freq_zap is not None:
        args.freq_zap = [int(x) for x in args.freq_zap]

    ar_on, ar_off = Archive(ON, verbose=False), Archive(OFF, verbose=False)
    rfi_on, rfi_off = RFIMitigator(ar_on), RFIMitigator(ar_off)
    s_duty_on, s_duty_off = ar_on.getValue("CAL_PHS"), ar_off.getValue(
        "CAL_PHS")
    duty_on, duty_off = ar_on.getValue("CAL_DCYC"), ar_off.getValue("CAL_DCYC")
    nchan_on, nchan_off = ar_on.getNchan(), ar_off.getNchan()
    npol_on, npol_off = ar_on.getNpol(), ar_off.getNpol()
    nbin_on, nbin_off = ar_on.getNbin(), ar_off.getNbin()
    BW_on, BW_off = ar_on.getBandwidth(), ar_off.getBandwidth()
    CTR_FREQ_on, CTR_FREQ_off = ar_on.getCenterFrequency(
        weighted=True), ar_off.getCenterFrequency(weighted=True)
    ar_on.tscrunch()
    ar_off.tscrunch()

    if args.freq_zap is not None:
        if len(args.freq_zap) == 1:
            if args.channel_space:
                rfi_on.zap_channels(args.freq_zap)
                rfi_off.zap_channels(args.freq_zap)
            else:
                print(
                    "No zapping occurred (tried to zap channels in frequency space). Carrying on with calibration..."
                )
        elif len(args.freq_zap) == 2 and not args.channel_space:
            rfi_on.zap_frequency_range(args.freq_zap[0], args.freq_zap[1])
            rfi_off.zap_frequency_range(args.freq_zap[0], args.freq_zap[1])
        else:
            rfi_on.zap_channels(args.freq_zap)
            rfi_off.zap_channels(args.freq_zap)

    data_on, data_off = ar_on.getData(squeeze=True), ar_off.getData(
        squeeze=True)

    converted_data_on = IQUV_to_AABB(data_on, basis="cartesian")
    converted_data_off = IQUV_to_AABB(data_off, basis="cartesian")

    # Initialize the continuum data arrays, indexed (pol, chan[, bin])
    continuum_on_source = np.zeros((2, nchan_on, nbin_on))
    high_on_mean = np.zeros((2, nchan_on))
    low_on_mean = np.zeros((2, nchan_on))
    continuum_off_source = np.zeros((2, nchan_off, nbin_off))
    high_off_mean = np.zeros((2, nchan_off))
    low_off_mean = np.zeros((2, nchan_off))
    f_on = np.zeros_like(high_on_mean)
    f_off = np.zeros_like(high_off_mean)
    C0 = np.zeros_like(high_off_mean)
    T_sys = np.zeros_like(C0)
    F_cal = np.zeros_like(T_sys)

    # Load the continuum data
    for i in np.arange(2):
        for j in np.arange(nchan_on):

            continuum_on_source[i][j], high_on_mean[i][j], low_on_mean[i][
                j] = prepare_cal_profile(converted_data_on[0][i][j], s_duty_on,
                                         duty_on)
            continuum_off_source[i][j], high_off_mean[i][j], low_off_mean[i][
                j] = prepare_cal_profile(converted_data_off[0][i][j],
                                         s_duty_off, duty_off)

            f_on[i][j] = (high_on_mean[i][j] / low_on_mean[i][j]) - 1
            f_off[i][j] = (high_off_mean[i][j] / low_off_mean[i][j]) - 1

            if np.isnan(f_on[i][j]):
                f_on[i][j] = 1
            if np.isnan(f_off[i][j]):
                f_off[i][j] = 1

            C0[i][j] = T0 / ((1 / f_on[i][j]) - (1 / f_off[i][j]))
            T_sys[i][j] = C0[i][j] / f_off[i][j]
            F_cal[i][j] = (T_sys[i][j] *
                           f_off[i][j]) / G  # F_cal has units Jy / cal

            if np.isnan(F_cal[i][j]):
                F_cal[i][j] = 0

    frequencies_on_off = chan_to_freq(CTR_FREQ_on, BW_on, nchan_on)

    f1 = interp1d(frequencies_on_off, F_cal[0], kind='cubic',
                  fill_value='extrapolate')
    f2 = interp1d(frequencies_on_off, F_cal[1], kind='cubic',
                  fill_value='extrapolate')

    return f1, f2
Example 14
def get_Jy_per_count(dir, psr_cal_file, fitAA, fitBB):

    file = dir + psr_cal_file

    ar = Archive(file, verbose=False)
    rfi = RFIMitigator(ar)
    ar.tscrunch()
    s_duty = ar.getValue("CAL_PHS")
    duty = ar.getValue("CAL_DCYC")
    nchan = ar.getNchan()
    npol = ar.getNpol()
    nbin = ar.getNbin()
    BW = ar.getBandwidth()
    data = ar.getData()
    CTR_FREQ = ar.getCenterFrequency(weighted=True)

    converted_data = IQUV_to_AABB(data, basis="cartesian")

    frequencies = chan_to_freq(CTR_FREQ, BW, nchan)
    psr_cal = np.zeros((2, nchan, nbin))
    high_psr = np.zeros((2, nchan))
    low_psr = np.zeros((2, nchan))
    for i in np.arange(2):
        for j in np.arange(nchan):
            psr_cal[i][j], high_psr[i][j], low_psr[i][j] = prepare_cal_profile(
                converted_data[0][i][j], s_duty, duty)

    # Calculate jy_per_count{p, f}
    jy_per_count_factor = np.zeros_like(high_psr)
    for j in np.arange(nchan):
        jy_per_count_factor[0][j] = fitAA(frequencies[j]) / (
            high_psr[0][j] - low_psr[0][j])  # AA has units Jy / count

    for j in np.arange(nchan):
        jy_per_count_factor[1][j] = fitBB(frequencies[j]) / (
            high_psr[1][j] - low_psr[1][j])  # BB has units Jy / count

    return jy_per_count_factor
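
An end-to-end sketch tying get_AABB_Fcal and get_Jy_per_count together; the directory and filenames are hypothetical, and args is assumed to carry the freq_zap and channel_space attributes used above (e.g. from argparse):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--freq_zap", nargs="*", default=None)
    parser.add_argument("--channel_space", action="store_true")
    args = parser.parse_args([])   # no zapping in this sketch

    # Per-polarisation flux fits from a continuum ON/OFF pair...
    fitAA, fitBB = get_AABB_Fcal("/data/cal/", "cont_on.fits",
                                 "cont_off.fits", args)
    # ...then Jy-per-count factors for a pulsar-cal observation
    factors = get_Jy_per_count("/data/cal/", "psr_cal.fits", fitAA, fitBB)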
Example 15
def load_archive(file, tscrunch=False):

    ar = Archive(file, verbose=False)
    if tscrunch:
        ar.tscrunch(nsubint=4)
        #ar.imshow()
    name = ar.getName()
    mjd = int(ar.getMJD())
    fe = ar.getFrontend()
    nbin = ar.getNbin()
    data = ar.getData().reshape((ar.getNchan() * ar.getNsubint(), nbin))

    return name, mjd, fe, nbin, data
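
A hedged usage sketch; the filename is hypothetical:

    name, mjd, fe, nbin, data = load_archive("puppi_58000_J1713+0747_0001.fits",
                                             tscrunch=True)
    print(name, mjd, fe, data.shape)   # data is (nchan * nsubint, nbin)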
Example 16
class Zap():
    """
    Master class for zapping data.
    Requires:

    file        -       .FITS (must be PSRFITS v5+ format)

    Optional:

    template    -       ASCII format:       BIN#    Flux           (Required if not doing NN excision)
    method      -       Either 'chauvenet', 'DMAD' or 'NN'
    verbose     -       Prints more information to the console
    **kwargs    -       Gets passed to plot.histogram_and_curves()
    """
    def __init__(self,
                 file,
                 template,
                 method='chauvenet',
                 nn_params=None,
                 verbose=False,
                 **kwargs):
        self.file = file
        if "cal" in self.file:
            raise ValueError(f"File {self.file} is not in PSR format.")
        elif "59071" in self.file:
            raise ValueError(f"Not doing 59071...")
        self.method = method
        self.verbose = verbose
        self.ar = Archive(file, verbose=False)
        if method != 'NN':
            _, self.template = u.get_data_from_asc(template)
            self.opw = u.get_1D_OPW_mask(self.template, windowsize=128)
            self.omit, self.rms_mu, self.rms_sigma = self.get_omission_matrix(
                **kwargs)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        elif nn_params is not None:
            df = pd.DataFrame(
                np.reshape(self.ar.getData(),
                           (self.ar.getNsubint() * self.ar.getNchan(),
                            self.ar.getNbin())))
            scaler = MinMaxScaler()
            scaled_df = scaler.fit_transform(df.iloc[:, :])
            scaled_df = pd.DataFrame(scaled_df)
            self.x = scaled_df.iloc[:, :].values.transpose()
            self.nn = NeuralNet(self.x, np.array([[0], [0]]))
            self.nn.dims = [self.ar.getNbin(), 512, 10, 13, 8, 6, 6, 4, 4, 1]
            self.nn.threshold = 0.5
            self.nn.load_params(root=nn_params)
            self.omit = self.nn_get_omission()
            np.set_printoptions(threshold=sys.maxsize)
            unique, counts = np.unique(self.omit, return_counts=True)
            print(f"Good channels: {100*(counts[0]/sum(counts)):.3f}%")
            print(f"Bad channels: {100*(counts[1]/sum(counts)):.3f}%")
        else:
            sys.exit()

    def nn_get_omission(self):
        pred = np.around(np.squeeze(self.nn.pred_data(self.x, False)),
                         decimals=0).astype(int)
        pred = np.reshape(pred, (self.ar.getNsubint(), self.ar.getNchan()))

        return pred

    def get_omission_matrix(self, **kwargs):

        rms, lin_rms, mu, sigma = u.rms_arr_properties(
            self.ar.getData(), self.opw, 1.0)  # Needs to input 2D array

        # Creates the histogram
        plot.histogram_and_curves(
            lin_rms,
            mean=mu,
            std_dev=sigma,
            bins=(self.ar.getNchan() * self.ar.getNsubint()) // 4,
            x_axis='Root Mean Squared',
            y_axis='Frequency Density',
            title=r'$\mu={},\ \sigma={}$'.format(mu, sigma),
            **kwargs)

        if self.method == 'chauvenet':
            rej_arr = physics.chauvenet(rms,
                                        median=mu,
                                        std_dev=sigma,
                                        threshold=2.0)
        elif self.method == 'DMAD':
            rej_arr = physics.DMAD(lin_rms, threshold=3.5)
            rej_arr = np.reshape(rej_arr,
                                 (self.ar.getNsubint(), self.ar.getNchan()))

        if self.verbose:
            print("Rejection criterion created.")

        return rej_arr, mu, sigma

    def plot_mask(self, **kwargs):

        fig = plt.figure(figsize=(7, 7))
        ax = fig.add_subplot(111)
        ax.imshow(self.omit.T,
                  cmap=plt.cm.gray,
                  interpolation='nearest',
                  aspect='auto')
        plt.show()

    def save_training_set(self, val_size=0.2):
        # From Chauvenet or DMAD. 1 is bad channel

        with open(
                f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training',
                'w') as t:
            t.write(
                f'# Training set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n'
            )
        with open(
                f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation',
                'w') as t:
            t.write(
                f'# Validation set for {self.ar.getName()} taken on {int(self.ar.getMJD())} at {self.ar.getFrontend()}\n'
            )

        # One row per profile: the flux bins plus a trailing 0/1 label
        # (2049 assumes 2048-bin profiles)
        ps_0 = np.zeros(2049)[np.newaxis, :]
        ps_1 = np.zeros(2049)[np.newaxis, :]

        d = self.ar.getData().reshape(
            (self.ar.getNchan() * self.ar.getNsubint(), self.ar.getNbin()))
        omission = self.omit.reshape(
            (self.ar.getNchan() * self.ar.getNsubint()))

        i = 1
        for omit, profile in zip(omission, d):
            try:
                choice = int(omit)
                if choice == 1:
                    choice = 0
                elif choice == 0:
                    choice = 1
            except ValueError:
                choice = -1

            print(i, end='\r')

            if choice != -1:
                # Creates the profile / choice pairs and doubles up with the reciprocal profiles.
                p = np.append(profile, choice)
                #inv_p = np.append( -1*profile, choice )
                if choice == 0:
                    ps_0 = np.append(ps_0, p[np.newaxis, :], axis=0)
                else:
                    ps_1 = np.append(ps_1, p[np.newaxis, :], axis=0)

            i += 1

        ps_0, ps_1 = np.delete(ps_0, 0, 0), np.delete(ps_1, 0, 0)

        # Sort into training / validation sets
        train, validation = train_test_split(ps_0, test_size=val_size)
        ones_t, ones_v = train_test_split(ps_1, test_size=val_size)
        train, validation = np.append(train, ones_t,
                                      axis=0), np.append(validation,
                                                         ones_v,
                                                         axis=0)

        np.random.shuffle(train), np.random.shuffle(validation)

        for k in train:
            with open(
                    f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.training',
                    'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")
                #np.savetxt( t, inv_p, fmt = '%1.5f ', newline = '' )
                #t.write( "\n" )

        for k in validation:
            with open(
                    f'{self.ar.getName()}_{int(self.ar.getMJD())}_{self.ar.getFrontend()}_{self.ar.getNbin()}.validation',
                    'a') as t:
                np.savetxt(t, k, fmt='%1.5f ', newline='')
                t.write("\n")

    # Save as ASCII text file
    def save(self, outroot="zap_out", ext='.ascii'):
        outfile = outroot + ext
        with open(outfile, 'w+') as f:
            for i, t in enumerate(self.omit):
                for j, rej in enumerate(t):
                    if rej:
                        f.write(str(i) + " " + str(self.ar.freq[i][j]) + "\n")
                        #f.write( f'{k} {self.ar.freq[k][i]}\n' )
        return outfile
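
A hedged usage sketch for Zap; the file and template names are hypothetical:

    z = Zap("puppi_58000_J1713+0747_0001.fits", "J1713.ascii",
            method='chauvenet', verbose=True)
    z.plot_mask()                          # visual check of the omission matrix
    outfile = z.save(outroot="J1713_zap")  # channel/frequency pairs to zap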
Example 17
class Cal():
    def __init__(self, file, cont_name, cont_fits_dir, verbose=False):
        self.file = file
        self.cont_name = cont_name
        self.cont_fits_dir = cont_fits_dir
        self.verbose = verbose
        # Change these to match filename format...
        self.obs_num = self.file[-14:-10]
        self.num = self.file[-9:-5]

        self.ar = Archive(self.file, prepare=True, verbose=self.verbose)
        self.mjd = self.ar.getMJD()
        self.fe = self.ar.getFrontend()

    def get_onoff_data(self, tolerance=1):

        try:
            dat = np.genfromtxt("../logs/on_off.log", dtype='str')
            on, off, frontend, date, n = (dat[:, 0], dat[:, 1], dat[:, 2],
                                          dat[:, 3], dat[:, 4])
        except OSError:
            on, off, frontend, date, n = [], [], [], [], []
            l = []

            # Get accurate co-ordinates for source
            pos, params = flux.find_source_params_f2(self.cont_name)
            m_coordinates = SkyCoord(f"{pos[0]} {pos[1]}",
                                     unit=(unit.hourangle, unit.degree))

            for f in sorted(os.listdir(self.cont_fits_dir)):
                continuum_file = os.path.join(self.cont_fits_dir, f)
                try:
                    fe, mjd, cont_ra, cont_dec = u.find_fe_mjd_ra_dec(
                        continuum_file, "CAL")
                except Exception:
                    continue

                obs_num = continuum_file[-9:-5]

                # Get co-ordinates from FITS file in correct units
                coords = SkyCoord(f"{cont_ra} {cont_dec}",
                                  unit=(unit.hourangle, unit.degree))

                # Enter bizarre Henryk mind space (but tell me it doesn't work)
                l.append({
                    'MJD': mjd,
                    'ON': None,
                    'OFF': None,
                    'FE': fe,
                    'NUM': obs_num
                })

                # Determine if co-ordinates signify ON or OFF source observation. Tolerance in arcminutes.
                if m_coordinates.separation(coords) <= (tolerance *
                                                        unit.arcmin):
                    mode = 'ON'
                else:
                    mode = 'OFF'

                # Set the filename to the correct flag in the dictionary
                for entry in l:
                    if (entry['MJD'] == mjd) and (entry['FE'] == fe) and (
                            entry['NUM'] == obs_num) and (entry[mode] is None):
                        entry[mode] = f

            # Delete the excess rows that got created
            for entry in reversed(l):
                if (entry['ON'] is None) or (entry['OFF'] is None):
                    l.remove(entry)

            for d in l:
                on.append(d['ON'])
                off.append(d['OFF'])
                frontend.append(d['FE'])
                date.append(d['MJD'])
                n.append(d['NUM'])

            if self.verbose:
                print("Saving to logs/on_off.log")

            with open("../logs/on_off.log", "w") as f:
                data = np.array([on, off, frontend, date, n]).T
                np.savetxt(f, data, fmt="%s")

        # Returns everything as iterables
        return on, off, frontend, date, n

    def closest_continuum2psrcal(self, mjd_tol=400):

        try:
            dat = np.genfromtxt("../logs/psrcal2continuum.log", dtype='str')
            p, c_on, c_off, m, c_m, fronts = (dat[:, 0], dat[:, 1], dat[:, 2],
                                              dat[:, 3], dat[:, 4], dat[:, 5])
        except OSError:
            # Then make the table

            on, off, frontend, date, _ = self.get_onoff_data(tolerance=1)
            p, c_on, c_off, m, c_m, fronts = [], [], [], [], [], []

            for psr_cal in sorted(os.listdir(os.getcwd())):
                mjd_list = []
                try:
                    psr_fe, psr_mjd = u.find_fe_mjd(psr_cal, "CAL")
                except OSError:
                    continue

                if (psr_mjd == -1) or (psr_fe == -1):
                    continue

                # Compare psr_cal file with continuum files
                for cont_on, cont_off, cont_fe, cont_mjd in zip(
                        on, off, frontend, date):
                    if cont_fe == psr_fe:
                        delta_mjd = abs(int(cont_mjd) - psr_mjd)
                        if all(elem > delta_mjd for elem in mjd_list):
                            mjd_list.append(delta_mjd)
                            if delta_mjd < mjd_tol:
                                continuum_on = str(
                                    os.path.join(self.cont_fits_dir, cont_on))
                                continuum_off = str(
                                    os.path.join(self.cont_fits_dir, cont_off))
                                if psr_cal in p:
                                    pind = p.index(psr_cal)
                                    if delta_mjd < abs(
                                            int(c_m[pind]) - psr_mjd):
                                        # Keep the closer continuum pair
                                        # (the old assignment was a no-op)
                                        c_m[pind] = cont_mjd
                                        c_on[pind] = continuum_on
                                        c_off[pind] = continuum_off
                                else:
                                    p.append(psr_cal)
                                    m.append(psr_mjd)
                                    c_m.append(cont_mjd)
                                    c_on.append(continuum_on)
                                    c_off.append(continuum_off)
                                    fronts.append(psr_fe)
                            else:
                                continuum_on, continuum_off = None, None

            if self.verbose:
                print("Saving to logs/psrcal2continuum.log")

            with open("../logs/psrcal2continuum.log", "w") as f:
                data = np.array([p, c_on, c_off, m, c_m, fronts]).T
                np.savetxt(f, data, fmt="%s")

        return p, c_on, c_off, m, c_m, fronts

    def jpc(self, G=11.0, **kwargs):

        psr_cal, cont_on, cont_off, psr_mjd, cont_mjd, fe = self.closest_continuum2psrcal(
        )
        freq = self.ar.freq[0]

        inds = [i for i, n in enumerate(psr_mjd) if int(n) == int(self.mjd)]
        ind = None
        for i in inds:
            if fe[i] == self.fe:
                ind = i

        psr_ar = Archive(psr_cal[ind], prepare=False, verbose=False)
        cont_on_ar = Archive(cont_on[ind], prepare=False, verbose=False)
        cont_off_ar = Archive(cont_off[ind], prepare=False, verbose=False)

        cal_arc_list = []

        np.set_printoptions(threshold=sys.maxsize)
        for ar in [psr_ar, cont_on_ar, cont_off_ar]:
            ar.tscrunch()
            fr = ar.freq[0]
            data = ar.getData()
            if self.verbose:
                print(data.shape)
            # The following commented lines are for when certain frequencies are missing
            if (ar is cont_on_ar) or (ar is cont_off_ar):
                # if self.fe == '430':
                #     data = np.delete( data, slice(48, 56), 1 )
                #     fr = np.delete( fr, slice(48, 56), 0 )
                if self.fe == 'lbw':
                    #
                    #data = np.delete( data, slice(0, 64), 1 )
                    #fr = np.delete( fr, slice(0, 64), 0 )
                    #data = np.delete( data, slice(192, 256), 1 )
                    #fr = np.delete( fr, slice(192, 256), 0 )
                    data = np.delete(data, slice(256, 320), 1)
                    fr = np.delete(fr, slice(256, 320), 0)
                    #data = np.delete( data, slice(320, 384), 1 )
                    #fr = np.delete( fr, slice(320, 384), 0 )
                #print(fr)
                # if self.fe == '430':
                #     for i in range(8):
                #         data = np.insert( data, 48, np.zeros((2048)), axis = 1 )
                #     fr = np.insert( fr, 48, np.linspace( 455.0, 467.5, 8, endpoint = False ), 0 )
                # elif self.fe == 'lbw':
                #     for i in range(64):
                #         data = np.insert( data, 384, np.zeros((2048)), axis = 1 )
                #     fr = np.insert( fr, 384, np.linspace( 1180.0, 1080.0, 64, endpoint = False ), 0 )

            cal_arc_list.append({
                'ARC': ar,
                'DATA': data,
                'FREQS': fr,
                'S_DUTY': ar.getValue('CAL_PHS'),
                'DUTY': ar.getValue('CAL_DCYC'),
                'BW': ar.getBandwidth()
            })

        cal_fluxes = flux.get_fluxes(freq / 1000, self.cont_name, **kwargs)

        # Sometimes the calibration happens the opposite way, in which case this would be H, L
        L, H = self._prepare_calibration(cal_arc_list)

        F_ON = (H[1] / L[1]) - 1
        F_OFF = (H[2] / L[2]) - 1

        C0 = cal_fluxes / ((1 / F_ON) - (1 / F_OFF))
        T_sys = C0 / F_OFF
        F_cal = (T_sys * F_OFF) / G
        F_cal = np.nan_to_num(F_cal, nan=0.0)

        # Plots F_cal if first file in series (for checking purposes)
        if self.num == '0001':
            for f in F_cal:
                plt.plot(f)
            plt.show()

        Fa = interpolate.interp1d(cal_arc_list[1]['FREQS'], F_cal[0],
                                  kind='cubic', fill_value='extrapolate')
        Fb = interpolate.interp1d(cal_arc_list[2]['FREQS'], F_cal[1],
                                  kind='cubic', fill_value='extrapolate')
        Fcr = interpolate.interp1d(cal_arc_list[1]['FREQS'], F_cal[2],
                                   kind='cubic', fill_value='extrapolate')
        Fci = interpolate.interp1d(cal_arc_list[2]['FREQS'], F_cal[3],
                                   kind='cubic', fill_value='extrapolate')

        aa = Fa(cal_arc_list[0]['FREQS']) / (H[0][0] - L[0][0])
        bb = Fb(cal_arc_list[0]['FREQS']) / (H[0][1] - L[0][1])
        cr = Fcr(cal_arc_list[0]['FREQS']) / (H[0][2] - L[0][2])
        ci = Fci(cal_arc_list[0]['FREQS']) / (H[0][3] - L[0][3])

        with open(
                f"../cal/{self.ar.getName()}_{self.fe}_{int(psr_mjd[ind])}_{self.obs_num}_{self.num}.cal",
                "w") as f:
            data = np.array([freq, aa, bb, cr, ci]).T
            np.savetxt(f, data)

        return freq, aa, bb, cr, ci

    def _prepare_calibration(self, archive_list, r_err=8):

        H = []
        L = []

        for entry in archive_list:
            all_high_means = []
            all_low_means = []

            for pol in entry['DATA']:
                high_means = []
                low_means = []

                for i, channel in enumerate(pol):

                    start_bin = math.floor(len(channel) * entry['S_DUTY'])
                    mid_bin = math.floor(
                        len(channel) * (entry['S_DUTY'] + entry['DUTY']))
                    end_bin = mid_bin + (math.floor(
                        len(channel) * entry['DUTY']))
                    bin_params = [start_bin, mid_bin, end_bin]

                    low_mean = np.mean(channel[bin_params[0]:bin_params[1]])
                    high_mean = np.mean(channel[bin_params[1]:bin_params[2]])

                    low_mean = round(low_mean, r_err)
                    high_mean = round(high_mean, r_err)

                    high_means.append(high_mean)
                    low_means.append(low_mean)

                all_high_means.append(high_means)
                all_low_means.append(low_means)

            H.append(all_high_means)
            L.append(all_low_means)

        H = np.array(H)
        L = np.array(L)

        return H, L
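
A standalone check of the cal-window arithmetic in _prepare_calibration, with made-up numbers: the window [S_DUTY, S_DUTY + DUTY] of each profile is averaged as the low (cal off) level and the following window of equal width as the high (cal on) level:

    import math

    nbin, s_duty, duty = 2048, 0.25, 0.25         # made-up values
    start_bin = math.floor(nbin * s_duty)         # 512: low window begins
    mid_bin = math.floor(nbin * (s_duty + duty))  # 1024: high window begins
    end_bin = mid_bin + math.floor(nbin * duty)   # 1536: high window ends
    print(start_bin, mid_bin, end_bin)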
Example 19
class DataCull:
    '''
    Main class for data culling pulsar fits files to get a less noisy data set.
    '''
    def __init__(self,
                 filename,
                 template,
                 directory=None,
                 SNLim=3000,
                 verbose=False):
        '''
        Initializes all archives and parameters in the data cube for a given file.
        A template must also be supplied.
        A custom signal-to-noise lower bound can be set on initialization
        (default 3000); archives whose S/N falls below it are flagged for
        rejection via the SNError attribute.
        The verbose flag controls whether long arrays and other console output
        are printed in full or in shorthand.
        '''

        if verbose:
            print("Initializing DataCull object...")

        self.SNError = False

        # Parse directory in string or choose CWD if no directory given
        if directory is None:
            self.directory = str(os.getcwd())
        else:
            self.directory = str(directory)

        # Parse filename
        if os.path.isfile(self.directory + filename):
            self.filename = str(filename)
        else:
            raise FileNotFoundError(
                "File {} not found in this directory...".format(filename))

        # Load the template
        self.template = self._loadTemplate(template)

        # Parse verbose option
        self.verbose = verbose

        # Parse SNLim
        self.SNLim = SNLim

        # Load the file in the archive
        self.ar = Archive(self.__str__(), verbose=self.verbose)

        # Togglable print options
        if self.verbose:
            np.set_printoptions(threshold=np.inf)

        # Check if Signal / Noise is too low
        if self.ar.getSN() < SNLim:
            if self.verbose:
                print("Signal / Noise ratio is way too low. (Below {})".format(
                    SNLim))
                print("Data set to be thrown out...")
            self.SNError = True

        # Load the data cube for the file
        self.data = self.ar.getData()

    def __repr__(self):
        return "DataCull( filename = {}, template = {}, directory = {}, SNLim = {}, verbose = {} )".format(
            self.filename, self.templateName, self.directory, self.SNLim,
            self.verbose)

    def __str__(self):
        return self.directory + self.filename

    def _loadTemplate(self, templateFilename):
        '''
        Loads a template specified by the user. If no extension is given, the
        extension .npy will be used. Note that this code is designed for numpy
        arrays so it would be wise to use them.
        Returns the template.
        '''

        # Parse the template's filename into a string and ensure the correct extension
        self.templateName = str(templateFilename)
        self.templateName = u.addExtension(self.templateName, 'npy')

        # Load the template
        template = np.load(self.templateName)

        return template

    def reject(self,
               criterion='chauvenet',
               iterations=1,
               fourier=True,
               rms=True,
               binShift=True,
               showPlots=False):
        '''
        Performs the rejection algorithm until the number of iterations has been
        reached or the data culling is complete, whichever comes first. The
        default number of iterations is 1.
        Requires the criterion to be set with the default criterion
        being Chauvenet's criterion.
        This is the function you should use to reject all outliers fully.
        '''

        if self.verbose:
            print("Beginning data rejection for {}...".format(self.filename))

        # Initialize the completion flag to false
        self.rejectionCompletionFlag = False

        if fourier:
            if self.verbose:
                print("Beginning FFT data rejection...")

            for i in np.arange(iterations):

                self.fourierTransformRejection(criterion, showPlots, showPlots)

                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print(
                            "RMS data rejection for {} complete after {} generations..."
                            .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(
                    iterations))

            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False

        if rms:
            if self.verbose:
                print("Beginning RMS data rejection...")

            for i in np.arange(iterations):

                self.rmsRejection(criterion, showPlots)

                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print(
                            "RMS data rejection for {} complete after {} generations..."
                            .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(
                    iterations))

            # Re-initialize the completion flag to false
            self.rejectionCompletionFlag = False

        if binShift:
            if self.verbose:
                print("Beginning bin shift data rejection...")

            for i in np.arange(iterations):

                self.binShiftRejection(showPlots)

                # If all possible outliers have been found and the flag is set to true, don't bother doing any more iterations.
                if self.rejectionCompletionFlag:
                    generation = i + 1
                    if self.verbose:
                        print(
                            "Bin shift data rejection for {} complete after {} generations..."
                            .format(self.filename, generation))
                    break

            # If the completion flag is still false, the cycles finished before full excision
            if self.verbose and not self.rejectionCompletionFlag:
                print("Maximum number of iterations ({}) completed...".format(
                    iterations))

        # Re-load the data cube for the file
        self.data = self.ar.getData()

    def rmsRejection(self, criterion, showPlot=False):
        '''
        Rejects outlier root mean squared values for off pulse regions and
        re-weights the data cube in the loaded archive.
        '''

        # Re-load the data cube for the file
        self.data = self.ar.getData()

        templateMask = pu.binMaskFromTemplate(self.template)

        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties(
            self.data, templateMask)

        if showPlot:

            # Creates the histogram
            pltu.histogram_and_curves(
                linearRmsArray,
                mean=mu,
                std_dev=sigma,
                x_axis='Root Mean Squared',
                y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(mu, sigma),
                show=True,
                curve_list=[spyst.norm.pdf, mathu.test_dist.test_pdf])

        # Determine which criterion to use to reject data
        if criterion == 'chauvenet':  # Chauvenet's Criterion

            rejectionCriterion = mathu.chauvenet(rmsArray, mu, sigma, 3)

        elif criterion == 'DMAD':  # Double Median Absolute Deviation

            rejectionCriterion = mathu.doubleMAD(linearRmsArray)
            rejectionCriterion = np.reshape(
                rejectionCriterion, (self.ar.getNsubint(), self.ar.getNchan()))

        else:
            raise ValueError(
                "Allowed rejection criteria are either 'chauvenet' or 'DMAD'. Please use one of these..."
            )

        # Set the weights of potential noise in each profile to 0
        u.zeroWeights(rejectionCriterion, self.ar, self.verbose)

        # Checks to see if there were any data to reject. If this array has length 0, all data was good and the completion flag is set to true.
        if (len(np.where(rejectionCriterion)[0]) == 0):
            self.rejectionCompletionFlag = True

        if self.verbose:
            print("Data rejection cycle complete...")

    def fourierTransformRejection(self,
                                  criterion,
                                  showTempPlot=False,
                                  showOtherPlots=False):
        '''
        Uses FFT (Fast Fourier Transform) to get the break-down of signals in the
        profile and compares to the template.
        '''

        # Re-load the data cube
        data = self.ar.getData()
        tempData = self.template

        # Initialize guess parameters and the curve to fit
        guess_params = [100, 100, 1024]
        curve = mathu.FFT_dist._pdf

        # Set up arrays for FFT
        profFFT = np.zeros_like(data)
        tempFFT = fft(tempData)

        # Normalize the template array w.r.t the max value and shift to middle
        tempFFT = abs(mathu.normalizeToMax(abs(tempFFT.T)))
        tempFFT = fftshift(tempFFT)

        # Create template FFT mask
        fftTempMask = pu.binMaskFromTemplate(tempFFT)

        rmsArray, linearRmsArray, mu, sigma = u.getRMSArrayProperties(
            data, fftTempMask)

        tempParams = opt.curve_fit(curve,
                                   np.arange(len(tempFFT)),
                                   tempFFT,
                                   p0=guess_params)

        t = np.arange(0, len(tempFFT), 0.01)

        temp_fit = mathu.normalizeToMax(curve(t, *tempParams[0]))

        if showTempPlot:
            pltu.plotAndShow(tempFFT, t, temp_fit)

        # Loop over the time and frequency indices (subints and channels)
        for time in np.arange(self.ar.getNsubint()):
            for frequency in np.arange(self.ar.getNchan()):

                # FFT then normalize and center FFT'd profile
                profFFT[time][frequency] = fft(data[time][frequency])
                profFFT[time][frequency] = abs(
                    mathu.normalizeToMax(abs(profFFT[time][frequency].T)))
                profFFT[time][frequency] = fftshift(profFFT[time][frequency])

                # Skip dead (all-zero) profiles
                if not np.any(profFFT[time][frequency]):
                    continue

                # Get optimization parameters for each profile for the same curve used to fit the template.
                params = opt.curve_fit(curve,
                                       np.arange(len(tempFFT)),
                                       profFFT[time][frequency],
                                       p0=guess_params)

                # Normalize the curve with the fitted parameters
                prof_fit = mathu.normalizeToMax(curve(t, *params[0]))

                if showOtherPlots:
                    pltu.plotAndShow(profFFT[time][frequency], t, prof_fit,
                                     temp_fit)

                # if not all( u.is_similar_array( tempParams[0], params[0], tolerance = [ 1e-1, 1, 2 ] ) ):
                #     print( "Not similar" )
                #     continue
                if params[0][1] < 0:
                    print("Not similar")

                    if self.verbose:
                        print(
                            "Setting the weight of (subint: {}, channel: {}) to 0"
                            .format(time, frequency))
                    self.ar.setWeights(0, t=time, f=frequency)

                else:
                    print("Similar")

                # # Check if profile FT RMS matches template FT RMS based on Chauvenet
                # if criterion is 'chauvenet': # Chauvenet's Criterion
                #
                #     rejectionCriterion = mathu.chauvenet( rmsArray, mu, sigma, 2 )
                #
                # elif criterion is 'DMAD': # Double Median Absolute Deviation
                #
                #     rejectionCriterion = mathu.doubleMAD( linearRmsArray )
                #     rejectionCriterion = np.reshape( rejectionCriterion, ( self.ar.getNsubint(), self.ar.getNchan() ) )
                #
                # else:
                #     raise ValueError( "Allowed rejection criteria are either 'chauvenet' or 'DMAD'. Please use one of these..." )
                #
                # if not rejectionCriterion:
                #     if self.verbose:
                #         print( "Setting the weight of (subint: {}, channel: {}) to 0".format( time, frequency ) )
                #     self.ar.setWeights( 0, t = time, f = frequency )

        # Re-load the data cube
        self.data = self.ar.getData()

    def binShiftRejection(self, showPlot=False):
        '''
        Gets the bin shift and bin shift error of each profile in the file and
        plots both quantities as histograms.
        Then rejects outliers based on Chauvenet's criterion.
        '''

        nBinShift, nBinError = self.getBinShifts()

        # Reshape the bin shift and bin shift error arrays to be linear
        nProfiles = self.ar.getNchan() * self.ar.getNsubint()
        linearNBinShift = np.reshape(nBinShift, nProfiles)
        linearNBinError = np.reshape(nBinError, nProfiles)

        # Mean and standard deviation of the bin shift
        muS, sigmaS = np.nanmean(linearNBinShift), np.nanstd(linearNBinShift)

        # Mean and standard deviation of the bin shift error
        muE, sigmaE = np.nanmean(linearNBinError), np.nanstd(linearNBinError)

        if showPlot:

            # Create the histograms as two subplots
            pltu.histogram_and_curves(
                linearNBinShift,
                mean=muS,
                std_dev=sigmaS,
                x_axis=r'Bin Shift from Template, $\hat{\tau}$',
                y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(muS, sigmaS),
                show=True,
                curve_list=[spyst.norm.pdf])
            pltu.histogram_and_curves(
                linearNBinError,
                mean=muE,
                std_dev=sigmaE,
                x_axis=r'Bin Shift Error, $\sigma_{\tau}$',
                y_axis='Frequency Density',
                title=r'$\mu={},\ \sigma={}$'.format(muE, sigmaE),
                show=True,
                curve_list=[spyst.maxwell.pdf])

            # Adjust subplots so they look nice
            #plt.subplots_adjust( top=0.92, bottom=0.15, left=0.15, right=0.95, hspace=0.55, wspace=0.40 )

        rejectionCriterionS = mathu.chauvenet(nBinShift, muS, sigmaS)
        rejectionCriterionE = mathu.chauvenet(nBinError, muE, sigmaE)

        # Set the weights of potential noise in each profile to 0
        u.zeroWeights(rejectionCriterionS, self.ar, self.verbose)
        u.zeroWeights(rejectionCriterionE, self.ar, self.verbose)

        # If nothing was flagged for rejection, all data were good; set the completion flag.
        if len(np.where(rejectionCriterionS)[0]) == 0 and len(
                np.where(rejectionCriterionE)[0]) == 0:
            self.rejectionCompletionFlag = True

        if self.verbose:
            print("Data rejection cycle complete...")

    def getBinShifts(self):
        '''
        Returns the bin shift and bin shift error.
        '''

        if self.verbose:
            print("Getting bin shifts and errors from the template...")

        # Re-load the data cube
        self.data = self.ar.getData()

        templateMask = pu.binMaskFromTemplate(self.template)

        # Return the array of RMS values for each profile
        rmsArray = mathu.rmsMatrix2D(self.data,
                                     mask=templateMask,
                                     nanmask=True)

        # Initialize the bin shifts and bin shift errors
        nBinShift = np.zeros((self.ar.getNsubint(), self.ar.getNchan()),
                             dtype=float)
        nBinError = np.zeros((self.ar.getNsubint(), self.ar.getNchan()),
                             dtype=float)

        # Use PyPulse utility get_toa3 to obtain tauhat and sigma_tau for each profile and feed them into the two arrays.
        for time in np.arange(self.ar.getNsubint()):
            for frequency in np.arange(self.ar.getNchan()):

                if np.all(self.data[time][frequency] == 0):

                    nBinShift[time][frequency] = np.nan
                    nBinError[time][frequency] = np.nan

                else:

                    # Attempt to calculate the bin shift and error. If not possible, set the profile to 0.
                    try:
                        tauccf, tauhat, bhat, sigma_tau, sigma_b, snr, rho = get_toa3(
                            self.template,
                            self.data[time][frequency],
                            rmsArray[time][frequency],
                            dphi_in=0.1,
                            snrthresh=0.,
                            nlagsfit=5,
                            norder=2)

                        nBinShift[time][frequency] = tauhat
                        nBinError[time][frequency] = sigma_tau

                    except Exception:
                        if self.verbose:
                            print(
                                "Setting the weight of (subint: {}, channel: {}) to 0"
                                .format(time, frequency))
                        self.ar.setWeights(0, t=time, f=frequency)

                        nBinShift[time][frequency] = np.nan
                        nBinError[time][frequency] = np.nan

        # Mask the nan values in the array so that histogram_and_curves doesn't malfunction
        nBinShift = np.ma.array(nBinShift, mask=np.isnan(nBinShift))
        nBinError = np.ma.array(nBinError, mask=np.isnan(nBinError))

        return nBinShift, nBinError
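
The rejection steps above lean on mathu.chauvenet, whose implementation is not shown in this example. A minimal sketch of what such a helper might look like follows; the function name, the threshold parameter, and the convention that True marks a point for rejection are assumptions inferred from how it is called above, not the pipeline's actual code.

import numpy as np
from scipy.special import erfc

def chauvenet(data, mean, stddev, threshold=0.5):
    """
    Boolean mask that is True where Chauvenet's criterion rejects a point:
    a value is discarded when the expected number of samples at least as
    far from the mean (for a Gaussian of this mean and standard deviation)
    falls below 'threshold', classically 0.5.
    """
    data = np.asarray(data, dtype=float)
    # Two-sided Gaussian tail probability of a deviation this large
    prob = erfc(np.abs(data - mean) / (np.sqrt(2.0) * stddev))
    return data.size * prob < threshold

Under that convention, u.zeroWeights would then zero the weight of every profile whose entry in the returned mask is True.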
Example n. 20
0
    def calibrate( self ):

        """
        Master calibration method
        """

        conv_file = "{}_{}_fluxcalibration_conversion_factors.pkl".format( self.psr_name, self.cont_name )
        cal_mjd_file = "{}_{}_fluxcalibration_cal_mjds.pkl".format( self.psr_name, self.cont_name )
        conv_abs_path, cal_abs_path = os.path.join( self.pkl_dir, 'calibration', conv_file ), os.path.join( self.pkl_dir, 'calibration', cal_mjd_file )

        if os.path.isfile( conv_abs_path ):
            if self.verbose:
                print( "Loading previously saved conversion factor data..." )
            with open( conv_abs_path, "rb" ) as pickle_in:
                conversion_factors = pickle.load( pickle_in )
            with open( cal_abs_path, "rb" ) as pickle_in:
                cal_mjds = pickle.load( pickle_in )
        else:
            if self.verbose:
                print( "Making new conversion factor list..." )

            conversion_factors = []
            cal_mjds = []
            for e in self.get_closest_contfile():
                conversion_factors.append( self.calculate_Jy_per_count( e ) )
                cal_mjds.append( e[3] )

            conversion_factors = np.array( conversion_factors )

            if self.verbose:
                print( "Saving as {}".format( conv_file ) )

            with open( conv_abs_path, "wb" ) as pickle_out:
                pickle.dump( conversion_factors, pickle_out )
            with open( cal_abs_path, "wb" ) as pickle_out:
                pickle.dump( cal_mjds, pickle_out )


        if not isinstance( conversion_factors, np.ndarray ):
            conversion_factors = np.array( conversion_factors )
        if not isinstance( cal_mjds, np.ndarray ):
            cal_mjds = np.array( cal_mjds )

        if self.verbose:
            print( conversion_factors )

        counter = 0

        for directory in self.dirs:
            for psr_file in sorted( os.listdir( directory ) ):
                try:
                    hdul, psr_mjd, psr_fe, obs_num, obs_mode = self.hdul_setup( directory, psr_file, False )
                    if self.verbose:
                        print( "Opening {}".format( psr_file ) )
                except OSError:
                    if self.verbose:
                        print( "Couldn't open {}".format( psr_file ) )
                    continue

                if obs_mode != "PSR":
                    continue

                ar = Archive( os.path.join( directory, psr_file ), verbose = self.verbose )
                data = ar.data_orig
                new_data = []
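                # Convert each subintegration's polarisation products to
                # AABBCRCI and keep only the two intensity terms, A and B.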
                for sub in data:
                    A, B, C, D = self.convert_subint_pol_state( sub, ar.subintheader[ 'POL_TYPE' ], "AABBCRCI", linear = ar.header[ 'FD_POLN' ] )
                    new_data.append( [ A, B ] )

                new_data = np.array( new_data )


                # Advance through the calibration MJDs until one matches this
                # observation's MJD; the else clause runs only when a match is found.
                while psr_mjd != cal_mjds[ counter ]:
                    if self.verbose:
                        print( psr_mjd, cal_mjds[ counter ] )
                    counter += 1
                    if counter >= len( conversion_factors ):
                        # No matching calibration MJD; reset for the next file.
                        counter = 0
                        break
                else:
                    # Scale each subintegration in place; simply rebinding the
                    # loop variable would leave new_data unchanged.
                    for i, sub in enumerate( new_data ):
                        new_data[ i ] = conversion_factors[ counter ] * sub
                    counter = 0

        return self
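
The scaling at the end of calibrate is just a broadcast multiply over the data cube. Below is a minimal standalone sketch of that step, with illustrative shapes and a made-up Jy-per-count factor (neither is taken from the pipeline):

import numpy as np

# Illustrative dimensions: 4 subints, 2 polarisation products (A, B),
# 8 channels, 256 phase bins
new_data = np.random.rand(4, 2, 8, 256)

# Hypothetical Jy-per-count factor; in the pipeline this comes from the
# continuum calibration scan whose MJD matches the observation
jy_per_count = 0.137

# Broadcasting scales every profile at once; note that rebinding a loop
# variable ('sub = factor * sub') would leave new_data unchanged
calibrated = jy_per_count * new_data

assert calibrated.shape == new_data.shape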