Example #1
    def adjust_fold_frequency(self, phasebins, profs=None, shiftsubs=False):
        """
        adjust_fold_frequency(phasebins, profs=None, shiftsubs=False):
            Linearly shift the intervals by phasebins over the course of
                the observation in order to change the apparent folding
                frequency.  Return a 2D array containing the de-dispersed
                profiles as a function of time (i.e. shape = (npart, proflen)),
                and the reduced chi^2 of the resulting summed profile.
                If profs is not None, then use profs instead of self.profs.
                If shiftsubs is not False, then actually correct the subbands
                instead of a 2D projection of them.
        """
        if not self.__dict__.has_key('subdelays'):
            print "Dedispersing first..."
            self.dedisperse()
        if shiftsubs:
            print "Shifting all the subbands..."
            if profs is None:
                profs = self.profs
            for ii in range(self.npart):
                bins_to_shift = int(round(float(ii)/self.npart * phasebins))
                for jj in range(self.nsub):
                    profs[ii,jj] = psr_utils.rotate(profs[ii,jj], bins_to_shift)
            redchi = self.calc_redchi2(prof=profs.sum(0).sum(0))
        else:
            print "Shifting just the projected intervals (not individual subbands)..."
            if profs is None:
                profs = self.profs.sum(1)
            for ii in range(self.npart):
                bins_to_shift = int(round(float(ii)/self.npart * phasebins))
                profs[ii] = psr_utils.rotate(profs[ii], bins_to_shift)
            redchi = self.calc_redchi2(prof=profs.sum(0))
        print "New reduced-chi^2 =", redchi
        return profs, redchi
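All of these examples rely on psr_utils.rotate, which in PRESTO returns a copy of a 1-D array rotated to the left by an integer number of bins. As a minimal sketch of that convention (rotate_left below is an illustrative stand-in, not PRESTO's implementation), a left rotation by n bins is the same as numpy.roll with a negated shift:

    import numpy as np

    def rotate_left(arr, bins):
        # Illustrative stand-in for psr_utils.rotate: rotate 'arr' left by 'bins' places
        bins = int(bins) % len(arr)
        return np.concatenate((arr[bins:], arr[:bins]))

    prof = np.array([0., 1., 4., 1., 0., 0., 0., 0.])
    assert np.array_equal(rotate_left(prof, 2), np.roll(prof, -2))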
Example #2
    def adjust_fold_frequency(self, phasebins, profs=None, shiftsubs=False):
        """
        adjust_fold_frequency(phasebins, profs=None, shiftsubs=False):
            Linearly shift the intervals by phasebins over the course of
                the observation in order to change the apparent folding
                frequency.  Return a 2D array containing the de-dispersed
                profiles as a function of time (i.e. shape = (npart, proflen)),
                and the reduced chi^2 of the resulting summed profile.
                If profs is not None, then use profs instead of self.profs.
                If shiftsubs is not False, then actually correct the subbands
                instead of a 2D projection of them.
        """
        if not self.__dict__.has_key('subdelays'):
            print "Dedispersing first..."
            self.dedisperse()
        if shiftsubs:
            print "Shifting all the subbands..."
            if profs is None:
                profs = self.profs
            for ii in range(self.npart):
                bins_to_shift = int(round(float(ii)/self.npart * phasebins))
                for jj in range(self.nsub):
                    profs[ii,jj] = psr_utils.rotate(profs[ii,jj], bins_to_shift)
            redchi = self.calc_redchi2(prof=profs.sum(0).sum(0))
        else:
            print "Shifting just the projected intervals (not individual subbands)..."
            if profs is None:
                profs = self.profs.sum(1)
            for ii in range(self.npart):
                bins_to_shift = int(round(float(ii)/self.npart * phasebins))
                profs[ii] = psr_utils.rotate(profs[ii], bins_to_shift)
            redchi = self.calc_redchi2(prof=profs.sum(0))
        print "New reduced-chi^2 =", redchi
        return profs, redchi
Example #3
    def _compute_rating(self, cand):
        """Return a rating for the candidate. The rating value is the
            the fraction of sub-ints that deviate from the phase of
            the pulse.

            Input:
                cand: A Candidate object to rate.

            Output:
                value: The rating value.
        """
        tvph = cand.get_from_cache('time_vs_phase')
        pfd = cand.get_from_cache('pfd')

        bestprof = tvph.get_profile()
        new_template = np.zeros_like(bestprof)
        bin_offsets = np.empty(pfd.npart)

        # The following loop creates a better template by removing wiggle, but
        # it does not change the actual subints
        for ii, subint in enumerate(tvph.data):
            # Measure the phase offset
            phase_offset = psr_utils.measure_phase_corr(subint, bestprof)
            # The following is needed to put phase offsets on the interval
            # (-0.5,0.5]
            if phase_offset > 0.5: 
                phase_offset -= 1.0
            # Calculate the offset in bins
            bin_offset = int(round(pfd.proflen*phase_offset))
            # Update the new template
            new_template += psr_utils.rotate(subint, -bin_offset)

        # Now calculate the wiggle using the updated template
        for ii, subint in enumerate(tvph.data):
            phase_offset = psr_utils.measure_phase_corr(subint, new_template)
            if phase_offset > 0.5:
                phase_offset -= 1.0
            bin_offsets[ii] = int(round(pfd.proflen*phase_offset))

        # Calculate the various metrics
        if method == "GOODFRAC":
            # good fraction 
            wigglescore = sum(abs(bin_offsets) < TOL*pfd.proflen)/ \
                        float(pfd.npart)
        elif method == "WANDER":
            # total wander
            wigglescore = sum(abs(bin_offsets))/(pfd.proflen*pfd.npart)
        elif method == "OFFSTD":
            # offset std
            wigglescore = bin_offsets.std()
        elif method == "OFFMAX":
            # offset max
            wigglescore = bin_offsets.max() 
        else:
            raise utils.RatingError("Unrecognized method for wiggle " \
                                    "rating (%s)" % method)

        return wigglescore
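Note that `method` and `TOL` are module-level settings of the rating code, not shown in this snippet. A toy sketch of how the GOODFRAC and WANDER scores respond to a set of per-subint bin offsets (all numbers below are made up for illustration):

    import numpy as np

    bin_offsets = np.array([0., 1., -2., 0., 15., -1.])  # offsets in bins, one per sub-integration
    proflen, npart = 64, len(bin_offsets)                 # assumed profile length and subint count
    TOL = 0.1                                             # assumed tolerance, as a fraction of a turn

    goodfrac = np.sum(np.abs(bin_offsets) < TOL * proflen) / float(npart)  # fraction of aligned subints
    wander = np.sum(np.abs(bin_offsets)) / (proflen * npart)               # mean drift in turns of phase
    # goodfrac -> 5/6 here, wander -> 19/384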
Example #4
    def adjust_period(self, p=None, pd=None, pdd=None):
        """
        adjust_period(p=*currp*, pd=*currpd*, pdd=*currpdd*):
            Rotate (internally) the profiles so that they are adjusted to
                the given period and period derivatives.
        """
        if p is None:
            p = self.curr_p
        if pd is None:
            pd = self.curr_pd
        if pdd is None:
            pdd = self.curr_pdd

        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        ref_p, ref_pd, ref_pdd = psr_utils.p_to_f(self.ref_f, \
                                                  self.ref_fd, \
                                                  self.ref_fdd)

        #print "DEBUG: in dataproducts.py -- ref_p, ref_pd, pdd", ref_p, ref_pd, pdd
        fdd = psr_utils.p_to_f(ref_p, ref_pd, pdd)[2]
        fd = psr_utils.p_to_f(ref_p, pd)[1]
        f = 1.0 / p

        f_diff = f - self.ref_f
        fd_diff = fd - self.ref_fd
        if pdd != 0.0:
            fdd_diff = fdd - self.ref_fdd
        else:
            fdd_diff = 0.0
        #print "DEBUG: in dataproducts.py -- self.ref_f, self.ref_fd, self.ref_fdd", self.ref_f, self.ref_fd, self.ref_fdd
        #print "DEBUG: in dataproducts.py -- f, fd, fdd", f, fd, fdd
        #print "DEBUG: in dataproducts.py -- f_diff, fd_diff, fdd_diff", f_diff, fd_diff, fdd_diff
        #print "DEBUG: in dataproducts.py -- parttimes", parttimes
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff, \
                                                parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = np.fmod(delays * self.nbin, self.nbin) - self.pdelays_bins
        new_pdelays_bins = np.floor(bin_delays + 0.5)

        # Rotate subintegrations
        for ii in range(self.nsubint):
            tmp_prof = self.data[ii, :]
            # The number of bins to shift is negated because the delays were
            # calculated assuming +ve means shift-to-right, whereas
            # psr_utils.rotate treats +ve as shift-to-left
            self.data[ii,:] = psr_utils.rotate(tmp_prof, \
                                            -new_pdelays_bins[ii])

        # Save new p, pd, pdd
        self.curr_p, self.curr_pd, self.curr_pdd = p, pd, pdd
        self.pdelays_bins += new_pdelays_bins
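The phase-to-bin conversion that adjust_period performs (wrap the delay into one turn, subtract the rotation already applied, round to the nearest bin) can be sketched on its own; the values below are purely illustrative:

    import numpy as np

    nbin = 128
    delays = np.array([0.00, 0.26, 0.51])    # accumulated delay of each subint, in turns of phase
    pdelays_bins = np.zeros(3)               # rotation already applied to each subint, in bins

    bin_delays = np.fmod(delays * nbin, nbin) - pdelays_bins
    new_pdelays_bins = np.floor(bin_delays + 0.5)   # nearest-bin rounding, as above
    # A positive delay means the pulse arrives late, so each subint is rotated by
    # -new_pdelays_bins[ii], since rotate() treats a positive shift as a shift to the left.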
Example #5
    def adjust_period(self, p=None, pd=None, pdd=None):
        """
        adjust_period(p=*currp*, pd=*currpd*, pdd=*currpdd*):
            Rotate (internally) the profiles so that they are adjusted to
                the given period and period derivatives.
        """
        if p is None:
            p = self.curr_p
        if pd is None:
            pd = self.curr_pd
        if pdd is None:
            pdd = self.curr_pdd
        
        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        ref_p, ref_pd, ref_pdd = psr_utils.p_to_f(self.ref_f, \
                                                  self.ref_fd, \
                                                  self.ref_fdd)
        
        #print "DEBUG: in dataproducts.py -- ref_p, ref_pd, pdd", ref_p, ref_pd, pdd
        fdd = psr_utils.p_to_f(ref_p, ref_pd, pdd)[2]
        fd = psr_utils.p_to_f(ref_p, pd)[1]
        f = 1.0/p
        
        f_diff = f - self.ref_f
        fd_diff = fd - self.ref_fd
        if pdd != 0.0:
            fdd_diff = fdd - self.ref_fdd
        else:
            fdd_diff = 0.0
        #print "DEBUG: in dataproducts.py -- self.ref_f, self.ref_fd, self.ref_fdd", self.ref_f, self.ref_fd, self.ref_fdd
        #print "DEBUG: in dataproducts.py -- f, fd, fdd", f, fd, fdd
        #print "DEBUG: in dataproducts.py -- f_diff, fd_diff, fdd_diff", f_diff, fd_diff, fdd_diff
        #print "DEBUG: in dataproducts.py -- parttimes", parttimes
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff, \
                                                parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = np.fmod(delays * self.nbin, self.nbin) - self.pdelays_bins
        new_pdelays_bins = np.floor(bin_delays+0.5)

        # Rotate subintegrations
        for ii in range(self.nsubint):
            tmp_prof = self.data[ii,:]
            # The number of bins to shift is negated because the delays were
            # calculated assuming +ve means shift-to-right, whereas
            # psr_utils.rotate treats +ve as shift-to-left
            self.data[ii,:] = psr_utils.rotate(tmp_prof, \
                                            -new_pdelays_bins[ii])
        
        # Save new p, pd, pdd
        self.curr_p, self.curr_pd, self.curr_pdd = p, pd, pdd
        self.pdelays_bins += new_pdelays_bins
Example #6
    def _compute_rating(self, cand):
        """Return a rating for the candidate. The rating value is the
            the fraction of sub-ints that deviate from the phase of
            the pulse.

            Input:
                cand: A Candidate object to rate.

            Output:
                value: The rating value.
        """
        tvph = cand.get_from_cache('time_vs_phase')
        pfd = cand.get_from_cache('pfd')

        bestprof = tvph.get_profile()
        new_template = np.zeros_like(bestprof)
        bin_offsets = np.empty(pfd.npart)

        # The following loop creates a better template by removing wiggle, but
        # it does not change the actual subints
        for ii, subint in enumerate(tvph.data):
            # Measure the phase offset
            phase_offset = psr_utils.measure_phase_corr(subint, bestprof)
            # The following is needed to put phase offsets on the interval
            # (-0.5,0.5]
            if phase_offset > 0.5:
                phase_offset -= 1.0
            # Calculate the offset in bins
            bin_offset = int(round(pfd.proflen * phase_offset))
            # Update the new template
            new_template += psr_utils.rotate(subint, -bin_offset)

        # Now calculate the wiggle using the updated template
        for ii, subint in enumerate(tvph.data):
            phase_offset = psr_utils.measure_phase_corr(subint, new_template)
            if phase_offset > 0.5:
                phase_offset -= 1.0
            bin_offsets[ii] = int(round(pfd.proflen * phase_offset))

        # Calculate the various metrics
        if method == "GOODFRAC":
            # good fraction
            wigglescore = sum(abs(bin_offsets) < TOL*pfd.proflen)/ \
                        float(pfd.npart)
        elif method == "WANDER":
            # total wander
            wigglescore = sum(abs(bin_offsets)) / (pfd.proflen * pfd.npart)
        elif method == "OFFSTD":
            # offset std
            wigglescore = bin_offsets.std()
        elif method == "OFFMAX":
            # offset max
            wigglescore = bin_offsets.max()
        else:
            raise utils.RatingError("Unrecognized method for wiggle " \
                                    "rating (%s)" % method)

        return wigglescore
Example #7
    def _compute_rating(self, cand):
        """Return a rating for the candidate. The rating value is the
            the fraction of subbands that deviate from the position of
            the pulse.

            Input:
                cand: A SPCandidate object to rate.

            Output:
                value: The rating value.
        """
        wdd = cand.waterfall_dd
        spd = cand.spd

        prof = wdd.get_profile()
        new_template = np.zeros_like(prof)
        bin_offsets = np.empty(spd.waterfall_nsubs)

        # The following loop creates a better template by removing wiggle, but
        # it does not change the actual subbands
        for ii, subband in enumerate(wdd.data):
            # Measure the pulse offset
            pulse_offset = psr_utils.measure_phase_corr(subband, prof, zoom=1)
            # Calculate the offset in bins
            bin_offset = int(round(spd.waterfall_nbins * pulse_offset))
            # Update the new template
            new_template += psr_utils.rotate(subband, -bin_offset)

        # Now calculate the wiggle using the updated template
        for ii, subband in enumerate(wdd.data):
            pulse_offset = psr_utils.measure_phase_corr(subband,
                                                        new_template,
                                                        zoom=1)
            bin_offsets[ii] = int(round(spd.waterfall_nbins * pulse_offset))

        # Calculate the various metrics
        if method == "GOODFRAC":
            # good fraction
            wigglescore = sum(abs(bin_offsets) < TOL*spd.waterfall_nbins)/ \
                        float(spd.waterfall_nsubs)
        elif method == "WANDER":
            # total wander
            wigglescore = sum(
                abs(bin_offsets)) / (spd.waterfall_nbins * spd.waterfall_nsubs)
        elif method == "OFFSTD":
            # offset std
            wigglescore = bin_offsets.std()
        elif method == "OFFMAX":
            # offset max
            wigglescore = bin_offsets.max()
        else:
            raise utils.RatingError("Unrecognized method for wiggle " \
                                    "rating (%s)" % method)

        return wigglescore
Example #8
    def _compute_rating(self, cand):
        """Return a rating for the candidate. The rating value is the
            the fraction of subbands that deviate from the position of
            the pulse.

            Input:
                cand: A SPCandidate object to rate.

            Output:
                value: The rating value.
        """
        wdd = cand.waterfall_dd
        spd = cand.spd

        prof = wdd.get_profile()
        new_template = np.zeros_like(prof)
        bin_offsets = np.empty(spd.waterfall_nsubs)

        # The following loop creates a better template by removing wiggle, but
        # it does not change the actual subbands
        for ii, subband in enumerate(wdd.data):
            # Measure the pulse offset
            pulse_offset = psr_utils.measure_phase_corr(subband, prof, zoom=1)
            # Calculate the offset in bins
            bin_offset = int(round(spd.waterfall_nbins*pulse_offset))
            # Update the new template
            new_template += psr_utils.rotate(subband, -bin_offset)

        # Now calculate the wiggle using the updated template
        for ii, subband in enumerate(wdd.data):
            pulse_offset = psr_utils.measure_phase_corr(subband, new_template, zoom=1)
            bin_offsets[ii] = int(round(spd.waterfall_nbins*pulse_offset))

        # Calculate the various metrics
        if method == "GOODFRAC":
            # good fraction 
            wigglescore = sum(abs(bin_offsets) < TOL*spd.waterfall_nbins)/ \
                        float(spd.waterfall_nsubs)
        elif method == "WANDER":
            # total wander
            wigglescore = sum(abs(bin_offsets))/(spd.waterfall_nbins*spd.waterfall_nsubs)
        elif method == "OFFSTD":
            # offset std
            wigglescore = bin_offsets.std()
        elif method == "OFFMAX":
            # offset max
            wigglescore = bin_offsets.max() 
        else:
            raise utils.RatingError("Unrecognized method for wiggle " \
                                    "rating (%s)" % method)

        return wigglescore
Example #9
    def __init__(self, pfd, sefd=None, verbose=True):
        """Return an observation object for the given pfd file.
    
        Inputs: 
            pfd: a pfd object
            sefd: the system-equivalent flux density of the observation (in Jy)
            verbose: if True, print extra information (Default: True)

        Output:
            obs: The Observation object            
        """
        self.sefd = sefd
        self.p = pfd
        self.fn = self.p.pfd_filename
        self.snr = None
        self.smean = None
        self.verbose = verbose
        self.notes = []

        self.p.dedisperse(doppler=True)
        self.p.adjust_period()
        if self.p.bestprof:
            prof = self.p.bestprof.profile
        else:
            prof = self.p.sumprof
        self.proflen = len(prof)
        self.nbin = len(prof)
        imax = np.argmax(prof)
        self.nrot = (imax - len(prof) / 2) % len(prof)
        if self.verbose:
            print "Profile maximum at bin %d. Rotating by %d bins." % (
                imax, self.nrot)
        self.prof = np.asarray(psr_utils.rotate(prof, self.nrot))

        self.region_start = None
        self.region_start_line = None
        self.regions = []

        # Plot
        self.fig = plt.gcf()
        self.ax = plt.gca()
        plt.plot(self.prof, 'k-', drawstyle='steps-post')

        # Set up triggers
        self.cid_mouseclick = self.fig.canvas.mpl_connect('button_press_event', \
                                                            self.mousepress)
        self.cid_pick = self.fig.canvas.mpl_connect('pick_event', self.onpick)
        self.cid_keypress = self.fig.canvas.mpl_connect('key_press_event', \
                                                            self.keypress)
Example #10
 def estimate_offsignal_redchi2(self, numtrials=20):
     """
     estimate_offsignal_redchi2():
         Estimate the off-signal reduced chi^2 by randomly shifting
             and summing all of the component profiles.
     """
     redchi2s = []
     for count in range(numtrials):
         prof = Num.zeros(self.proflen, dtype='d')
         for ii in range(self.npart):
             for jj in range(self.nsub):
                 tmpprof = copy.copy(self.profs[ii][jj])
                 prof += psr_utils.rotate(tmpprof, random.randrange(0,self.proflen))
         redchi2s.append(self.calc_redchi2(prof=prof))
     return Num.mean(redchi2s)
Example #11
 def estimate_offsignal_redchi2(self, numtrials=20):
     """
     estimate_offsignal_redchi2():
         Estimate the off-signal reduced chi^2 by randomly shifting
             and summing all of the component profiles.
     """
     redchi2s = []
     for count in range(numtrials):
         prof = Num.zeros(self.proflen, dtype='d')
         for ii in range(self.npart):
             for jj in range(self.nsub):
                 tmpprof = copy.copy(self.profs[ii][jj])
                 prof += psr_utils.rotate(tmpprof, random.randrange(0,self.proflen))
         redchi2s.append(self.calc_redchi2(prof=prof))
     return Num.mean(redchi2s)
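The logic of estimate_offsignal_redchi2 is that randomly rotating every component profile before summing destroys their phase alignment, so the sum behaves like an off-signal profile. A self-contained sketch of the scrambling step, with a toy data cube and np.roll standing in for psr_utils.rotate:

    import numpy as np

    rng = np.random.default_rng(42)
    npart, nsub, proflen = 8, 4, 32
    profs = rng.normal(size=(npart, nsub, proflen))   # toy stand-in for self.profs

    scrambled = np.zeros(proflen)
    for ii in range(npart):
        for jj in range(nsub):
            shift = rng.integers(proflen)
            scrambled += np.roll(profs[ii, jj], -shift)   # np.roll(x, -n) rotates left by n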
Example #12
    def __init__(self, pfd, sefd=None, verbose=True):
        """Return an observation object for the given pfd file.
    
        Inputs: 
            pfd: a pfd object
            sefd: the system-equivalent flux density of the observation (in Jy)
            verbose: if True, print extra information (Default: True)

        Output:
            obs: The Observation object            
        """
        self.sefd = sefd
        self.p = pfd 
        self.fn = self.p.pfd_filename
        self.snr = None
        self.smean = None
        self.verbose = verbose
        self.notes = []
        
        self.p.dedisperse(doppler=True)
        self.p.adjust_period()
        if self.p.bestprof:
            prof = self.p.bestprof.profile
        else:
            prof = self.p.sumprof
        self.proflen = len(prof)
        self.nbin = len(prof)
        imax = np.argmax(prof)
        self.nrot = (imax-len(prof)/2) % len(prof)
        if self.verbose:
            print "Profile maximum at bin %d. Rotating by %d bins." % (imax, self.nrot)
        self.prof = np.asarray(psr_utils.rotate(prof, self.nrot))
        
        self.region_start = None
        self.region_start_line = None
        self.regions = []
        
        # Plot
        self.fig = plt.gcf()
        self.ax = plt.gca()
        plt.plot(self.prof, 'k-', drawstyle='steps-post')
        
        # Set up triggers
        self.cid_mouseclick = self.fig.canvas.mpl_connect('button_press_event', \
                                                            self.mousepress)
        self.cid_pick = self.fig.canvas.mpl_connect('pick_event', self.onpick)
        self.cid_keypress = self.fig.canvas.mpl_connect('key_press_event', \
                                                            self.keypress)
Example #13
 def getDMcurve(M):  # return the normalized DM curve downsampled to M points
     feature = '%s:%s' % ('DMbins', M)
     if M == 0:
         return np.array([])
     if not feature in self.extracted_feature:
         ddm = (self.dms.max() - self.dms.min()) / 2.
         loDM, hiDM = (self.bestdm - ddm, self.bestdm + ddm)
         loDM = max((0, loDM))  # make sure loDM is cut off at 0 DM
         hiDM = max((ddm, hiDM))  # make sure hiDM is at least ddm
         N = 100
         interp = False
         sumprofs = self.profs.sum(0)
         if not interp:
             profs = sumprofs
         else:
             profs = np.zeros(np.shape(sumprofs), dtype='d')
         DMs = psr_utils.span(loDM, hiDM, N)
         chis = np.zeros(N, dtype='f')
         subdelays_bins = self.subdelays_bins.copy()
         for ii, DM in enumerate(DMs):
             subdelays = psr_utils.delay_from_DM(DM, self.barysubfreqs)
             hifreqdelay = subdelays[-1]
             subdelays = subdelays - hifreqdelay
             delaybins = subdelays * self.binspersec - subdelays_bins
             if interp:
                 interp_factor = 16
                 for jj in range(self.nsub):
                     profs[jj] = psr_utils.interp_rotate(
                         sumprofs[jj],
                         delaybins[jj],
                         zoomfact=interp_factor)
                 # Note: Since the interpolation process slightly changes the values of the
                 # profs, we need to re-calculate the average profile value
                 avgprof = (profs / self.proflen).sum()
             else:
                 new_subdelays_bins = np.floor(delaybins + 0.5)
                 for jj in range(self.nsub):
                     profs[jj] = psr_utils.rotate(
                         profs[jj], int(new_subdelays_bins[jj]))
                 subdelays_bins += new_subdelays_bins
                 avgprof = self.avgprof
             sumprof = profs.sum(0)
             chis[ii] = self.calc_redchi2(prof=sumprof, avg=avgprof)
         DMcurve = normalize(downsample(chis, M))
         self.extracted_feature[feature] = DMcurve
     return self.extracted_feature[feature]
Example #14
 def plot_chi2_vs_DM(self, loDM, hiDM, N=100, interp=0, device='/xwin'):
     """
     plot_chi2_vs_DM(self, loDM, hiDM, N=100, interp=0, device='/xwin'):
         Plot (and return) an array showing the reduced-chi^2 versus
             DM (N DMs spanning loDM-hiDM).  Use sinc_interpolation
             if 'interp' is non-zero.
     """
     # Sum the profiles in time
     sumprofs = self.profs.sum(0)
     if not interp:
         profs = sumprofs
     else:
         profs = Num.zeros(Num.shape(sumprofs), dtype='d')
     DMs = psr_utils.span(loDM, hiDM, N)
     chis = Num.zeros(N, dtype='f')
     subdelays_bins = self.subdelays_bins.copy()
     for ii, DM in enumerate(DMs):
         subdelays = psr_utils.delay_from_DM(DM, self.barysubfreqs)
         hifreqdelay = subdelays[-1]
         subdelays = subdelays - hifreqdelay
         delaybins = subdelays * self.binspersec - subdelays_bins
         if interp:
             interp_factor = 16
             for jj in range(self.nsub):
                 profs[jj] = psr_utils.interp_rotate(sumprofs[jj],
                                                     delaybins[jj],
                                                     zoomfact=interp_factor)
             # Note: Since the interpolation process slightly changes the values of the
             # profs, we need to re-calculate the average profile value
             avgprof = (profs / self.proflen).sum()
         else:
             new_subdelays_bins = Num.floor(delaybins + 0.5)
             for jj in range(self.nsub):
                 profs[jj] = psr_utils.rotate(profs[jj],
                                              int(new_subdelays_bins[jj]))
             subdelays_bins += new_subdelays_bins
             avgprof = self.avgprof
         sumprof = profs.sum(0)
         chis[ii] = self.calc_redchi2(prof=sumprof, avg=avgprof)
     # Now plot it
     Pgplot.plotxy(chis,
                   DMs,
                   labx="DM",
                   laby="Reduced-\gx\u2\d",
                   device=device)
     return (chis, DMs)
Example #15
    def dedisperse(self, DM):
        """
        dedisperse(DM):
            Rotate (internally) the profiles so that they are de-dispersed
                at a dispersion measure of DM.
        """
        new_subdelays_bins = self.get_delaybins(DM) - \
                                self.subdelays_bins

        #print "DEBUG: in dataproducts -- DM, self.curr_dm, new_subdelays_bins:", DM, self.curr_dm, new_subdelays_bins
        #print "DEBUG: in dataproducts -- DM, self.get_delaybins(self.curr_dm)-self.subdelays_bins:", DM, self.curr_dm, self.get_delaybins(self.curr_dm)-self.subdelays_bins
        for ii in range(self.nchan):
            tmp_prof = self.data[ii, :]
            self.data[ii,:] = psr_utils.rotate(tmp_prof, \
                                            new_subdelays_bins[ii])
        self.curr_dm = DM
        self.subdelays_bins += new_subdelays_bins
Example #16
 def dedisperse(self, DM):
     """
      dedisperse(DM):
         Rotate (internally) the profiles so that they are de-dispersed
             at a dispersion measure of DM.
     """
     new_subdelays_bins = self.get_delaybins(DM) - \
                             self.subdelays_bins
     
     #print "DEBUG: in dataproducts -- DM, self.curr_dm, new_subdelays_bins:", DM, self.curr_dm, new_subdelays_bins
     #print "DEBUG: in dataproducts -- DM, self.get_delaybins(self.curr_dm)-self.subdelays_bins:", DM, self.curr_dm, self.get_delaybins(self.curr_dm)-self.subdelays_bins
     for ii in range(self.nchan):
         tmp_prof = self.data[ii,:]
         self.data[ii,:] = psr_utils.rotate(tmp_prof, \
                                         new_subdelays_bins[ii])
     self.curr_dm = DM
     self.subdelays_bins += new_subdelays_bins
Example #17
 def getDMcurve(M): # return the normalized DM curve downsampled to M points
     feature = '%s:%s' % ('DMbins', M)
     if M == 0:
         return np.array([])
     if not feature in self.extracted_feature:
         ddm = (self.dms.max() - self.dms.min())/2.
         loDM, hiDM = (self.bestdm - ddm , self.bestdm + ddm)
         loDM = max((0, loDM)) # make sure loDM is cut off at 0 DM
         hiDM = max((ddm, hiDM)) # make sure hiDM is at least ddm
         N = 100
         interp = False
         sumprofs = self.profs.sum(0)
         if not interp:
             profs = sumprofs
         else:
             profs = np.zeros(np.shape(sumprofs), dtype='d')
         DMs = psr_utils.span(loDM, hiDM, N)
         chis = np.zeros(N, dtype='f')
         subdelays_bins = self.subdelays_bins.copy()
         for ii, DM in enumerate(DMs):
             subdelays = psr_utils.delay_from_DM(DM, self.barysubfreqs)
             hifreqdelay = subdelays[-1]
             subdelays = subdelays - hifreqdelay
             delaybins = subdelays*self.binspersec - subdelays_bins
             if interp:
                 interp_factor = 16
                 for jj in range(self.nsub):
                     profs[jj] = psr_utils.interp_rotate(sumprofs[jj], delaybins[jj],
                                                         zoomfact=interp_factor)
                 # Note: Since the interpolation process slightly changes the values of the
                 # profs, we need to re-calculate the average profile value
                 avgprof = (profs/self.proflen).sum()
             else:
                 new_subdelays_bins = np.floor(delaybins+0.5)
                 for jj in range(self.nsub):
                     profs[jj] = psr_utils.rotate(profs[jj], int(new_subdelays_bins[jj]))
                 subdelays_bins += new_subdelays_bins
                 avgprof = self.avgprof
             sumprof = profs.sum(0)
             chis[ii] = self.calc_redchi2(prof=sumprof, avg=avgprof)
         DMcurve = normalize(downsample(chis, M))
         self.extracted_feature[feature] = DMcurve
     return self.extracted_feature[feature]
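The subband delays in these DM loops come from the cold-plasma dispersion law. A hedged sketch of the conversion from DM and frequency to a delay, and then to profile bins (using the conventional dispersion constant; PRESTO's delay_from_DM may differ slightly in the last digits, and the frequencies and fold period below are invented):

    import numpy as np

    def dispersion_delay(dm, freq_mhz):
        # Delay in seconds at freq_mhz (MHz) for dispersion measure dm (pc cm^-3),
        # relative to infinite frequency, with k_DM ~ 4.149e3 s MHz^2 pc^-1 cm^3
        return 4.149e3 * dm / freq_mhz**2

    subfreqs = np.linspace(1300.0, 1500.0, 16)    # assumed subband centre frequencies (MHz)
    subdelays = dispersion_delay(26.8, subfreqs)
    subdelays -= subdelays[-1]                    # reference to the highest-frequency subband
    binspersec = 64 / 0.00575                     # assumed proflen / fold period
    delaybins = subdelays * binspersec            # delays expressed in profile bins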
Example #18
    def shift_channels(self, bins, padval=0):
        """Shift each channel to the left by the corresponding 
            value in bins, an array.

            Inputs:
                bins: An array containing the number of bins
                    to shift each channel by.
                padval: Value to use when shifting near the edge
                    of a channel. This can be a numeric value,
                    'median', 'mean', or 'rotate'. 
                    
                    The values 'median' and 'mean' refer to the 
                    median and mean of the channel. The value 
                    'rotate' takes values from one end of the 
                    channel and shifts them to the other.

            Outputs:
                None

            *** Shifting happens in-place ***
        """
        assert self.numchans == len(bins)
        for ii in range(self.numchans):
            chan = self.get_chan(ii)
            # Use 'chan[:]' so the update happens in-place;
            # this way the change affects self.data
            chan[:] = psr_utils.rotate(chan, bins[ii])
            if padval != 'rotate':
                # Get padding value
                if padval == 'mean':
                    pad = np.mean(chan)
                elif padval == 'median':
                    pad = np.median(chan)
                else:
                    pad = padval

                # Replace rotated values with padval
                if bins[ii] > 0:
                    chan[-bins[ii]:] = pad
                elif bins[ii] < 0:
                    chan[:-bins[ii]] = pad
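A compact picture of what shift_channels does to a single channel, with np.roll (negated shift) standing in for psr_utils.rotate and the same left-shift-positive convention assumed:

    import numpy as np

    chan = np.arange(8, dtype=float)
    bins, padval = 3, 0.0

    shifted = np.roll(chan, -bins)     # rotate left by 'bins'
    if bins > 0:
        shifted[-bins:] = padval       # samples wrapped to the end are replaced by the pad value
    elif bins < 0:
        shifted[:-bins] = padval       # samples wrapped to the start, for right shifts
    # shifted -> [3. 4. 5. 6. 7. 0. 0. 0.]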
Example #19
    def shift_channels(self, bins, padval=0):
        """Shift each channel to the left by the corresponding 
            value in bins, an array.

            Inputs:
                bins: An array containing the number of bins
                    to shift each channel by.
                padval: Value to use when shifting near the edge
                    of a channel. This can be a numeric value,
                    'median', 'mean', or 'rotate'. 
                    
                    The values 'median' and 'mean' refer to the 
                    median and mean of the channel. The value 
                    'rotate' takes values from one end of the 
                    channel and shifts them to the other.

            Outputs:
                None

            *** Shifting happens in-place ***
        """
        assert self.numchans == len(bins)
        for ii in range(self.numchans):
            chan = self.get_chan(ii)
            # Use 'chan[:]' so the update happens in-place;
            # this way the change affects self.data
            chan[:] = psr_utils.rotate(chan, bins[ii])
            if padval!='rotate':
                # Get padding value
                if padval=='mean':
                    pad = np.mean(chan)
                elif padval=='median':
                    pad = np.median(chan)
                else:
                    pad = padval
                
                # Replace rotated values with padval
                if bins[ii]>0:
                    chan[-bins[ii]:] = pad
                elif bins[ii]<0:
                    chan[:-bins[ii]] = pad
Example #20
 def plot_chi2_vs_DM(self, loDM, hiDM, N=100, interp=0, device='/xwin'):
     """
     plot_chi2_vs_DM(self, loDM, hiDM, N=100, interp=0, device='/xwin'):
         Plot (and return) an array showing the reduced-chi^2 versus
             DM (N DMs spanning loDM-hiDM).  Use sinc_interpolation
             if 'interp' is non-zero.
     """
     # Sum the profiles in time
     sumprofs = self.profs.sum(0)
     if not interp:
         profs = sumprofs
     else:
         profs = Num.zeros(Num.shape(sumprofs), dtype='d')
     DMs = psr_utils.span(loDM, hiDM, N)
     chis = Num.zeros(N, dtype='f')
     subdelays_bins = self.subdelays_bins.copy()
     for ii, DM in enumerate(DMs):
         subdelays = psr_utils.delay_from_DM(DM, self.barysubfreqs)
         hifreqdelay = subdelays[-1]
         subdelays = subdelays - hifreqdelay
         delaybins = subdelays*self.binspersec - subdelays_bins
         if interp:
             interp_factor = 16
             for jj in range(self.nsub):
                 profs[jj] = psr_utils.interp_rotate(sumprofs[jj], delaybins[jj],
                                                     zoomfact=interp_factor)
             # Note: Since the interpolation process slightly changes the values of the
             # profs, we need to re-calculate the average profile value
             avgprof = (profs/self.proflen).sum()
         else:
             new_subdelays_bins = Num.floor(delaybins+0.5)
             for jj in range(self.nsub):
                 profs[jj] = psr_utils.rotate(profs[jj], int(new_subdelays_bins[jj]))
             subdelays_bins += new_subdelays_bins
             avgprof = self.avgprof
         sumprof = profs.sum(0)
         chis[ii] = self.calc_redchi2(prof=sumprof, avg=avgprof)
     # Now plot it
     Pgplot.plotxy(chis, DMs, labx="DM", laby="Reduced-\gx\u2\d", device=device)
     return (chis, DMs)
Example #21
    def time_vs_phase(self, p=None, pd=None, pdd=None, interp=0):
        """
        time_vs_phase(p=*bestp*, pd=*bestpd*, pdd=*bestpdd*):
            Return the 2D time vs. phase profiles shifted so that
                the given period and period derivative are applied.
                Use FFT-based interpolation if 'interp' is non-zero 
                (NOTE: It is off by default!).

            Dedisperses the datacube, if necessary.
            Other than running self.dedisperse(), the datacube
                is not modified.
        """
        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        f_diff, fd_diff, fdd_diff = self.freq_offsets(p, pd, pdd)
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff, parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = Num.fmod(delays * self.proflen, self.proflen)

        # Rotate subintegrations
        subints = self.combine_profs(self.npart, 1)[:,0,:]
        for ii in range(self.npart):
            tmp_prof = subints[ii,:]
            # The number of bins to shift is negated because the delays were
            # calculated assuming +ve means shift-to-right, whereas
            # psr_utils.rotate treats +ve as shift-to-left
            if interp:
                subints[ii,:] = psr_utils.fft_rotate(tmp_prof, -bin_delays[ii])
            else:
                subints[ii,:] = psr_utils.rotate(tmp_prof, -Num.floor(bin_delays[ii]+0.5))
        return subints
Example #22
    def adjust_period(self, p=None, pd=None, pdd=None, interp=0):
        """
        adjust_period(p=*bestp*, pd=*bestpd*, pdd=*bestpdd*):
            Rotate (internally) the profiles so that they are adjusted to
                the given period and period derivatives.  By default,
                use the 'best' values as determined by prepfold's search.
                This should orient all of the profiles so that they are
                almost identical to what you see in a prepfold plot which
                used searching.  Use FFT-based interpolation if 'interp'
                is non-zero.  (NOTE: It is off by default, as in prepfold!)
        """
        if self.fold_pow == 1.0:
            bestp = self.bary_p1
            bestpd = self.bary_p2
            bestpdd = self.bary_p3
        else:
            bestp = self.topo_p1
            bestpd = self.topo_p2
            bestpdd = self.topo_p3
        if p is None:
            p = bestp
        if pd is None:
            pd = bestpd
        if pdd is None:
            pdd = bestpdd

        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        f_diff, fd_diff, fdd_diff = self.freq_offsets(p, pd, pdd)
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff, parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = Num.fmod(delays * self.proflen, self.proflen) - self.pdelays_bins
        if interp:
            new_pdelays_bins = bin_delays.astype(int)
        else:
            new_pdelays_bins = Num.floor(bin_delays+0.5).astype(int)

        # Rotate subintegrations
        for ii in range(self.nsub):
            for jj in range(self.npart):
                tmp_prof = self.profs[jj,ii,:]
                # The number of bins to shift is negated because the delays were
                # calculated assuming +ve means shift-to-right, whereas
                # psr_utils.rotate treats +ve as shift-to-left
                if interp:
                    self.profs[jj,ii] = psr_utils.fft_rotate(tmp_prof, -new_pdelays_bins[jj])
                else:
                    self.profs[jj,ii] = psr_utils.rotate(tmp_prof, \
                                            -new_pdelays_bins[jj])
        self.pdelays_bins += new_pdelays_bins
        if interp:
            # Note: Since the rotation process slightly changes the values of the
            # profs, we need to re-calculate the average profile value
            self.avgprof = (self.profs/self.proflen).sum()

        self.sumprof = self.profs.sum(0).sum(0)
        if Num.fabs((self.sumprof/self.proflen).sum() - self.avgprof) > 1.0:
            print "self.avgprof is not the correct value!"

        # Save current p, pd, pdd
        self.curr_p1, self.curr_p2, self.curr_p3 = p, pd, pdd
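When 'interp' is non-zero these methods call psr_utils.fft_rotate, which applies the Fourier shift theorem and therefore handles fractional bin shifts, while the plain rotate path has to round to whole bins. A minimal stand-in for that idea (not PRESTO's exact implementation):

    import numpy as np

    def fourier_rotate_left(arr, bins):
        # Rotate 'arr' left by a possibly fractional number of bins using the shift theorem
        n = len(arr)
        freqs = np.fft.rfftfreq(n)
        return np.fft.irfft(np.fft.rfft(arr) * np.exp(2j * np.pi * freqs * bins), n)

    prof = np.sin(2 * np.pi * np.arange(64) / 64.0)
    shifted = fourier_rotate_left(prof, 2.5)   # a fractional shift the integer rotate() would round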
Example #23
    sumprof *= Num.sqrt(current_pfd.DOF_corr()) / offpulse.std()
    print("\nSummed profile approx SNR = %.3f" % sum(sumprof))
    if SEFD:
        avg_S /= len(pfdfilenms)
        if pulsebins is None:
            SNR = sumprof.sum()  # integrate everything
        else:
            SNR = sumprof[pulsebins].sum()
        S = SEFD * SNR / Num.sqrt(2.0 * BW * Tpostrfi / numbins) / numbins
        print("     Approx sum profile flux density = %.3f mJy" % S)
        print("    Avg of individual flux densities = %.3f mJy" % avg_S)
        print("     Total (RFI cleaned) integration = %.0f s (%.2f hrs)" % \
              (Tpostrfi, Tpostrfi/3600.0))

    # Rotate the summed profile so that the max value is at the phase ~ 0.25 mark
    sumprof = psr_utils.rotate(sumprof, -len(sumprof) // 4)
    Pgplot.plotxy(sumprof,
                  Num.arange(numbins),
                  labx="Pulse Phase",
                  laby="Relative Flux")
    Pgplot.closeplot()

    print("\n Writing profile to '%s'..." % (outfilenm), end=' ')

    outfile = open(outfilenm, "w")
    for ii, val in enumerate(sumprof):
        outfile.write("%04d  %20.15g\n" % (ii, val))
    outfile.close()

    print("Done\n")
Example #24
def Phase_Wiggle_Ratings(pfd, method="GOODFRAC"):
    """
    Calculate a metric for the phase wiggle in time and frequency.

    Parameters
    ----------
    pfd : class
        An instance of the prepfold.pfd class
    method : string
        The method to use to calculate the phase wiggle metrics
        \"GOODFRAC\" - The fraction of sub-intervals/sub-bands with a wiggle
            less than some threshold
        \"WANDER\" - The total phase wander normalized by the number of
            profile bins and the number of sub-intervals/sub-bands

    Returns
    -------
    names : list
        A list of ratings names
    ratings : list
        A list of ratings values
    """
    # The ratings names
    name1 = "Phase_Wiggle_Time"
    name2 = "Phase_Wiggle_Freq"
    
    # De-disperse the profile at the best DM
    pfd.dedisperse(DM=pfd.bestdm, doppler=1)
    # Internally rotate the data cube so that it is aligned with the best fold values
    pfd.adjust_period()
    # Get the subints.
    subints      = N.sum(pfd.profs, axis=1)
    # Get the subbands.
    subbands     = N.sum(pfd.profs, axis=0)
    # Get the template ("best") profile
    template     = N.sum(subints, axis=0)
    # Make an array for storing a new template
    new_template = N.zeros_like(template)
    # Make an array for storing the offsets (in phase bins)
    bin_offsets_time  = N.empty(pfd.npart)
    bin_offsets_freq  = N.empty(pfd.npart)
    # The following loop creates a better template by removing wiggle, but
    # it does not change the actual subints
    for ii,subint in enumerate(subints):
        # Measure the phase offset
        phase_offset = PU.measure_phase_corr(subint, template)
        # The following is needed to put phase offsets on the interval
        # (-0.5,0.5]
        if phase_offset > 0.5: phase_offset -= 1.0
        # Calculate the offset in bins
        bin_offset    = int(round(pfd.proflen*phase_offset))
        # Update the new template
        new_template += PU.rotate(subint, -bin_offset)

    # Now calculate the wiggle using the updated template
    for ii,(subint,subband) in enumerate(zip(subints,subbands)):
        phase_offset_time = PU.measure_phase_corr(subint, new_template)
        if phase_offset_time > 0.5: phase_offset_time -= 1.0
        bin_offsets_time[ii] = int(round(pfd.proflen*phase_offset_time))

        phase_offset_freq = PU.measure_phase_corr(subband, new_template)
        if phase_offset_freq > 0.5: phase_offset_freq -= 1.0
        bin_offsets_freq[ii] = int(round(pfd.proflen*phase_offset_freq))

    # Calculate the various metrics
    good_fraction_time = sum(abs(bin_offsets_time) < \
                             PHASE_DRIFT_TOLERANCE*pfd.proflen)/ \
                             float(pfd.npart)
    total_wander_time  = sum(abs(bin_offsets_time))/(pfd.proflen*pfd.npart)
    good_fraction_freq = sum(abs(bin_offsets_freq) < \
                             PHASE_DRIFT_TOLERANCE*pfd.proflen)/ \
                         float(pfd.npart)
    total_wander_freq  = sum(abs(bin_offsets_freq))/(pfd.proflen*pfd.nsub)

    # Make the appropriate metric the rating
    if method == "GOODFRAC":
        rating1 = good_fraction_time
        rating2 = good_fraction_freq
    
    if method == "WANDER"  :
        rating1 = total_wander_time
        rating2 = total_wander_freq

    return [name1,name2],[rating1,rating2]
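The phase offsets above come from PU.measure_phase_corr, which cross-correlates a sub-integration (or subband) against the template. A rough numpy sketch of that idea, with the peak of the circular cross-correlation expressed as a fraction of a turn and wrapped onto (-0.5, 0.5] (the sign convention may differ from PRESTO's):

    import numpy as np

    def phase_offset(profile, template):
        # Circular cross-correlation via FFTs; the lag of the peak, as a fraction of a turn
        xcorr = np.fft.irfft(np.fft.rfft(profile) * np.conj(np.fft.rfft(template)), len(profile))
        offset = np.argmax(xcorr) / float(len(profile))
        return offset - 1.0 if offset > 0.5 else offset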
Example #25
def Phase_Wiggle_Ratings(pfd, method="GOODFRAC"):
    """
    Calculate a metric for the phase wiggle in time and frequency.

    Parameters
    ----------
    pfd : class
        An instance of the prepfold.pfd class
    method : string
        The method to use to calculate the phase wiggle metrics
        \"GOODFRAC\" - The fraction of sub-intervals/sub-bands with a wiggle
            less than some threshold
        \"WANDER\" - The total phase wander normalized by the number of
            profile bins and the number of sub-intervals/sub-bands

    Returns
    -------
    names : list
        A list of ratings names
    ratings : list
        A list of ratings values
    """
    # The ratings names
    name1 = "Phase_Wiggle_Time"
    name2 = "Phase_Wiggle_Freq"

    # De-disperse the profile at the best DM
    pfd.dedisperse(DM=pfd.bestdm, doppler=1)
    # Internally rotate the data cube so that it is aligned with the best fold values
    pfd.adjust_period()
    # Get the subints.
    subints = N.sum(pfd.profs, axis=1)
    # Get the subbands.
    subbands = N.sum(pfd.profs, axis=0)
    # Get the template ("best") profile
    template = N.sum(subints, axis=0)
    # Make an array for storing a new template
    new_template = N.zeros_like(template)
    # Make an array for storing the offsets (in phase bins)
    bin_offsets_time = N.empty(pfd.npart)
    bin_offsets_freq = N.empty(pfd.npart)
    # The following loop creates a better template by removing wiggle, but
    # it does not change the actual subints
    for ii, subint in enumerate(subints):
        # Measure the phase offset
        phase_offset = PU.measure_phase_corr(subint, template)
        # The following is needed to put phase offsets on the interval
        # (-0.5,0.5]
        if phase_offset > 0.5: phase_offset -= 1.0
        # Calculate the offset in bins
        bin_offset = int(round(pfd.proflen * phase_offset))
        # Update the new template
        new_template += PU.rotate(subint, -bin_offset)

    # Now calculate the wiggle using the updated template
    for ii, (subint, subband) in enumerate(zip(subints, subbands)):
        phase_offset_time = PU.measure_phase_corr(subint, new_template)
        if phase_offset_time > 0.5: phase_offset_time -= 1.0
        bin_offsets_time[ii] = int(round(pfd.proflen * phase_offset_time))

        phase_offset_freq = PU.measure_phase_corr(subband, new_template)
        if phase_offset_freq > 0.5: phase_offset_freq -= 1.0
        bin_offsets_freq[ii] = int(round(pfd.proflen * phase_offset_freq))

    # Calculate the various metrics
    good_fraction_time = sum(abs(bin_offsets_time) < \
                             PHASE_DRIFT_TOLERANCE*pfd.proflen)/ \
                             float(pfd.npart)
    total_wander_time = sum(abs(bin_offsets_time)) / (pfd.proflen * pfd.npart)
    good_fraction_freq = sum(abs(bin_offsets_freq) < \
                             PHASE_DRIFT_TOLERANCE*pfd.proflen)/ \
                         float(pfd.npart)
    total_wander_freq = sum(abs(bin_offsets_freq)) / (pfd.proflen * pfd.nsub)

    # Make the appropriate metric the rating
    if method == "GOODFRAC":
        rating1 = good_fraction_time
        rating2 = good_fraction_freq

    if method == "WANDER":
        rating1 = total_wander_time
        rating2 = total_wander_freq

    return [name1, name2], [rating1, rating2]
Example #26
    def dm_curve_check(self, spec_index=0.):
        # Sum the profiles in time
        profs = self.pfd.profs.sum(0)

        ### Generate simulated profiles ###

        # prof_avg: median profile value per subint per subband
        #  Sum over subint axis to get median per subband
        prof_avg = self.pfd.stats[:, :, 4].sum(0)
        # prof_var: variance of profile per subint per subband
        #  Sum over subint axis to get variance per subband
        prof_var = self.pfd.stats[:, :, 5].sum(0)
        # The standard deviation in each subband is proportional to the median
        # value of that subband.  Here we scale all subbands to equal levels.
        #scaled_vars = prof_var / prof_avg**2
        scaled_vars = np.array(np.abs(prof_var), dtype=np.float64) / prof_avg**2
        # This is new: avoid errors when the sqrt for the noise would see values <= 0
        scaled_vars[scaled_vars <= 0] = np.random.rand(1)
        scaled_profs = (profs.T / prof_avg).T - 1.
        # The mean profile (after scaling) will be our "clean" profile--hardly
        # true in most cases, but we pretend it's noiseless for what follows
        scaled_mean_prof = scaled_profs.mean(0)
        # Extend this "clean" profile across the number of subbands
        sim_profs_clean = np.tile(scaled_mean_prof,\
            scaled_profs.shape[0]).reshape(scaled_profs.shape)
        # Scale these subbands according to the input spectral index
        spec_mult = (self.pfd.subfreqs / self.pfd.subfreqs[0])**spec_index
        spec_mult /= spec_mult.mean()
        sim_profs_spec = (sim_profs_clean.T * spec_mult).T
        # For consistency, set a seed for generating noise
        np.random.seed(1967)
        # Add white noise that matches the variance of the real subbands
        # on a subband by subband basis
        noise = np.random.normal(scale=np.sqrt(scaled_vars),
                                 size=scaled_profs.T.shape).T
        # sim_profs_noisy is the simulated equivalent of scaled_profs
        sim_profs_noisy = sim_profs_spec + noise
        # sim_profs_final is the simulated equivalent of profs
        sim_profs = ((sim_profs_noisy + 1.).T * prof_avg).T

        # The rest of this is essentially code from the prepfold.pfd class
        # in which we loop over DM values and see how strong a signal we
        # get by dedispersing at each of these values

        # Go to higher DMs than the original curve to try to better exclude noise
        DMs = np.linspace(self.pfd.dms[0], self.pfd.dms[-1]+4.*\
            (self.pfd.dms[-1]-self.pfd.dms[0]), len(self.pfd.dms))
        chis = np.zeros_like(DMs)
        sim_chis = np.zeros_like(DMs)
        subdelays_bins = self.pfd.subdelays_bins.copy()
        for ii, DM in enumerate(DMs):
            subdelays = psr_utils.delay_from_DM(DM, self.pfd.barysubfreqs)
            hifreqdelay = subdelays[-1]
            subdelays = subdelays - hifreqdelay
            delaybins = subdelays * self.pfd.binspersec - subdelays_bins
            new_subdelays_bins = np.floor(delaybins + 0.5)
            for jj in range(self.pfd.nsub):
                profs[jj] = psr_utils.rotate(profs[jj],
                                             int(new_subdelays_bins[jj]))
                sim_profs[jj] = psr_utils.rotate(sim_profs[jj],\
                    int(new_subdelays_bins[jj]))
            subdelays_bins += new_subdelays_bins
            # The set of reduced chi2s like those in the prepfold plot
            # (should be the same if the same DMs are used)
            chis[ii] = self.pfd.calc_redchi2(prof=profs.sum(0),
                                             avg=self.pfd.avgprof)
            # The same thing but for our "simulated" data
            sim_chis[ii] = self.pfd.calc_redchi2(prof=sim_profs.sum(0),
                                                 avg=self.pfd.avgprof)
        return DMs, chis, sim_chis
Example #27
def transform(data, rot, scale, dc):
    nrot = int(np.round(rot * len(data)))
    #print "Rotating model by %d bins" % nrot
    rotated = np.asarray(psr_utils.rotate(data, nrot))
    return rotated * scale + dc
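A hedged usage sketch of this helper, assuming psr_utils is imported and transform is defined as above; the Gaussian template and parameter values are invented for illustration:

    import numpy as np

    nbin = 64
    model = np.exp(-0.5 * ((np.arange(nbin) - 20.0) / 3.0) ** 2)   # toy Gaussian pulse template
    fitted = transform(model, rot=0.25, scale=2.0, dc=1.5)         # rotate by a quarter turn, scale, add a baseline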
Example #28
    sumprof -= Num.median(offpulse)
    sumprof /= rms_correction(offpulse.std(), dt_per_bin)
    print "\nSummed profile approx SNR = %.3f" % sum(sumprof)
    if SEFD:
        avg_S /= len(pfdfilenms)
        if pulsebins is None:
            SNR = sumprof.sum()  # integrate everything
        else:
            SNR = sumprof[pulsebins].sum()
        S = SEFD * SNR / Num.sqrt(2.0 * BW * Tpostrfi / numbins) / numbins
        print "     Approx sum profile flux density = %.3f mJy" % S
        print "    Avg of individual flux densities = %.3f mJy" % avg_S
        print "     Total (RFI cleaned) integration = %.0f s (%.2f hrs)" % \
              (Tpostrfi, Tpostrfi/3600.0)

    # Rotate the summed profile so that the max value is at the phase ~ 0.25 mark
    sumprof = psr_utils.rotate(sumprof, -len(sumprof)/4)
    Pgplot.plotxy(sumprof, Num.arange(numbins),
                  labx="Pulse Phase", laby="Relative Flux")
    Pgplot.closeplot()

    print "\n Writing profile to '%s'..."%(outfilenm),

    outfile = file(outfilenm, "w")
    for ii, val in enumerate(sumprof):
        outfile.write("%04d  %20.15g\n"%(ii, val))
    outfile.close()
    
    print "Done\n"
    
Example #29
    def adjust_period(self, p=None, pd=None, pdd=None, interp=0):
        """
        adjust_period(p=*bestp*, pd=*bestpd*, pdd=*bestpdd*):
            Rotate (internally) the profiles so that they are adjusted to
                the given period and period derivatives.  By default,
                use the 'best' values as determined by prepfold's search.
                This should orient all of the profiles so that they are
                almost identical to what you see in a prepfold plot which
                used searching.  Use FFT-based interpolation if 'interp'
                is non-zero.  (NOTE: It is off by default, as in prepfold!)
        """
        if self.fold_pow == 1.0:
            bestp = self.bary_p1
            bestpd = self.bary_p2
            bestpdd = self.bary_p3
        else:
            bestp = self.topo_p1
            bestpd = self.topo_p2
            bestpdd = self.topo_p3
        if p is None:
            p = bestp
        if pd is None:
            pd = bestpd
        if pdd is None:
            pdd = bestpdd

        # Cast to single precision and back to double precision to
        # emulate prepfold_plot.c, where parttimes is of type "float"
        # but values are upcast to "double" during computations.
        # (surprisingly, it affects the resulting profile occasionally.)
        parttimes = self.start_secs.astype('float32').astype('float64')

        # Get delays
        f_diff, fd_diff, fdd_diff = self.freq_offsets(p, pd, pdd)
        delays = psr_utils.delay_from_foffsets(f_diff, fd_diff, fdd_diff, parttimes)

        # Convert from delays in phase to delays in bins
        bin_delays = Num.fmod(delays * self.proflen, self.proflen) - self.pdelays_bins
        if interp:
            new_pdelays_bins = bin_delays
        else:
            new_pdelays_bins = Num.floor(bin_delays+0.5)

        # Rotate subintegrations
        for ii in range(self.nsub):
            for jj in range(self.npart):
                tmp_prof = self.profs[jj,ii,:]
                # The number of bins to shift is negated because the delays were
                # calculated assuming +ve means shift-to-right, whereas
                # psr_utils.rotate treats +ve as shift-to-left
                if interp:
                    self.profs[jj,ii] = psr_utils.fft_rotate(tmp_prof, -new_pdelays_bins[jj])
                else:
                    self.profs[jj,ii] = psr_utils.rotate(tmp_prof, \
                                            -new_pdelays_bins[jj])
        self.pdelays_bins += new_pdelays_bins
        if interp:
            # Note: Since the rotation process slightly changes the values of the
            # profs, we need to re-calculate the average profile value
            self.avgprof = (self.profs/self.proflen).sum()

        self.sumprof = self.profs.sum(0).sum(0)
        if Num.fabs((self.sumprof/self.proflen).sum() - self.avgprof) > 1.0:
            print "self.avgprof is not the correct value!"

        # Save current p, pd, pdd
        self.curr_p1, self.curr_p2, self.curr_p3 = p, pd, pdd
Example #30
    def dm_curve_check(self, spec_index=0.0):
        # Sum the profiles in time
        profs = self.pfd.profs.sum(0)

        ### Generate simulated profiles ###

        # prof_avg: median profile value per subint per subband
        #  Sum over subint axis to get median per subband
        prof_avg = self.pfd.stats[:, :, 4].sum(0)
        # prof_var: variance of profile per subint per subband
        #  Sum over subint axis to get variance per subband
        prof_var = self.pfd.stats[:, :, 5].sum(0)
        # The standard deviation in each subband is proportional to the median
        # value of that subband.  Here we scale all subbands to equal levels.
        scaled_vars = prof_var / prof_avg ** 2
        scaled_profs = (profs.T / prof_avg).T - 1.0
        # The mean profile (after scaling) will be our "clean" profile--hardly
        # true in most cases, but we pretend it's noiseless for what follows
        scaled_mean_prof = scaled_profs.mean(0)
        # Extend this "clean" profile across the number of subbands
        sim_profs_clean = np.tile(scaled_mean_prof, scaled_profs.shape[0]).reshape(scaled_profs.shape)
        # Scale these subbands according to the input spectral index
        spec_mult = (self.pfd.subfreqs / self.pfd.subfreqs[0]) ** spec_index
        spec_mult /= spec_mult.mean()
        sim_profs_spec = (sim_profs_clean.T * spec_mult).T
        # For consistency, set a seed for generating noise
        np.random.seed(1967)
        # Add white noise that matches the variance of the real subbands
        # on a subband by subband basis
        noise = np.random.normal(scale=np.sqrt(scaled_vars), size=scaled_profs.T.shape).T
        # sim_profs_noisy is the simulated equivalent of scaled_profs
        sim_profs_noisy = sim_profs_spec + noise
        # sim_profs_final is the simulated equivalent of profs
        sim_profs = ((sim_profs_noisy + 1.0).T * prof_avg).T

        # The rest of this is essentially code from the prepfold.pfd class
        # in which we loop over DM values and see how strong a signal we
        # get by dedispersing at each of these values

        # Go to higher DMs than the original curve to try to better exclude noise
        DMs = np.linspace(
            self.pfd.dms[0], self.pfd.dms[-1] + 4.0 * (self.pfd.dms[-1] - self.pfd.dms[0]), len(self.pfd.dms)
        )
        chis = np.zeros_like(DMs)
        sim_chis = np.zeros_like(DMs)
        subdelays_bins = self.pfd.subdelays_bins.copy()
        for ii, DM in enumerate(DMs):
            subdelays = psr_utils.delay_from_DM(DM, self.pfd.barysubfreqs)
            hifreqdelay = subdelays[-1]
            subdelays = subdelays - hifreqdelay
            delaybins = subdelays * self.pfd.binspersec - subdelays_bins
            new_subdelays_bins = np.floor(delaybins + 0.5)
            for jj in range(self.pfd.nsub):
                profs[jj] = psr_utils.rotate(profs[jj], int(new_subdelays_bins[jj]))
                sim_profs[jj] = psr_utils.rotate(sim_profs[jj], int(new_subdelays_bins[jj]))
            subdelays_bins += new_subdelays_bins
            # The set of reduced chi2s like those in the prepfold plot
            # (should be the same if the same DMs are used)
            chis[ii] = self.pfd.calc_redchi2(prof=profs.sum(0), avg=self.pfd.avgprof)
            # The same thing but for our "simulated" data
            sim_chis[ii] = self.pfd.calc_redchi2(prof=sim_profs.sum(0), avg=self.pfd.avgprof)
        return DMs, chis, sim_chis
Example #31
def calc_features_from_pfd(pfd_filepath):

    pfd_data = prepfold.pfd(str(pfd_filepath))

    if pfd_filepath.parent.name == 'positive':
        label = 1
    elif pfd_filepath.parent.name == 'negative':
        label = 0
    else:
        label = -1
        # raise RuntimeError('unable to decide the label of pfd file: {}'.format(
        #     str(pfd_filepath)))

    pfd_data.dedisperse()

    #### As done in: prepfold.pfd.plot_sumprofs
    profile = pfd_data.sumprof
    profile = normalise_1d(profile)
    ####

    profile_mean = np.mean(profile)
    profile_std_dev = np.std(profile)
    profile_skewness = scipy.stats.skew(profile)
    profile_excess_kurtosis = scipy.stats.kurtosis(profile)

    profiles_sum_axis0 = pfd_data.profs.sum(0)

    #### As done in: prepfold.pfd.plot_chi2_vs_DM
    loDM = 0
    hiDM = pfd_data.numdms
    N = pfd_data.numdms
    profs = profiles_sum_axis0.copy()  # = pfd_data.profs.sum(0)
    DMs = psr_utils.span(loDM, hiDM, N)
    chis = np.zeros(N, dtype='f')
    subdelays_bins = pfd_data.subdelays_bins.copy()
    for ii, DM in enumerate(DMs):
        subdelays = psr_utils.delay_from_DM(DM, pfd_data.barysubfreqs)
        hifreqdelay = subdelays[-1]
        subdelays = subdelays - hifreqdelay
        delaybins = subdelays * pfd_data.binspersec - subdelays_bins
        new_subdelays_bins = np.floor(delaybins + 0.5)
        for jj in range(pfd_data.nsub):
            profs[jj] = psr_utils.rotate(profs[jj],
                                         int(new_subdelays_bins[jj]))
        subdelays_bins += new_subdelays_bins
        sumprof = profs.sum(0)
        chis[ii] = pfd_data.calc_redchi2(prof=sumprof, avg=pfd_data.avgprof)
    ####

    best_dm = pfd_data.bestdm

    # crop_radius = 100
    # best_dm_index = np.searchsorted(DMs, best_dm)  # Not accurate, but close.
    # bloated_chis = np.insert(chis, N, np.full(crop_radius, chis[-1]))
    # bloated_chis = np.insert(bloated_chis, 0, np.full(crop_radius, chis[0]))
    # cropped_chis = bloated_chis[ best_dm_index : best_dm_index+2*crop_radius ]
    # chis = cropped_chis

    chis_mean = np.mean(chis)
    chis_std_dev = np.std(chis)
    chis_skewness = scipy.stats.skew(chis)
    chis_excess_kurtosis = scipy.stats.kurtosis(chis)

    #### As done in: prepfold.pfd.plot_intervals
    intervals = pfd_data.profs.sum(1)
    intervals = normalise_2d_rowwise(intervals)
    ####

    #### As done in: prepfold.pfd.plot_subbands
    subbands = profiles_sum_axis0.copy()  # = pfd_data.profs.sum(0)
    subbands = normalise_2d_rowwise(subbands)
    ####

    return (label, profile_mean, profile_std_dev, profile_skewness,
            profile_excess_kurtosis, chis_mean, chis_std_dev, chis_skewness,
            chis_excess_kurtosis, best_dm, profile, intervals, subbands, chis)
Example #32
def transform(data, rot, scale, dc):
    nrot = int(np.round(rot*len(data)))
    #print "Rotating model by %d bins" % nrot
    rotated = np.asarray(psr_utils.rotate(data, nrot))
    return rotated*scale + dc