Example #1
File: lumfun.py Project: imcgreer/simqso
    def calcBinnedLF(self,Medges,zedges,**kwargs):
        '''
        Calculate binned luminosity function from the stored survey data.

        Parameters
        ----------
        Medges : ndarray
            Bin edges in absolute magnitude.
        zedges : ndarray
            Bin edges in redshift.
        '''
        confinterval = kwargs.get('confinterval','root-n')
        # kind of hacky to access cosmo through m2M... XXX
        dVdzdO = interp_dVdzdO(zedges,self.m2M.cosmo)
        #
        Mbins = Medges[:-1] + np.diff(Medges)/2
        zbins = zedges[:-1] + np.diff(zedges)/2
        lfShape = Mbins.shape + zbins.shape
        # assign data points to bins and trim out-of-bounds objects
        Mi = np.digitize(self.M,Medges) - 1
        zi = np.digitize(self.z,zedges) - 1
        ii = np.where( (Mi>=0) & (Mi<len(Mbins)) &
                       (zi>=0) & (zi<len(zbins)) )[0]
        # do the counting in bins
        lf = self.init_lf_table(Mbins,zbins)
        np.add.at( lf['rawCounts'], (Mi[ii],zi[ii]),                1   )
        np.add.at(    lf['counts'], (Mi[ii],zi[ii]), self.weights[ii]   )
        np.add.at(  lf['countUnc'], (Mi[ii],zi[ii]), self.weights[ii]**2)
        #
        inbounds = self.getinbounds(Medges,zedges)
        lf['filled'][:] = (inbounds==4)
        # calculate bin volumes by integrating dVdM = (dV/dz)dzdM
        #   ... note if there were many redshift bins, could save time
        #       by only calculating dV once for each filled bin within
        #       each redshift slice
        binVol = np.zeros(lfShape)
        for i,j in zip(*np.where(inbounds > 0)):
            Mlim = lambda z: np.clip(self.m_lim-self.m2M(self.m_lim,z),
                                     Medges[i],Medges[i+1])
            binVol[i,j],_ = dblquad(lambda M,z: dVdzdO(z),
                                    zedges[j],zedges[j+1],
                                    lambda z: Medges[i],Mlim)
        # calculate luminosity function from ~ counts/volume
        mask = (lf['rawCounts']==0) | (binVol == 0)
        binVol = np.ma.array(binVol * self.area_srad, mask=mask)
        lf['phi'] = np.ma.divide(lf['counts'],binVol)
        lf['rawPhi'] = np.ma.divide(lf['rawCounts'],binVol)
        # only strictly valid for symmetric confidence intervals:
        # the upper error is used as the (symmetric) uncertainty
        sighi = (poisson_conf_interval(lf['countUnc'],
                                       interval=confinterval)[1]
                 - lf['countUnc'])
        return lf
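
The error propagation at the end of this example is a reusable pattern: convert the Poisson confidence interval on the (effective) bin counts into a one-sided uncertainty, then divide by the bin volume. A minimal standalone sketch of that pattern (the array values below are illustrative, not survey data):

import numpy as np
from astropy.stats import poisson_conf_interval

counts = np.array([3., 17., 42.])           # effective counts per bin (made up)
bin_vol = np.array([1.2e7, 0.9e7, 1.1e7])   # comoving volume per bin, Mpc^3 (made up)

phi = counts / bin_vol
# distance from the counts to the upper confidence bound, as in sighi above
sig_hi = poisson_conf_interval(counts, interval='root-n')[1] - counts
sig_phi = sig_hi / bin_vol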
Example #2
def plot_profile(phase, profile, err=None, ax=None):
    """Plot a pulse profile showing some stats.

    If err is None, the profile is assumed to be in counts and the Poisson
    confidence level is plotted. Otherwise, err is shown as error bars.

    Parameters
    ----------
    phase : array-like
        The bins on the x-axis

    profile : array-like
        The pulsed profile

    Other Parameters
    ----------------
    err : array-like, optional
        The uncertainties on the profile values, shown as error bars.

    ax : `matplotlib.axes.Axes` instance
        Axis to plot to. If None, create a new one.

    Returns
    -------
    ax : `matplotlib.axes.Axes` instance
        Axis where the profile was plotted.
    """
    import matplotlib.pyplot as plt
    if ax is None:
        plt.figure('Pulse profile')
        ax = plt.subplot()
    mean = np.mean(profile)
    doubled = np.all(phase < 1.5)
    if doubled:
        phase = np.concatenate((phase, phase + 1))
        profile = np.concatenate((profile, profile))
    ax.plot(phase, profile, drawstyle='steps-mid')
    if err is None:
        err_low, err_high = \
            poisson_conf_interval(mean, interval='frequentist-confidence',
                                  sigma=1)
        ax.axhspan(err_low, err_high, alpha=0.5)
    else:
        if doubled:
            # duplicate the errors only if the profile was duplicated above
            err = np.concatenate((err, err))
        ax.errorbar(phase, profile, yerr=err, fmt='none')

    ax.set_ylabel('Counts')
    ax.set_xlabel('Phase')
    return ax
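
A quick usage sketch for plot_profile with synthetic data (all values made up); with err=None the Poisson confidence band around the mean level is drawn:

import numpy as np
import matplotlib.pyplot as plt

nbin = 32
phase = (np.arange(nbin) + 0.5) / nbin     # bin midpoints in [0, 1)
profile = np.random.poisson(100, nbin)     # fake folded counts
ax = plot_profile(phase, profile)          # err=None -> Poisson band
plt.show()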
Example #3
def poisson_symmetrical_errors(counts):
    """Optimized version of frequentist symmetrical errors.

    Uses a lookup table over the unique count values to limit the number
    of calls to poisson_conf_interval.

    Parameters
    ----------
    counts : iterable
        An array of Poisson-distributed numbers

    Returns
    -------
    err : numpy.ndarray
        An array of uncertainties associated with the Poisson counts in
        ``counts``

    Examples
    --------
    >>> import numpy as np
    >>> from astropy.stats import poisson_conf_interval
    >>> counts = np.random.randint(0, 1000, 100)
    >>> # ---- Do it without the lookup table ----
    >>> err_low, err_high = poisson_conf_interval(np.asarray(counts),
    ...                 interval='frequentist-confidence', sigma=1)
    >>> err_low -= np.asarray(counts)
    >>> err_high -= np.asarray(counts)
    >>> err = (np.absolute(err_low) + np.absolute(err_high))/2.0
    >>> # Do it with this function
    >>> err_thisfun = poisson_symmetrical_errors(counts)
    >>> # Test that results are always the same
    >>> assert np.all(err_thisfun == err)
    """
    from astropy.stats import poisson_conf_interval
    counts_int = np.asarray(counts, dtype=np.int64)
    count_values = np.unique(counts_int)
    err_low, err_high = \
        poisson_conf_interval(count_values,
                              interval='frequentist-confidence', sigma=1)
    # calculate approximately symmetric uncertainties
    err_low -= np.asarray(count_values)
    err_high -= np.asarray(count_values)
    err = (np.absolute(err_low) + np.absolute(err_high)) / 2.0

    idxs = np.searchsorted(count_values, counts_int)
    return err[idxs]
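
The lookup table pays off when count values repeat, since poisson_conf_interval is evaluated once per unique value rather than once per element. A usage sketch (input values made up):

import numpy as np

# counts drawn from a narrow range repeat often, so the unique-value
# table stays small even though the input array is large
counts = np.random.poisson(20, 100000)
err = poisson_symmetrical_errors(counts)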
Example #4
    def __init__(self, time, counts, err=None, input_counts=True,
                 gti=None, err_dist='poisson', mjdref=0, dt=None):
        """
        Make a light curve object from an array of time stamps and an
        array of counts.

        Parameters
        ----------
        time: iterable
            A list or array of time stamps for a light curve

        counts: iterable
            A list or array of the counts in each bin corresponding to the
            bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin).

        err: iterable, optional, default None
            A list or array of the uncertainties in each bin corresponding to
            the bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin). If None, we assume the data are Poisson-distributed
            and calculate the error from the average of the lower and upper
            1-sigma confidence intervals for the Poisson distribution with
            mean equal to `counts`.

        input_counts: bool, optional, default True
            If True, the code assumes that the input data in 'counts'
            is in units of counts/bin. If False, it assumes the data
            in 'counts' is in counts/second.

        gti: 2-d float array, default None
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. They are *not* applied to the data by default.
            They will be used by other methods to have an indication of the
            "safe" time intervals to use during analysis.

        err_dist: str, optional, default='poisson'
            Statistic of the Lightcurve, used to calculate the
            uncertainties and other statistical values appropriately.
            Any valid statistic other than 'poisson' keeps the errors
            equal to zero.

        mjdref: float
            MJD reference (useful in most high-energy mission data)


        Attributes
        ----------
        time: numpy.ndarray
            The array of midpoints of time bins.

        bin_lo:
            The array of lower time stamp of time bins.

        bin_hi:
            The array of higher time stamp of time bins.

        counts: numpy.ndarray
            The counts per bin corresponding to the bins in `time`.

        counts_err: numpy.ndarray
            The uncertainties corresponding to `counts`.

        countrate: numpy.ndarray
            The counts per second in each of the bins defined in `time`.

        countrate_err: numpy.ndarray
            The uncertainties corresponding to `countrate`.

        meanrate: float
            The mean count rate of the light curve.

        meancounts: float
            The mean counts of the light curve.

        n: int
            The number of data points in the light curve.

        dt: float
            The time resolution of the light curve.

        mjdref: float
            MJD reference date (tstart / 86400 gives the date in MJD at the
            start of the observation)

        tseg: float
            The total duration of the light curve.

        tstart: float
            The start time of the light curve.

        gti: 2-d float array
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. They indicate the "safe" time intervals
            to be used during the analysis of the light curve.

        err_dist: string
            Statistic of the Lightcurve, used to calculate the
            uncertainties and other statistical values appropriately.
            It propagates to Spectrum classes.

        """

        if not np.all(np.isfinite(time)):
            raise ValueError("There are inf or NaN values in "
                             "your time array!")

        if not np.all(np.isfinite(counts)):
            raise ValueError("There are inf or NaN values in "
                             "your counts array!")

        if len(time) != len(counts):
            raise StingrayError("time and counts array are not "
                                "of the same length!")

        if len(time) <= 1:
            raise StingrayError("At least two data points are needed "
                                "to create a light curve!")

        if err is not None:
            if not np.all(np.isfinite(err)):
                raise ValueError("There are inf or NaN values in "
                                 "your err array")
        else:
            if err_dist.lower() not in valid_statistics:
                # err_dist set can be increased with other statistics
                raise StingrayError("Statistic not recognized."
                                    "Please select one of these: ",
                                    "{}".format(valid_statistics))
            if err_dist.lower() == 'poisson':
                # Instead of the simple square root, we use confidence
                # intervals (should be valid for low fluxes too)
                err_low, err_high = poisson_conf_interval(
                    np.asarray(counts),
                    interval='frequentist-confidence', sigma=1)
                # calculate approximately symmetric uncertainties
                err_low -= np.asarray(counts)
                err_high -= np.asarray(counts)
                err = (np.absolute(err_low) + np.absolute(err_high))/2.0
                # other estimators can be implemented for other statistics
            else:
                simon("Stingray only uses poisson err_dist at the moment, "
                      "We are setting your errors to zero. "
                      "Sorry for the inconvenience.")
                err = np.zeros_like(counts)

        self.mjdref = mjdref
        self.time = np.asarray(time)
        if dt is None:
            self.dt = np.median(self.time[1:] - self.time[:-1])
        else:
            self.dt = dt

        self.bin_lo = self.time - 0.5 * self.dt
        self.bin_hi = self.time + 0.5 * self.dt

        self.err_dist = err_dist

        self.tstart = self.time[0] - 0.5*self.dt
        self.tseg = self.time[-1] - self.time[0] + self.dt

        self.gti = \
            np.asarray(assign_value_if_none(gti,
                                            [[self.tstart,
                                              self.tstart + self.tseg]]))
        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti)

        self.time = self.time[good]
        if input_counts:
            self.counts = np.asarray(counts)[good]
            self.countrate = self.counts / self.dt
            self.counts_err = np.asarray(err)[good]
            self.countrate_err = np.asarray(err)[good] / self.dt
        else:
            self.countrate = np.asarray(counts)[good]
            self.counts = self.countrate * self.dt
            self.counts_err = np.asarray(err)[good] * self.dt
            self.countrate_err = np.asarray(err)[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]

        # Issue a warning if the input time iterable isn't regularly spaced,
        # i.e. the bin sizes aren't equal throughout.
        dt_array = np.diff(self.time)
        if not (np.allclose(dt_array, np.repeat(self.dt, dt_array.shape[0]))):
            simon("Bin sizes in input time array aren't equal throughout! "
                  "This could cause problems with Fourier transforms. "
                  "Please make the input time evenly sampled.")
Example #5
def plot_folding(fnames, figname=None, xlog=None, ylog=None,
                 output_data_file=None):
    """Plot the results of one or more saved folding searches (Z2n or epoch
    folding): the folded pulse profile, and the statistic vs frequency (and
    fdot, when a 2-d search was done)."""
    from .fold import z2_n_detection_level
    from .io import find_file_in_allowed_paths
    from stingray.pulse.pulsar import fold_detection_level
    from matplotlib import gridspec
    import matplotlib.pyplot as plt

    if is_string(fnames):
        fnames = [fnames]

    for fname in fnames:
        ef = load_folding(fname)

        if not hasattr(ef, 'M') or ef.M is None:
            ef.M = 1

        if ef.kind == "Z2n":
            vmin = ef.N - 1
            vmax = z2_n_detection_level(0.001, n=ef.N,
                                        ntrial=max(ef.stat.shape),
                                        n_summed_spectra=ef.M)
            nbin = ef.N * 8
        else:
            vmin = ef.nbin
            vmax = fold_detection_level(ef.nbin, 0.001,
                                        ntrial=max(ef.stat.shape))
            nbin = ef.nbin

        if len(ef.stat.shape) > 1 and ef.stat.shape[0] > 1:
            idx = ef.stat.argmax()
            # ix, iy = np.unravel_index(np.argmax(ef.stat, axis=None),
            #                           ef.stat.shape)
            f, fdot = ef.freq.flatten()[idx], ef.fdots.flatten()[idx]
            df = np.min(np.diff(ef.freq[0]))
            dfdot = np.min(np.diff(ef.fdots[:, 0]))
        elif len(ef.stat.shape) == 1:
            f = ef.freq[ef.stat.argmax()]
            df = np.min(np.diff(ef.freq))

            fdot = 0
            dfdot = 1
        else:
            raise ValueError("Did not understand stats shape.")

        plt.figure(fname, figsize=(10, 10))

        if hasattr(ef, "filename") and ef.filename is not None and \
                os.path.exists(ef.filename):
            external_gs = gridspec.GridSpec(2, 1)
            search_gs_no = 1

            events = load_events(ef.filename)

            if hasattr(ef, "parfile") and ef.parfile is not None:
                root = os.path.split(fname)[0]
                parfile = find_file_in_allowed_paths(ef.parfile,
                                                     ['.', root])
                if not parfile:
                    warnings.warn("{} does not exist".format(ef.parfile))
                else:
                    ef.parfile = parfile

                if parfile and os.path.exists(ef.parfile):
                    events = deorbit_events(events, ef.parfile)

            phase, profile, profile_err = \
                fold_events(copy.deepcopy(events.time), f, fdot,
                            ref_time=events.gti[0, 0],
                            gtis=copy.deepcopy(events.gti),
                            expocorr=False, nbin=nbin)

            ax = plt.subplot(external_gs[0])

            ax.text(0.1, 0.9, "Profile for F0={} Hz, F1={} Hz/s".format(
                round(f, -np.int(np.floor(np.log10(np.abs(df))))),
                round(fdot, -np.int(np.floor(np.log10(np.abs(dfdot)))))),
                    horizontalalignment='left', verticalalignment = 'center',
                    transform = ax.transAxes)
            ax.plot(np.concatenate((phase, phase + 1)),
                    np.concatenate((profile, profile)), drawstyle='steps-mid')

            mean = np.mean(profile)

            low, high = \
                poisson_conf_interval(mean,
                                      interval='frequentist-confidence',
                                      sigma=1)

            ax.axhline(mean)
            ax.fill_between([0, 2], [low, low], [high, high],
                            label=r"1-$\sigma$ c.l.", alpha=0.5)
            low, high = \
                poisson_conf_interval(mean,
                                      interval='frequentist-confidence',
                                      sigma=3)
            ax.fill_between([0, 2], [low, low], [high, high],
                            label=r"3-$\sigma$ c.l.", alpha=0.5)
            ax.set_xlabel("Phase")
            ax.set_ylabel("Counts")
            ax.set_xlim([0, 2])
            ax.legend()
            phascommand = "HENphaseogram -f {} --fdot {} {}".format(f, fdot,
                                                                    ef.filename)
            if getattr(ef, "parfile", None) and os.path.exists(ef.parfile):
                phascommand += " --deorbit-par {}".format(ef.parfile)

            print("To see the detailed phaseogram, "
                  "run {}".format(phascommand))

        elif getattr(ef, "filename", None) is not None and \
                not os.path.exists(ef.filename):
            warnings.warn(ef.filename + " does not exist")
            external_gs = gridspec.GridSpec(1, 1)
            search_gs_no = 0
        else:
            external_gs = gridspec.GridSpec(1, 1)
            search_gs_no = 0


        if len(ef.stat.shape) > 1 and ef.stat.shape[0] > 1:

            gs = gridspec.GridSpecFromSubplotSpec(
                2, 2, height_ratios=(1, 3), width_ratios=(3, 1),
                hspace=0, wspace=0, subplot_spec=external_gs[search_gs_no])

            axf = plt.subplot(gs[0, 0])
            axfdot = plt.subplot(gs[1, 1])
            if vmax is not None:
                axf.axhline(vmax, ls="--", label="99.9\% c.l.")
                axfdot.axvline(vmax)
            axffdot = plt.subplot(gs[1, 0], sharex=axf, sharey=axfdot)
            axffdot.pcolormesh(ef.freq, np.asarray(ef.fdots), ef.stat,
                               vmin=vmin, vmax=vmax)
            maximum_idx = 0
            maximum = 0
            for ix in range(ef.stat.shape[0]):
                axf.plot(ef.freq[ix, :], ef.stat[ix, :], alpha=0.5, lw=0.2,
                         color='k')
                if np.max(ef.stat[ix, :]) > maximum:
                    maximum = np.max(ef.stat[ix, :])
                    maximum_idx = ix
            if vmax is not None and maximum_idx > 0:
                axf.plot(ef.freq[maximum_idx, :], ef.stat[maximum_idx, :],
                         lw=1, color='k')
            maximum_idx = -1
            maximum = 0
            for iy in range(ef.stat.shape[1]):
                axfdot.plot(ef.stat[:, iy], np.asarray(ef.fdots)[:, iy],
                            alpha=0.5, lw=0.2, color='k')
                if np.max(ef.stat[:, iy]) > maximum:
                    maximum = np.max(ef.stat[:, iy])
                    maximum_idx = iy
            if vmax is not None and maximum_idx > 0:
                axfdot.plot(ef.stat[:, maximum_idx],
                            np.asarray(ef.fdots)[:, maximum_idx],
                            lw=1, color='k')
            axf.set_ylabel(r"Stat")
            axfdot.set_xlabel(r"Stat")

            # plt.colorbar()
            axffdot.set_xlabel('Frequency (Hz)')
            axffdot.set_ylabel('Fdot (Hz/s)')
            axffdot.set_xlim([np.min(ef.freq), np.max(ef.freq)])
            axffdot.set_ylim([np.min(ef.fdots), np.max(ef.fdots)])
            axf.legend()
        else:
            axf = plt.subplot(external_gs[search_gs_no])
            axf.plot(ef.freq, ef.stat, drawstyle='steps-mid', label=fname)
            axf.set_xlabel('Frequency (Hz)')
            axf.set_ylabel(ef.kind + ' stat')
            axf.legend()


        if hasattr(ef, 'best_fits') and ef.best_fits is not None and \
                not len(ef.stat.shape) > 1:

            for f in ef.best_fits:
                xs = np.linspace(np.min(ef.freq), np.max(ef.freq),
                                 len(ef.freq)*2)
                plt.plot(xs, f(xs))

        if output_data_file is not None:
            fdots = ef.fdots
            if not isinstance(fdots, collections.abc.Iterable) \
                    or len(fdots) == 1:
                fdots = fdots + np.zeros_like(ef.freq.flatten())
            # print(fdots.shape, ef.freq.shape, ef.stat.shape)
            out = [ef.freq.flatten(), fdots.flatten(), ef.stat.flatten()]
            out_err = [None, None, None]

            if hasattr(ef, 'best_fits') and ef.best_fits is not None and \
                    not len(ef.stat.shape) > 1:
                for f in ef.best_fits:
                    out.append(f(ef.freq.flatten()))
                    out_err.append(None)

            save_as_qdp(out, out_err, filename=output_data_file, mode='a')

    ax = plt.gca()
    if xlog:
        ax.set_xscale('log', nonpositive='clip')
    if ylog:
        ax.set_yscale('log', nonpositive='clip')

    if figname is not None:
        plt.savefig(figname)
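
The shaded 1-sigma and 3-sigma bands around the mean profile level follow a single small pattern; a sketch with an illustrative mean value:

import numpy as np
import matplotlib.pyplot as plt
from astropy.stats import poisson_conf_interval

mean = 250.0  # mean counts per phase bin (made up)
fig, ax = plt.subplots()
ax.axhline(mean)
for sigma, alpha in [(1, 0.5), (3, 0.25)]:
    low, high = poisson_conf_interval(
        mean, interval='frequentist-confidence', sigma=sigma)
    ax.fill_between([0, 2], [low, low], [high, high], alpha=alpha,
                    label=r"{}-$\sigma$ c.l.".format(sigma))
ax.legend()
plt.show()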
Example #6
File: radial.py Project: twiight/pulsar_2
def plot_azimuth_SMC():
    """Plot the azimuthal distribution of eROSITA and Chandra source
    densities around the cluster center, measured relative to the
    direction of the SMC."""
    path = '/Users/baotong/Desktop/period_Tuc/'
    catname1 = path + 'erosita_cat_coord.xlsx'
    catname2 = path + 'xray_properties-592.fits'
    (ra_eR, dec_eR, srcIDlist) = read_erosita_cat(catname1)
    cat2 = fits.open(catname2)
    ra_chand = cat2[1].data['RAdeg']
    dec_chand = cat2[1].data['DEdeg']
    srcIDlist_chand = np.arange(1, len(ra_chand) + 1, 1)
    (dist1, dist2) = dist_center_twocat(catname1, catname2)
    dist1 = dist1.arcmin
    dist2 = dist2.arcmin
    # c3=SkyCoord(ra=13.1583*u.degree,dec=-72.8003*u.degree,distance=62.44*u.kpc)  ##smc coord
    # c2=SkyCoord(ra=ra_center*u.degree,dec=dec_center*u.degree,distance=4.0*u.kpc)
    # c1=SkyCoord(ra=ra_eR*u.degree,dec=dec_eR*u.degree,distance=4.0*u.kpc)
    # c0=SkyCoord(ra=ra_chand*u.degree,dec=dec_chand*u.degree,distance=4.0*u.kpc)
    # dist_chand_smc = c0.separation(c3)
    # dist_eR_smc=c1.separation(c3)
    # dist_ngc104_smc=c2.separation(c3)
    c_center = SkyCoord(ra=ra_center * u.degree, dec=dec_center * u.degree)
    c_smc = SkyCoord(ra=13.1583 * u.degree, dec=-72.8003 * u.degree)
    c_eR = SkyCoord(ra=ra_eR * u.degree, dec=dec_eR * u.degree)
    c_chand = SkyCoord(ra=ra_chand * u.degree, dec=dec_chand * u.degree)

    v_center = np.array([
        c_center.cartesian.x.value, c_center.cartesian.y.value,
        c_center.cartesian.z.value
    ])
    v_smc = np.array([
        c_smc.cartesian.x.value, c_smc.cartesian.y.value,
        c_smc.cartesian.z.value
    ])
    v_eR = np.array([
        c_eR.cartesian.x.value, c_eR.cartesian.y.value, c_eR.cartesian.z.value
    ]).T
    v_chand = np.array([
        c_chand.cartesian.x.value, c_chand.cartesian.y.value,
        c_chand.cartesian.z.value
    ]).T
    v1 = v_eR - v_center
    v2 = v_smc - v_center
    v3 = v_chand - v_center
    include_ang = vg.angle(v1, v2)
    include_ang_chandra = vg.angle(v3, v2)
    # include_ang-=90

    index_eR_1 = np.where((dist1 < 20) & (dist1 > 4))[0]
    index_eR_2 = np.where((dist1 < 40) & (dist1 > 20))[0]
    width = 10
    bins_fi = np.arange(0, 180 + width, width)

    # np.histogram is sufficient here: only the bin counts are needed
    num_in = np.histogram(include_ang[index_eR_1],
                          bins=bins_fi)[0].astype(float)
    num_out = np.histogram(include_ang[index_eR_2],
                           bins=bins_fi)[0].astype(float)
    num_chandra = np.histogram(include_ang_chandra,
                               bins=bins_fi)[0].astype(float)

    (num_in_err, num_out_err) = get_err_num_poisson(num_in, num_out)
    num_chandra_err = np.array(
        poisson_conf_interval(num_chandra, interval='frequentist-confidence'))
    num_chandra_err[0] = num_chandra - num_chandra_err[0]
    num_chandra_err[1] = num_chandra_err[1] - num_chandra

    # normalize counts to source surface density (sources per arcmin^2)
    area_in = np.pi * (20**2 - 4**2) * width / 180
    num_in /= area_in
    num_in_err[0] /= area_in
    num_in_err[1] /= area_in

    area_out = np.pi * (40**2 - 20**2) * width / 180
    num_out /= area_out
    num_out_err[0] /= area_out
    num_out_err[1] /= area_out

    plt.errorbar(x=bins_fi[:-1] + width / 2,
                 y=num_in,
                 yerr=num_in_err,
                 xerr=np.zeros(len(num_in)) + width / 2,
                 marker='.',
                 linestyle='')
    # plt.errorbar(x=bins_fi[:-1]+width/2,y=num_out,yerr=num_out_err,xerr=np.zeros(len(num_out))+width/2,marker='.',linestyle='')
    # plt.errorbar(x=bins_fi[:-1] + width / 2, y=num_chandra, yerr=num_chandra_err, xerr=np.zeros(len(num_chandra)) + width / 2,
    #              marker='.', linestyle='')

    plt.xlabel('phi (degree)', funcs.font2)
    plt.ylabel('Number of sources per arcmin^2', funcs.font2)
    plt.tick_params(labelsize=16)
    plt.semilogy()
    # plt.legend(['in(0-20 arcmin)','out(20-40 arcmin)'])
    plt.show()
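
Converting the (2, N) output of poisson_conf_interval into the lower/upper yerr format expected by plt.errorbar is the recurring idiom in this example; a minimal sketch (counts made up):

import numpy as np
import matplotlib.pyplot as plt
from astropy.stats import poisson_conf_interval

counts = np.array([5., 12., 0., 33.])               # histogram counts (made up)
ci = poisson_conf_interval(counts, interval='frequentist-confidence')
yerr = np.array([counts - ci[0], ci[1] - counts])   # rows: lower, upper lengths
plt.errorbar(np.arange(counts.size), counts, yerr=yerr,
             marker='.', linestyle='')
plt.show()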
Example #7
    def __init__(self, time, counts, err=None, input_counts=True,
                 gti=None, err_dist='poisson', mjdref=0, dt=None):
        """
        Make a light curve object from an array of time stamps and an
        array of counts.

        Parameters
        ----------
        time: iterable
            A list or array of time stamps for a light curve

        counts: iterable
            A list or array of the counts in each bin corresponding to the
            bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin).

        err: iterable, optional, default None
            A list or array of the uncertainties in each bin corresponding to
            the bins defined in `time` (note: use `input_counts=False` to
            input the count rate, i.e. counts/second, otherwise use
            counts/bin). If None, we assume the data are Poisson-distributed
            and calculate the error from the average of the lower and upper
            1-sigma confidence intervals for the Poisson distribution with
            mean equal to `counts`.

        input_counts: bool, optional, default True
            If True, the code assumes that the input data in 'counts'
            is in units of counts/bin. If False, it assumes the data
            in 'counts' is in counts/second.

        gti: 2-d float array, default None
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. They are *not* applied to the data by default.
            They will be used by other methods to have an indication of the
            "safe" time intervals to use during analysis.

        err_dist: str, optional, default='poisson'
            Statistic of the Lightcurve, used to calculate the
            uncertainties and other statistical values appropriately.
            Any valid statistic other than 'poisson' keeps the errors
            equal to zero.

        mjdref: float
            MJD reference (useful in most high-energy mission data)


        Attributes
        ----------
        time: numpy.ndarray
            The array of midpoints of time bins.

        bin_lo:
            The array of lower time stamp of time bins.

        bin_hi:
            The array of higher time stamp of time bins.

        counts: numpy.ndarray
            The counts per bin corresponding to the bins in `time`.

        counts_err: numpy.ndarray
            The uncertainties corresponding to `counts`.

        countrate: numpy.ndarray
            The counts per second in each of the bins defined in `time`.

        countrate_err: numpy.ndarray
            The uncertainties corresponding to `countrate`.

        meanrate: float
            The mean count rate of the light curve.

        meancounts: float
            The mean counts of the light curve.

        n: int
            The number of data points in the light curve.

        dt: float
            The time resolution of the light curve.

        mjdref: float
            MJD reference date (tstart / 86400 gives the date in MJD at the
            start of the observation)

        tseg: float
            The total duration of the light curve.

        tstart: float
            The start time of the light curve.

        gti: 2-d float array
            [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
            Good Time Intervals. They indicate the "safe" time intervals
            to be used during the analysis of the light curve.

        err_dist: string
            Statistic of the Lightcurve, used to calculate the
            uncertainties and other statistical values appropriately.
            It propagates to Spectrum classes.

        """

        if not np.all(np.isfinite(time)):
            raise ValueError("There are inf or NaN values in "
                             "your time array!")

        if not np.all(np.isfinite(counts)):
            raise ValueError("There are inf or NaN values in "
                             "your counts array!")

        if len(time) != len(counts):
            raise StingrayError("time and counts array are not "
                                "of the same length!")

        if len(time) <= 1:
            raise StingrayError("At least two data points are needed "
                                "to create a light curve!")

        if err is not None:
            if not np.all(np.isfinite(err)):
                raise ValueError("There are inf or NaN values in "
                                 "your err array")
        else:
            if err_dist.lower() not in valid_statistics:
                # err_dist set can be increased with other statistics
                raise StingrayError("Statistic not recognized."
                                    "Please select one of these: ",
                                    "{}".format(valid_statistics))
            if err_dist.lower() == 'poisson':
                # Instead of the simple square root, we use confidence
                # intervals (should be valid for low fluxes too)
                err_low, err_high = poisson_conf_interval(
                    np.asarray(counts),
                    interval='frequentist-confidence', sigma=1)
                # calculate approximately symmetric uncertainties: the
                # bounds must be referenced to the counts before averaging
                err = (np.absolute(err_low - np.asarray(counts)) +
                       np.absolute(err_high - np.asarray(counts))) / 2.0
                # other estimators can be implemented for other statistics
            else:
                simon("Stingray only uses poisson err_dist at the moment, "
                      "We are setting your errors to zero. "
                      "Sorry for the inconvenience.")
                err = np.zeros_like(counts)

        self.mjdref = mjdref
        self.time = np.asarray(time)
        if dt is None:
            self.dt = np.median(self.time[1:] - self.time[:-1])
        else:
            self.dt = dt

        self.bin_lo = self.time - 0.5 * self.dt
        self.bin_hi = self.time + 0.5 * self.dt

        self.err_dist = err_dist

        self.tstart = self.time[0] - 0.5*self.dt
        self.tseg = self.time[-1] - self.time[0] + self.dt

        self.gti = \
            np.asarray(assign_value_if_none(gti,
                                            [[self.tstart,
                                              self.tstart + self.tseg]]))
        check_gtis(self.gti)

        good = create_gti_mask(self.time, self.gti)

        self.time = self.time[good]
        if input_counts:
            self.counts = np.asarray(counts)[good]
            self.countrate = self.counts / self.dt
            self.counts_err = np.asarray(err)[good]
            self.countrate_err = np.asarray(err)[good] / self.dt
        else:
            self.countrate = np.asarray(counts)[good]
            self.counts = self.countrate * self.dt
            self.counts_err = np.asarray(err)[good] * self.dt
            self.countrate_err = np.asarray(err)[good]

        self.meanrate = np.mean(self.countrate)
        self.meancounts = np.mean(self.counts)
        self.n = self.counts.shape[0]

        # Issue a warning if the input time iterable isn't regularly spaced,
        # i.e. the bin sizes aren't equal throughout.
        dt_array = np.diff(self.time)
        if not (np.allclose(dt_array, np.repeat(self.dt, dt_array.shape[0]))):
            simon("Bin sizes in input time array aren't equal throughout! "
                  "This could cause problems with Fourier transforms. "
                  "Please make the input time evenly sampled.")
Example #8
def phase_fold(time,
               epoch_info,
               p_test,
               outpath,
               bin=20,
               net_percent=0.9,
               shift=0.0,
               label='test',
               text=None,
               save=False,
               show=True):
    """Fold event arrival times at the trial period p_test and plot the
    pulse profile, with Poisson confidence bands on the profile and on
    the estimated background level."""
    turns = trans(time, p_test, shift)
    loc = np.zeros(bin)
    for index in turns:
        loc[int(index * bin)] += 1
    AM = 1 - min(loc) / max(loc)
    A0 = AM / (2 - AM)
    print('A0={0}'.format(A0))
    x = (np.arange(bin) + 0.5) / bin  # phase-bin midpoints
    src_bkg = 1 - net_percent
    bkg_y = len(time) * src_bkg / bin
    # Poisson confidence band for the expected background level
    bkg_y_low, bkg_y_high = poisson_conf_interval(
        bkg_y, interval='frequentist-confidence')
    fig = plt.figure(1, (10, 7.5))
    ax1 = fig.add_subplot(111)
    bkg_x = [0, 2]
    plt.fill_between(bkg_x,
                     bkg_y_low,
                     bkg_y_high,
                     facecolor='green',
                     alpha=0.5)
    x2 = np.concatenate((x, x + 1))
    y2 = np.concatenate((loc, loc))
    T_in_perbin = funcs.get_T_in_mbins(epoch_info, 2 * np.pi / p_test, bin,
                                       shift * 2 * np.pi)

    correct_gap = T_in_perbin / (sum(T_in_perbin) / len(T_in_perbin))
    print('correct_gap=', correct_gap)
    correct_gap = correct_gap + 1e-5  # avoid division by zero in empty bins
    y2 /= np.concatenate((correct_gap, correct_gap))
    y2_err = np.array(
        poisson_conf_interval(y2, interval='frequentist-confidence'))
    # convert confidence bounds to asymmetric error-bar lengths
    y2_err[0] = y2 - y2_err[0]
    y2_err[1] = y2_err[1] - y2

    # plt.title("#{0} P={1:.2f},C={2}".format(label, p_test, str(len(time))), fontsize=18)
    plt.xlabel('Phase', font1)
    plt.ylabel('Counts/bin', font1)
    plt.tick_params(labelsize=18)
    plt.ylim(0, (np.max(y2) + np.max(y2)**0.5) * 1.05)
    plt.step(np.concatenate(([0], x2)),
             np.concatenate(([y2[0]], y2)),
             color='red',
             linewidth=1.5)
    plt.errorbar(x2 - 0.5 / bin,
                 y2,
                 yerr=y2_err,
                 fmt='.',
                 capsize=1,
                 elinewidth=1.5,
                 ecolor='red',
                 linewidth=1.5)
    if text:
        plt.text(0.05,
                 0.95,
                 '{0}, P={1:.2f}s'.format(text, p_test),
                 fontsize=18,
                 fontweight='semibold',
                 transform=ax1.transAxes)
    plt.text(1.6,
             0.03 * np.max(y2),
             'C={0}'.format(str(len(time))),
             fontsize=18)

    ax2 = ax1.twinx()
    yhigh = (np.max(y2) + np.max(y2)**0.5) * 1.05 / np.mean(y2)
    ax2.set_ylabel('Normalized flux', font1)
    ax2.plot([0, 2], [1.0, 1.0], '--', color='green')
    ax2.set_ylim([0, yhigh])
    ax2.tick_params(labelsize=18)
    if save:
        plt.savefig(outpath + 'pfold_lc_{0}.pdf'.format(label),
                    bbox_inches='tight',
                    pad_inches=0.1)
    if show:
        plt.show()
    else:
        plt.close()
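
The background band here uses the scalar form of poisson_conf_interval on the expected background counts per bin; a minimal sketch (numbers made up):

import matplotlib.pyplot as plt
from astropy.stats import poisson_conf_interval

bkg_per_bin = 3.7  # expected background counts per phase bin (made up)
low, high = poisson_conf_interval(bkg_per_bin,
                                  interval='frequentist-confidence')
plt.fill_between([0, 2], low, high, facecolor='green', alpha=0.5)
plt.show()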