Example #1
    def opt_depth_inverse(self, z, tau):
        """
        Return Energy in GeV for redshift z and optical depth tau from BSpline Interpolation

        Parameters
        ----------
        z: float
            redshift
        tau: float
            optical depth

        Returns
        -------
        float, energy in GeV
        """

        tau_array = self._tauSpline(self._logEGeV,z)[:,0]

        mask = np.concatenate([[True], np.diff(tau_array) > 0.])

        while not np.all(np.diff(tau_array[mask]) > 0.) and np.sum(mask):
            new_mask = np.full(mask.size, False)
            new_mask[mask] = np.concatenate([[True], np.diff(tau_array[mask]) > 0.])
            mask = new_mask

        if not np.sum(mask):
            raise ValueError("Could not interpolate tau vs E")

        Enew = USpline(tau_array[mask], self._logEGeV[mask],
                       s=0, k=1, ext='extrapolate')

        return np.power(10.,Enew(tau))
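The iterative masking above prunes flat or decreasing stretches of the tau(E) curve so that the inverted spline has strictly increasing abscissae, as UnivariateSpline requires. A minimal standalone sketch of the same technique on a made-up saturating tau curve (all values hypothetical):

import numpy as np
from scipy.interpolate import UnivariateSpline as USpline

logE = np.linspace(1., 5., 50)                # log10(E/GeV)
tau = np.maximum(10.**(logE - 3.), 3e-2)      # flat at low energies

# keep only strictly increasing tau values
mask = np.concatenate([[True], np.diff(tau) > 0.])
while not np.all(np.diff(tau[mask]) > 0.) and np.sum(mask):
    new_mask = np.full(mask.size, False)
    new_mask[mask] = np.concatenate([[True], np.diff(tau[mask]) > 0.])
    mask = new_mask

# invert: energy as a function of optical depth
Enew = USpline(tau[mask], logE[mask], s=0, k=1, ext='extrapolate')
print(10.**Enew(1.))  # E(tau = 1) in GeV, here 1000 GeV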
Example #2
 def jet_bfield_scaled(self, rs, rvhe, r0, b0):
     """
     Function to get jet B-field strength. The function is an analytic
     approximation, defined by the constants, to the shape of the B-field
     vs. r from PC Jet model, scaled to rvhe. Strength scaled to r0 and B0.
     """
     xs = rs
     tr1 = np.log10((0.104778867386 / 0.3) * rvhe)
     tr2 = np.log10((0.763434306576 / 0.3) * rvhe)
     tt1 = np.log10((0.0656583839948 / 0.3) * rvhe)
     tt2 = np.log10((0.312675309121 / 0.3) * rvhe)
     sc1 = 1.35770127215
     sc2 = 2.9067727141
     st1 = 1.32933857554
     st2 = 9.99999939106
     f = 0.815193652746
     bs = 0.06 * (xs / rvhe)**-0.85
     bps = 1.2 * (xs / rvhe)**-0.68
     btr = .75 * (xs / rvhe)**-3. * (xs >= (0.27 / 0.3) * rvhe) + (
         (xs <= (0.27 / 0.3) * rvhe) * bps * f)
     b_erftr = 0.5 * special.erfc(
         -st1 * (np.log10(xs) - tt1)) * btr * 0.5 * special.erfc(
             st2 * (np.log10(xs) - tt2))
     b_erfcs = 0.5 * special.erfc(
         sc1 * (np.log10(xs) - tr1)) * bps + b_erftr + 0.5 * special.erfc(
             -sc2 * (np.log10(xs) - tr2)) * bs
     # find b_erfcs(r0) to get the scaling factor that rescales it to b0
     interp = USpline(xs, b_erfcs, k=1, s=0)
     ber_r0 = interp(r0)
     b_erfcs *= (b0 / ber_r0)
     return b_erfcs
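The tr/tt constants above are transition radii for erfc-based blending between power-law segments. A toy sketch of that blending pattern in isolation (hypothetical power laws and transition radius, not the PC Jet model itself):

import numpy as np
from scipy import special

r = np.logspace(-2., 2., 200)        # pc, hypothetical grid
b_inner = 1.2 * r**-0.68             # inner power law
b_outer = 0.06 * r**-0.85            # outer power law
r_tr, sharpness = 1.0, 1.36          # hypothetical transition

# erfc weight runs smoothly from 1 (r << r_tr) to 0 (r >> r_tr)
w = 0.5 * special.erfc(sharpness * (np.log10(r) - np.log10(r_tr)))
b = w * b_inner + (1. - w) * b_outer  # weights sum to 1 everywhere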
Example #3
    def _set_refflux(self, g11=1.):
        self.__g11 = g11
        self.__ref_flux = np.zeros_like(self._emax)

        self._x = np.logspace(
            1., 6., 100
        )  # np.logspace(np.log10(self._emin[0]), np.log10(self._emax[-1]), 300)
        self._dndedt = self._snalpflux.dnde_gray(self._x,
                                                 self._t_sec_ref,
                                                 g11,
                                                 self._m_neV,
                                                 bfield=self._bfield)
        self._dndedt[self._dndedt < 1e-40] = 1e-40
        self._intp = USpline(np.log(self._x), np.log(self._dndedt), s=0, k=1)

        from scipy.integrate import simps  # named 'simpson' in recent scipy versions
        for i, e in enumerate(self._emax):
            #self.__ref_flux[i] = self._snalpflux.integrateGRayFlux(self._emin[i], e,
            #self._t_sec_ref, g11,
            #self._m_neV, bfield = self._bfield,
            #esteps = 100, eflux = False)
            xi = np.logspace(np.log10(self._emin[i]), np.log10(self._emax[i]),
                             100)
            self.__ref_flux[i] = simps(
                np.exp(self._intp(np.log(xi))) * xi, np.log(xi))
Example #4
    def __set_interpolation(self):
        """Set the interpolation tables for spectrum and light curve"""
        self.__spls = []
        if self._Mprog == 10.:
            for i in range(1, ALPSNSignal.M10par.shape[1]):
                self.__spls.append(
                    USpline(ALPSNSignal.M10par[:, 0],
                            ALPSNSignal.M10par[:, i],
                            s=0,
                            k=1,
                            ext='extrapolate'))
        elif self._Mprog == 18.:
            for i in range(1, ALPSNSignal.M18par.shape[1]):
                self.__spls.append(
                    USpline(ALPSNSignal.M18par[:, 0],
                            ALPSNSignal.M18par[:, i],
                            s=0,
                            k=1,
                            ext='extrapolate'))

        return
Example #5
    def construct_spline(self):
        self.spline = USpline(self.lmbd_crop,
                              self.flux_crop,
                              w=self.ivar_crop,
                              k=self.spln_degr,
                              s=self.spln_smth)
        self.blue_most = self.lmbd_crop[0]

        # get blue extrapolation
        frst_derv = self.spline.derivative()
        drvs = frst_derv(self.lmbd)
        x_4k, x_5k, x_6k, x_8k, x_9k, x_Tk = np.searchsorted(
            self.lmbd, (4000, 5000, 6000, 8000, 9000, 10000))

        dr4k = np.mean(drvs[x_4k:x_5k])  # derivative centered on 4500
        dr5k = np.mean(drvs[x_5k:x_6k])  # derivative centered on 5500
        lb4k = np.mean(self.lmbd[x_4k:x_5k])  # exact lambda, roughly 4500
        lb5k = np.mean(self.lmbd[x_5k:x_6k])  # exact lambda, roughly 5500
        scnd_derv = (dr4k - dr5k) / (
            lb4k - lb5k)  # get second derivative between these two points

        dist = (self.blue_most -
                self.lmbd[0]) / 2  # distance to middle of extrapolated section
        b_fl, b_sl = self.spline.derivatives(
            self.blue_most)  # get flux, slope at blue-most kept point
        slop = b_sl - scnd_derv * dist
        intr = b_fl - slop * self.blue_most
        self.blue_slop = slop
        self.blue_intr = intr

        if self.spl_2:
            self.compute_cont_norm()
            self.spline = USpline(self.lmbd,
                                  self.cont,
                                  w=self.ivar,
                                  k=self.spln_degr,
                                  s=self.spln_smth)
            self.blue_most = 0
            self.cont = None
            self.norm = None
Example #6
    def __convolve_pdf(self):
        """Convolve the posterior distribution with a time delay"""
        if self._max_delay <= self._min_delay or self._max_delay == 0.:
            raise ValueError("Invalid delay choices")
        delay_unc = self._max_delay - self._min_delay

        x = np.arange(self._texp.min() - self._max_delay,
                      -1. * (self._texp.min() - self._max_delay),
                      np.min([0.01, delay_unc / 2.]))

        boxcar = ((0. < (x + self._max_delay)) &
                  ((x + self._max_delay) < delay_unc)).astype(float)
        boxcar /= boxcar.sum()

        pdf = self._kde.pdf(x)
        pdf_conv = np.convolve(pdf, boxcar, mode='same')
        self._pdf_conv = USpline(x, pdf_conv, k=1, s=0, ext='zeros')

        # integrate the pdf to get the cdf
        self._cdf = np.zeros_like(x)
        for i, xi in enumerate(x[1:]):
            xx = np.linspace(x[0], xi, i + 10)
            self._cdf[i + 1] = simps(self._pdf_conv(xx), xx)

        m = (x <= 0.) & (self._cdf < 1. - 1e-3)
        # remove values too close together, numerical imprecision
        n = np.diff(self._cdf[m]) > 1e-7
        if not np.all(np.diff(self._cdf[m][:-1][n]) > 0.):
            raise ValueError("CDF values must strictly increase!")

        self._cdfinv = USpline(self._cdf[m][:-1][n],
                               x[m][:-1][n],
                               k=1,
                               s=0,
                               ext='const')

        # test it:
        if np.any(np.isnan(self._cdfinv(self._cdf[m]))):
            raise ValueError("CDF Inverse returns nan!")
Example #7
    def update_dlog_spline(self,
                           table_row,
                           idx,
                           spline=dict(k=2, s=1e-3, ext='extrapolate')):
        """Update a likelihood interpolation curve with a new table row"""

        for k in self.t.keys():
            self.t[idx][k] = table_row[k]

        for j in range(table_row['dloglike_scan'].shape[0]):
            self._dlog_spline[idx][j] = USpline(table_row['norm_scan'][j] * \
                                table_row['ref_flux'][j],
                                table_row['dloglike_scan'][j] + table_row['loglike'][j],
                                **spline)
Example #8
    def __init__(self, walkerfile, min_delay=0., max_delay=0.):
        """
        Init the class

        Parameters
        ----------
        walkerfile: str
            path to MOSFIT results file which contains the walkers

        {options}

        min_delay: float
            minimum delay between core collapse and
            onset of optical emission in days, default: 0.

        max_delay: float
            maximum delay between core collapse and
            onset of optical emission in days, default: 0.
        """
        with open(walkerfile, 'r') as f:
            data = json.loads(f.read())
        if 'name' not in data:
            data = data[list(data.keys())[0]]

        model = data['models'][0]
        corner_input, var_names = get_corner_input(model)

        self._texpref = \
             data['models'][0]['realizations'][0]['parameters']['reference_texplosion']['value']

        m = ['exp' in v for v in var_names]
        # get the data of the explosion times
        self._texp = np.array(corner_input)[:, m].flatten()
        # Compute a gaussian kernel density estimate
        self._kde = gaussian_kde(self._texp, bw_method='scott')

        self._max_delay = max_delay
        self._min_delay = min_delay
        if self._max_delay > 0.:
            self._cdfinv = None
            self.__convolve_pdf()
        else:
            self._cdf = (np.cumsum(self._texp) - np.cumsum(self._texp)[0]) / \
                        (np.cumsum(self._texp)[-1] - np.cumsum(self._texp)[0])

            self._cdfinv = USpline(self._cdf,
                                   np.sort(self._texp),
                                   k=1,
                                   s=0,
                                   ext='const')
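The else branch builds the inverse CDF directly from the posterior samples. A minimal sketch of this inverse-transform sampling with the empirical CDF of a sorted synthetic sample (names and data are illustrative only):

import numpy as np
from scipy.interpolate import UnivariateSpline as USpline

rng = np.random.default_rng(0)
texp = np.sort(rng.normal(-2., 0.5, size=2000))   # synthetic posterior sample

# empirical CDF over the sorted sample
cdf = np.arange(1, texp.size + 1) / texp.size
cdfinv = USpline(cdf, texp, k=1, s=0, ext='const')

# uniform draws mapped through the inverse CDF give explosion times
draws = cdfinv(rng.random(10))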
Example #9
    def opt_depth_Inverse(self, z, tau):
        """
        Return log10(Energy/GeV) for redshift z and optical depth tau from BSpline interpolation

        Parameters
        ----------
        z: float
            redshift
        tau: float
            optical depth

        Returns
        -------
        energy, float, log10(E/GeV)
        """
        Enew = USpline(self.tauSpline(self.logEGeV, z)[:, 0], self.logEGeV,
                       s=0, k=2, ext='extrapolate')
        return Enew(tau)
Example #10
    def simulate_texp(self, size=1, apply_obs_times=True, seed=None):
        """
        Draw random explosion times from smeared explosion times

        :param size:
            int, size of simulated array
        :param apply_obs_times:
            bool, if True, only return times that lie within light curve intervals
        :param seed:
            int or None, seed for the random number generator
        :return:
            texp_sim: array-like, array with simulated explosion times

        """
        if seed is not None:
            np.random.seed(seed)
        qs = np.random.rand(size *
                            2)  # factor 2 since about half the draws fall outside exposure bins
        if apply_obs_times:
            # make a mask for texp times which is only true if
            # texp is within time intervals of light curve
            m = np.zeros(self.tpost.texp.size, dtype=bool)
            for i, tmin in enumerate(self._tmin):
                m = m | ((self.tpost.texp + self.tpost.texpref >= tmin) & \
                            (self.tpost.texp + self.tpost.texpref <= self._tmax[i]))
            # recompute CDF with limited times
            cdf = (np.cumsum(self.tpost._texp[m]) - np.cumsum(self.tpost._texp[m])[0]) / \
                        (np.cumsum(self.tpost._texp[m])[-1] - np.cumsum(self.tpost._texp[m])[0])
            # interpolate inverse
            cdfinv = USpline(cdf, self.tpost._texp[m], k=1, s=0, ext='const')
            texp_sim = cdfinv(qs) + self.tpost.texpref

            bins = np.empty((self._tmin.size + self._tmax.size, ),
                            dtype=self._tmax.dtype)
            bins[0::2] = self._tmin
            bins[1::2] = self._tmax

            binnum = np.digitize(texp_sim, bins)
            # select only those bins that lay within self._tmin and self._tmax:
            m = (binnum % 2).astype(bool)  # these are the bins with exposure
            return texp_sim[m], binnum[m]

        else:
            texp_sim = self.tpost.cdfinv(qs)

            return texp_sim
Example #11
    def opt_depth_inverse(self, z, tau):
        """
	Return Energy in GeV for redshift z and optical depth tau from BSpline Interpolation

	Parameter
	---------
	z:	float, 
		redshift
	tau:	float, 
		optical depth

	Returns
	-------
	float, energy in GeV
	"""
        Enew = USpline(self.__tauSpline(self._logEGeV, z)[:, 0],
                       self._logEGeV,
                       s=0,
                       k=1,
                       ext='extrapolate')
        return np.power(10., Enew(tau))
Example #12
    def get_jet_props_gen(self, z, tdoms_done=False):
        """
        Calculate the magnetic field as function of distance in the jet frame

        Parameters
        ----------
        z: array-like
            n-dim array with distance from BH in pc

        Returns
        -------
        B, Psi: tuple with :py:class:`numpy.ndarray`
            N-dim array with field strength in G along line of sight
            N-dim array with psi angles between photon polarization states
            and jet B field
        """
        # t1 = time.time()
        # get Bs from PC shape function
        Bs = self.jet_bfield_scaled(z, self._rvhe, self._r0,
                                    self._B0)  # r0 and rvhe in pc, b0 in G
        gammas = self.jet_gammas_scaled(z, self._r0, self._g0, self._rjet)

        if not tdoms_done:
            # t2 = time.time()
            if self._ft > 0 and self._l_tcor != 'jetdom' and self._l_tcor != 'jetwidth':
                # tangled domains
                d = z[0]
                self._tdoms = []
                tdom_seeds = np.arange(6007 + self._tseed,
                                       6007 + (2 * len(z)) + self._tseed, 1)
                np.random.seed(tdom_seeds)
                while d <= z[-1]:
                    self._tdoms.append(d)
                    d += np.random.uniform(self._l_tcor / 20.,
                                           self._l_tcor * 20.)
                self._tdoms = np.array(self._tdoms)

            elif self._l_tcor == 'jetwidth':  #self._ft > 0 and

                theta_m1_interp = USpline(np.log10(z), gammas)

                p = lambda r, rsw, c, a: c * (rsw + r)**a
                con = lambda r, rvhe, the: np.tan(the) * (r - rvhe)

                rsw = 1.e-5 * self._rvhe
                C = 1.49 * rsw**0.42
                A = 0.58

                self._tdoms = [self._rvhe]

                jwf_seeds = np.arange(4000 + self._tseed,
                                      4000 + 1000 + self._tseed, 1)
                np.random.seed(jwf_seeds)

                while self._tdoms[-1] <= z[-1]:  # tdoms in pc here

                    if self._jwf_dist == 'Uniform':
                        self._jwf = np.random.uniform(0.1, 1.)
                    elif self._jwf_dist == 'Normal':
                        jwft = 0.
                        while jwft < 0.1 or jwft > 1.:
                            self._jwf = np.random.normal(0.55, 0.15)
                            jwft = self._jwf
                    elif self._jwf_dist == 'Triangular Rise':
                        self._jwf = np.random.triangular(0.1, 1., 1.)
                    elif self._jwf_dist == 'Triangular Lower':
                        self._jwf = np.random.triangular(0.1, 0.1, 1.)

                    theta = 1. / theta_m1_interp(np.log10(self._tdoms[-1]))
                    self._tdoms.append(
                        self._tdoms[-1] + self._jwf *
                        (p(self._rvhe, rsw, C, A) +
                         con(self._tdoms[-1], self._rvhe, theta))
                    )  # doms length is jetwidth

                self._tdoms = np.array(self._tdoms)

                if len(self._tdoms) - 1 > len(z) or min(np.diff(z)) > min(
                        np.diff(self._tdoms)):
                    logging.warning(
                        "Not resolving tangled field: min z step is {}"
                        "pc but min tangled length is {} pc".format(
                            min(np.diff(z)), min(np.diff(self._tdoms))))
                    logging.warning(
                        "# of z doms is {} but # tangled doms is {}".format(
                            len(z), len(self._tdoms)))
                    self._trerun = True

                    if len(self._tdoms) - 1 > len(z):
                        logging.info("rerunning with r = tdoms")
                        return self.get_jet_props_gen(np.sqrt(
                            self._tdoms[1:] * self._tdoms[:-1]),
                                                      tdoms_done=True)
                    else:
                        self._newbounds = self._tdoms
                        while len(self._newbounds) <= 400:
                            btwn = (self._newbounds[1:] +
                                    self._newbounds[:-1]) / 2.
                            self._newbounds = np.sort(
                                np.concatenate((self._newbounds, btwn)))

                        logging.info(
                            "rerunning with {} domains. new min z step is {} pc"
                            .format(len(self._newbounds),
                                    min(np.diff(self._newbounds))))

                        return self.get_jet_props_gen(np.sqrt(
                            self._newbounds[1:] * self._newbounds[:-1]),
                                                      tdoms_done=True)

            else:
                self._tdoms = z

        # set up tangled field angles
        tthe_seeds = np.arange(0 + self._tseed,
                               len(self._tdoms) + self._tseed, 1)
        tphi_seeds = np.arange(1007 + self._tseed,
                               1007 + len(self._tdoms) + self._tseed, 1)
        np.random.seed(tthe_seeds)
        self._tthes = np.random.random(size=len(self._tdoms)) * np.pi / 2.
        np.random.seed(tphi_seeds)
        self._tphis = np.random.random(size=len(self._tdoms)) * 2. * np.pi

        BTs, phis = [], []

        # t4 = time.time()

        BhelrT = Bs[np.argmin(np.abs(z - self._r_T))] * np.sqrt(1. - self._ft)

        for i, l in enumerate(z):
            # if i == int(len(z)/2):
            # t6 = time.time()
            B = Bs[i]  # Gauss
            B_tang = B * np.sqrt(self._ft)
            B_hel = B * np.sqrt(1. - self._ft)

            h_phi = np.pi / 2.  #just align helix phi with one axis, why not?
            if l <= self._r_T:  #Set section size
                # B_hel *= BhelrT*(l/self._r_T)**(self._Bt_exp + 1.) #make B_hel go like Bt_exp (make 1 '-a')

                fact = (BhelrT * (l / self._r_T)**(self._Bt_exp)) / B_hel
                B_hel *= np.where(fact > 1., 1., fact)

            # if i == int(len(z)/2):
            # t7 = time.time()

            # tangled field angles; the two original branches executed
            # identical code, so they are merged here
            if (self._ft > 0. or self._l_tcor == 'jetwidth') and \
                    len(z) != len(self._tdoms) - 1:
                # could probably be faster
                t_phi = self._tphis[np.argmin(
                    [l - tl for tl in self._tdoms if l >= tl])]
                t_the = self._tthes[np.argmin(
                    [l - tl for tl in self._tdoms if l >= tl])]
            else:
                t_phi = self._tphis[i]
                t_the = self._tthes[i]

            # if i == int(len(z)/2):
            #     t8 = time.time()
            #     print("getting tangled angles for 1 domain out of {} took {}s".format(len(z),t8-t7))

            h_phi = np.pi / 2.
            h_the = np.pi / 2.  # in r-05 section theta is included in B_fact term ^ otherwise = pi/2 by def
            B_tang_t = B_tang * np.sin(t_the)
            B_hel_t = B_hel * np.sin(h_the)

            Bt_x = np.sqrt((B_hel_t * np.cos(h_phi))**2 +
                           (B_tang_t * np.cos(t_phi))**2)
            Bt_y = np.sqrt((B_hel_t * np.sin(h_phi))**2 +
                           (B_tang_t * np.sin(t_phi))**2)

            phi = np.arctan(Bt_y / Bt_x)
            Bt = np.sqrt(Bt_x**2 + Bt_y**2)

            BTs.append(Bt)
            phis.append(phi)
            # if i == int(len(z)/2):
            #     t9 = time.time()
            #     print("total BT for 1 domain out of {} took {}s".format(len(z),t9-t6))
            # if abs(l - 1.) <= 1.e-2:
            #     print("BT at around 1 pc: {} pc {} G".format(l,Bt))

        # t5 = time.time()
        # print("Calculating BTs took {}s".format(t5-t4))
        # print("this run through the get_jet_props function took {}s".format(t5-t1))
        return np.array(BTs), np.array(phis)
Example #13
def calc_Eresol(edisp, etrue, ereco, axis=0, idx=-1, conf=0.68):
    """
    Calculate energy resolution from energy dispersion matrix

    Parameters
    ----------
    edisp: nxm dim np.array, energy dispersion matrix 
    etrue: n dim np.array, true energy values (bin centers)
    ereco: m dim np.array, reconstructed energy values (bin centers)

    kwargs
    ------
    axis: int, axis of true energy (default: 0)
    conf: float, interval around median for energy resolution (default: 0.68)
    idx: int, if >= 0, exit at this energy index and return diagnostic values

    Return
    ------
    m dim np.array with energy resolution dE / E
    """
    # form the cumulative distribution along the true energy axis
    invert_axis = int(not axis)
    # new version
    idmax = np.argmax(edisp, axis=axis)
    edmax = np.max(edisp, axis=axis)

    # old version
    cdf = np.cumsum(edisp, axis=axis)
    dE = np.empty(edisp.shape[invert_axis])

    cen = 0.5

    for j in range(edisp.shape[invert_axis]):  # loop over ereco

        if not axis:
            p = interp1d(cdf[:, j], etrue)
            spline = USpline(np.log(etrue), edisp[:, j], s=5e-5, ext=0)
        else:
            p = interp1d(cdf[j, :], etrue)
            spline = USpline(np.log(etrue), edisp[j, :], s=5e-5, ext=0)


#	cen = spline.integral(np.log(etrue[0]), np.log(etrue[idmax[j]])) / spline.integral(np.log(etrue[0]), np.log(etrue[-1]))
#	if cen <= 0. or cen >= 1. : cen = 0.5

        try:
            Elo = p(cen - conf / 2.)
        except ValueError:
            Elo = etrue[0]
        try:
            Eup = p(cen + conf / 2.)
        except ValueError:
            Eup = etrue[-1]
        #dE[j] = (Eup - Elo) / ereco[j]

        if idx >= 0 and j == idx:
            return p(cen), Elo, Eup, etrue, p, ereco[j]

        dE[j] = (Eup - Elo) / p(cen)

    # compute the energies +/- 32 % around median and the width
    #ielo = argmin(abs(edisp - (np.diag(edisp[idmax,:]) - conf / 2.)), axis = axis)
    #ieup = argmin(abs(edisp - (np.diag(edisp[idmax,:]) + conf / 2.)), axis = axis)
    #ielo = argmin(abs(edisp - (np.diag(edisp[idmax,:]) - conf / 2.)), axis = axis)
    #ieup = argmin(abs(edisp - (np.diag(edisp[ielo,:]) + conf)), axis = axis)
    #print idmax
    #print ielo
    #print ieup
    #print etrue[ieup],etrue[idmax],etrue[ielo]
    #return (etrue[ieup] - etrue[ielo]) / ereco
    return dE
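The core of the function is inverting the per-bin CDF of the dispersion matrix with interp1d to read off the conf-containment interval around the median. A self-contained sketch for a single column, using a Gaussian dispersion in log-energy (all numbers hypothetical):

import numpy as np
from scipy.interpolate import interp1d

etrue = np.logspace(1., 4., 200)                        # true energies
edisp = np.exp(-0.5 * ((np.log10(etrue) - 2.5) / 0.1)**2)
edisp /= edisp.sum()                                    # one edisp column

cdf = np.cumsum(edisp)
p = interp1d(cdf, etrue)                                # quantile -> energy
conf = 0.68
Elo, Eup = p(0.5 - conf / 2.), p(0.5 + conf / 2.)
dE_over_E = (Eup - Elo) / p(0.5)                        # resolution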
Example #14
def calc_conf_radius(hist_etht,
                     theta,
                     conf,
                     start=1,
                     tmax=None,
                     t_lbounds=None,
                     emin=None,
                     emax=None,
                     e_lbounds=None,
                     interp=True):
    """
    Calculate the containment radius of the halo component for all energies 
    for a given confidence level.

    Parameters
    ----------
    hist_etht:	(n,m,(k))-dim array, output of ELMAG simulation, 
		n: energies, m: angular sep., k: time delay. k dimension is optional.
    theta:	m-dim array, bin centers of angular separation
    conf:	float, 0 < conf < 1, desired containment level

    kwargs
    ------
    emin:	float or None, minimum considered energy of halo photons in eV
    emax:	float or None, maximum considered energy of halo photons in eV
    e_lbounds:	(n+1)-dim array, left bin bounds of the energy in eV
    tmax:	float or None, maximum time delay of the cascade in years. 
    		If given, you need to provide the time delay bin bounds with 
		the t_lbounds keyword
    t_lbounds:	(j+1)-dim array, log10 of left bin bounds of the time delay
    interp:	bool, if true, use spline interpolation to find the containment 
    		radius. If false, use the nearest neighbour below the desired 
		containment level
    start:	int, 
    		first theta bin to be used (0: all photons are cascade photons)

    Returns
    -------
    float, containment radius in degrees
    """
    if conf < 0 or conf > 1:
        raise ValueError(
            "Confidence level must be >= 0 and <= 1, not {0:.3f}".format(conf))

    if tmax is not None:
        idt = np.where(
            t_lbounds <= np.log10(tmax))  # choose a maximum time delay
        if not len(idt[0]):
            raise ValueError(
                "No cascade photons pass the chosen max. time cut of {0:.3e} years!"
                .format(tmax))
        elif idt[0][-1] == 0:
            raise ValueError(
                "No cascade photons pass the chosen max. time cut of {0:.3e} years!"
                .format(tmax))

        idt = idt[0][-1]
        hist = hist_etht[:, start:, :idt].sum(
            axis=2)  # index 1 in column 1: do not consider
        # primary photons, sum over all time delays
    elif len(hist_etht.shape) == 2:  # case of no time axis
        hist = hist_etht
    else:
        hist = hist_etht[:, start:, :].sum(axis=2)

    if emax is not None:
        ide = np.where(e_lbounds <= emax)
        if not len(ide[0]):
            raise ValueError(
                "No cascade photons pass the chosen max. energy cut of {0:.3e} eV!"
                .format(emax))
        elif ide[0][-1] == 0:
            raise ValueError(
                "No cascade photons pass the chosen max. energy cut of {0:.3e} eV!"
                .format(emax))
        ide = ide[0][-1]
        hist = hist[:ide if ide <= hist.shape[0] else None, :]
    if emin is not None:
        ide = np.where(e_lbounds >= emin)[0][0]
        hist = hist[ide:, :]

    if np.all(hist < 1e-10):  # no cascade photons
        warnings.warn(
            "All entries smaller than 1e-10. No cascade photons? Retuning 0.",
            RuntimeWarning)
        return 0.
    hist = np.cumsum(hist.sum(axis=0))
    hist /= hist[-1]  # CDF

    if interp:
        idmax = np.where(np.round(hist, 5) == 1.)  # first entry where cdf is 1
        if not len(idmax) or not len(idmax[0]):
            sh = slice(None)
            st = slice(1, None)
        else:
            idmax = idmax[0][0]
            sh = slice(idmax + 1 if idmax <= hist.shape[0] else None)
            st = slice(start,
                       idmax + 2 if idmax + 1 <= theta.shape[0] - 1 else None)

        # interpolate inverted cdf
        # exclude first bin with primary emission
        try:
            cdf_inv = USpline(hist[sh],
                              np.log10(theta[st]),
                              s=0,
                              k=1,
                              ext='raise')
            if np.isnan(cdf_inv(conf)):
                # interpolation did not work, likely because there are not
                # many cascade photons with the applied cuts. Using nearest bin instead.
                idx = np.where(hist <= conf)
                return (theta[start:])[idx[0][-1]]
        except Exception:
            idx = np.where(hist <= conf)
            if not len(idx) or not len(idx[0]):
                warnings.warn(
                    "No confidence level below {0:.3f}, minimum is {1:.3f}. Returning 0"
                    .format(conf, hist[0]), RuntimeWarning)
                return 0.
            return (theta[start:])[idx[0][-1]]
        return np.power(10., cdf_inv(conf))

    else:
        idx = np.where(hist <= conf)
        if not len(idx) or not len(idx[0]):
            warnings.warn(
                "No confidence level below {0:.3f}, minimum is {1:.3f}. Returning 0"
                .format(conf, hist[0]), RuntimeWarning)
            return 0.
        return (theta[start:])[idx[0][-1]]
Example #15
    def __init__(self, alp, source, **kwargs):
        """
	Initialize mixing in the magnetic field of the jet, 
	assumed here to be coherent

	Parameters
	----------
	alp: `~gammaALPs.ALP`
	    `~gammaALPs.ALP` object with ALP parameters

	source: `~gammaALPs.Source`
	    `~gammaALPs.Source` object with source parameters

	kwargs
	------
	EGeV: `~numpy.ndarray` 
	    Gamma-ray energies in GeV

	restore: str or None
	    if str, identifier for files to restore environment. 
	    If None, initialize mixing with new B field
	restore_path: str
	    full path to environment files
	rgam: float
	    distance of gamma-ray emitting region to BH in pc (default: 0.1)
	sens: float
	    0 < sens < 1; sets the number of domains: 
	    the B field in the n-th domain changes as B_n = sens * B_{n-1}
	rbounds: list or `~numpy.ndarray`
	    bin bounds for steps along line of sight in pc, 
	    default: log range between rgam and Rjet
	    with step size chosen such that B field changes 
	    by sens parameter in each step


	B field kwargs:

	B0: float
	    Jet field at r = R0 in G (default: 0.1)
	r0: float
	    distance from BH where B = B0 in pc (default: 0.1)
	alpha: float
	    exponent of the toroidal magnetic field (default: -1.)
	psi: float
	    Angle between one photon polarization state and B field. 
	    Assumed constant over entire jet. (default: pi / 4)
	helical: bool
	    if True, use helical magnetic-field model from Clausen-Brown et al. (2011). 
	    In this case, the psi kwarg is treated as the phi angle 
	    of the photon trajectory in the cylindrical jet coordinate system
	    (default: True)

	Electron density kwargs:

	n0: float
	    electron density at R0 in cm**-3 (default 1e3)
	beta: float
	    exponent of electron density (default = -2.)
	equipartition: bool
	    if true, assume equipartition between electrons and the B field. 
	    This overwrites beta with 2 * alpha and sets n0 from the minimum 
	    electron lorentz factor set with gamma_min
	gamma_min: float
	    minimum lorentz factor of emitting electrons, only used if equipartition = True
	gamma_max: float
	    maximum lorentz factor of emitting electrons, only used if equipartition = True
	    by default assumed to be gamma_min * 1e4

	Jet kwargs:

	Rjet: float
	    maximum jet length in pc (default: 1000.)
	theta_obs: float
	    Angle between l.o.s. and jet axis in degrees (default: 3.) 
	bulk_lorentz: float
	    bulk lorentz factor of gamma-ray emitting plasma (default: 10.)
	theta_jet: float
	    Jet opening angle in degrees. If not given, assumed to be 1./bulk_lorentz
	"""
        kwargs.setdefault('EGeV', np.logspace(0., 4., 100))
        kwargs.setdefault('restore', None)
        kwargs.setdefault('restore_path', './')
        kwargs.setdefault('sens', 0.99)
        kwargs.setdefault('rgam', 0.1)
        # Bfield kwargs
        kwargs.setdefault('helical', True)
        kwargs.setdefault('B0', 0.1)
        kwargs.setdefault('r0', 0.1)
        kwargs.setdefault('alpha', -1.)
        kwargs.setdefault('psi', np.pi / 4.)
        # electron density kwargs
        kwargs.setdefault('n0', 1e3)
        kwargs.setdefault('beta', -2.)
        kwargs.setdefault('equipartition', True)
        kwargs.setdefault('gamma_min', 1.)
        kwargs.setdefault('gamma_max', 1e4 * kwargs['gamma_min'])
        # jet kwargs
        kwargs.setdefault('Rjet', 1000.)

        # calculate doppler factor
        self._Rjet = kwargs['Rjet']
        self._psi = kwargs['psi']
        self._source = source

        nsteps = int(np.ceil(kwargs['alpha'] *
                             np.log(self._Rjet / kwargs['rgam']) /
                             np.log(kwargs['sens'])))
        kwargs.setdefault(
            'rbounds',
            np.logspace(np.log10(kwargs['rgam']), np.log10(self._Rjet),
                        nsteps))
        self._rbounds = kwargs['rbounds']

        self._r = np.sqrt(self._rbounds[1:] * self._rbounds[:-1])
        dL = self._rbounds[1:] - self._rbounds[:-1]

        if kwargs['restore'] is None:
            self._b = jet.Bjet(kwargs['B0'], kwargs['r0'], kwargs['alpha'])
            B, psi = self._b.new_Bn(self._r, psi=kwargs['psi'])
            if kwargs['helical']:
                B, psi = self.Bjet_calc(B, psi)

            if kwargs['equipartition']:
                kwargs['beta'] = kwargs['alpha'] * 2.

                intp = USpline(np.log10(self._r), np.log10(B), k=1, s=0)
                B0 = 10.**intp(np.log10(kwargs['r0']))
                # see e.g.https://arxiv.org/pdf/1307.4100.pdf Eq. 2
                kwargs['n0'] = B0** 2. / 8. / np.pi \
                     / kwargs['gamma_min'] / (c.m_e * c.c ** 2.).to('erg').value / \
                     np.log(kwargs['gamma_max'] / kwargs['gamma_min'])
                logging.info(
                    "Assuming equipartition at r0: n0(r0) = {0[n0]:.3e} cm^-3".
                    format(kwargs))

            self._neljet = njet.NelJet(kwargs['n0'], kwargs['r0'],
                                       kwargs['beta'])

            # init the transfer function with absorption
            super(MixJet, self).__init__(kwargs['EGeV'],
                                         B * 1e6,
                                         psi,
                                         self._neljet(self._r),
                                         dL * 1e-3,
                                         alp,
                                         Gamma=None,
                                         chi=None,
                                         Delta=None)

            # transform energies to stationary frame
            self._ee /= self._source._doppler
        else:
            tra = super(MixJet,
                        self).readEnviron(kwargs['restore'],
                                          alp,
                                          filepath=kwargs['restore_path'])
            super(MixJet, self).__init__(tra.EGeV,
                                         tra.B,
                                         tra.psi,
                                         tra.nel,
                                         tra.dL,
                                         tra.alp,
                                         Gamma=tra.Gamma,
                                         chi=tra.chi,
                                         Delta=tra.Delta)
        return
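The equipartition branch sets n0 from B0 via n0 = B0^2 / (8 pi gamma_min m_e c^2 ln(gamma_max / gamma_min)), Eq. 2 of arXiv:1307.4100 as cited in the code. A standalone sketch of just that calculation with hypothetical numbers:

import numpy as np
from astropy import constants as c

B0 = 0.1                          # G, field at r0 (hypothetical)
gamma_min, gamma_max = 1., 1e4    # lorentz-factor range (hypothetical)

# equipartition between electron and magnetic energy densities
n0 = B0**2 / (8. * np.pi * gamma_min
              * (c.m_e * c.c**2).to('erg').value
              * np.log(gamma_max / gamma_min))
print("n0 = {0:.3e} cm^-3".format(n0))   # ~5e1 cm^-3 for these values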
Example #16
    def fit(
        self,
        smoother: SmoothMethod = "poly",
        degree: int = DEFAULT_POLY_DEGREE,
        spline_smooth: float = DEFAULT_SPLINE_SMOOTH,
        detrend: bool = False,
        return_callable: bool = False,
    ) -> Tuple[ndarray, ndarray, Optional[Callable[[ndarray], ndarray]]]:
        """Computer the specified smoothing function values for a set of eigenvalues.

        Parameters
        ----------
        eigs: ndarray
            The sorted eigenvalues

        smoother: "poly" | "spline" | "gompertz" | lambda
            The type of smoothing function used to fit the step function

        degree: int
            The degree of the polynomial or spline

        spline_smooth: float
            The smoothing factors passed into scipy.interpolate.UnivariateSpline

        detrend: bool
            Whether or not to perform EMD detrending before returning the
            unfolded eigenvalues.

        return_callable: bool
            If true, return a function that closes over the fit parameters so
            that, e.g., additional values can be fit later.


        Returns
        -------
        unfolded: ndarray
            the unfolded eigenvalues

        steps: ndarray
            the step-function values

        closure: callable or None
            a function that closes over the fit parameters,
            or None if return_callable is False
        """
        eigs = self._eigs
        # steps = _step_function_fast(eigs, eigs)
        steps = np.arange(0, len(eigs)) + 1
        self.__validate_args(smoother=smoother,
                             degree=degree,
                             spline_smooth=spline_smooth)

        if smoother == "poly":
            poly_coef = polyfit(eigs, steps, degree)
            unfolded = polyval(eigs, poly_coef)
            func = (lambda x: polyval(x, poly_coef)) if return_callable else None
            if detrend:
                unfolded = emd_detrend(unfolded)
            return unfolded, steps, func

        if smoother == "spline":
            k = DEFAULT_SPLINE_DEGREE
            try:
                k = int(degree)
            except BaseException as e:
                print(ValueError("Cannot convert spline degree to int."))
                raise e
            if spline_smooth == "heuristic":
                s = len(eigs) * np.var(eigs, ddof=1)
                spline = USpline(eigs, steps, k=k, s=s)
            elif spline_smooth is not None:
                if not isinstance(spline_smooth, float):
                    raise ValueError("Spline smoothing factor must be a float")
                spline = USpline(eigs, steps, k=k, s=spline_smooth)
            else:
                raise ValueError(
                    "Unreachable: All possible spline_smooth arguments should have been handled."
                )
            func = (lambda x: spline(x)) if return_callable else None
            unfolded = spline(eigs)
            if detrend:
                unfolded = emd_detrend(unfolded)
            return unfolded, steps, func

        if smoother == "gompertz":
            # use steps[end] as guess for the asymptote, a, of gompertz curve
            [a, b, c], cov = curve_fit(gompertz,
                                       eigs,
                                       steps,
                                       p0=(steps[-1], 1, 1))
            func = (lambda x: gompertz(x, a, b, c)) if return_callable else None
            unfolded = gompertz(eigs, a, b, c)
            if detrend:
                unfolded = emd_detrend(unfolded)
            return unfolded, steps, func
        raise RuntimeError("Unreachable!")
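For the spline branch, the fit is a smoothing spline through the eigenvalue staircase, with the "heuristic" smoothing factor scaling with the sample variance. A minimal sketch with synthetic eigenvalues (names mirror the snippet; the data is made up):

import numpy as np
from scipy.interpolate import UnivariateSpline as USpline

eigs = np.sort(np.random.default_rng(1).standard_normal(500))
steps = np.arange(0, len(eigs)) + 1        # step-function values

s = len(eigs) * np.var(eigs, ddof=1)       # heuristic smoothing factor
spline = USpline(eigs, steps, k=3, s=s)
unfolded = spline(eigs)                    # unfolded eigenvalues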
Example #17
for i in range(z_removed.size):
    depths = np.linspace(0, z0[-1] + idepth[-i], npts)

    # begin interglacial period
    N *= np.exp(-L * ti)
    Nexp = sim.simple_expose_slow(depths, ti, be10, alt, lat)
    N += Nexp

    depthsm = depths / rho / 100.0
    Ns[i] = N

    # begin glaciation
    N *= np.exp(-L * tg)  # isotopic decay
    # erode the top of the profile away
    depths -= z_removed[-i]
    nofz = USpline(depths, N, k=3, s=0)
    depths = np.linspace(0, depths[-1], npts)
    N = nofz(depths)

    #depthsm = depths / rho / 100.0

# account for recent cosmic ray exposure
N *= np.exp(-L * tH)
Nhol = sim.simple_expose_slow(depths, tH, be10, alt, lat)
N += Nhol
Ns[-1] = N

fig_height = 5  # in.
fig_width = 3.5  # in.

fig = plt.figure(figsize=(fig_width, fig_height))
Example #18
    def interp_llh(self, B, l_coh, th_jet, idx_halo=-1, **interp_kwargs):
        """
        Interpolate log likelihood cube for a particular set of
        simulation parameters

        Parameters
        ----------
        B: float
            IGMF strength for which the logl cube is interpolated

        l_coh:
            coherence length for which the logl cube is interpolated

        th_jet:
            theta jet value for which the logl cube is interpolated

        idx_halo: int or "profile"
            halo normalization to be used.
        """
        interp_kwargs.setdefault('method', 'nearest')
        interp_kwargs.setdefault('fill_value', None)
        interp_kwargs.setdefault('bounds_error', False)

        values = {"B": B, "th_jet": th_jet, "maxTurbScale": l_coh}

        if self._llh is None:
            raise ValueError(
                "No logl cube initialized, run get_llh_one_source function first"
            )

        # select a sub cube with the right parameters
        idx = {}
        for k in ["B", "maxTurbScale", "th_jet"]:
            idx[k] = np.where(self._params[k] == values[k])[0]
            if not len(idx[k]):
                raise ValueError("{0} not in list: {1}".format(
                    values[k], self._params[k]))

        llh = self._llh[idx["B"][0], idx["maxTurbScale"][0], idx["th_jet"][0]]
        norms = self._norms[idx["B"][0], idx["maxTurbScale"][0],
                            idx["th_jet"][0]]

        # select how you treat the halo normalization
        if isinstance(idx_halo, str) and idx_halo == "profile":
            # profile over halo normalization
            llh = llh.max(axis=-1)
        elif isinstance(idx_halo, int):
            llh = llh[..., idx_halo]
        else:
            raise ValueError("idx_halo not understood")

        # now llh has shape Cutoff x Index x source normalization
        # but for each Cutoff and Index, normalization array is different
        # therefore, perform first a piece-wise interpolation for each
        # combination of cutoff and index over the same norm array
        self._log_norm_array = np.linspace(np.log10(norms.min()),
                                           np.log10(norms.max()),
                                           int(norms.shape[-1] * 4. / 3.))

        self._index_array = np.linspace(-0.5, 5., 50)

        self._llh_grid = np.zeros(
            (self._params["Cutoff"].size, self._params["Index"].size,
             self._log_norm_array.size))

        # first pass: bring likelihood cube to regular grid over norms
        for i, cut in enumerate(self._params['Cutoff']):
            for j, ind in enumerate(self._params['Index']):

                # for some reason, sometimes the same normalization values are stored,
                # which makes the interpolation crash
                if not np.all(np.diff(np.log10(norms[i, j])) > 0.):
                    mcut = np.diff(np.log10(norms[i, j])) > 0.
                    norms_increasing = np.insert(norms[i, j][1:][mcut], 0,
                                                 norms[i, j][0])
                    llh_increasing = np.insert(llh[i, j][1:][mcut], 0,
                                               llh[i, j][0])
                    idxs = np.argsort(norms_increasing)
                    spline = USpline(np.log10(norms_increasing[idxs]),
                                     llh_increasing[idxs],
                                     k=2,
                                     s=0,
                                     ext='extrapolate')

                else:
                    spline = USpline(np.log10(norms[i, j]),
                                     llh[i, j],
                                     k=2,
                                     s=0,
                                     ext='extrapolate')
                self._llh_grid[i, j] = spline(self._log_norm_array)

        self._llh_grid_extend = np.zeros(
            (self._params["Cutoff"].size, self._index_array.size,
             self._log_norm_array.size))

        # second pass: extend likelihood over index with 2D spline interpolation
        for i, cut in enumerate(self._params['Cutoff']):
            for j, norm in enumerate(self._log_norm_array):
                spline = USpline(self._params["Index"],
                                 self._llh_grid[i, :, j],
                                 k=2,
                                 s=0,
                                 ext='extrapolate')
                self._llh_grid_extend[i, :, j] = spline(self._index_array)

        # now perform the grid interpolation
        #self._interp = RegularGridInterpolator(points=(self._params["Cutoff"],
        #                                               self._params["Index"],
        #                                               self._log_norm_array),
        #                                       values=self._llh_grid,
        #                                       **interp_kwargs
        #                                       )
        self._interp = RegularGridInterpolator(points=(self._params["Cutoff"],
                                                       self._index_array,
                                                       self._log_norm_array),
                                               values=self._llh_grid_extend,
                                               **interp_kwargs)
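Once the two interpolation passes have put the likelihood on a regular (Cutoff, Index, log-norm) grid, a query is a single RegularGridInterpolator call. A toy sketch with a random stand-in cube (axes and values hypothetical):

import numpy as np
from scipy.interpolate import RegularGridInterpolator

cutoff = np.linspace(0.1, 10., 20)
index = np.linspace(-0.5, 5., 50)
log_norm = np.linspace(-13., -9., 40)
cube = np.random.default_rng(2).random((20, 50, 40))   # stand-in for llh grid

interp = RegularGridInterpolator((cutoff, index, log_norm), cube,
                                 method='nearest', bounds_error=False,
                                 fill_value=None)
val = interp([[1., 2., -11.]])   # one (Cutoff, Index, log10 norm) query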
Example #19
    def __init__(self,
                 lcfile,
                 walkerfile,
                 m_neV,
                 Mprog,
                 bfield='jansson12',
                 cosmo=FlatLambdaCDM(H0=0.7 * 100. * u.km / u.s / u.Mpc,
                                     Tcmb0=2.725 * u.K,
                                     Om0=0.3),
                 min_delay=0.,
                 max_delay=0.,
                 t_sec_ref=20.,
                 spline=dict(k=2, s=1e-3, ext='extrapolate')):
        """
        Initialize the class

        Parameters
        ----------
        lcfile: str
            path for combined lc file in fits or npy format

        walkerfile: str
            path to MOSFIT results file which contains the walkers

        m_neV: float
            ALP mass in neV

        Mprog: float
            progenitor mass in solar masses
            (currently only 10. and 18. implemented)

        {options}

        bfield: str
            Milky Way Bfield identifier, default: jansson12

        cosmo: `~astropy.cosmology.FlatLambdaCDM`
            used cosmology

        spline: dict
            dictionary with keywords for spline interpolation 
            of likelihood functions

        min_delay: float
            minimum delay between core collapse and
            onset of optical emission in days, default: 0.

        max_delay: float
            maximum delay between core collapse and
            onset of optical emission in days, default: 0.

        """
        if 'fits' in lcfile:
            self._t = Table.read(lcfile)
        elif 'npy' in lcfile:
            self._t = np.load(lcfile).flat[0]

        self._emin = np.unique(self._t['emin_sed'].data)
        self._emax = np.unique(self._t['emax_sed'].data)
        self._tmin = self._t['tmin'].data
        self._tmax = self._t['tmax'].data
        self._tcen = 0.5 * (self._tmin + self._tmax)
        self._dt = self._tmax - self._tmin

        self._walkerfile = walkerfile
        self._Mprog = Mprog
        self._m_neV = m_neV
        self._bfield = bfield
        self._cosmo = cosmo
        self._snalpflux = SNALPflux(walkerfile, Mprog=Mprog, cosmo=cosmo)
        self._t_sec_ref = t_sec_ref
        self._set_refflux()

        # get the posterior for the explosion time
        self._tpost = TexpPost(walkerfile,
                               min_delay=min_delay,
                               max_delay=max_delay)

        # arrays to store cached g11 and flux for likelihood calculation
        self.__g11cache = None
        self.__fluxcache = None

        # spline interpolations
        self._dlog_spline = []

        # loop over time bins
        for i, dlog in enumerate(self._t['dloglike_scan']):
            # loop energy bins
            self._dlog_spline.append([])
            for j in range(self._emax.size):
                self._dlog_spline[i].append(USpline(self._t['norm_scan'][i,j] * \
                                self._t['ref_flux'][i,j],
                                dlog[j] + self._t['loglike'][i,j], **spline))
Example #20
OmegaK = cosmo.Ok0
H0 = (cosmo.H0).value

pi = np.pi
#c = 299792.458  #km/s
Delta_c = 200
deltac = 1.675
h = cosmo.h
G = 4.302e-9  # (km/s)^2 Mpc / Msun

teste = ascii.read("matterpower.dat")
k, Pk = teste['col1'], teste['col2']
mink = min(k)
maxk = max(k)

P = USpline(k,Pk,s=0,k=5)

matt_corr_table = Table.read("matt_corr.fits")
raios_matt, matt_cor = matt_corr_table['col0'], matt_corr_table['col1']
matter_correlation_norm = USpline(raios_matt, np.array(matt_cor)/0.644809, s=0, k=5)

Window = lambda x: (3/(x*x*x))*(np.sin(x)-x*np.cos(x))
rhoM = OmegaM*(rho_c_cl_mpc*1.9891e30)

H = lambda a: H0*np.sqrt(OmegaM*(a)**(-3) + OmegaK*(a)**(-2) + OmegaL)

def a(z):
    return 1/(1+z)

def D1(a):
    return (H(a))*integrate.quad(lambda x: 1/(x*H(x)/H0)**3 , 0,a)[0]
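With P(k) splined, the rms fluctuation in a top-hat window of radius R follows from sigma^2(R) = (1 / 2 pi^2) * int k^2 P(k) W(kR)^2 dk. A hedged sketch reusing the names defined above (P, Window, mink, maxk, pi, integrate); the quad limit is only a pragmatic choice for the oscillatory integrand:

def sigma(R):
    # rms of the density field smoothed on scale R (same length unit as 1/k)
    integrand = lambda k: k * k * P(k) * Window(k * R)**2
    return np.sqrt(integrate.quad(integrand, mink, maxk, limit=200)[0]
                   / (2. * pi**2))

# e.g. sigma(8/h) approximates sigma_8 for the tabulated power spectrum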
Example #21
    def __init__(self,
                 llh,
                 taublr,
                 EGeVllh,
                 EGeVtau,
                 fluxllh,
                 rtau,
                 kx=2,
                 ky=2,
                 fit_mode='mle',
                 covar=None):
        """
        Initialize the class

        Parameters
        ----------
        llh: `~numpy.ndarray`
            if fit_mode = 'mle':
                n x m dimensional cube, dimension n: energy bins, dimension m: flux bins,
                each entry gives the log likelihood value for that energy bin and flux
            else if fit_mode = 'chi2':
                flux measurements

        taublr: `~numpy.ndarray`
            i x k dimensional cube with optical depths for i energies and k distances

        EGeVllh:`~numpy.ndarray`
            if fit_mode = 'mle':
                n dimensional array with central bin energies for llh cube
            else if fit_mode = 'chi2':
                energies corresponding to the flux measurements

        EGeVtau:`~numpy.ndarray`
            i dimensional array with central bin energies for tau cube

        fluxllh: `~numpy.ndarray`
            if fit_mode = 'mle':
                m dimensional array with central flux bin values for llh cube
                or n x m dimensional array with central flux bin values for llh cube
                for each energy bin
            else if fit_mode = 'chi2':
                the flux measurement errors

        rtau :`~numpy.ndarray`
            k dimensional array with central distance bin values for tau cube

        covar: array-like
            covariance matrix of flux measurements, only used if fit_mode = 'chi2'

        fit_mode: str
            either 'mle' or 'chi2'. If 'mle' a maximum likelihood estimate is performed and
            a least square fit otherwise.
        """
        self._taublr = taublr
        self._EGeVtau = EGeVtau
        self._rtau = rtau
        self._profile_llh = None
        self._profile_par_names = None
        self._profile_scale = None

        self._scale_name = None
        self._index_name = None
        self._norm_name = None
        self._fit_mode = fit_mode

        self._EGeVllh = EGeVllh
        if fit_mode == 'mle':
            self._fluxllh = fluxllh
            self._llh = llh
            self._llh_intp = []

            # piece-wise interpolation of llh
            if len(self._fluxllh.shape) == 1:
                self._fluxllh[self._fluxllh == 0.] = 1e-40 * np.ones(
                    np.sum(self._fluxllh == 0.))
                for i, l in enumerate(self._llh):
                    self._llh_intp.append(
                        USpline(np.log(self._fluxllh),
                                l,
                                k=2,
                                s=0,
                                ext='extrapolate'))
            elif len(self._fluxllh.shape) == 2:
                for i, l in enumerate(self._llh):
                    self._fluxllh[i][self._fluxllh[i] == 0.] = \
                        1e-40 * np.ones(np.sum(self._fluxllh[i] == 0.))
                    self._llh_intp.append(
                        USpline(np.log(self._fluxllh[i]),
                                l,
                                k=2,
                                s=0,
                                ext='extrapolate'))
        elif fit_mode == 'chi2':
            self._y = llh
            self._dy = fluxllh
            if covar is None:
                self._cov_inv = None
            else:
                self._cov_inv = np.linalg.inv(covar)
        else:
            raise ValueError(
                "Unknown fit mode chosen, must be either 'mle' or 'chi2'")

        # interpolate taublr
        self.__tauSpline = RBSpline(np.log(EGeVtau),
                                    np.log(rtau),
                                    self._taublr,
                                    kx=kx,
                                    ky=ky)

        return