Example #1
def microfacetFourierSeries(mu_o, mu_i, etaC, alpha, n, relerr=1e-3):
    A, B = getAB(mu_i, mu_o, etaC, alpha)

    if sp.i0e(B) * np.exp(A+B) < 1e-10:
        return [0.0]

    B_max = Bmax(n, relerr)
    if B > B_max:
        A = A + B - B_max + math.log(sp.i0e(B) / sp.i0e(B_max))
        B = B_max

    expcos_coeffs = expcos_fseries(A, B, relerr)
    if B == 0.0:
        phiMax = f.safe_acos(-1.0)
    else:
        phiMax = f.safe_acos(1.0 + np.log(relerr) / B)

    lowfreq_coeffs = microfacetNoExpFourierSeries(mu_o, mu_i, etaC, alpha, 12, phiMax)

    result = fseries_convolve(lowfreq_coeffs, len(lowfreq_coeffs), expcos_coeffs, len(expcos_coeffs))

    for i in range(len(result)):
        if result[i] == 0 or np.abs(result[i]) < result[0] * relerr:
            result = result[:i]
            break

    return result
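A note on the pattern above (an aside, not part of the source): scipy.special.i0e(x) returns exp(-|x|) * I0(x), so a product such as I0(B) * exp(A) can be evaluated as i0e(B) * exp(A + B) without overflow, which is exactly what the early-exit test and the B_max clamping rely on. A minimal sketch:

import numpy as np
import scipy.special as sp

A, B = -800.0, 750.0
# naive form: i0(750) overflows to inf and exp(-800) underflows to 0 -> nan
print(sp.i0(B) * np.exp(A))
# scaled form: both factors sit comfortably in float64 range
print(sp.i0e(B) * np.exp(A + B))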
Example #2
def det_deriv(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    """
    evaluate derivative of Det M w.r.t. omega_bar
    where omega_bar = w/k_par/v_A
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    xi_i = w_bar / np.sqrt(bi)
    xi_e = w_bar * np.sqrt(ti_te / mi_me / bi)
    Z_i = zp(xi_i)
    Z_e = zp(xi_e)
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    Gamma_0i = i0e(alpha_i)
    Gamma_0e = i0e(alpha_e)

    G_i = 1 / np.sqrt(bi) * ((1 - 2 * xi_i**2) * Z_i - 2 * xi_i)
    G_e = 1/np.sqrt(bi) * ((1-2*xi_e**2)*Z_e - 2 * xi_e) \
        * np.sqrt(ti_te / mi_me)

    # Aprime, dA / d omega_bar
    Ap = Gamma_0i * G_i + ti_te * Gamma_0e * G_e
    Cp = Gamma_1i * G_i - Gamma_1e * G_e
    Dp = 2 * (Gamma_1i * G_i + 1 / ti_te * Gamma_1e * G_e)

    a = A(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    c = C(ti_te, mi_me, bi, kperp_rhoi, w_bar)
    d = D(ti_te, mi_me, bi, kperp_rhoi, w_bar)

    res = Ap * (d - 2 / bi) + a * Dp - 2 * c * Cp
    return res
Example #3
def B(ti_te, mi_me, kperp_rhoi):
    """
    Calculate
    B = sum_s [ (Ti/Ts) ( 1 - Gamma_0(alpha_s)) ]
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    return 1 - i0e(alpha_i) - ti_te * (1 - i0e(alpha_e))
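Context for this and the following dispersion-relation helpers (a sketch, not from the source): the gyrokinetic factor Gamma_0(alpha) = exp(-alpha) * I0(alpha) is exactly what scipy.special.i0e computes, which is why i0e(alpha_s) appears wherever Gamma_0 is needed:

import numpy as np
from scipy.special import i0, i0e

alpha = 0.8
# i0e is the exponentially scaled Bessel function, i.e. Gamma_0 itself
assert np.isclose(i0e(alpha), np.exp(-alpha) * i0(alpha))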
Example #4
def F(ti_te, mi_me, kperp_rhoi):
    """
    F = sum_s (2Ts/Ti) * Gamma_1s
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    return 2 * Gamma_1i - 2 * Gamma_1e / ti_te
Example #5
def E(ti_te, mi_me, kperp_rhoi):
    """
    E = sum_s (qs/qi) * Gamma_1s
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)
    return Gamma_1i - Gamma_1e
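Likewise, the combination i0e(alpha) - i1e(alpha) used for Gamma_1 in these examples is the scaled form of Gamma_1(alpha) = exp(-alpha) * (I0(alpha) - I1(alpha)); a quick check of the identity (a sketch, not from the source):

import numpy as np
from scipy.special import i0, i1, i0e, i1e

alpha = 0.8
gamma_1 = np.exp(-alpha) * (i0(alpha) - i1(alpha))
assert np.isclose(i0e(alpha) - i1e(alpha), gamma_1)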
Example #6
def A(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    """
    Calculate
    A  = sum_s [ (Ti/Ts) (1 + Gamma_0s xi_s Z_s) ]
    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    xi_i = w_bar / np.sqrt(bi)
    xi_e = w_bar * np.sqrt(ti_te / mi_me / bi)
    Z_i = zp(xi_i)
    Z_e = zp(xi_e)
    Gamma_0i = i0e(alpha_i)
    Gamma_0e = i0e(alpha_e)
    return 1 + Gamma_0i * xi_i * Z_i + ti_te * (1 + Gamma_0e * xi_e * Z_e)
Example #7
def gen_data(dtypes, shapes):
    dtype = dtypes[0]
    shape = shapes[0]
    input_data = random_gaussian(shape, miu=3.75).astype(dtype)
    expect = sp.i0e(input_data)
    output = np.full(expect.shape, np.nan, dtype)
    return expect, [input_data], output
Example #8
    def log_like(self, params):
        """
            log-likelihood function,
            params : current sample as dictionary (filled from Prior)
        """

        #generate waveform
        hphc = np.array(self.wave.compute_hphc(params))

        # dh , hh
        inner_prods = np.transpose([
            list(self.inner_products_singleifo(i, ifo, params, hphc))
            for i, ifo in enumerate(self.ifos)
        ])
        dh = np.sum(inner_prods[0])
        hh = np.real(np.sum(inner_prods[1]))

        if self.marg_phi_ref:
            dh = np.abs(dh)
            R = dh + np.log(i0e(dh))
        else:
            dh = np.real(dh)
            R = dh

        lnl = R - 0.5 * hh

        return np.real(lnl)
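The phase-marginalized statistic R = log(i0e(|dh|)) + |dh| = log(I0(|dh|)) rests on the identity (1/2pi) * Integral_0^{2pi} exp(x cos(phi)) dphi = I0(x). A numerical sketch of that identity (nothing here comes from the source beyond numpy/scipy):

import numpy as np
from scipy.integrate import quad
from scipy.special import i0e

x = 30.0
# integrate exp(x*(cos(phi) - 1)) so the integrand stays O(1);
# the result is exp(-x) * I0(x), i.e. i0e(x)
val, _ = quad(lambda phi: np.exp(x * (np.cos(phi) - 1.0)), 0.0, 2.0 * np.pi)
assert np.isclose(val / (2.0 * np.pi), i0e(x))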
Example #9
def C(ti_te, mi_me, bi, kperp_rhoi, w_bar):
    """
    C = sum_s (qi/qs) Gamma_1s xi_s Z_s 

    """
    alpha_i = kperp_rhoi**2 / 2
    alpha_e = alpha_i / ti_te / mi_me
    xi_i = w_bar / np.sqrt(bi)
    xi_e = w_bar * np.sqrt(ti_te / mi_me / bi)
    Z_i = zp(xi_i)
    Z_e = zp(xi_e)
    Gamma_1i = i0e(alpha_i) - i1e(alpha_i)
    Gamma_1e = i0e(alpha_e) - i1e(alpha_e)

    res = Gamma_1i * xi_i * Z_i - Gamma_1e * xi_e * Z_e
    return res
Example #10
 def _setup_phase_marginalization(self):
     self._bessel_function_interped = interp1d(
         np.logspace(-5, 10, int(1e6)),
         np.logspace(-5, 10, int(1e6)) +
         np.log([i0e(snr) for snr in np.logspace(-5, 10, int(1e6))]),
         bounds_error=False,
         fill_value=(0, np.nan))
Example #11
def kappa_to_stddev(kappa):
    '''
        Convert kappa to wrapped gaussian std dev

        std = 1 - I_1(kappa)/I_0(kappa)
    '''
    # return 1.0 - spsp.i1(kappa)/spsp.i0(kappa)
    return np.sqrt(-2.*np.log(spsp.i1e(kappa)/spsp.i0e(kappa)))
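The i1e/i0e ratio is used here because the exp(-kappa) scale factors cancel, giving I1(kappa)/I0(kappa) without overflow even for very large kappa. A minimal sketch (not from the source):

import numpy as np
import scipy.special as spsp

kappa = 800.0
print(spsp.i1(kappa) / spsp.i0(kappa))    # inf / inf -> nan
print(spsp.i1e(kappa) / spsp.i0e(kappa))  # ~0.99937, well-behaved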
Example #12
def kappa_to_stddev(kappa):
    '''
        Convert kappa to wrapped gaussian std dev

        std = 1 - I_1(kappa)/I_0(kappa)
    '''
    # return 1.0 - spsp.i1(kappa)/spsp.i0(kappa)
    return np.sqrt(-2. * np.log(spsp.i1e(kappa) / spsp.i0e(kappa)))
Example #13
    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        # get model params
        p = self.current_params.copy()
        p.update(self.static_params)

        hh = 0.0
        hd = 0j
        for ifo in self.data:
            # get detector antenna pattern
            fp, fc = self.det[ifo].antenna_pattern(
                p["ra"], p["dec"], p["polarization"], self.antenna_time[ifo]
            )
            # get timeshift relative to end of data
            dt = self.det[ifo].time_delay_from_earth_center(
                p["ra"], p["dec"], p["tc"]
            )
            dtc = p["tc"] + dt - self.end_time[ifo]
            tshift = numpy.exp(-2.0j * numpy.pi * self.fedges[ifo] * dtc)
            # generate template and calculate waveform ratio
            hp, hc = get_fd_waveform_sequence(
                sample_points=Array(self.fedges[ifo]), **p
            )
            htilde = numpy.array(fp * hp + fc * hc) * tshift
            r = (htilde / self.h00_sparse[ifo]).astype(numpy.complex128)
            r0 = r[:-1]
            r1 = (r[1:] - r[:-1]) / (
                self.fedges[ifo][1:] - self.fedges[ifo][:-1]
            )

            # <h, d> is sum over bins of A0r0 + A1r1
            hd += numpy.sum(
                self.sdat[ifo]["a0"] * r0 + self.sdat[ifo]["a1"] * r1
            )
            # <h, h> is sum over bins of B0|r0|^2 + 2B1Re(r1r0*)
            hh += numpy.sum(
                self.sdat[ifo]["b0"] * numpy.absolute(r0) ** 2.0
                + 2.0 * self.sdat[ifo]["b1"] * (r1 * numpy.conjugate(r0)).real
            )
        hd = abs(hd)
        llr = numpy.log(special.i0e(hd)) + hd - 0.5 * hh
        return float(llr)
Example #14
 def test_besseli_larger(self, dtype):
   x = np.random.uniform(1., 20., size=int(1e4)).astype(dtype)
   try:
     from scipy import special  # pylint: disable=g-import-not-at-top
     self.assertAllClose(
         special.i0e(x), self.evaluate(special_math_ops.bessel_i0e(x)))
     self.assertAllClose(
         special.i1e(x), self.evaluate(special_math_ops.bessel_i1e(x)))
   except ImportError as e:
     tf_logging.warn('Cannot test special functions: %s' % str(e))
Example #15
def like_equal_d_cosi(a_hat, f, d, cosi):
    """
    For a network equally sensitive to plus and cross, calculate the
    likelihood marginalized over the 2 phases.  This is a function of d and cosi.
    Note: we use uniform prior on phi, psi (=1/2pi)
    :param a_hat: the f-stat parameters of the signal
    :param f: the detector response (f = F+ = Fx)
    :param d: distance of template
    :param cosi: cos(inclination) of template
    """
    # calculate the two dimensional likelihood in circular polarization
    # marginalized over the two phases
    ar_hat, al_hat = fstat.a_to_circ_amp(a_hat)
    d0 = a_hat[0]
    al = (d0 / d) * (1 - cosi)**2 / 4
    ar = (d0 / d) * (1 + cosi)**2 / 4
    like = exp(- f ** 2 * (al - al_hat) ** 2) * \
           exp(- f ** 2 * (ar - ar_hat) ** 2) * \
           special.i0e(2 * f ** 2 * al * al_hat) * special.i0e(2 * f ** 2 * ar * ar_hat)
    return like
Example #16
def stddev_to_kappa_single(stddev):
    '''
        Converts stddev to kappa

        No closed-form, does a line optimisation
    '''

    errfunc = lambda kappa, stddev: (np.exp(-0.5*stddev**2.) - spsp.i1e(kappa)/spsp.i0e(kappa))**2.
    kappa_init = 1.0
    kappa_opt = spopt.fmin(errfunc, kappa_init, args=(stddev, ), disp=False)

    return np.abs(kappa_opt[0])
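A round-trip usage sketch, assuming kappa_to_stddev from Example #11 is in scope along with the function above (both encode the same relation exp(-stddev**2 / 2) = I1(kappa)/I0(kappa)):

import numpy as np

for stddev in (0.3, 0.5, 1.0):
    kappa = stddev_to_kappa_single(stddev)
    assert np.isclose(kappa_to_stddev(kappa), stddev, rtol=1e-2)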
Example #17
def stddev_to_kappa_single(stddev):
    '''
        Converts stddev to kappa

        No closed-form, does a line optimisation
    '''

    errfunc = lambda kappa, stddev: (np.exp(-0.5 * stddev**2.) - spsp.i1e(
        kappa) / spsp.i0e(kappa))**2.
    kappa_init = 1.0
    kappa_opt = spopt.fmin(errfunc, kappa_init, args=(stddev, ), disp=False)

    return np.abs(kappa_opt[0])
Example #18
    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        # get model params
        p = self.current_params.copy()
        p.update(self.static_params)
        wfs = self.get_waveforms(p)

        hh = 0.0
        hd = 0j
        for ifo in self.data:

            det = self.det[ifo]
            freqs = self.fedges[ifo]
            sdat = self.sdat[ifo]
            hp, hc = wfs[ifo]
            h00 = self.h00_sparse[ifo]
            end_time = self.end_time[ifo]
            times = self.antenna_time[ifo]

            # project waveform to detector frame
            fp, fc = det.antenna_pattern(p["ra"], p["dec"],
                                         p["polarization"], times)
            dt = det.time_delay_from_earth_center(p["ra"], p["dec"], times)
            dtc = p["tc"] + dt - end_time

            hdp, hhp = self.lik(freqs, fp, fc, dtc,
                                hp, hc, h00,
                                sdat['a0'], sdat['a1'],
                                sdat['b0'], sdat['b1'])

            hd += hdp
            hh += hhp

        hd = abs(hd)
        llr = numpy.log(special.i0e(hd)) + hd - 0.5 * hh
        return float(llr)
Example #19
 def _loglr(self):
     r"""Computes the log likelihood ratio,
     .. math::
         \log \mathcal{L}(\Theta) =
             \log I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
             \frac{1}{2}\left<h^0_i, h^0_i\right>,
     at the current point in parameter space :math:`\Theta`.
     Returns
     -------
     float
         The value of the log likelihood ratio evaluated at the given point.
     """
     params = self.current_params
     try:
         wfs = self.waveform_generator.generate(**params)
     except NoWaveformError:
         return self._nowaveform_loglr()
     except FailedWaveformError as e:
         if self.ignore_failed_waveforms:
             return self._nowaveform_loglr()
         else:
             raise e
     hh = 0.
     hd = 0j
     for det, h in wfs.items():
         # the kmax of the waveforms may be different than internal kmax
         kmax = min(len(h), self._kmax[det])
         if self._kmin[det] >= kmax:
             # if the waveform terminates before the filtering low frequency
             # cutoff, then the loglr is just 0 for this detector
             hh_i = 0.
             hd_i = 0j
         else:
             # whiten the waveform
             h[self._kmin[det]:kmax] *= \
                 self._weight[det][self._kmin[det]:kmax]
             # calculate inner products
             hh_i = h[self._kmin[det]:kmax].inner(
                 h[self._kmin[det]:kmax]).real
             hd_i = self._whitened_data[det][self._kmin[det]:kmax].inner(
                 h[self._kmin[det]:kmax])
         # store
         setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
         hh += hh_i
         hd += hd_i
     hd = abs(hd)
     self._current_stats.maxl_phase = numpy.angle(hd)
     return numpy.log(special.i0e(hd)) + hd - 0.5 * hh
Example #20
def ln_i0(value):
    """
    A numerically stable method to evaluate ln(I_0) a modified Bessel function
    of order 0 used in the phase-marginalized likelihood.

    Parameters
    ==========
    value: array-like
        Value(s) at which to evaluate the function

    Returns
    =======
    array-like:
        The natural logarithm of the bessel function
    """
    return np.log(i0e(value)) + value
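Usage sketch for ln_i0 (illustrative, not from the source): a direct log(i0(x)) overflows once I0(x) exceeds the float64 range, around x ~ 700, while the scaled form stays finite:

import numpy as np
from scipy.special import i0

x = 1000.0
print(np.log(i0(x)))  # i0(1000) -> inf, so this prints inf
print(ln_i0(x))       # ~995.63, finite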
Example #21
    def forward(ctx, v, z):

        assert isinstance(v, Number), 'v must be a scalar'

        ctx.save_for_backward(z)
        ctx.v = v
        z_cpu = z.data.cpu().numpy()

        if np.isclose(v, 0):
            output = special.i0e(z_cpu, dtype=z_cpu.dtype)
        elif np.isclose(v, 1):
            output = special.i1e(z_cpu, dtype=z_cpu.dtype)
        else:  # v > 0
            output = special.ive(v, z_cpu, dtype=z_cpu.dtype)

        return torch.Tensor(output).to(z.device)
Example #22
    def _loglr(self):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \sum_i
                \left<h_i(\Theta)|d_i\right> -
                \frac{1}{2}\left<h_i(\Theta)|h_i(\Theta)\right>,

        at the current parameter values :math:`\Theta`.

        Returns
        -------
        float
            The value of the log likelihood ratio.
        """
        # get model params
        p = self.current_params.copy()
        p.update(self.static_params)

        llr = 0.
        for ifo in self.data:
            # get detector antenna pattern
            fp, fc = self.det[ifo].antenna_pattern(p['ra'], p['dec'],
                                                   p['polarization'], p['tc'])
            ip = numpy.cos(p['inclination'])
            ic = 0.5 * (1.0 + ip * ip)
            htf = (fp * ip + 1.0j * fc * ic) / p['distance']
            # get timeshift relative to fiducial waveform
            dt = self.det[ifo].time_delay_from_earth_center(
                p['ra'], p['dec'], p['tc'])
            dtc = p['tc'] + dt - self.end_time - self.ta[ifo]
            # generate template and calculate waveform ratio
            r0, r1 = self.waveform_ratio(p, htf, dtc=dtc)
            # <h, d> is real part of sum over bins of A0r0 + A1r1
            hd = numpy.sum(self.sdat[ifo]['a0'] * r0 +
                           self.sdat[ifo]['a1'] * r1).real
            # marginalize over phase
            hd = numpy.log(special.i0e(hd)) + abs(hd)
            # <h, h> is real part of sum over bins of B0|r0|^2 + 2B1Re(r1r0*)
            hh = numpy.sum(self.sdat[ifo]['b0'] * numpy.absolute(r0)**2. +
                           2. * self.sdat[ifo]['b1'] *
                           (r1 * numpy.conjugate(r0)).real).real
            # increment loglr
            llr += (hd - 0.5 * hh)
        return float(llr)
Example #23
 def rice(self, x, nu, sig, cut_off=20, log_out=False):
     """
 Probability distribution function
 rice_scipy(x,nu/sig,scale=sig)==rice(x,nu,sig) (wikipedia)
 for asymptotic expression check Andersen 1996  (Letters to editor)
 cut_off: snr cut_off for asymptotic expression
 log_out: output log of probability
 """
     if np.isscalar(nu):
         is_scalar = True
         nu = np.array([nu])
         if np.isscalar(x):
             x = np.array([x])
     else:
         nu = np.array(nu)
         is_scalar = False
     x = np.array(x)
     res = np.zeros_like(nu, dtype=float)
     sig = np.ones_like(nu) * sig
     #assert(np.sum(sig==0)==0), "Sigma is zero; check bounds"
     snr = nu / sig
     mask_cut = (snr < cut_off)
     if not log_out:
         res[mask_cut] = st.rice.pdf(x[mask_cut],
                                     snr[mask_cut],
                                     scale=sig[mask_cut])
         res[~mask_cut] = (np.sqrt(x[~mask_cut] / nu[~mask_cut]) *
                           np.sqrt(1 / (2 * np.pi * sig[~mask_cut]**2)) *
                           np.exp(-(x[~mask_cut] - nu[~mask_cut])**2 /
                                  (2 * sig[~mask_cut]**2)))
     else:
         x_cal = x[mask_cut] / sig[mask_cut]
         b_cal = snr[mask_cut]
         res[mask_cut] = (np.log(x_cal / sig[mask_cut]) -
                          (((x_cal - b_cal)**2) / 2.) +
                          np.log(sp.i0e(x_cal * b_cal)))
         res[~mask_cut] = (
             1 / 2. *
             np.log(x[~mask_cut] /
                    (nu[~mask_cut] * 2 * np.pi * sig[~mask_cut]**2)) -
             (x[~mask_cut] - nu[~mask_cut])**2 / (2 * sig[~mask_cut]**2))
     if is_scalar:
         res = res[0]
     return res
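A cross-check of the log branch above against scipy.stats (a sketch, not from the source). The stable Rice log-pdf is log(x/sig**2) - (x - nu)**2 / (2*sig**2) + log(i0e(x*nu/sig**2)), which matches scipy's parametrization with shape b = nu/sig:

import numpy as np
import scipy.stats as st
from scipy.special import i0e

x, nu, sig = 3.0, 2.0, 0.5
manual = (np.log(x / sig**2) - (x - nu)**2 / (2 * sig**2)
          + np.log(i0e(x * nu / sig**2)))
assert np.isclose(manual, st.rice.logpdf(x, nu / sig, scale=sig))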
Example #24
def like_cosi_psi(a_hat, f_plus, f_cross, x, psi, d_max=1000.):
    """
    Return the likelihood marginalized over d and phi, using a flat (1/2pi)
    prior on phi and a uniform-in-volume prior on d up to d_max.
    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param x: cos(inclination)
    :param psi: polarization
    :param d_max: maximum distance for marginalization
    """
    ahat2, f, g = like_parts_d_cosi_psi(a_hat, f_plus, f_cross, x, psi)
    d0 = a_hat[0]
    # Marginalizing over phi gives:
    # 2 pi  i0e(a f) exp(-1/2(ahat^2 - 2 f a + g a^2))
    like = lambda a: 3 * d0 ** 2 / d_max ** 3 * a ** (-4) * special.i0e(f * a) * \
                     exp(f * a - 0.5 * (ahat2 + g * a ** 2))
    l_psix = quad(like, max(0, f / g - 5 / sqrt(g)), f / g + 5 / sqrt(g))
    return l_psix[0]
Example #25
    def loglr(self, **params):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) =
                \log I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
                \frac{1}{2}\left<h^0_i, h^0_i\right>,

        at the given point in parameter space :math:`\Theta`.

        Parameters
        ----------
        \**params :
            The keyword arguments should give the values of each parameter to
            evaluate.

        Returns
        -------
        numpy.float64
            The value of the log likelihood ratio evaluated at the given point.
        """
        try:
            wfs = self._waveform_generator.generate(**params)
        except NoWaveformError:
            # if no waveform was generated, just return 0
            return 0.
        hh = 0.
        hd = 0j
        for det, h in wfs.items():
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(len(h), self._kmax)
            # whiten the waveform
            if self._kmin >= kmax:
                # if the waveform terminates before the filtering low frequency
                # cutoff, there is nothing to filter, so just go onto the next
                continue
            h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
            hh += h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
            hd += self.data[det][self._kmin:kmax].inner(h[self._kmin:kmax])
        hd = abs(hd)
        return numpy.log(special.i0e(hd)) + hd - 0.5 * hh
Example #26
def like_d_cosi_psi(a_hat, f_plus, f_cross, d, x, psi, marg=True):
    """
    Return the likelihood marginalized over phi, using a flat (1/2pi) prior
    :param a_hat: the F-stat A parameters
    :param f_plus: F_plus sensitivity
    :param f_cross: F_cross sensitivity
    :param d: distance
    :param x: cos(inclination)
    :param psi: polarization
    :param marg: do or don't do the marginalization
    :returns: the marginalization factor.  I don't think it includes the exp(rho^2/2) term.
    """
    ahat2, f, g = like_parts_d_cosi_psi(a_hat, f_plus, f_cross, x, psi)
    d0 = a_hat[0]
    # Marginalizing over phi (from zero to 2 pi) gives:
    # 2 pi  i0e(a f) exp(-1/2(ahat^2 - 2 f a + g a^2))
    a = d0 / d
    like = exp(f * a - 0.5 * (ahat2 + g * a**2))
    if marg: like *= special.i0e(f * a)
    return like
Example #27
 def _loglr(self):
     r"""Computes the log likelihood ratio,
     .. math::
         \log \mathcal{L}(\Theta) =
              \log I_0 \left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
             \frac{1}{2}\left<h^0_i, h^0_i\right>,
     at the current point in parameter space :math:`\Theta`.
     Returns
     -------
     float
         The value of the log likelihood ratio evaluated at the given point.
     """
     params = self.current_params
     try:
         wfs = self._waveform_generator.generate(**params)
     except NoWaveformError:
         return self._nowaveform_loglr()
     hh = 0.
     hd = 0j
     for det, h in wfs.items():
         # the kmax of the waveforms may be different than internal kmax
         kmax = min(len(h), self._kmax)
         if self._kmin >= kmax:
             # if the waveform terminates before the filtering low frequency
             # cutoff, then the loglr is just 0 for this detector
             hh_i = 0.
             hd_i = 0j
         else:
             # whiten the waveform
             h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
             # calculate inner products
             hh_i = h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
             hd_i = self.data[det][self._kmin:kmax].inner(
                 h[self._kmin:kmax])
         # store
         setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
         hh += hh_i
         hd += hd_i
     hd = abs(hd)
     self._current_stats.maxl_phase = numpy.angle(hd)
     return numpy.log(special.i0e(hd)) + hd - 0.5*hh
Example #28
    def loglr(self, **params):
        r"""Computes the log likelihood ratio,

        .. math::

            \log \mathcal{L}(\Theta) = \log I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) - \frac{1}{2}\left<h^0_i, h^0_i\right>,

        at the given point in parameter space :math:`\Theta`.

        Parameters
        ----------
        \**params :
            The keyword arguments should give the values of each parameter to
            evaluate.

        Returns
        -------
        numpy.float64
            The value of the log likelihood ratio evaluated at the given point.
        """
        try:
            wfs = self._waveform_generator.generate(**params)
        except NoWaveformError:
            # if no waveform was generated, just return 0
            return 0.
        hh = 0.
        hd = 0j
        for det,h in wfs.items():
            # the kmax of the waveforms may be different than internal kmax
            kmax = min(len(h), self._kmax)
            # whiten the waveform
            if self._kmin >= kmax:
                # if the waveform terminates before the filtering low frequency
                # cutoff, there is nothing to filter, so just go onto the next
                continue
            h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
            hh += h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
            hd += self.data[det][self._kmin:kmax].inner(h[self._kmin:kmax])
        hd = abs(hd)
        return numpy.log(special.i0e(hd)) + hd - 0.5*hh
Example #29
def w_stuff(fp, ref_distance, ref_dis_x, ref_dis_y):

    if fp > 0:
        mask = np.zeros_like(ref_distance, dtype=bool)
        mask[np.tril_indices_from(mask, k=-1)] = True
        
        rij = ref_distance[mask]
        rij_x = ref_dis_x[mask]
        rij_y = ref_dis_y[mask]

        Wij = W(fp, rij)
        Wij_rij_x = 2*np.sum(Wij * rij_x)
        Wij_rij_y = 2*np.sum(Wij * rij_y)
        Wij_rij_norm = np.sqrt(Wij_rij_x*Wij_rij_x + Wij_rij_y*Wij_rij_y)
        fac = i1e(Wij_rij_norm)/i0e(Wij_rij_norm)/Wij_rij_norm
        Wr2 = 2.0 * np.sum( Wij * rij*rij )
        print ('factor=%g  Wij_rij_x=%s Wij_rij_y=%s Wij_rij_norm=%s'%(fac,Wij_rij_x,Wij_rij_y,Wij_rij_norm))
        Wr2_full = fac * Wr2 / mask.shape[0] / (mask.shape[1]-1) / 2
        Wr2 = Wr2 / 2 / mask.shape[0] / (mask.shape[1]-1) / 2
    else:
        Wr2 = 0
        Wr2_full = 0

    return Wr2, Wr2_full
Example #30
    def log_like(self, params):
        """
            log-likelihood function
        """
        # compute waveform
        logger.debug("Generating waveform for {}".format(params))
        wave = self.wave.compute_hphc(params)
        logger.debug("Waveform generated".format(params))

        # if hp, hc == [None], [None]
        # the requested parameters are unphysical
        # Then, return -inf
        if not any(wave.plus):
            return -np.inf

        hh = 0.
        dd = 0.
        _psd_fact = 0.

        if self.marg_time_shift:

            dh_arr = np.zeros(self.Nfr, dtype=complex)

            # compute inner products
            for ifo in self.ifos:
                logger.debug("Projecting over {}".format(ifo))
                dh_arr_thisifo, hh_thisifo, dd_thisifo, _psdf = self.dets[
                    ifo].compute_inner_products(wave,
                                                params,
                                                self.wave.domain,
                                                psd_weight_factor=True)
                dh_arr = dh_arr + np.fft.fft(dh_arr_thisifo)
                hh += np.real(hh_thisifo)
                dd += np.real(dd_thisifo)
                _psd_fact += _psdf

            # evaluate logL
            logger.debug("Estimating likelihood")
            if self.marg_phi_ref:
                abs_dh = np.abs(dh_arr)
                I0_dh = np.log(i0e(abs_dh)) + abs_dh
                R = logsumexp(I0_dh - np.log(self.Nfr))
            else:
                re_dh = np.real(dh_arr)
                R = logsumexp(re_dh - np.log(self.Nfr))

        else:

            dh = 0. + 0.j

            # compute inner products
            for ifo in self.ifos:
                logger.debug("Projecting over {}".format(ifo))
                dh_arr_thisifo, hh_thisifo, dd_thisifo, _psdf = self.dets[
                    ifo].compute_inner_products(wave,
                                                params,
                                                self.wave.domain,
                                                psd_weight_factor=True)
                dh += (dh_arr_thisifo).sum()
                hh += np.real(hh_thisifo)
                dd += np.real(dd_thisifo)
                _psd_fact += _psdf

            # evaluate logL
            logger.debug("Estimating likelihood")
            if self.marg_phi_ref:
                dh = np.abs(dh)
                R = np.log(i0e(dh)) + dh
            else:
                R = np.real(dh)

        logL = -0.5 * (hh + dd) + R - self.logZ_noise - 0.5 * _psd_fact
        return logL
Example #31
def marginalize_likelihood(sh,
                           hh,
                           phase=False,
                           distance=False,
                           skip_vector=False,
                           interpolator=None):
    """ Return the marginalized likelihood

    Parameters
    ----------
    sh: complex float or numpy.ndarray
        The data-template inner product
    hh: complex float or numpy.ndarray
        The template-template inner product
    phase: bool, False
        Enable phase marginalization. Only use if orbital phase can be related
        to just a single overall phase (e.g. not true for waveforms with
        sub-dominant modes)
    distance: tuple or bool, False
        If given, a tuple of (dist_rescale, dist_weights) used to marginalize
        over distance by brute force.
    skip_vector: bool, False
        Don't apply marginalization of vector component of input (i.e. leave
        as vector).
    interpolator: function, None
        If provided, internal calculation is skipped in favor of a
        precalculated interpolating function which takes in sh/hh
        and returns the likelihood.

    Returns
    -------
    loglr: float
        The marginalized log likelihood ratio
    """
    if isinstance(sh, float):
        clogweights = 0
    else:
        sh = sh.flatten()
        hh = hh.flatten()
        clogweights = numpy.log(len(sh))

    if phase:
        sh = abs(sh)
    else:
        sh = sh.real

    vweights = 1
    if interpolator:
        # pre-calculated result for this function
        vloglr = interpolator(sh, hh)

        if skip_vector:
            return vloglr
    else:
        # explicit calculation
        if distance:
            # brute force distance path
            dist_rescale, dist_weights = distance

            sh = numpy.multiply.outer(sh, dist_rescale)
            hh = numpy.multiply.outer(hh, dist_rescale**2.0)
            if len(sh.shape) == 2:
                vweights = numpy.resize(dist_weights,
                                        (sh.shape[1], sh.shape[0])).T
            else:
                vweights = dist_weights

        if phase:
            sh = numpy.log(i0e(sh)) + sh
        else:
            sh = sh.real

        # Calculate loglikelihood ratio
        vloglr = sh - 0.5 * hh

    # Do brute-force marginalization if loglr is a vector
    if isinstance(vloglr, float):
        vloglr = float(vloglr)
    elif skip_vector:
        return vloglr
    else:
        vloglr = float(logsumexp(vloglr, b=vweights)) - clogweights

    return vloglr
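A usage sketch for the function above (values are illustrative; numpy, scipy.special.i0e and scipy.special.logsumexp are assumed to be imported as in the source module):

import numpy as np

sh = np.array([4.2 + 3.1j])  # complex <d|h> inner product
hh = np.array([25.0])        # real <h|h> inner product
loglr = marginalize_likelihood(sh, hh, phase=True)
# equals log(i0e(|sh|)) + |sh| - 0.5*hh, roughly -9.0 for these inputs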
Example #32
def calculate_items(snaps, min_neigh=4, cutoff=1.5, MAXnb=100,
                    nbins=2000, nbinsq=50, Pe=10, rho_0=0.60,
                    CG_flag = False):

    """ 
    snaps is an n-dimensional list which holds all the data from a 
    dump file. Each item in the list is a snapshot dict of the per
    atom properties measured in the simulation.
    """
    import fortran_tools as ft


    outer_vec = []
    inner_vec = []
    corr, corr_b, corr_in, count, lindemann = {}, {}, {}, {}, {}
    MSD, Q, Q2, g6t, g6t_re, g6t_im = {}, {}, {}, {}, {}, {}
    for t1,snap1 in enumerate(snaps):

        # for each snapshot in the dump file data

        box=snap1['box']
        ref_coords = snap1['ucoords']
        mus  = snap1['mus']
        if CG_flag:
            # if time averaged velocities are computed
            # at each time step
            # (to reduce noise of velocity per atom
            # at each timestep)
            vs  = snap1['CG_vs']
        else:
            vs = np.column_stack((snap1['vx'],
                                  snap1['vy']))
        

        tmp_list = ft.distance_matrix(ref_coords,
                                      snap1['c_psi6[1]'],
                                      snap1['c_psi6[2]'],
                                      snap1['mus'],
                                      snap1['local_density'],
                                      box, cutoff, 1,rho_0,
                                      MAXnb, nbins,nbinsq,
                                      len(ref_coords),2)

        # distance matrix between particle pairs
        ref_distance, ref_dis_x, ref_dis_y = tmp_list[:3]
        # number of neighbours for all particles
        ref_num_nb, ref_list_nb = tmp_list[3:5]
        
        # correlation functions and structure functions
        g, g6, g6re, g6im, sq = tmp_list[5:10]
        g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]

        # load array which each atom is labelled as being a member
        # of a cluster, with those atoms having label 0 being
        # members of the largest cluster.
        # Size of this array is the number of atoms in the simulation
        cl_i = snap1['new_c_c1']

        # load array of cluster sizes, going from largest to
        # smallest
        # Size of this array is N_clusters
        cl_s = np.array(sorted(snap1['cluster_sizes'].values(),
                               reverse=True))
        Nc = snap1['N_clusters']
        min_cluster_size = 2
        # compute angular momentum and linear momentum of clusters
        cluster_AngM, cluster_LinM = ft.get_cluster_omega(ref_coords,
                                                          vs, box,
                                                          cl_i, cl_s,
                                                          len(ref_coords),
                                                          2, Nc)
        # compute mean squared angular momentum of clusters
        RMS_AngMom2 = np.nanmean(np.where(cl_s>=min_cluster_size,
                                          np.multiply(cluster_AngM,
                                                      cluster_AngM),
                                          np.nan))
        RMS_AngMom = np.sqrt(RMS_AngMom2)
        
        # compute mean squared linear momentum of clusters
        RMS_LinMom2 = np.nanmean(np.where(cl_s>=min_cluster_size,
                                          np.multiply(cluster_LinM,
                                                      cluster_LinM),
                                          np.nan))
        RMS_LinMom = np.sqrt(RMS_LinMom2)

        # compute mean cluster size
        cluster_size = np.nanmean(np.where(cl_s>=min_cluster_size,
                                           cl_s,np.nan))
        # compute (normalized) mean polarisation
        polarisation = np.linalg.norm(np.mean(mus,axis=0))

        print(f"cluster info: RMS_L, RMS_M, size, "
              f"no_clusters(>={min_cluster_size}) "
              "no_clusters")
        print(RMS_AngMom,RMS_LinMom,cluster_size,
              np.count_nonzero( cl_s >= min_cluster_size ),
              len(cl_s))

        if Pe > 0:
            mask = np.zeros_like(ref_distance, dtype=bool)

            mask[np.tril_indices_from(mask, k=-1)] = True
            rij = ref_distance[mask]
            rij_x = ref_dis_x[mask]
            rij_y = ref_dis_y[mask]

            Wij = W(Pe, rij)
            Wij_rij_x = 2*np.sum(Wij * rij_x)
            Wij_rij_y = 2*np.sum(Wij * rij_y)
            Wij_rij_norm = np.sqrt(Wij_rij_x*Wij_rij_x
                                   + Wij_rij_y*Wij_rij_y)
            fac = i1e(Wij_rij_norm)/i0e(Wij_rij_norm)/Wij_rij_norm

            Wr2 = 2.0 * np.sum( Wij * rij*rij )
            
            print(f"factor={fac}  Wij_rij_x={Wij_rij_x} "
                  f"Wij_rij_y={Wij_rij_y} Wij_rij_norm={Wij_rij_norm}")
            
            Wr2_full = fac * Wr2 / mask.shape[0] / (mask.shape[1]-1) / 2
            Wr2 = Wr2 / 2 / mask.shape[0] / (mask.shape[1]-1) / 2
        else:
            Wr2 = 0
            Wr2_full = 0


#ITEM: ATOMS id type x y xu yu mux muy fx fy tqz v_psi6
# c_psi6[1] c_psi6[2] f_cg[1] f_cg[2] f_cg[3] f_cg[4] f_cg[5]
# f_cg[6] f_cg[7] c_c1 '<psi6_re>', '<psi6_re^2>', '<psi6_im>',
# '<psi6_im^2>', '<mux>', '<mux^2>', '<muy>', '<muy^2>'


        if t1==0 :
            # beginning of time averages
            p6re =  np.mean(snap1['c_psi6[1]'])
            p6im =  np.mean(snap1['c_psi6[2]'])
            p6   =  np.absolute(complex(p6re, p6im))
            sum_psi6  = p6
            sum_psi62 = p6*p6
            sum_psi6_cmplx = complex(p6re, p6im)
            sum_mux  = np.mean(snap1['mux'])
            sum_mux2 = np.mean(np.array(snap1['mux']) ** 2)
            sum_muy  = np.mean(snap1['muy'])
            sum_muy2 = np.mean(np.array(snap1['muy']) ** 2)
            theta = np.arctan2(snap1['muy'], snap1['mux'])
            sum_theta  = np.mean(theta)
            sum_theta2 = np.mean(theta ** 2)
            nematic    = (2.*np.cos(theta)**2  - 1.)
            sum_nematic  = np.mean( nematic )
            sum_nematic2  = np.mean( nematic**2 )
            
            sum_RMS_AngMom = RMS_AngMom 
            sum_RMS_AngMom2 = RMS_AngMom * RMS_AngMom
            sum_RMS_LinMom = RMS_LinMom
            sum_RMS_LinMom2 = RMS_LinMom * RMS_LinMom
            sum_cluster_size = cluster_size
            sum_polarisation = polarisation
            
            
            sum_g = np.matrix(g)
            sum_g6 = np.matrix(g6)
            
            sum_g6re = np.matrix(g6re)
            sum_g6im = np.matrix(g6im)
            sum_sq = np.array(sq)
            sum_g_ori = np.array(g_ori)
            sum_g_dp = np.array(g_dp)
            sum_g_dp_tr = np.array(g_dp_tr)
            g_pr = np.array(g_pr)
            sum_g_pr = np.array(g_pr)
            sum_Wr2 = Wr2
            sum_Wr2_full = Wr2_full
            sum_pij_rij = s_pr
            
            
            g_cnt = 1
        
        else:
            # add to time averages
            p6re =  np.mean(snap1['c_psi6[1]'])
            p6im =  np.mean(snap1['c_psi6[2]'])
            p6   =  np.absolute(complex(p6re, p6im))
            sum_psi6  += p6
            sum_psi62 += p6*p6
            sum_psi6_cmplx += complex(p6re, p6im)
            sum_mux  += np.mean(snap1['mux'])
            sum_mux2 += np.mean(np.array(snap1['mux']) ** 2)
            sum_muy  += np.mean(snap1['muy'])
            sum_muy2 += np.mean(np.array(snap1['muy']) ** 2)
            theta = np.arctan2(snap1['muy'], snap1['mux'])
            sum_theta  += np.mean(theta)
            sum_theta2 += np.mean(theta ** 2)
            nematic    = (2.*np.cos(theta)**2  - 1.)
            sum_nematic  += np.mean( nematic )
            sum_nematic2  += np.mean( nematic**2 )
            
            sum_RMS_AngMom += RMS_AngMom
            sum_RMS_AngMom2 += RMS_AngMom * RMS_AngMom
            sum_RMS_LinMom += RMS_LinMom
            sum_RMS_LinMom2 += RMS_LinMom * RMS_LinMom
            sum_cluster_size += cluster_size
            sum_polarisation += polarisation
            
            sum_g += np.matrix(g)
            sum_g6   += np.matrix(g6)
            sum_g6re += np.matrix(g6re)
            sum_g6im += np.matrix(g6im)
            sum_sq += np.array(sq)
            
            
            sum_g_ori += np.array(g_ori)
            sum_g_dp += np.array(g_dp)
            sum_g_dp_tr += np.array(g_dp_tr)
            sum_g_pr += np.array(g_pr)
            sum_Wr2 += Wr2
            sum_Wr2_full += Wr2_full
            sum_pij_rij += s_pr
            
            g_cnt += 1

        # mask for distance matrix
        nb = np.logical_and(ref_distance<cutoff, ref_distance > 0)

        # count number of neighbours each particle has
        num_neighs_ref = ref_num_nb
        # determine whether particles are surrounded by other particles
        inner_particles_ref = num_neighs_ref > min_neigh
        # or not
        outer_particles_ref = np.logical_not(inner_particles_ref)

        # double count number of pairs which have more than four neighs
        norm_in = np.sum(np.multiply(nb[inner_particles_ref]
                                     [:,inner_particles_ref],
                                     nb[inner_particles_ref]
                                     [:,inner_particles_ref]))+0.0
        # double count number of pairs which have less than four neighs
        norm_b =  np.sum(np.multiply(nb[outer_particles_ref]
                                     [:,outer_particles_ref],
                                     nb[outer_particles_ref]
                                     [:,outer_particles_ref]))+0.0

        # double count number of pairs which have neighbours
        norm_all = np.sum(np.multiply(nb,nb))+0.0

        outer_n = outer_particles_ref.sum()
        inner_n = inner_particles_ref.sum()
        print('boundary/inner = %s/%s' % (outer_n, inner_n))
        inner, outer = get_inner_outer(snap1)
        inner_vec.extend(inner)        
        outer_vec.extend(outer)
        
        for t2 in range(t1+1, len(snaps)):
            # for a snapshot at a later time
            snap2 = snaps[t2]
            t = snap2['step'] - snap1['step']
            coords = snap2['ucoords']#[:30]


            tmp_list = ft.distance_matrix(coords,
                                          snap2['c_psi6[1]'],
                                          snap2['c_psi6[2]'],
                                          snap2['mus'],
                                          snap2['local_density'],
                                          box, cutoff, 0,rho_0,
                                          MAXnb, nbins,nbinsq,
                                          len(coords),2)

            # distance matrix between particle pairs
            distance, distance_x, distance_y = tmp_list[:3]
            # number of neighbours for all particles
            num_nb, list_nb = tmp_list[3:5]

            # correlation functions and structure functions,
            # which are all not calculated and so are 0 in
            # what follows
            # g, g6, g6re, g6im, sq = tmp_list[5:10]
            # g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]

            # mask for distance matrix
            nb1  = np.logical_and(distance<cutoff, distance > 0)


            out = '%s '%t
            count[t] = (count.get(t, 0)) + 1


            # compute number of neighbours which are still neighbours
            # at a later time
            c = np.sum(np.multiply(nb,nb1))
            c /= norm_all
            corr[t] = (corr.get(t, 0)) + c
            out += '%s '%c

            # compute number of neighbours with more than min_neigh
            # neighbours that are still neighbours at a later time
            c = np.sum(np.multiply(nb[inner_particles_ref]
                                   [:,inner_particles_ref],
                                   nb1[inner_particles_ref]
                                   [:,inner_particles_ref]))
            c /= norm_in
            corr_in[t] = (corr_in.get(t, 0)) + c
            out += '%s '%c

            # compute number of neighbours with less than min_neigh
            # neighbours that are still neighbours at a later time
            c = np.sum(np.multiply(nb[outer_particles_ref]
                                   [:,outer_particles_ref],
                                   nb1[outer_particles_ref]
                                   [:,outer_particles_ref]))
            c /= norm_b
            corr_b[t] = (corr_b.get(t, 0)) + c
            out += '%s '%c

            
            d = coords-ref_coords 
            Dsq = [ ((u-d[j-1])**2).sum() for i, u in enumerate(d)
                    for j in ref_list_nb[i,0:ref_num_nb[i]]   ]
            if len(Dsq)> 0 : 
                lindemann[t] = (lindemann.get(t, 0)) + np.mean(Dsq)


            # MSD of clusters
            cl_i = snap1['new_c_c1']
            cl_s = sorted(snap1['cluster_sizes'].values(), reverse=True)
            Nc = snap1['N_clusters'] 
            com_MSD = ft.get_cluster_msd(ref_coords, coords, box,
                                         cl_i, cl_s, len(coords), 2, Nc)
            MSD[t] = (MSD.get(t, 0)) + com_MSD

            # compute overlap autocorrelation function and psi6
            # autocorrelation function
            tmplist = ft.get_overlap(ref_coords, coords, snap1['c_psi6[1]'],
                                     snap1['c_psi6[2]'], snap2['c_psi6[1]'],
                                     snap2['c_psi6[2]'], box, len(coords), 2)

            overlap, g6t_abs, g6_re, g6_im = tmplist

            Q[t] = (Q.get(t, 0)) + overlap
            Q2[t] = (Q2.get(t, 0)) + overlap*overlap
            g6t_re[t] = (g6t_re.get(t, 0)) + g6_re
            g6t_im[t] = (g6t_im.get(t, 0)) + g6_im
            g6t   [t] = (g6t.get(t, 0)) + g6t_abs

            print(f"{out} , {lindemann[t]} , {MSD[t]} , {Q[t]} , "
                  f"{g6t_re[t]} , {g6t_im[t]}")
  
    ret_t=[corr, corr_b, corr_in, count, lindemann,
           MSD, Q, Q2, g6t, g6t_re, g6t_im]
    
    ret_o=[g_cnt,
           sum_psi6, sum_psi62, sum_psi6_cmplx,
           sum_mux, sum_mux2,
           sum_muy, sum_muy2,
           sum_theta, sum_theta2,
           sum_nematic, sum_nematic2,
           sum_g, sum_g6, sum_g6re, sum_g6im, sum_sq,
           sum_g_ori, sum_g_dp , sum_g_dp_tr, sum_g_pr,
           sum_Wr2, sum_Wr2_full, sum_pij_rij,
           sum_RMS_LinMom, sum_RMS_LinMom2, sum_RMS_AngMom,
           sum_RMS_AngMom2, sum_cluster_size, sum_polarisation,
           inner_vec, outer_vec]
    return ret_t, ret_o
Example #33
 def integrand(r0):
     return 2. * r0 * np.exp(-(r0 - g0)**2) * i0e(2. * r0 * g0)
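Context for this integrand (a sketch; in the original, g0 is captured from the enclosing scope): it is the Rice density with sigma**2 = 1/2 written in overflow-safe form, so it integrates to one over r0 >= 0:

import numpy as np
from scipy.integrate import quad
from scipy.special import i0e

def integrand(r0, g0):
    return 2. * r0 * np.exp(-(r0 - g0)**2) * i0e(2. * r0 * g0)

g0 = 8.0
val, _ = quad(integrand, 0.0, g0 + 10.0, args=(g0,))
assert np.isclose(val, 1.0)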
Example #34
 def phase_marginalized_likelihood(self, d_inner_h, h_inner_h):
     d_inner_h = xp.abs(d_inner_h)
     d_inner_h = xp.log(i0e(d_inner_h)) + d_inner_h
     log_l = -2 / self.duration * (h_inner_h - 2 * d_inner_h)
     return log_l
Example #35
def log_likelihood_marginalized_coal(Data,
                                     frequencies,
                                     noise,
                                     SNR,
                                     chirpm,
                                     symmratio,
                                     spin1,
                                     spin2,
                                     alpha_squared,
                                     bppe,
                                     NSflag,
                                     cosmology=cosmology.Planck15):
    deltaf = frequencies[1] - frequencies[0]

    #Construct template with random luminosity distance
    mass1 = utilities.calculate_mass1(chirpm, symmratio)
    mass2 = utilities.calculate_mass2(chirpm, symmratio)
    DL = 100 * mpc
    template = dcsimr_detector_frame(mass1=mass1,
                                     mass2=mass2,
                                     spin1=spin1,
                                     spin2=spin2,
                                     collision_time=0,
                                     collision_phase=0,
                                     Luminosity_Distance=DL,
                                     phase_mod=alpha_squared,
                                     cosmo_model=cosmology,
                                     NSflag=NSflag)

    #Construct preliminary waveform for template
    frequencies = np.asarray(frequencies)
    amp, phase, hreal = template.calculate_waveform_vector(frequencies)
    h_complex = amp * np.exp(-1j * phase)

    #construct noise model
    #start=time()
    #noise_temp,noise_func, freq = template.populate_noise(detector=detector,int_scheme='quad')
    #noise_root =noise_func(frequencies)
    #noise = np.multiply(noise_root, noise_root)
    #print('noise time: ', time()-start)

    #Fix snr of template to match the data
    snr_template = np.sqrt(4 * simps(amp * amp / noise, frequencies).real)
    h_complex = SNR / snr_template * h_complex

    #Construct the inverse fourier transform of the inner product (D|h)
    #h_complex = np.insert(h_complex,0,np.conjugate(np.flip(h_complex)))
    #Data = np.insert(Data,0,np.conjugate(np.flip(Data)))
    #noise = np.insert(noise,0,np.conjugate(np.flip(noise)))
    #frequncies =np.insert(frequencies,0,-np.flip(frequencies))
    g_tilde = np.divide(np.multiply(np.conjugate(Data), h_complex), noise)
    g = np.abs(np.fft.fft(g_tilde)) * 4 * deltaf  #/len(frequencies)

    gmax = np.amax(g)
    sumg = np.sum(i0e(g) * np.exp(g - gmax)) * 1 / (len(frequencies) * deltaf)
    #print(sumg, gmax/SNR**2,np.log(sumg))
    if sumg != np.inf:
        return np.log(sumg) + gmax - SNR**2
    else:
        #print("inf")
        items = bi(g)
        sumg = np.sum(items) * 1 / (len(frequencies) * deltaf)
        return np.log(sumg) - SNR**2
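The final reduction above is a log-sum-exp in disguise: since I0(g) = i0e(g) * exp(g), we have log(sum_k I0(g_k)) = log(sum_k i0e(g_k) * exp(g_k - gmax)) + gmax with gmax = max(g). A minimal sketch of that identity in isolation (not from the source):

import numpy as np
from scipy.special import i0e

g = np.array([650.0, 700.0, 720.0])
gmax = np.amax(g)
# i0(720.0) alone would overflow, but every term below is representable
log_sum = np.log(np.sum(i0e(g) * np.exp(g - gmax))) + gmax
print(log_sum)  # ~715.8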