def shift(yy1, yy2):
    resid_sums = []  # summed absolute residuals for each trial shift
    shift_factor = np.linspace(-5, 5, 1001)
    
    for i in shift_factor:
        nx = x + i
        nyy2 = [None] * len(yy2)  # holds the re-interpolated curves for this shift
        squaresum = []
        
        for j in range(len(yy2)):
            ny2 = spl(nx, yy2[j])  # spline of yy2[j] on the shifted grid
            nyy2[j] = ny2(x)  # evaluated back on the original grid

            squaresum.append(sum(abs(yy1[j] - nyy2[j])))
          
        resid_sums.append(sum(squaresum))  # total residual for this shift

    minindex = resid_sums.index(min(resid_sums))
    sf = shift_factor[minindex]
    nx = x + sf
    nyy2 = []
    for i in range(len(yy2)): 
        ny2 = spl(nx, yy2[i])
        nyy2.append(ny2(x))
   
    return nyy2, sf
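A minimal usage sketch (hypothetical data; it assumes `spl` is `scipy.interpolate.InterpolatedUnivariateSpline` and that the module-level grid `x` used inside `shift` is defined, as in the setup further below):

# Hypothetical usage sketch for shift(); spl and the shared grid x are assumptions.
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spl

x = np.linspace(400.0, 700.0, 65)            # shared wavelength grid
yy1 = [np.exp(-(x - 550.0)**2 / 200.0)]      # reference peak
yy2 = [np.exp(-(x - 552.0)**2 / 200.0)]      # same peak, displaced by ~2 units
aligned_yy2, best_shift = shift(yy1, yy2)
print(best_shift)                            # close to -2: undoes the displacement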
Example #2
 def interpolateCurve(self):
     if self.data is None:
         self.states()
     values = np.array(self.data)
     values = np.swapaxes(values, 1, 0)  # swapaxes returns a new view; it must be assigned
     s = []
     lastPoint = values[:, 0]
     lastS = 0
     for i, point in enumerate(values[0]):
         point = values[:, i]
         lastS = math.sqrt(
             np.dot(np.array(point - lastPoint),
                    np.array(point - lastPoint))) + lastS
         s.append(lastS)
         lastPoint = point
     s2 = np.linspace(s[0], s[-1], len(s))
     interpedData = []
     for dim in range(0, len(values[:, 1])):
         data = np.array(values[dim, :]).reshape(len(s), 1).squeeze()
         dimData = []
         s2Index = 0
         lastIndex = 0
         for i, value in enumerate(data):
             if i % 5 == 0 and i + 5 <= len(s):
                 if i + 5 == len(s):
                     tck, u = spl(np.array(data[i - 2:i + 4]).reshape(1, 6),
                                  u=np.array(s[i - 2:i + 4]).reshape(
                                      6, 1).flatten(),
                                  k=5)
                     newValues = splev(s2[lastIndex:-1], tck)
                     for xi in newValues:
                         for xij in xi:
                             dimData.append(xij)
                 else:
                     tck, u = spl(np.array(data[i:i + 6]).reshape(1, 6),
                                  u=np.array(s[i:i + 6]).reshape(
                                      6, 1).flatten(),
                                  k=5)
                     while s2[s2Index] < s[i + 5]:
                         s2Index += 1
                     if s2Index != lastIndex:
                         newValues = splev(s2[lastIndex:s2Index], tck)
                         for xi in newValues:
                             for xij in xi:
                                 dimData.append(xij)
                 lastIndex = s2Index
         interpedData.append(dimData)
     interpedData = np.array(interpedData)
     return interpedData.transpose()
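For reference, arc-length resampling can also be done with one global parametric spline instead of the 6-point windows above (a sketch; it assumes `spl` in this snippet is `scipy.interpolate.splprep`, which matches the `tck, u = spl(...)` call pattern):

# Sketch: global arc-length resampling of a curve with one parametric spline.
import numpy as np
from scipy.interpolate import splprep, splev

t = np.linspace(0, 2 * np.pi, 40)
points = np.vstack([np.cos(t), np.sin(t)])     # shape (ndim, npoints)
tck, u = splprep(points, k=5, s=0)             # u: normalized chord-length parameter
resampled = np.array(splev(np.linspace(0, 1, 200), tck)).T   # 200 evenly spaced samples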
Example #3
def _findBest(snType, wave, flux):
    flux = flux[wave > 4000][wave[wave > 4000] < 7500]
    wave = wave[wave > 4000][wave[wave > 4000] < 7500]

    mySpline = spl(wave, flux, k=5, ext=1)
    tempSpline = mySpline(wave)
    splineRemoved = np.log10(flux / tempSpline)
    sne, types = np.loadtxt(os.path.join(__dir__, 'data', 'spectra',
                                         'snTypes.ref'),
                            dtype='str',
                            unpack=True)
    mySpecs = sne[types == snType]
    bestChi = np.inf
    bestSpec = None
    bestConst = None
    for spec in mySpecs:
        data = _readSpec(
            os.path.join(__dir__, 'data', 'spectra', spec + '.lnw'))
        x = [float(col[2:]) for col in data.colnames if col != 'wave']

        if len(x) < 2:
            continue
        y = data['wave']

        data.remove_column('wave')
        func = interp2d(x, y, np.array([list(r) for r in np.array(data)]))
        tempFlux = np.transpose(func(0, wave))[0]
        res = minimize(_specChi, np.array([1]), args=(splineRemoved, tempFlux))
        if res.fun < bestChi:
            bestChi = res.fun
            bestSpec = spec
            bestConst = res.x
    return (_readSpec(
        os.path.join(__dir__, 'data', 'spectra',
                     bestSpec + '.lnw')), bestConst)
Example #4
def create_score2prob_lin(p_cnv):
    """
    create the function that will transform a cnv score in a p-value 
    of being a cnv

    Just a linear approximation
    """

    kpcnv = sorted(p_cnv)  # a sorted list: dict views are not indexable, and splines need ordered x
    lin_funct = spl(kpcnv, [p_cnv[k] for k in kpcnv], k=1)
    #x = np.arange(0,50,1)
    #plt.plot(x, lin_funct(x), '-', p_cnv.keys(), p_cnv.values(), '+')

    (x1, x2) = (kpcnv[0], kpcnv[-1])
    a = (lin_funct(x2) - lin_funct(x1)) / (x2 - x1)
    b = lin_funct(x2) - a * x2
    x_where_y_is_1 = (1. - b) / a
    x_where_y_is_0 = (-b) / a

    def sc2prob(x):
        # x should not be a string
        # assert not isinstance(x, six.string_types), "{} is str".format(x)
        # but should be a number
        assert isinstance(x, numbers.Real), \
                                "{} is not a number: type {}".format(x,type(x))

        if x < x_where_y_is_0:
            return 0.
        elif x < x_where_y_is_1:
            return float(lin_funct(x))
        else:
            return 1.

    return sc2prob
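A toy usage sketch (hypothetical score/probability pairs; assumes `spl` is a 1-D spline class such as `scipy.interpolate.InterpolatedUnivariateSpline` and that `numbers` is imported):

# Hypothetical usage: map CNV scores to probabilities clipped to [0, 1].
import numbers
from scipy.interpolate import InterpolatedUnivariateSpline as spl

p_cnv = {0: 0.05, 10: 0.25, 20: 0.55, 30: 0.80}   # toy score -> P(cnv) pairs
score2prob = create_score2prob_lin(p_cnv)
print(score2prob(15))     # interpolated value strictly between 0 and 1
print(score2prob(-50))    # below the zero-crossing: returns 0.0
print(score2prob(500))    # above the one-crossing: returns 1.0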
Example #5
    def compress_pdf(self, data):
        """ Compress the PDF

        """
        simple_spline = np.array([], dtype=np.float16)
        index_array = np.array([], dtype=np.uint8)
        for i in range(1, data.shape[1]):
            spline = spl(data[:, 0], data[:, i], ext=1)
            int_zw = spline(self.zlim)
            index_over = np.where(int_zw > self.thresh)[0]

            if len(index_over) < 2:
                print(len(index_over))
                raise ValueError('The grid spacing is too big. This can happen for instance if the PDF resembles a delta function.')

            try:
                index_array = np.append(index_array,
                                        np.array([index_over[0], index_over[-1]], dtype=np.uint8))
            except IndexError as e:
                print("Decrease the grid spacing! This can happen if the PDFs are a bit undersmoothed.")
                print(e)
            #which points are above the threshold?
            data_over = np.array(int_zw[index_over[0]:index_over[-1]], dtype=np.float16)
            simple_spline = np.append(simple_spline, data_over)
        return simple_spline, index_array
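`compress_pdf` keeps only the above-threshold segment of each PDF (as float16) together with its start/end grid indices. The inverse step is not shown in these excerpts, but would look roughly like the following sketch (the name `uncompress_sketch` and its layout assumptions are hypothetical):

# Hypothetical inverse of compress_pdf: rebuild full PDFs on the zlim grid.
import numpy as np

def uncompress_sketch(zlim, simple_spline, index_array):
    n_obj = len(index_array) // 2            # one (start, end) pair per object
    pdfs = np.zeros((len(zlim), n_obj))
    pos = 0
    for i in range(n_obj):
        start = int(index_array[2 * i])
        end = int(index_array[2 * i + 1])
        pdfs[start:end, i] = simple_spline[pos:pos + (end - start)]
        pos += end - start
    return pdfs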
Example #6
    def _fit_cloud(self):
        from scipy.interpolate import InterpolatedUnivariateSpline as spl

        def _line(n, m, i):
            x = np.mean(m[i]) if m[i] else None
            k_p = n - 1
            while k_p > 0 and k_p not in self:
                k_p -= 1
            x_up = self[k_p][not i](0) if k_p >= 0 else x

            if x is None or x > x_up:
                x = x_up
            return spl([0, 1], [x] * 2, k=1)

        self.clear()
        self.update(copy.deepcopy(self.cloud))

        for k, v in sorted(self.items()):
            v[0] = _line(k, v, 0)

            if len(v[1][0]) > 2:
                v[1] = _gspv_interpolate_cloud(*v[1])
            elif v[1][1]:
                v[1] = spl([0, 1], [np.mean(v[1][1])] * 2, k=1)
            else:
                v[1] = self[k - 1][0]
Example #7
    def perform_compression(self, data):
        """ Compress the PDF

        """
        simple_spline = np.array([])
        index_array = np.array([])
        for i in range(1, data.shape[1]):
            spline = spl(data[:, 0], data[:, i], ext=1)
            int_zw = spline(self.zlim)  # self.zlim, matching compress_pdf above
            index_over = np.where(int_zw > self.thresh)[0]

            if len(index_over) < 2:
                print(len(index_over))
                #raise ValueError('The grid spacing is too big. This can happen for instance if the PDF resembles a delta function.')
                continue

            try:
                index_array = np.append(index_array,
                                        np.array([index_over[0], index_over[-1]]))
            except IndexError as e:
                print("Decrease the grid spacing! This can happen if the PDFs are a bit undersmoothed.")
                print(e)
            data_over = np.array(int_zw[index_over[0]:index_over[-1]])
            simple_spline = np.append(simple_spline, data_over)
        # cast once after the loop instead of on every iteration
        simple_spline = np.array(simple_spline, dtype='float16')
        index_array = np.array(index_array, dtype='uint8')
        return simple_spline, index_array
Example #8
        def _line(n, m, i):
            x = np.mean(m[i]) if m[i] else None
            k_p = n - 1
            while k_p > 0 and k_p not in self:
                k_p -= 1
            x_up = self[k_p][not i](0) if k_p >= 0 else x

            if x is None or x > x_up:
                x = x_up
            return spl([0, 1], [x] * 2, k=1)
Example #9
File: utils_mlz.py Project: mgckind/MLZ
def get_probs(z, pdf, z1, z2):
    pdf = pdf / numpy.sum(pdf)
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dzo = z[1] - z[0]
    dz = 0.001
    Ndz = int((z2 - z1) / dz)
    A = 0
    for i in range(Ndz):
        A += dz * PP((z1) + dz / 2. + dz * i)
    return A / dzo
Example #10
def get_probs(z, pdf, z1, z2):
    pdf = pdf / np.sum(pdf)
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dzo = z[1] - z[0]
    dz = 0.001
    Ndz = int((z2 - z1) / dz)
    A = 0
    for i in range(Ndz):
        A += dz * PP((z1) + dz / 2. + dz * i)
    return A / dzo
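`get_probs` integrates the interpolated PDF between z1 and z2 with a midpoint rule of step dz, then divides by the native grid spacing dzo to undo the `pdf / sum(pdf)` normalisation. The `bounds_error`/`fill_value` keywords imply `spl` is `scipy.interpolate.interp1d` here. A quick numerical check (hypothetical data):

# Quick check: ~68% of a Gaussian PDF lies within one sigma of its mean.
import numpy as np
from scipy.interpolate import interp1d as spl

z = np.linspace(0, 2, 201)
pdf = np.exp(-0.5 * ((z - 1.0) / 0.1)**2)
print(get_probs(z, pdf, 0.9, 1.1))   # approximately 0.68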
Example #11
def Full_Fisher(N_bins, bin_dists, f_sky, type_sam):
    """
    Calculates the Full Fisher matrix for N bins
    --------------------------------------------
    Inputs:


    --------------------------------------------
    Outputs:

    """
    nz = 10000  #number of steps to use for the radial/redshift integration
    zarray = np.linspace(0, 4.0, nz)
    z = zarray[1:-1]

    # Dimension of the Fisher matrix
    dim = N_bins + 2

    # The total Fisher matrix has dimensions (N_bins + 2) x (N_bins + 2)
    Full_Fish = np.zeros([dim, dim])

    for i in range(N_bins):
        i_bin = i + 1  #the bin number - starting from one

        # Redshift distribution in the bin
        bin_z = bin_dists[i]

        mean_z = np.mean(bin_z)
        bias = 1.0 + mean_z

        if (type_sam == 0):
            N_gal = 32.0 * float(len(bin_z))
        else:
            N_gal = float(len(bin_z))

        # ==========================================================
        # ==========================================================
        # take a histogram for the redshift distribution
        y_like, x_like = np.histogram(bin_z, bins=200, density=True)
        x_like = x_like[:-1]

        bin_sp = spl(x_like, y_like, s=0.0, ext=1)  #Interpolate

        Dz = 0.0  # This can change
        n_z = bin_sp(z - Dz)  # the normalized n(z) in the range (0.0, 4.0)

        # Calculate the derivative now
        der_spl = bin_sp.derivative()
        dn_dDz = -der_spl(z)

        Full_Fish += Fish_single_bin(N_bins, i_bin, mean_z, bias, n_z, dn_dDz,
                                     f_sky, N_gal)

    return Full_Fish
Example #12
def get_prob_Nz(z, pdf, zbins):
    pdf = pdf / np.sum(pdf)
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dzo = z[1] - z[0]
    dz = 0.001
    Ndz = int((zbins[1] - zbins[0]) / dz)
    Nzt = np.zeros(len(zbins) - 1)
    for j in range(len(Nzt)):
        for i in range(Ndz):
            Nzt[j] += dz * PP((zbins[j]) + dz / 2. + dz * i)
    return Nzt / dzo
Example #13
File: utils_mlz.py Project: mgckind/MLZ
def get_prob_Nz(z, pdf, zbins):
    pdf = pdf / numpy.sum(pdf)
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dzo = z[1] - z[0]
    dz = 0.001
    Ndz = int((zbins[1] - zbins[0]) / dz)
    Nzt = numpy.zeros(len(zbins) - 1)
    for j in range(len(Nzt)):
        for i in range(Ndz):
            Nzt[j] += dz * PP((zbins[j]) + dz / 2. + dz * i)
    return Nzt / dzo
Example #14
    def stack_pdf(self, zvalues, compressed_pdfs, zrange=None, weights=None):
        """Stack the PDFs with weights
        zlims: integrate PDF in the ranges
        weights: additional weights
        """

        uncompr = self.uncompress(compressed_pdfs[0],
                                  compressed_pdfs[1])
        if zrange is not None:
            zrange_weights = self.get_zrange_weights(uncompr, zrange)
            if weights is None:
                weights = zrange_weights
            else:
                weights *= zrange_weights

        data_stack = np.average(uncompr, axis=1, weights=weights)

        model = spl(self.zlim, data_stack, ext=1)
        model = spl(self.zlim, data_stack/model.integral(self.zmin, self.zmax), ext=1)
        outputpdf = model(zvalues)
        return np.column_stack((zvalues, outputpdf))
Example #15
    def get_zrange_weights(self, uncompress, zrange, z=None):
        """Calculate the weights by integrating in
        the predefined zrange

        """
        if z is None:
            z = self.zlim
        zrange_weights = np.ones((uncompress.shape[1],))
        for i in range(len(zrange_weights)):
            model = spl(z, uncompress[:, i], ext=1)
            zrange_weights[i] = model.integral(zrange[0], zrange[1])

        return zrange_weights
Example #16
 def interpolate(self, crossingPoints):
     points = crossingPoints.transpose()
     tck, u = spl(points, k=5)
     curve = splev(np.linspace(0, 1, 1000), tck)
     curve = np.array(curve).transpose()
     min_dist = self.disPlan(curve[0])  # avoid shadowing the builtin min()
     minPoint = curve[0]                # return a point, not 0, if nothing is closer
     for point in curve:
         tmp = self.disPlan(point)
         if min_dist > tmp:
             min_dist = tmp
             minPoint = point
     return minPoint
Example #17
File: hdc.py Project: yqiuu/hd-corner
def plot_hdr1d(data, prob, bins=20, smooth=True, **kwargs):
    if np.isscalar(bins):
        bins = np.linspace(min(data), max(data), bins)
    elif type(bins) is dict:
        bins = bins[data.name]
    xp = bins[:-1] + np.diff(bins)/2.
    yp = np.zeros(len(xp))
    for i, (l, u) in enumerate(zip(bins[:-1], bins[1:])):
        p = prob[(data >= l) & (data < u)]
        yp[i] = max(p) if len(p) != 0 else 0.
    x = np.linspace(xp[0], xp[-1], 100)
    if smooth:
        plt.plot(x, spl(xp, yp)(x), **kwargs)
    else:
        plt.plot(xp, yp, **kwargs)
Example #18
File: utils_mlz.py Project: mgckind/MLZ
def get_area(z, pdf, z1, z2):
    """
    Compute area under photo-z Pdf between z1 and z2, PDF must add to 1

    :param float z: redshift
    :param float pdf: photo-z PDF
    :param float z1: Lower boundary
    :param float z2: Upper boundary
    :return: area between z1 and z2
    :rtype: float
    """
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    area = rom(PP, z1, z2, tol=1.0e-05, rtol=1.0e-05)
    dz = z[1] - z[0]
    return area / dz
Example #19
def get_area(z, pdf, z1, z2):
    """
    Compute area under photo-z Pdf between z1 and z2, PDF must add to 1

    :param float z: redshift
    :param float pdf: photo-z PDF
    :param float z1: Lower boundary
    :param float z2: Upper boundary
    :return: area between z1 and z2
    :rtype: float
    """
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    area = rom(PP, z1, z2, tol=1.0e-05, rtol=1.0e-05)
    dz = z[1] - z[0]
    return area / dz
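`get_area` does the same integral as `get_probs` but via Romberg quadrature on the interpolant; the call pattern suggests `rom` is `scipy.integrate.romberg` and `spl` is again `scipy.interpolate.interp1d`. A sketch with hypothetical data:

# Sketch; rom = scipy.integrate.romberg and spl = scipy.interpolate.interp1d are assumptions.
import numpy as np
from scipy.interpolate import interp1d as spl
from scipy.integrate import romberg as rom

z = np.linspace(0, 2, 201)
pdf = np.exp(-0.5 * ((z - 1.0) / 0.1)**2)
pdf /= pdf.sum()                      # "PDF must add to 1"
print(get_area(z, pdf, 0.9, 1.1))     # approximately 0.68, as with get_probs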
Example #20
    def spline_fit(x, y, xmin, xmax, n):

        # Slice out region
        xc = x[np.nonzero(x == xmin)[0][0]:np.nonzero(x == xmax)[0][0]]
        yc = y[np.nonzero(x == xmin)[0][0]:np.nonzero(x == xmax)[0][0]]

        # Compute spline fit
        if (xmax + n) > len(x):
            # raise instead of only printing: falling through would reach the
            # loop below with xcur, ycur and yspl undefined
            raise ValueError('NUMBER OF POINTS EXCEEDS INPUT SIZE')

        # Initialize output variables
        xcur = np.zeros(n)
        ycur = np.zeros(n)
        yspl = []

        # Compute fit
        for i in np.arange(0, len(xc) + n, n):

            # Slice out n points per step, compute a cubic (k=3) spline
            xcur = x[np.nonzero(x == xmin)[0][0] +
                     i:np.nonzero(x == xmin)[0][0] + i + n]
            ycur = y[np.nonzero(x == xmin)[0][0] +
                     i:np.nonzero(x == xmin)[0][0] + i + n]
            ys = spl(xcur, ycur, k=3)
            if i == len(xc) - 1:
                yspl.extend(ys(xcur)[0])
                break
            else:
                yspl.extend(ys(xcur))

        # Store data
        yspl = np.float64(yspl[:len(xc)])

        # Return output
        return xc, yc, yspl
Example #21
File: utils_mlz.py Project: mgckind/MLZ
def compute_error(z, pdf, zv):
    """
    Computes the error in the PDF calculation using a reference value from the PDF;
    it computes the 68% percentile limit around this value

    :param float z: redshift
    :param float pdf: photo-z PDF
    :param float zv: Reference value from PDF (can be mean, mode, median, etc.)
    :return: error associated to reference value
    :rtype: float
    """
    res = 0.001
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dz = z[1] - z[0]
    j = 0
    area = 0
    while area <= 0.68:
        j += 1
        za = zv - res * j
        zb = zv + res * j
        area = rom(PP, za, zb, tol=1.0e-04, rtol=1.0e-04) / dz
    return j * res
Example #22
File: utils_mlz.py Project: mgckind/MLZ
def compute_error2(z, pdf, zv):
    # bisect on the half-width until [zv - L, zv + L] encloses ~68% of the PDF
    L1 = 0.0001
    L2 = (max(z) - min(z)) / 2.
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dz = z[1] - z[0]
    eps = 0.05
    za1 = zv - L1
    zb1 = zv + L1
    area = 0
    LM = L2
    while abs(area - 0.68) > eps:
        za2 = zv - LM
        zb2 = zv + LM
        area = rom(PP, za2, zb2, tol=1.0e-04, rtol=1.0e-04) / dz
        Lreturn = LM
        if area > 0.68:
            L2 = LM
            LM = (L1 + L2) / 2.
        else:
            L1 = LM
            LM = (L1 + L2) / 2.
    return Lreturn
Example #23
def compute_error2(z, pdf, zv):
    # bisect on the half-width until [zv - L, zv + L] encloses ~68% of the PDF
    L1 = 0.0001
    L2 = (max(z) - min(z)) / 2.
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dz = z[1] - z[0]
    eps = 0.05
    za1 = zv - L1
    zb1 = zv + L1
    area = 0
    LM = L2
    while abs(area - 0.68) > eps:
        za2 = zv - LM
        zb2 = zv + LM
        area = rom(PP, za2, zb2, tol=1.0e-04, rtol=1.0e-04) / dz
        Lreturn = LM
        if area > 0.68:
            L2 = LM
            LM = (L1 + L2) / 2.
        else:
            L1 = LM
            LM = (L1 + L2) / 2.
    return Lreturn
Example #24
def compute_error(z, pdf, zv):
    """
    Computes the error in the PDF calculation using a reference value from the PDF;
    it computes the 68% percentile limit around this value

    :param float z: redshift
    :param float pdf: photo-z PDF
    :param float zv: Reference value from PDF (can be mean, mode, median, etc.)
    :return: error associated to reference value
    :rtype: float
    """
    res = 0.001
    PP = spl(z, pdf, bounds_error=False, fill_value=0.0)
    dz = z[1] - z[0]
    j = 0
    area = 0
    while area <= 0.68:
        j += 1
        za = zv - res * j
        zb = zv + res * j
        area = rom(PP, za, zb, tol=1.0e-04, rtol=1.0e-04) / dz
    return j * res
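Both error estimators search for the half-width L at which [zv - L, zv + L] encloses about 68% of the PDF: `compute_error` grows the window linearly in steps of `res`, while `compute_error2` bisects. A usage sketch under the same interp1d/romberg assumptions as above:

# Sketch: both should return roughly sigma = 0.1 for this Gaussian.
import numpy as np

z = np.linspace(0, 2, 201)
pdf = np.exp(-0.5 * ((z - 1.0) / 0.1)**2)
pdf /= pdf.sum()
print(compute_error(z, pdf, 1.0))     # linear search
print(compute_error2(z, pdf, 1.0))    # bisection, to within eps = 0.05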
Example #25
def create_score2prob_lin(p_cnv):
    """
    create the function that will transform a cnv score in a p-value 
    of being a cnv

    Just a linear approximation
    """


    kpcnv = sorted(p_cnv)  # a sorted list: dict views are not indexable, and splines need ordered x
    lin_funct = spl(kpcnv, [p_cnv[k] for k in kpcnv], k=1)
    #x = np.arange(0,50,1)
    #plt.plot(x, lin_funct(x), '-', p_cnv.keys(), p_cnv.values(), '+')

    (x1, x2) = (kpcnv[0], kpcnv[-1])
    a = (lin_funct(x2) - lin_funct(x1))/(x2 - x1)
    b = lin_funct(x2) - a*x2
    x_where_y_is_1 = (1. - b)/a
    x_where_y_is_0 = (-b)/a

    
    def sc2prob(x):
        # x should not be a string
        # assert not isinstance(x, six.string_types), "{} is str".format(x)
        # but should be a number
        assert isinstance(x, numbers.Real), \
                                "{} is not a number: type {}".format(x,type(x))

        if x < x_where_y_is_0:
            return 0.
        elif x < x_where_y_is_1:
            return float(lin_funct(x))
        else:
            return 1.
    
    return sc2prob
Example #26
File: velocity.py Project: NikiRui/SNEPM
def estimate_continuum(wave,
                       flux,
                       mask=True,
                       minmask=None,
                       maxmask=None,
                       deg=1,
                       flat=True,
                       spline=False,
                       nknots=None):
    """
    perform polynomial fit for local continuum subtraction
    """

    if flat:
        continuum = np.min(flux) + np.zeros(wave.shape[0])
        return continuum
    else:
        ww = wave
        ff = flux
        if mask is True:
            if minmask is not None and maxmask is not None:
                kk = np.where((wave > minmask) & (wave < maxmask))
                ww = np.delete(wave, kk)
                ff = np.delete(flux, kk)
        print "No of points for estimating continuum", ww.shape[
            0], 'of', wave.shape[0]
        if spline:
            from scipy.interpolate import UnivariateSpline as spl
            if nknots is None:
                nknots = len(ww)
            spfit = spl(ww, ff, s=nknots)  # note: s is a smoothing factor, not a knot count
            continuum = spfit(wave)
        else:  #- poly fit
            coeff = np.polyfit(ww, ff, deg)  #- first order, deg=1
            continuum = np.polyval(coeff, wave)
    return continuum
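A usage sketch for the spline branch (hypothetical spectrum; as noted above, `s` in `UnivariateSpline` is a smoothing condition, not a knot count):

# Hypothetical usage: fit a smooth continuum, masking out the emission line.
import numpy as np

wave = np.linspace(4000.0, 7000.0, 500)
flux = 1.0 + 0.2 * np.sin(wave / 300.0) \
       + 5.0 * np.exp(-((wave - 5500.0) / 30.0)**2)
cont = estimate_continuum(wave, flux, mask=True, minmask=5400.0, maxmask=5600.0,
                          flat=False, spline=True, nknots=100)
line_only = flux - cont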
Example #27
def get_N0_iter(qe_key: str,
                nlev_t: float,
                nlev_p: float,
                beam_fwhm: float,
                cls_unl_fid: dict,
                lmin_ivf,
                lmax_ivf,
                itermax,
                cls_unl_dat=None,
                lmax_qlm=None,
                ret_delcls=False,
                datnoise_cls: dict or None = None,
                unlQE=False,
                version='1'):
    """Iterative lensing-N0 estimate

        Calculates iteratively partially lensed spectra and lensing noise levels.
        This uses the python camb package to get the partially lensed spectra.

        This makes no assumption that response = 1 / noise, hence it is about twice as slow as it could be in standard cases.

        Args:
            qe_key: QE estimator key
            nlev_t: temperature noise level (in :math:`\mu `K-arcmin)
            nlev_p: polarisation noise level (in :math:`\mu `K-arcmin)
            beam_fwhm: Gaussian beam full width half maximum in arcmin
            cls_unl_fid(dict): unlensed CMB power spectra
            lmin_ivf: minimal CMB multipole used in the QE
            lmax_ivf: maximal CMB multipole used in the QE
            itermax: number of iterations to perform
            lmax_qlm(optional): maximum lensing multipole to consider. Defaults to :math:`2 lmax_ivf`
            ret_delcls(optional): returns the partially delensed CMB cls as well if set
            datnoise_cls(optional): feeds in custom noise spectra to the data. The nlevs and beam only apply to the filtering in this case

        Returns:
            Array of shape (itermax + 1, lmax_qlm + 1) with all iterated N0s. First entry is the standard N0.


        Note: This assumes the unlensed spectra are known

    #FIXME: this is requiring the full camb python package for the lensed spectra calc.

     """
    assert qe_key in ['p_p', 'p', 'ptt'], qe_key
    try:
        from camb.correlations import lensed_cls
    except ImportError:
        assert 0, "could not import camb.correlations.lensed_cls"

    if lmax_qlm is None:
        lmax_qlm = 2 * lmax_ivf
    lmax_qlm = min(lmax_qlm, 2 * lmax_ivf)
    lmin_ivf = max(lmin_ivf, 1)
    transfi2 = utils.cli(
        hp.gauss_beam(beam_fwhm / 180. / 60. * np.pi, lmax=lmax_ivf))**2
    llp2 = np.arange(lmax_qlm + 1, dtype=float)**2 * np.arange(
        1, lmax_qlm + 2, dtype=float)**2 / (2. * np.pi)
    if datnoise_cls is None:
        datnoise_cls = dict()
        if qe_key in ['ptt', 'p']:
            datnoise_cls['tt'] = (nlev_t * np.pi / 180. / 60.)**2 * transfi2
        if qe_key in ['p_p', 'p']:
            datnoise_cls['ee'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
            datnoise_cls['bb'] = (nlev_p * np.pi / 180. / 60.)**2 * transfi2
    N0s_biased = []
    N0s_unbiased = []
    N1s_biased = []
    N1s_unbiased = []
    delcls_fid = []
    delcls_true = []

    N0_unbiased = np.inf
    N1_unbiased = np.inf
    dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
    cls_len_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
    if cls_unl_dat is None:
        cls_unl_dat = cls_unl_fid
        cls_len_true = cls_len_fid
    else:
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        cls_len_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))
    cls_plen_true = cls_len_true
    for irr, it in utils.enumerate_progress(range(itermax + 1)):
        dls_unl_true, cldd_true = cls2dls(cls_unl_dat)
        dls_unl_fid, cldd_fid = cls2dls(cls_unl_fid)
        if it == 0:
            rho_sqd_phi = 0.
        else:
            # The cross-correlation coefficient is identical for the Rfid-biased QE or the rescaled one
            rho_sqd_phi = np.zeros(len(cldd_true))
            rho_sqd_phi[:lmax_qlm + 1] = cldd_true[:lmax_qlm + 1] * utils.cli(
                cldd_true[:lmax_qlm + 1] + llp2 *
                (N0_unbiased[:lmax_qlm + 1] + N1_unbiased[:lmax_qlm + 1]))

        if 'wE' in version:
            assert qe_key in ['p_p']
            if it == 0:
                print('including imperfect knowledge of E in iterations')
            slic = slice(lmin_ivf, lmax_ivf + 1)
            rho_sqd_E = np.zeros(len(dls_unl_true[:, 1]))
            rho_sqd_E[slic] = cls_unl_dat['ee'][slic] * utils.cli(
                cls_plen_true['ee'][slic] + datnoise_cls['ee'][slic])
            dls_unl_fid[:, 1] *= rho_sqd_E
            dls_unl_true[:, 1] *= rho_sqd_E
            cldd_fid *= rho_sqd_phi
            cldd_true *= rho_sqd_phi

            cls_plen_fid_resolved = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true_resolved = dls2cls(
                lensed_cls(dls_unl_true, cldd_true))
            cls_plen_fid = {
                ck: cls_len_fid[ck] - (cls_plen_fid_resolved[ck] -
                                       cls_unl_fid[ck][:len(cls_len_fid[ck])])
                for ck in cls_len_fid.keys()
            }
            cls_plen_true = {
                ck:
                cls_len_true[ck] - (cls_plen_true_resolved[ck] -
                                    cls_unl_dat[ck][:len(cls_len_true[ck])])
                for ck in cls_len_true.keys()
            }

        else:
            cldd_true *= (1. - rho_sqd_phi)  # The true residual lensing spec.
            cldd_fid *= (1. - rho_sqd_phi
                         )  # What I think the residual lensing spec is
            cls_plen_fid = dls2cls(lensed_cls(dls_unl_fid, cldd_fid))
            cls_plen_true = dls2cls(lensed_cls(dls_unl_true, cldd_true))

        cls_filt = cls_plen_fid if not unlQE else cls_unl_fid
        cls_w = cls_plen_fid if not unlQE else cls_unl_fid
        cls_f = cls_plen_true
        fal = {}
        dat_delcls = {}
        if qe_key in ['ptt', 'p']:
            fal['tt'] = cls_filt['tt'][:lmax_ivf + 1] + (
                nlev_t * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['tt'] = cls_plen_true['tt'][:lmax_ivf +
                                                   1] + datnoise_cls['tt']
        if qe_key in ['p_p', 'p']:
            fal['ee'] = cls_filt['ee'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            fal['bb'] = cls_filt['bb'][:lmax_ivf + 1] + (
                nlev_p * np.pi / 180. / 60.)**2 * transfi2
            dat_delcls['ee'] = cls_plen_true['ee'][:lmax_ivf +
                                                   1] + datnoise_cls['ee']
            dat_delcls['bb'] = cls_plen_true['bb'][:lmax_ivf +
                                                   1] + datnoise_cls['bb']
        if qe_key in ['p']:
            fal['te'] = np.copy(cls_filt['te'][:lmax_ivf + 1])
            dat_delcls['te'] = np.copy(cls_plen_true['te'][:lmax_ivf + 1])
        fal = utils.cl_inverse(fal)
        for cl in fal.values():
            cl[:lmin_ivf] *= 0.
        for cl in dat_delcls.values():
            cl[:lmin_ivf] *= 0.
        cls_ivfs_arr = utils.cls_dot([fal, dat_delcls, fal])
        cls_ivfs = dict()
        for i, a in enumerate(['t', 'e', 'b']):
            for j, b in enumerate(['t', 'e', 'b'][i:]):
                if np.any(cls_ivfs_arr[i, j + i]):
                    cls_ivfs[a + b] = cls_ivfs_arr[i, j + i]

        n_gg = get_nhl(qe_key,
                       qe_key,
                       cls_w,
                       cls_ivfs,
                       lmax_ivf,
                       lmax_ivf,
                       lmax_out=lmax_qlm)[0]
        r_gg_true = qresp.get_response(qe_key,
                                       lmax_ivf,
                                       'p',
                                       cls_w,
                                       cls_f,
                                       fal,
                                       lmax_qlm=lmax_qlm)[0]
        r_gg_fid = qresp.get_response(
            qe_key, lmax_ivf, 'p', cls_w, cls_w, fal,
            lmax_qlm=lmax_qlm)[0] if cls_f is not cls_w else r_gg_true
        N0_biased = n_gg * utils.cli(
            r_gg_fid**
            2)  # N0 of possibly biased (by Rtrue / Rfid) QE estimator
        N0_unbiased = n_gg * utils.cli(
            r_gg_true**2
        )  # N0 of QE estimator after rescaling by Rfid / Rtrue to make it unbiased
        N0s_biased.append(N0_biased)
        N0s_unbiased.append(N0_unbiased)
        cls_plen_true['pp'] = cldd_true * utils.cli(
            np.arange(len(cldd_true))**2 *
            np.arange(1, len(cldd_true) + 1, dtype=float)**2 / (2. * np.pi))
        cls_plen_fid['pp'] = cldd_fid * utils.cli(
            np.arange(len(cldd_fid))**2 *
            np.arange(1, len(cldd_fid) + 1, dtype=float)**2 / (2. * np.pi))

        if 'wN1' in version:
            if it == 0: print('Adding n1 in iterations')
            from lensitbiases import n1_fft
            from scipy.interpolate import UnivariateSpline as spl
            lib = n1_fft.n1_fft(fal,
                                cls_w,
                                cls_f,
                                np.copy(cls_plen_true['pp']),
                                lminbox=50,
                                lmaxbox=5000,
                                k2l=None)
            n1_Ls = np.arange(50, lmax_qlm + 1, 50)
            if lmax_qlm not in n1_Ls: n1_Ls = np.append(n1_Ls, lmax_qlm)
            n1 = np.array(
                [lib.get_n1(qe_key, L, do_n1mat=False) for L in n1_Ls])
            N1_biased = spl(n1_Ls,
                            n1_Ls**2 * (n1_Ls * 1. + 1)**2 * n1 /
                            r_gg_fid[n1_Ls]**2,
                            k=2,
                            s=0,
                            ext='zeros')(np.arange(len(N0_unbiased)))
            N1_biased *= utils.cli(
                np.arange(lmax_qlm + 1)**2 *
                np.arange(1, lmax_qlm + 2, dtype=float)**2)
            N1_unbiased = N1_biased * (r_gg_fid * utils.cli(r_gg_true))**2
        else:
            N1_biased = np.zeros(lmax_qlm + 1, dtype=float)
            N1_unbiased = np.zeros(lmax_qlm + 1, dtype=float)

        delcls_fid.append(cls_plen_fid)
        delcls_true.append(cls_plen_true)

        N1s_biased.append(N1_biased)
        N1s_unbiased.append(N1_unbiased)

    return (np.array(N0s_biased),
            np.array(N0s_unbiased)) if not ret_delcls else (
                (np.array(N0s_biased), np.array(N0s_unbiased), delcls_fid,
                 delcls_true))
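A usage sketch (the spectra dict is an assumption: it must map 'tt', 'ee', 'bb', 'te' and 'pp' to arrays, e.g. unlensed CAMB output, and the camb python package must be installed):

# Sketch: iterated lensing N0 for a polarisation-only iterative estimator.
# cls_unl is assumed to hold unlensed CMB + lensing spectra from CAMB.
N0s_biased, N0s_unbiased = get_N0_iter('p_p',
                                       nlev_t=1.0,             # uK-arcmin
                                       nlev_p=1.0 * 2 ** 0.5,  # uK-arcmin
                                       beam_fwhm=1.4,          # arcmin
                                       cls_unl_fid=cls_unl,
                                       lmin_ivf=10,
                                       lmax_ivf=3000,
                                       itermax=5)
# Row 0 of N0s_unbiased is the standard QE N0; later rows are the iterated levels.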
Example #28
def main(model_name, output_dir=OUTPATH):
    # Load SNEMO model
    snemo = sncosmo.get_source(model_name)
    WAVES = snemo._wave
    PHASES = snemo._phase
    fluxes = np.array([flux(PHASES, WAVES) for flux in snemo._model_fluxes]).T

    # Load Hsiao template
    hsiao = sncosmo.Model('hsiao')

    # Define extended wavelengths
    EXTWAVE = np.concatenate([
        np.arange(1000, WAVES[0], 2), WAVES,
        np.arange(np.around(WAVES[-1], -1), 20010, 2)
    ])

    # Extend mean spectral timeseries (SNEMO C0)
    uv_waves = EXTWAVE[EXTWAVE < WAVES[0]]
    ir_waves = EXTWAVE[EXTWAVE > WAVES[-1]]
    opt_waves = EXTWAVE[(EXTWAVE >= WAVES[0]) & (EXTWAVE <= WAVES[-1])]

    uv_join_wave = WAVES[0]
    ir_join_wave = WAVES[-1]
    uv_scale = fluxes[0, :, 0] / (hsiao.flux(time=PHASES, wave=uv_join_wave).T)
    ir_scale = fluxes[-1, :, 0] / (hsiao.flux(time=PHASES,
                                              wave=ir_join_wave).T)

    ext_flux0 = np.concatenate([
        uv_scale * hsiao.flux(time=PHASES, wave=uv_waves).T, fluxes[:, :, 0],
        ir_scale * hsiao.flux(time=PHASES, wave=ir_waves).T
    ])

    # Apodize additional components
    uv_join_end = 3500  # location (in Angstroms) to stop smoothing in the UV
    ir_join_start = 8400  # location (in Angstroms) to start smoothing in the IR

    def apodize(x):
        if x < uv_join_end:
            return 1 / (uv_join_end - uv_join_wave) * (x - uv_join_wave)
        elif x > ir_join_start:
            return 1 - 1 / (ir_join_wave - ir_join_start) * (x - ir_join_start)
        else:
            return 1

    window = np.array(list(map(apodize, WAVES)))
    apodized_comps = (window * fluxes[:, :, 1:].T).T

    ext_flux1 = np.concatenate([
        np.zeros((len(uv_waves), *apodized_comps.shape[1:])), apodized_comps,
        np.zeros((len(ir_waves), *apodized_comps.shape[1:]))
    ])

    # Reshape
    ext_flux0 = ext_flux0.reshape((*ext_flux0.shape, 1))
    ext_flux = np.concatenate([ext_flux0, ext_flux1], axis=-1)

    # Rebin
    minwave = 1000
    maxwave = 20000
    velocity = 1000
    nbins = int(
        np.log10(maxwave / minwave) / np.log10(1 + velocity / 3.e5) + 1)
    velspace_EXTWAVE = np.geomspace(1000, 20000, nbins)

    flux_spls = [spl(PHASES, EXTWAVE, f) for f in ext_flux.T]
    velspace_flux = np.array([s(PHASES, velspace_EXTWAVE)
                              for s in flux_spls]).T

    # Write to file
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    out = []
    for i, phase in enumerate(PHASES):
        for j, wave in enumerate(velspace_EXTWAVE):
            out.append([phase, wave, *velspace_flux[j, i]])
    np.savetxt(os.path.join(output_dir, 'ext_{}.dat'.format(snemo.name)), out)
Example #29
dims = 2048  # dimensions of density array
verbose = False
DEBUG = 2

# Calculate z for range of comoving distances
z = np.zeros(3000)
d_c = np.zeros(3000)
H = np.zeros(3000)

for d in range(1, 3000):
    d_c[d] = d
    z[d] = z_at_value(cosmo.comoving_distance, d * uni.Mpc)
    H[d] = cosmo.H(z[d]).value

# create splines for quick lookup
d2z = spl(d_c, z)
z2H = spl(z, H, s=0)
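Once built, the splines give cheap lookups in place of repeated `z_at_value` root finds (a sketch):

# Sketch: fast cosmology lookups via the precomputed splines.
z_mid = d2z(1500.0)    # redshift at a comoving distance of 1500 Mpc
H_mid = z2H(z_mid)     # Hubble rate there, in km/s/Mpc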


def read_header(snap):
    snapshot_fname = '/cosma6/data/dp004/dc-smit4/Daemmerung/Planck2013-Npart_2048_Box_3000-Fiducial/run1/snapdir_{0:03d}/Planck2013-L3000-N2048-Fiducial_{0:03d}.0'.format(
        snap)
    return readgadget.header(snapshot_fname)


# read headers for all snapshots to get redshifts
snaps = 24
zz = np.zeros(snaps)
for snap in range(snaps):
    head = read_header(63 - snap)
    zz[snap] = head.redshift
Example #30
# Empty arrays to be filled with the splines (y1, y2) and the interpolated y-values (yy1, yy2)
y1 = [] # master instrument splines
y2 = []
yy1 = [] # master instrument interpolated y-values
yy2 = []

# for broader peaks a range(40, 105) covers 400 nm to 700 nm
# for narrower peaks a range of (50, 90) covers 500 nm to 640 nm
x = np.array([first_x[i] for i in range(40, 105)])  # the selected wavelength (x) range; an array so shift() can offset it

for i in rcal_i:
    first_row.append(fill_list(sheet1, i, 7))
    first_row2.append(fill_list(sheet2, i, 7))

for i in range(len(rcal_i)):
    y1.append(spl(first_x, first_row[i]))
    y2.append(spl(first_x2, first_row2[i]))
    yy1.append(y1[i](x))
    yy2.append(y2[i](x))
    
# -------------------- FUNCTIONS -----------------------------------

### Mean subtraction

mean_yy1 = [np.mean(yy1[i]) for i in range(len(yy1))]
mean_yy2 = [np.mean(yy2[i]) for i in range(len(yy2))]
sm_yy1 = []
sm_yy2 = []
for i in range(len(yy1)):
    sm_yy1.append([yy1[i][j] - mean_yy1[i] for j in range(len(yy1[i]))])
    sm_yy2.append([yy2[i][j] - mean_yy2[i] for j in range(len(yy2[i]))])
Example #31
# Fragment: the tail of a Gadget snapshot reader whose "def" line was lost;
# the header below is a reconstruction. part2 is a particle record dtype
# assumed to be defined elsewhere in the source.
def read_particles(f, headdata):
    pcount = headdata['npartTotal'][0][1]
    print(pcount)
    dummy = np.fromfile(f, dtype=np.int32, count=1)  # skip the block-size marker
    parts = np.fromfile(f, dtype=part2, count=pcount)
    f.close()
    return parts

# Calculate z for range of comoving distances
z = np.zeros(3000)
d_c = np.zeros(3000)
for d in range(1,3000):
    d_c[d] = d
    z[d] = z_at_value(cosmo.comoving_distance, d * uni.Mpc)
    
# create spline for quick lookup
d2z = spl(d_c,z)

# define the Schechter function
def schechter(x, phi_star=1.0, a=-1.24): # the luminosity function n(x) with x = L/Lstar
    return phi_star * x**a * np.exp(-x)
    
# calculate cumulative probability distribution function for Schechter distribution
x = np.linspace(0.1,10, 1000)
r = np.zeros(1000)
for i in range(1000):
    r[i] = integrate.quad(schechter, x[i], np.inf)[0]

# create spline for looking up the luminosity value corresponding to a given p value
P2L = spl(r[::-1], x[::-1], s=0)

# calculate average particle density and L_min for normalising probability distribution
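Since `P2L` inverts the cumulative Schechter integral, luminosities can be drawn by pushing uniform deviates through it (a sketch; deviates below `r[-1]` would extrapolate the spline, so the tabulated range is used):

# Sketch: inverse-CDF sampling of Schechter luminosities via P2L.
u = np.random.uniform(r[-1], r[0], size=10000)   # stay inside the tabulated range
L_samples = P2L(u)                               # luminosities in units of Lstar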
Example #32
# Calculate real-time sensitivity + precision
pred_rt = dat_sk_phat.groupby('date')[['y','tp','fp']].sum().reset_index()
cn_cumsum = ['y','tp','fp']
pred_rt[cn_cumsum] = pred_rt[cn_cumsum].apply(np.cumsum)
pred_rt = pred_rt.loc[pred_rt.query('y>0').index.min():].reset_index(drop=True)
pred_rt = pred_rt.assign(sens=lambda x: x.tp/x.y, prec=lambda x: x.tp/(x.tp+x.fp))
pred_rt = pred_rt.assign(se_sens=lambda x: np.sqrt(x.sens*(1-x.sens)/x.y), se_prec=lambda x: np.sqrt(x.prec*(1-x.prec)/(x.tp+x.fp)))
pred_rt = pred_rt.assign(zscore=lambda x: (x.sens-sens_trial)/x.se_sens).query('zscore<inf').reset_index(drop=True)
pred_rt = pred_rt.assign(pval=lambda x: 1-stats.norm.cdf(x.zscore))
# Calculate the full "n"
holder_n = np.zeros(len(pred_rt))
for i, date in enumerate(pred_rt.date):
    holder_n[i] = df_sk.query('date <= @date').shape[0]
pred_rt.insert(0,'n_rt',holder_n.astype(int))
mdl_nsq = spl(x=df_n_power.n_nsq, y=df_n_power.power)
mdl_sk = spl(x=df_n_power.n_sk, y=df_n_power.power)
# Add on power
pred_rt = pred_rt.assign(power_nsq=mdl_nsq(pred_rt.n_rt),power_sk=mdl_sk(pred_rt.n_rt))
# Repeat calculations for model types
tmp_pred = dat_sk_phat.groupby(['mdl','date'])[['y','tp','fp']].sum().reset_index()
tmp_pred[cn_cumsum] = tmp_pred.groupby('mdl')[cn_cumsum].cumsum()
tmp_pred = tmp_pred.assign(sens=lambda x: x.tp/x.y, prec=lambda x: x.tp/(x.tp+x.fp)).dropna()[['date','mdl','sens','prec']]
pred_PR = pd.concat([pred_rt.assign(mdl='all')[tmp_pred.columns], tmp_pred], axis=0)
pred_PR = pred_PR.melt(['date','mdl'],None,'msr','val')
pred_PR = pred_PR.assign(mdl=lambda x: x.mdl.map(di_mdl))

# Print how long trial would take
idx = pred_rt.query('pval < @alpha').index
date_reject = pred_rt.loc[idx[np.where(np.diff(idx.values) > 1)[0].max()+1]].date
days2reject = (date_reject - df_sk.date.min()).days
Example #33
File: sigmoid.py Project: SHiroaki/python
    # tail of calc_curvature() (header truncated in this listing): the
    # curvature is the reciprocal of the radius R computed above
    curvature_of_func = 1.0 / R

    return curvature_of_func
    
if __name__ == "__main__":
    
    #r, cur = calc_curvature(testf, )
    xdata = np.linspace(0., 1.5, 150)
    curvatures = [ calc_curvature(testf, 
                                  x) for x in xdata]

    ydata = testf(xdata)
     
    x_somepoint = np.linspace(0., 3, 100)
    y_somepoint = [testf(x) for x in x_somepoint]
    spfunc = spl(x_somepoint, y_somepoint, s=1)

    curvatures_spl = [calc_curvature(spfunc, x) for x in x_somepoint]

    fig, ax1 = plt.subplots()
    ax1.set_xlabel("X value")
    ax1.set_ylabel("Y value")

    for tl in ax1.get_yticklabels():
         tl.set_color("black")
         
    #ax2 = ax1.twinx()
    #ax2.set_ylabel("Curvature", color="r")

    line1 = ax1.plot(xdata, ydata, "b-", label="Function Values")
    #line2 = ax1.plot(x_somepoint, spfunc(x_somepoint), "g-", label="Spline Value")
Example #34
# 3 state 2D adt solve

import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline as spl
from scipy.integrate import solve_ivp
from numpy import sin, cos, tan

dat = np.loadtxt('./tauth_1d.dat')

grid = dat[:, 0]

getTau1 = spl(grid, dat[:, 1])
getTau2 = spl(grid, dat[:, 2])
getTau3 = spl(grid, dat[:, 3])


# for general
def res(t, y):  # returns value of the differential function at the point

    t12, t13, t23 = getTau1(t), getTau2(t), getTau3(t)
    y12 = -t12 - tan(y[1]) * (t13 * sin(y[0]) + t23 * cos(y[0]))
    y13 = -t13 * cos(y[0]) + t23 * sin(y[0])
    y23 = -(1.0 / cos(y[1])) * (t13 * sin(y[0]) + t23 * cos(y[0]))

    return y12, y13, y23


sol = solve_ivp(res, [grid[0], grid[-1]], [0, 0, 0],
                method='RK45',
                t_eval=grid)  # closing the call; the original listing was cut off here
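The integrator returns the three ADT mixing angles on the tau grid; a short sketch for inspecting the solution (assumes matplotlib is available):

# Sketch: plot the three adiabatic-to-diabatic mixing angles.
import matplotlib.pyplot as plt

theta12, theta13, theta23 = sol.y
plt.plot(sol.t, theta12, label='theta12')
plt.plot(sol.t, theta13, label='theta13')
plt.plot(sol.t, theta23, label='theta23')
plt.xlabel('grid')
plt.legend()
plt.show()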
Example #35
def _addCCSpec(snType, sedFile, oldWave, specList=None, specName=None):
    phase, newWave, flux = sncosmo.read_griddata_ascii(sedFile)
    wave = np.append(newWave[newWave < oldWave[0]],
                     newWave[newWave > oldWave[-1]])
    if len(wave) == 0:
        return flux
    #flux=flux[:][newWave==wave]
    if specList:
        spec = createAveSpec(specList)
    elif specName:
        spec = os.path.join(__dir__, 'data', 'spectra', specName)
        allSpec = _readSpec(spec)
    else:
        tempInd, tempVal = _find_nearest(phase, 0)
        if snType == 'II':
            snType = 'IIP'
        allSpec, bestConst = _findBest(snType, newWave, flux[tempInd])
        #spec=os.path.join(__dir__,'data','spectra',_defaultSpec[snType])
        #allSpec=_readSpec(spec)

    x = [float(x[2:]) for x in allSpec.colnames if x != 'wave']
    y = allSpec['wave']
    allSpec.remove_column('wave')

    func = interp2d(x, y, np.array([list(r) for r in np.array(allSpec)]))
    tempFlux = np.transpose(
        func(phase[phase >= x[0]][phase[phase >= x[0]] <= x[-1]],
             newWave[newWave > 4000][newWave[newWave > 4000] < 7500]))
    finalFlux = np.transpose(
        func(phase[phase >= x[0]][phase[phase >= x[0]] <= x[-1]],
             wave[wave >= y[0]][wave[wave >= y[0]] <= y[-1]]))
    if np.max(np.max(finalFlux)) == 0:
        return
    #if os.path.basename(sedFile)=='SDSS-015339.SED':
    #	print(finalFlux)
    #	print(wave[wave>=y[0]][wave[wave>=y[0]]<=y[-1]])
    #	sys.exit()

    splines = []
    for p in phase[phase >= x[0]][phase[phase >= x[0]] <= x[-1]]:

        ind = np.where(phase == p)[0][0]
        mySpline = spl(
            newWave[newWave > 4000][newWave[newWave > 4000] < 7500],
            flux[ind][newWave < 7500][newWave[newWave < 7500] > 4000],
            k=5,
            ext=1)
        tempSpline = mySpline(
            newWave[newWave < 7500][newWave[newWave < 7500] > 4000])
        splines.append(
            np.log10(
                flux[ind][newWave < 7500][newWave[newWave < 7500] > 4000] /
                tempSpline))

    splines = np.array(splines)
    waves = wave[wave >= y[0]][wave[wave >= y[0]] <= y[-1]]
    for i in range(len(phase[phase >= x[0]][phase[phase >= x[0]] <= x[-1]])):
        #const1=math.fabs(np.max(splines[i])/np.max(tempFlux[i]))

        #const2=math.fabs(np.min(splines[i])/np.max(tempFlux[i]))
        # NOTE: bestConst is only set on the _findBest branch above; the
        # specList/specName branches would hit a NameError here
        const = minimize(_specChi,
                         np.array([bestConst]),
                         args=(splines[i], tempFlux[i])).x
        #const=np.nanmedian([math.fabs(k) for k in splines[i]/tempFlux[i]])
        finalFlux[i] *= const
        if i == 4:
            final = const
        #if tempFlux[i][0]>splines[i][0]:
        #	constMinus=tempFlux[i][0]-splines[i][0]
        #	finalFlux[i]-=constMinus
        #else:
        #	constMinus=splines[i][0]-tempFlux[i][0]
        #	tempFlux[i]+=constMinus
        #splines[i]+=finalFlux[i]
        ind = np.where(phase == phase[phase >= x[0]][
            phase[phase >= x[0]] <= x[-1]][i])[0][0]
        for j in range(len(waves)):
            ind2 = np.where(newWave == waves[j])[0][0]
            flux[ind][ind2] = max(0, flux[ind][ind2] + finalFlux[i][j])
    #fig=plt.figure()
    #ax=fig.gca()
    #ax.plot(newWave[newWave<7500][newWave[newWave<7500]>4000],tempFlux[4]*const)
    #ax.plot(newWave[newWave<7500][newWave[newWave<7500]>4000],splines[4])
    #plt.savefig('test_'+os.path.basename(sedFile[:-3])+'pdf',format='pdf',overwrite=True)
    #plt.close()
    sncosmo.write_griddata_ascii(phase, newWave, flux, sedFile)
    return