Example #1
    def fit(self):
        """
        fit scan to an appropriate spline
        calculate a set of partition function values at different temperatures and fit a spline to the
        partition function values
        generate splines for the first and second derivatives of the partition function
        """
        N = len(self.pivots)
        if N > 2:
            self.V = interpolate.LinearNDInterpolator(self.phis, self.Es)
            self.rootD = interpolate.LinearNDInterpolator(
                self.phis, self.rootDs)
        elif N == 2:
            self.V = interpolate.SmoothBivariateSpline(
                self.phis[:, 0], self.phis[:, 1], self.Es)
            self.rootD = interpolate.SmoothBivariateSpline(
                self.phis[:, 0], self.phis[:, 1], self.rootDs)
        else:
            self.V = interpolate.CubicSpline(self.phis, self.Es)
            self.rootD = interpolate.CubicSpline(self.phis, self.rootDs)

        Tlist = np.linspace(10.0, 3001.0, num=20, dtype=np.float64)

        Qs = []
        for T in Tlist:
            Qs.append(self.calc_partition_function(T))

        self.Q = interpolate.CubicSpline(Tlist, Qs)
        self.dQdT = self.Q.derivative()
        self.d2QdT2 = self.dQdT.derivative()
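A minimal, self-contained sketch of the temperature-spline step above, assuming a stand-in for calc_partition_function (the exponential here is purely illustrative): sample values on a temperature grid, fit a CubicSpline, and differentiate it twice.

import numpy as np
from scipy import interpolate

Tlist = np.linspace(10.0, 3001.0, num=20)
Qs = np.exp(Tlist / 1000.0)  # stand-in for calc_partition_function(T)
Q = interpolate.CubicSpline(Tlist, Qs)
dQdT = Q.derivative()
d2QdT2 = dQdT.derivative()
print(Q(298.0), dQdT(298.0), d2QdT2(298.0))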
Example #2
def find_spline_transform(a_t, sm, tm):
    """
    Determine the residual `x` and `y` offsets between matched coordinates
    after affine transformation and fit 2D spline surfaces to describe the
    spatially-varying correction to be applied.
    """
    spline_order = 3

    # Get the source, after affine transformation, and template coordinates
    source_coo = a_t.apply_transform(get_det_coords(sm))
    template_coo = get_det_coords(tm)
    # Create splines describing the residual offsets in x and y left over
    # after the affine transformation
    kx = ky = spline_order
    sbs_x = interpolate.SmoothBivariateSpline(
        template_coo[:, 0],
        template_coo[:, 1], (template_coo[:, 0] - source_coo[:, 0]),
        kx=kx,
        ky=ky)

    sbs_y = interpolate.SmoothBivariateSpline(
        template_coo[:, 0],
        template_coo[:, 1], (template_coo[:, 1] - source_coo[:, 1]),
        kx=kx,
        ky=ky)

    return (sbs_x, sbs_y)
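A hedged usage sketch of the same pattern with synthetic matched coordinates (the data here are made up, not from the project above); it shows how a returned spline can be evaluated with .ev() to get the residual x-offset at arbitrary positions.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(0)
template = rng.uniform(0, 1000, size=(50, 2))
source = template + rng.normal(0, 0.5, size=(50, 2))  # small residual offsets
sbs_x = interpolate.SmoothBivariateSpline(
    template[:, 0], template[:, 1], template[:, 0] - source[:, 0], kx=3, ky=3)
dx = sbs_x.ev(template[:, 0], template[:, 1])  # x-correction at each position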
Example #3
def scatter2smoothgridded(outputfile,
                          headerfile=None,
                          xi=[],
                          yi=[],
                          inputfile=None,
                          x=[],
                          y=[],
                          z=[],
                          topotype=2,
                          kx=3,
                          ky=3,
                          s=0):
    """
    interpolate scattered data to gridded data using a smoothed bivariate spline of order kx,ky
    note: this will heavily smooth a complicated DEM but can be of use for
    new grid defined by uniformly spaced xi, yi or a header file
    input data can be x,y,z arrays or an .xyz 3 column file
    """

    from scipy import interpolate

    # inputfile/headerfile are parameters, so a NameError can never be raised
    # for them; check for missing values directly instead
    if len(x) == 0 or len(y) == 0 or len(z) == 0:
        if inputfile is None:
            err_msg = "Error: you must call with either a data file or x,y,z arrays"
            raise Exception(err_msg)

    if len(xi) == 0 or len(yi) == 0:
        if headerfile is None:
            err_msg = "Error: you must call with either a header file or xi,yi coordinate arrays"
            raise Exception(err_msg)

    if inputfile:
        xyz = np.loadtxt(inputfile)
        x = xyz[:, 0]
        y = xyz[:, 1]
        z = xyz[:, 2]

    if headerfile:
        #determine what the output grid will look like from headerfile
        topoheader = topoheaderread(inputfile=headerfile)
        #Create the gridded data using pylab "griddata function."
        xi = np.arange(topoheader['xll'], \
                   topoheader['xll']+topoheader['ncols']*topoheader['cellsize'], \
                   step=topoheader['cellsize'],dtype=float)
        yi = np.arange(topoheader['yll'], \
                   topoheader['yll']+topoheader['nrows']*topoheader['cellsize'], \
                   step=topoheader['cellsize'],dtype=float)

    w = np.ones(np.shape(x)) / np.shape(x)
    sp = interpolate.SmoothBivariateSpline(y, x, z, w=w, kx=kx, ky=ky, s=s)
    Zi = sp(yi, xi)

    (Xi, Yi) = np.meshgrid(xi, yi)
    Yi = np.flipud(Yi)
    Zi = np.flipud(Zi)
    griddata2topofile(Xi, Yi, Zi, outputfile, topotype)
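The core call above, reduced to a self-contained sketch with synthetic data. Note the (y, x) argument order the function uses, which makes rows of Zi correspond to yi and columns to xi.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(1)
x = rng.uniform(0, 10, 200)
y = rng.uniform(0, 10, 200)
z = np.sin(x) + np.cos(y)
w = np.ones_like(x) / x.size
sp = interpolate.SmoothBivariateSpline(y, x, z, w=w, kx=3, ky=3)
xi = np.linspace(0, 10, 101)
yi = np.linspace(0, 10, 81)
Zi = sp(yi, xi)  # shape (81, 101): gridded evaluation over yi x xi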
Example #4
def fine_dewarp(im, lines):
    im_h, im_w = im.shape[:2]

    debug = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
    points = []
    y_offsets = []
    for line in lines:
        if len(line) < 10 or abs(line.fit_line().angle()) > 0.001: continue
        line.fit_line().draw(debug, thickness=1)
        base_points = np.array([letter.base_point() for letter in line.inliers()])
        median_y = np.median(base_points[:, 1])
        y_offsets.append(median_y - base_points[:, 1])
        points.append(base_points)

        for underline in line.underlines:
            mid_contour = (underline.top_contour() + underline.bottom_contour()) / 2
            all_mid_points = np.stack([
                underline.x + np.arange(underline.w), mid_contour,
            ])
            mid_points = all_mid_points[:, ::4]
            points.append(mid_points)

        for p in base_points:
            pt = tuple(np.round(p).astype(int))
            cv2.circle(debug, (pt[0], int(median_y)), 2, lib.RED, -1)
            cv2.circle(debug, pt, 2, lib.GREEN, -1)
    cv2.imwrite('points.png', debug)

    points = np.concatenate(points)
    y_offsets = np.concatenate(y_offsets)
    mesh = np.mgrid[:im_w, :im_h].astype(np.float32)
    xmesh, ymesh = mesh

    # y_offset_interp = interpolate.griddata(points, y_offsets, xmesh, ymesh, method='nearest')
    # y_offset_interp = y_offset_interp.clip(-5, 5)
    # mesh[1] += y_offset_interp  # (mesh[0], mesh[1], grid=False)

    y_offset_interp = interpolate.SmoothBivariateSpline(
        points[:, 0], points[:, 1], y_offsets.clip(-3, 3),
        s=4 * points.shape[0]
    )
    ymesh -= y_offset_interp(xmesh, ymesh, grid=False).clip(-3, 3)

    conv_xmesh, conv_ymesh = cv2.convertMaps(xmesh, ymesh, cv2.CV_16SC2)
    out = cv2.remap(im, conv_xmesh, conv_ymesh,
                    interpolation=cv2.INTER_LINEAR,
                    borderValue=np.median(im)).T
    cv2.imwrite('corrected.png', out)

    debug = cv2.cvtColor(out, cv2.COLOR_GRAY2BGR)
    for line in lines:
        base_points = np.array([letter.base_point() for letter in line.inliers()[1:-1]])
        base_points[:, 1] -= y_offset_interp(base_points[:, 0], base_points[:, 1], grid=False)
        Line.fit(base_points).draw(debug, thickness=1)
    cv2.imwrite('corrected_line.png', debug)

    return out
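A sketch of the key evaluation above: with grid=False the spline is evaluated pointwise over whole meshes, so the output has the same shape as the inputs. Synthetic points stand in for the detected baseline points.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(2)
pts = rng.uniform(0, 100, size=(200, 2))
offsets = rng.normal(0, 1, 200).clip(-3, 3)
spl = interpolate.SmoothBivariateSpline(pts[:, 0], pts[:, 1], offsets,
                                        s=4 * pts.shape[0])
xmesh, ymesh = np.mgrid[:100, :80].astype(np.float32)
correction = spl(xmesh, ymesh, grid=False).clip(-3, 3)  # same shape as meshes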
Example #5
        def get_interpolator(_pts):
            """ Create bivariate spline interpolator """

            x = [pt.x for pt in _pts]
            y = [pt.y for pt in _pts]
            z = [pt.z for pt in _pts]

            _interp_f = interpolate.SmoothBivariateSpline(x, z, y, kx=4, ky=4)

            return _interp_f
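A sketch of the helper with synthetic points. One caveat worth knowing: kx=ky=4 requires at least (kx+1)*(ky+1) = 25 input points, or the fit fails.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(3)
x, z = rng.uniform(0.0, 1.0, (2, 30))
y = x + z
interp_f = interpolate.SmoothBivariateSpline(x, z, y, kx=4, ky=4)
print(interp_f.ev(0.5, 0.5))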
Example #6
    def fit_TPpwvEL_curve(self, pwv_vector, EL_vector):
        """
        Fits a curve that relates the elevation EL and the KID power Pkid to
        the sky temperature Tb_sky. A smooth bivariate spline of third order
        is used for the interpolation. A separate 2D function is made for each
        filter in the filterbank of the MKID chip and each function is saved
        in a separate file.

        Parameters
        ------------
        EL_vector: vector or scalar
            Values of the elevation for which the KID power and sky temperature
            are to be calculated.
            Unit: degrees
        pwv_vector: vector or scalar
            Values of the precipitable water vapor for which the KID power and
            sky temperature are to be calculated.
            Unit: mm
        """
        length_EL_vector = len(EL_vector)
        # eta_atm, F = self.load_etaF_data()
        # peak_indices = find_peaks(eta_atm[0, :]*(-1))[0] #gives indices of peaks

        #obtain data
        Tb_sky, Pkid = self.load_TP_data(pwv_vector, EL_vector)

        # make vectors of matrices
        pwv_vector_long = np.array([])
        EL_vector_long = np.array([])
        for i in range(0, len(pwv_vector)):
            pwv_vector_long = np.append(pwv_vector_long, pwv_vector[i]*np.ones(length_EL_vector))
            EL_vector_long = np.append(EL_vector_long, EL_vector)
        # make interpolations
        for j in range(0, self.num_filters):
            split_Tb_sky = tuple(np.vsplit(Tb_sky[:, j, :], len(Tb_sky[:, 0])))
            Tb_sky_vector = np.hstack(split_Tb_sky)
            split_Pkid = tuple(np.vsplit(Pkid[:, j, :], len(Pkid[:, 0])))
            Pkid_vector = np.hstack(split_Pkid)
            # if j in peak_indices:
            EL_vector_long = EL_vector_long.reshape([1, EL_vector_long.size])
            f = interpolate.SmoothBivariateSpline(EL_vector_long, Pkid_vector, \
            Tb_sky_vector, s = len(EL_vector_long))
            # f_pwv = interpolate.SmoothBivariateSpline(Pkid_vector, EL_vector_long, \
            # pwv_vector_long, s = len(Pkid_vector), kx = 3, ky = 3)
            if self.D1:
                name = self.path_model + r'\Data\splines_Tb_sky\spline_' + '%.1f' % (self.filters[j]/1e9) + 'GHz_D1'
            else:
                name = self.path_model + r'\Data\splines_Tb_sky\spline_' + '%.1f' % (self.filters[j]/1e9) + 'GHz'
            # name_pwv = self.path_model + '\Data\splines_pwv\spline_' + '%.1f' % (self.filters[j]/1e9) +'GHz_D1'
            np.save(name, np.array(f))

            f_load = np.load(name + '.npy', allow_pickle= True)
            f_function = f_load.item()
        return 0
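A minimal sketch of the save/load pattern used above, on synthetic data: a fitted spline object survives a round trip through np.save/np.load when wrapped in an object array and loaded with pickling allowed.

import numpy as np
from scipy import interpolate

x, y = np.mgrid[0:5, 0:5].reshape(2, -1).astype(float)
z = x * y
f = interpolate.SmoothBivariateSpline(x, y, z)
np.save('spline_demo.npy', np.array(f, dtype=object))
f_loaded = np.load('spline_demo.npy', allow_pickle=True).item()
print(f_loaded.ev(2.0, 3.0))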
Example #7
def make_spline(x, y, z, interpolator, x_grid, y_grid, mask):
    '''
    Convenient wrapper to abstract away interpolation. 
    
    inputs: x: numpy array of x values
            y: "            " y "     "
            z: "            " z "     "
            interpolator: string of either "radial", "nearest", or "smooth" to choose interpolating
                          function.
            
            x_grid: mesh of x values over which to interpolate
            y_grid: "     " y "                               "
            mask: boolean mask of areas to exclude from the x,y mesh
    
    returns: mesh grid, inputs x,y,z after filtering, and the filtered z mesh values Z 
    '''

    # If there is a nan temperature, we can't plot it so we remove it
    temp_mask = ~np.isnan(z)

    z = z[temp_mask]
    x = x[temp_mask]
    y = y[temp_mask]
    pos_mask = np.where(y < -70)
    z = z[pos_mask]
    x = x[pos_mask]
    y = y[pos_mask]

    B1, B2 = np.meshgrid(x_grid, y_grid, indexing='xy')
    points = np.array([x, y]).T
    values = z

    if interpolator == 'radial':
        spline = sp.interpolate.Rbf(x, y, z, function='cubic', smooth=5)
        Z = spline(B1.T, B2.T)

    if interpolator == 'nearest':
        spline = it.NearestNDInterpolator(points, values)
        Z = spline(B1.T, B2.T)

    if interpolator == 'smooth':
        spline = it.SmoothBivariateSpline(x, y, z)
        Z = spline.ev(B1.T, B2.T)

    # NaN-out grid cells excluded by the mask (mask is assumed to match Z's shape)
    for i in range(Z.shape[0]):
        for j in range(Z.shape[1]):
            if not mask[i][j]:
                Z[i][j] = np.nan

    return B1, B2, Z, x, y, z
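A sketch contrasting the two evaluation modes relevant above, on synthetic data: .ev() evaluates pointwise on equal-shaped arrays (as in the "smooth" branch), while calling the spline with two 1-D axes returns the full tensor-product grid.

import numpy as np
from scipy import interpolate as it

rng = np.random.default_rng(4)
x, y = rng.uniform(0.0, 1.0, (2, 40))
z = x**2 + y
spline = it.SmoothBivariateSpline(x, y, z)
B1, B2 = np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 7))
Z_pointwise = spline.ev(B1, B2)  # shape (7, 5), elementwise
Z_grid = spline(np.linspace(0, 1, 5), np.linspace(0, 1, 7))  # shape (5, 7)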
Example #8
File: dr1.py  Project: gbrammer/unicorn
def check_backgrounds():
    """
    UDF
    
    Plot the automatically-determined backgrounds as a function of position
    in the UDF frame
    """
    c = catIO.Readfile('../F140W/HUDF12-F140W.reform.cat')
    ok = c.mag_auto < 27.5
    
    fp = open('udf_backgrounds.dat','w')
    fp.write('# id c0 cx cy x y mag\n')
    for i in np.arange(c.N)[ok]:
        id = c.number[i]
        bgfile = 'UDF_FIT/UDF_%05d.bg.dat' %(id)
        if os.path.exists(bgfile):
            line = open(bgfile).readlines()[1][:-1]
            fp.write('%s  %7.1f %7.1f  %.2f\n' %(line, c.x_image[i], c.y_image[i], c.mag_auto[i]))
    
    fp.close()
    
    bg = catIO.Readfile('udf_backgrounds.dat')
    
    ok = (bg.mag > 24) & (bg.c0+0.003 > 0) & (bg.c0+0.003 < 0.004)
    plt.scatter(bg.x[ok], bg.y[ok], c=bg.c0[ok], s=30, vmin=-0.004, vmax=0.002)
    
    #### Try 2D as in scipy example (need newer version)
    # from scipy.interpolate import griddata
    # 
    # points = [bg.x[ok], bg.y[ok]]
    # values = bg.c0[ok]
    # 
    # grid_z2 = griddata(points, values, (bg.x, bg.y), method='cubic')
    
    from scipy import interpolate
    bg_spline = interpolate.SmoothBivariateSpline(bg.x[ok], bg.y[ok], bg.c0[ok], kx=4, ky=4)
    
    test = bg.c0*0.
    for i in range(bg.N):
        test[i] = bg_spline(bg.x[i], bg.y[i])
    #
    plt.scatter(bg.x+3600, bg.y, c=test, s=30, vmin=-0.004, vmax=0.002)
        
    plt.text(2000,3800,'Data (mag > 24)', ha='center', va='center')
    plt.text(5600,3800,'Interpolated', ha='center', va='center')
    plt.savefig('background_spline.pdf')
    
    import pickle
    fp = open('background_spline.pkl','wb')
    pickle.dump(bg_spline, fp)
    fp.close()
Example #9
    def offset_base(self, xyin):
        """Derive Displacement at the zenith

        Parameters
        ----------
        xyin : `np.ndarray`, (N, 2)
            Input coordinates.
            Unit is degree for sky, mm for PFI, and pixel for MCS.

        Returns
        -------
        offsetx : `np.ndarray`, (N, 1)
            Displacement in x-axis.
        offsety : `np.ndarray`, (N, 1)
            Displacement in y-axis.
        """

        if self.skip1_off:
            logging.info("------ Skipped.")
            offsetx = offsety = np.zeros(xyin.shape[1])
        else:
            # sky-x sky-y off-x off-y
            dfile = mypath+"data/offset_base_"+self.mode+".dat"
            IpolD = np.loadtxt(dfile).T

            x_itrp = ipol.SmoothBivariateSpline(IpolD[0, :], IpolD[1, :],
                                                IpolD[2, :], kx=5, ky=5, s=1)
            y_itrp = ipol.SmoothBivariateSpline(IpolD[0, :], IpolD[1, :],
                                                IpolD[3, :], kx=5, ky=5, s=1)

            logging.info("Interpolated the base offset")

            offsetx = np.array([x_itrp.ev(i, j) for i, j in zip(*xyin)])
            offsety = np.array([y_itrp.ev(i, j) for i, j in zip(*xyin)])

        return offsetx, offsety
Example #10
def get_interpolated_pstrain():

    [x, y, pstrain] = get_geodynamic_pstrain()

    # 'fdfault' generated grid (curvilinear)
    result = fdfault.output('longterm_initial_mesh', 'vxbody')
    result.load()

    # added_layer_thickness = 60.

    # print(nby1, nby0, nby0+nby1)

    # plt.pcolor(result.x[:, nby0:nby0+nby1], result.y[:, nby0:nby0+nby1], result.vx[:, nby0:nby0+nby1])
    # plt.scatter(x,y)
    # plt.show()

    # print(np.shape(result.x))
    # print(np.shape(result.y[:,0:nby1]))
    # print(result.y[0,0:nby1])
    # sys.exit()

    print("====== done step 1 =======")

    npts = 30
    pstraini = []

    new_xx = result.x[:, nby0:nby0 + nby1]
    new_yy = result.y[:, nby0:nby0 + nby1]
    new_result = result.vx[:, nby0:nby0 + nby1]

    for (xpt, ypt) in zip(new_xx.flatten(), new_yy.flatten()):
        dist = np.sqrt((x - xpt)**2 + (y - ypt)**2)
        order = dist.argsort()
        f1 = interpolate.SmoothBivariateSpline(x[order[:npts]],
                                               y[order[:npts]],
                                               pstrain[order[:npts]],
                                               kx=3,
                                               ky=3)

        pstrainres = f1(xpt, ypt)
        pstraini.append(pstrainres[0][0])

    print("====== done step 2 =======")
    pstraini = np.reshape(np.array(pstraini), np.shape(new_result))
    print(np.shape(pstraini))
    sys.exit()

    return pstraini
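The local-fit idea above as a self-contained sketch with synthetic scattered data: for each query point, fit a spline to only the npts nearest samples and evaluate it there.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(5)
x, y = rng.uniform(0, 10, (2, 500))
pstrain = np.hypot(x, y)
npts = 30

def local_eval(xpt, ypt):
    order = np.hypot(x - xpt, y - ypt).argsort()
    f1 = interpolate.SmoothBivariateSpline(
        x[order[:npts]], y[order[:npts]], pstrain[order[:npts]], kx=3, ky=3)
    return f1(xpt, ypt)[0][0]

print(local_eval(5.0, 5.0))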
Example #11
def panei_mr(targetTemp, baseDir):
    '''given a target temp, returns a function giving
    radius as a function of mass. 
    function is derived from cubic interpolation of Panei models'''
    assert np.all((targetTemp>=5000*units.K)&(targetTemp<45000*units.K)), \
        "Model invalid at temps less than 4000 or greater than 45,000 K"

    # read panei model grid in
    teffs,masses,radii = \
        read_panei_file(os.path.join(baseDir,'Panei/12C-MR-H-He/12C-MR-H-He.dat'))

    # this function interpolates to give radius as function of temp, mass
    func = interp.SmoothBivariateSpline(teffs, masses, radii)

    # this function specifies the target temp to give just radius as fn of mass
    f2 = lambda x: func(targetTemp, x)[0]
    return f2
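A sketch of the partial-application trick above with synthetic stand-ins for the Panei grid: fix the temperature argument of the fitted surface to obtain radius as a 1-D function of mass.

import numpy as np
from scipy import interpolate as interp

teffs = np.tile(np.linspace(5e3, 4.5e4, 20), 20)
masses = np.repeat(np.linspace(0.4, 1.2, 20), 20)
radii = 0.01 / masses + 1e-7 * teffs  # toy mass-radius relation
func = interp.SmoothBivariateSpline(teffs, masses, radii)
f2 = lambda m: func(20000.0, m)[0]  # radius vs. mass at fixed Teff
print(f2(np.array([0.6, 0.8])))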
Example #12
def test_interpolation(mesh):
    from scipy import interpolate

    x, y = mesh.x, mesh.y
    Z = np.exp(-x**2 - y**2)
    # We ensure interpolation points are within convex hull
    xi = np.random.uniform(0.1, 0.9, 10)
    yi = np.random.uniform(0.1, 0.9, 10)

    # Stripy
    zn1 = mesh.interpolate_nearest(xi, yi, Z)
    zl1, err = mesh.interpolate_linear(xi, yi, Z)
    zc1, err = mesh.interpolate_cubic(xi, yi, Z)

    # cKDTree
    tree = interpolate.NearestNDInterpolator((x, y), Z)
    zn2 = tree(xi, yi)

    # Qhull
    tri = interpolate.LinearNDInterpolator((x, y), Z, 0.0)
    zl2 = tri((xi, yi))

    # Clough Tocher
    cti = interpolate.CloughTocher2DInterpolator(np.column_stack([x,y]),\
                                                 Z, tol=1e-10, maxiter=20)
    zc2 = cti((xi, yi))
    zc2[np.isnan(zc2)] = 0.0

    # Spline
    spl = interpolate.SmoothBivariateSpline(x, y, Z)
    zc3 = spl.ev(xi, yi)

    # Radial basis function
    rbf = interpolate.Rbf(x, y, Z)
    zc4 = rbf(xi, yi)

    print("squared residual in interpolation\n  \
           - nearest neighbour = {}\n  \
           - linear = {}\n  \
           - cubic (clough-tocher) = {}\n  \
           - cubic (spline) = {}\n  \
           - cubic (rbf) = {}"                              .format(((zn1 - zn2)**2).max(), \
                                      ((zl1 - zl2)**2).max(), \
                                      ((zc1 - zc2)**2).max(), \
                                      ((zc1 - zc3)**2).max(), \
                                      ((zc1 - zc4)**2).max(),) )
Example #13
    def get_simple_interpolation_fn(self, axis=MEDIAL):
        """
        Returns a simple spline interpolation callable fitted across the whole field.

        :param axis: Pass constant SAGGITAL, MERIDIONAL, or MEDIAL
        :return: Scipy callable which accepts x and y positions and provides interpolated value.
        """
        lst = []
        for point in self.get_subset(axis):
            lst.append((point.x, point.y, point.mtf50))
        x_lst, y_lst, z_lst = zip(*lst)

        fn = interpolate.SmoothBivariateSpline(x_lst,
                                               y_lst,
                                               z_lst,
                                               kx=2,
                                               ky=2,
                                               s=float("inf"))
        return fn
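A sketch of the "simple" fit above on synthetic field data: with s=float("inf") the smoothing condition is trivially satisfied, so the kx=ky=2 spline collapses toward a global least-squares quadratic surface across the field.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(6)
x, y = rng.uniform(-1, 1, (2, 100))
z = 1 - 0.5 * (x**2 + y**2) + rng.normal(0, 0.02, 100)
fn = interpolate.SmoothBivariateSpline(x, y, z, kx=2, ky=2, s=float("inf"))
print(fn(0.0, 0.0))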
Example #14
def test_derivative(mesh):
    from scipy import interpolate
    from time import perf_counter as clock  # time.clock was removed in Python 3.8

    # Create a field to test derivatives
    x, y = mesh.x, mesh.y
    Z = np.exp(-x**2 - y**2)
    Zx = -2 * x * Z
    Zy = -2 * y * Z
    gradZ = np.hypot(Zx, Zy)

    # Stripy
    t = clock()
    Zx1, Zy1 = mesh.gradient(Z, nit=10, tol=1e-10)
    t1 = clock() - t
    gradZ1 = np.hypot(Zx1, Zy1)

    # Spline
    spl = interpolate.SmoothBivariateSpline(x, y, Z)
    t = clock()
    Zx2 = spl.ev(x, y, dx=1)
    Zy2 = spl.ev(x, y, dy=1)
    t2 = clock() - t
    gradZ2 = np.hypot(Zx2, Zy2)

    # Clough Tocher
    # This one is most similar to what is used in stripy
    t = clock()
    cti = interpolate.CloughTocher2DInterpolator(np.column_stack([x,y]),\
                                                 Z, tol=1e-10, maxiter=20)
    t3 = clock() - t
    Zx3 = cti.grad[:, :, 0].ravel()
    Zy3 = cti.grad[:, :, 1].ravel()
    gradZ3 = np.hypot(Zx3, Zy3)

    res1 = ((gradZ1 - gradZ)**2).max()
    res2 = ((gradZ2 - gradZ)**2).max()
    res3 = ((gradZ3 - gradZ)**2).max()
    print("squared error in first derivative\n  \
           - stripy = {} took {}s\n  \
           - spline = {} took {}s\n  \
           - cloughtocher = {} took {}s".format(res1, t1, res2, t2, res3, t3))
Example #15
def fit_spline( x, y, x_ref, knot_x=None, knot_y=None, num_knots=5, smooth=False, smooth_fac=None, weights=None, order=3):
        '''
        performs spline fit of the form dx = f(x,y)
        knot_x/knot_y are 1-d arrays that are the  x and y coordinates of knot locations
        '''
        if knot_x is None:
            knot_x = np.linspace(np.min(x), np.max(x), num=num_knots)
        if knot_y is None:
            knot_y = np.linspace(np.min(y), np.max(y), num=num_knots)

        if not smooth:
            # LSQBivariateSpline takes no smoothing factor; the knot vectors
            # control the fit, so smooth_fac is not used in this branch
            spline = interpolate.LSQBivariateSpline(x, y, x_ref, knot_x, knot_y,
                                                    kx=order, ky=order, w=weights)
        else:
            spline = interpolate.SmoothBivariateSpline(x, y, x_ref, w=weights)
        x_new = spline.ev(x, y)
        return x_new, spline
Example #16
    def interp_2d_values_from_profiles(self):
        """Interpolate values in 2D from profiles"""
        ux = np.array([], dtype=float)
        vy = np.array([], dtype=float)
        new_xt = self.points['xt']
        new_xl = self.points['xl'] + self.points['zone']
        new_values = np.zeros((self.nb_var, len(self.points)))
        for i, profile in enumerate(self.section_seq):
            first_xt = profile.get_limit_by_idx(0)['Xt_profil']
            last_xt = profile.get_limit_by_idx(-1)['Xt_profil']
            xt = (profile.coord.array['Xt'] - first_xt) / (last_xt - first_xt)
            ux = np.concatenate((ux, xt))
            vy = np.concatenate((vy, np.array([i] * profile.nb_points)))

        for j, var in enumerate(self.var_names()):
            z = np.array([], dtype=float)
            for profile in self.section_seq:
                z = np.concatenate((z, profile.coord.values[var]))

            if self.interp_values == 'BIVARIATE_SPLINE':
                interp_bivariate_spline = interpolate.SmoothBivariateSpline(
                    ux, vy, z, kx=3, ky=3)
                for k, (u, v) in enumerate(zip(new_xt, new_xl)):
                    new_values[j, k] = interp_bivariate_spline(u, v)[0][0]

            else:
                if self.interp_values == 'BILINEAR':
                    method = 'linear'
                elif self.interp_values == 'BICUBIC':
                    method = 'cubic'
                else:
                    raise NotImplementedError
                new_values[j, :] = interpolate.griddata((ux, vy),
                                                        z, (new_xt, new_xl),
                                                        method=method)

        return new_values
Example #17
def CubicSplineInterpolation(data = None, mask = None,filePath= ""):
    # griddata()
    results = np.zeros(data.shape)
    for i in range(data.shape[0]):
        if i % 100 == 0:
            print(i)
        temp = np.zeros((int(np.sum(mask[i,:,:,0].flatten())),3))
        index = 0
        for j in range(data.shape[1]):
            for k in range(data.shape[2]):
                if mask[i,j,k,0] == 1:
                    temp[index,0] = k
                    temp[index,1] = j
                    temp[index,2] = data[i,j,k,0]
                    index = index + 1
        x = np.linspace(0, 40, 41)
        y = np.linspace(0, 40, 41)
        # print(temp[:,2].max())
        # x, y = np.meshgrid(x, y)  # 20x20 gridded data
        # z = interpolate.griddata(temp[:,0:2], temp[:,2], (x, y), method='cubic')
        # func = interpolate.interp2d(temp[:,0], temp[:,1], temp[:,2], kind='cubic')
        w = np.ones((len(temp[:,0]),1))
        zum = np.sum(temp[:,2]**2)
        smooth = 0.01
        print(zum)
        func = interpolate.SmoothBivariateSpline(temp[:,1], temp[:,0], temp[:,2], w=w, s = 0)
        z = func(x, y)
        results[i,:,:,0] = z
        # plt.subplot(121)
        # print(z.shape)
        # plt.imshow(z)
        # plt.subplot(122)
        # plt.imshow(data[i,:,:,0])
        # plt.show()
    np.save(filePath, results)
    return results
Example #18
                                                 yoff)
            continue
        else:
            smean[missmean] = scoarse[missmean]
        smasknotmiss = np.logical_and(np.logical_not(miss), smask)
        if (np.sum(smasknotmiss) == 0):
            smean = scoarse.copy()
        sanddiffs = ldat[smasknotmiss] - smean[smasknotmiss]
        ## if (np.sum(smasknotmiss) > 5):
        ##   fill = np.interp(np.nonzero(np.logical_not(smasknotmiss)), np.nonzero(smasknotmiss), sanddiffs)
        ## else:
        ##   continue
        goodpnt = np.nonzero(np.logical_not(smasknotmiss))
        fillpnt = np.nonzero(smasknotmiss)
        if (np.sum(smasknotmiss) > 0):
            interpfunc = interpolate.SmoothBivariateSpline(
                goodpnt[0], goodpnt[1], sanddiffs)
            fill = interpfunc.ev(fillpnt[0], fillpnt[1])
            adj = np.zeros((256, 256))
            adj[goodpnt] = sanddiffs
            adj[fillpnt] = fill
        else:
            adj = np.zeros((256, 256))
        ## adj[np.logical_not(smasknotmiss)] = fill
        ## smalldiff = np.logical_and(np.less(ldat, ldat + (sandsdev*2)), np.greater(ldat, ldat - (sandsdev*2)))
        ## goodgood = np.logical_and(smask, smalldiff)
        ldatadj = ldat - adj
        # add one for each deviation above baseline
        n_dev[ldatadj > baseline] += 1
        ## ldatadj[np.logical_not(smalldiff)] = 0

        # save the maximum deviation
Example #19
def compute_Delta(redmap_cluster_file, redmap_member_file, output_file,
                  num_lam_bins, num_z_bins):
    #Compute Delta given redmap catalog and redmapmember catalog

    print "Reading redmapper catalog..."
    # Data catalog
    try:
        hdu = pf.open(redmap_cluster_file)
        memmatchid_clusters = hdu[1].data.field('mem_match_id')
        lam_clusters = hdu[1].data.field('lambda_chisq')
        z_clusters = hdu[1].data.field('z_lambda')
        ra_clusters = hdu[1].data.field('ra')
        dec_clusters = hdu[1].data.field('dec')
        hdu.close()
    except:
        print("Invalid redmapper cluster file!")
        raise
    num_clusters = len(lam_clusters)

    print "Reading redmapper member catalog..."
    # Read in member catalog
    try:
        hdu = pf.open(redmap_member_file)
        memmatchid_members = hdu[1].data.field('mem_match_id')
        pfree_members = hdu[1].data.field('pfree')
        p_members = hdu[1].data.field('p')
        r_members = hdu[1].data.field('r')
        theta_i_members = hdu[1].data.field('theta_i')
        theta_r_members = hdu[1].data.field('theta_r')
        hdu.close()
    except:
        print("Invalid redmapper member file!")
        raise

    num_members = len(r_members)
    #Compute membership probabilities
    memprob_members = pfree_members * p_members * theta_i_members * theta_r_members

    print "Calculating meanr..."
    # Calculate meanr for every cluster
    # This stores <R_mem> for every cluster
    meanr_clusters = np.zeros(num_clusters) - 1.
    memmatchid_clusters_output = np.zeros(num_clusters) - 1.
    counter = 0
    prev_index = 0
    for mi in range(1, num_members):
        if (mi % 1000000 == 0):
            print("member = ", mi + 1, " out of ", num_members + 1)
        if (memmatchid_members[mi] != memmatchid_members[prev_index]):
            meanr_clusters[counter] = np.sum(
                memprob_members[prev_index:mi] *
                r_members[prev_index:mi]) / np.sum(
                    memprob_members[prev_index:mi])
            memmatchid_clusters_output[counter] = memmatchid_members[
                prev_index]
            counter += 1
            prev_index = mi
    # Last cluster done separately
    meanr_clusters[counter] = np.sum(
        memprob_members[prev_index:] * r_members[prev_index:]) / np.sum(
            memprob_members[prev_index:])
    memmatchid_clusters_output[counter] = memmatchid_members[prev_index]

    # Test for mismatch
    bad = np.where(memmatchid_clusters_output != memmatchid_clusters)[0]
    if (len(bad) > 0):
        raise Exception("Mismatching indices!")

    # Compute average meanr in bins of richness and redshift
    lam_bins_meanr = np.exp(
        np.linspace(np.log(0.999 * np.min(lam_clusters)),
                    np.log(1.001 * np.max(lam_clusters)),
                    num=num_lam_bins + 1))
    z_bins_meanr = np.linspace(0.999 * np.min(z_clusters),
                               1.001 * np.max(z_clusters),
                               num=num_z_bins + 1)
    meanr_mat = np.zeros((num_lam_bins, num_z_bins))
    print "Computing meanr across grid..."
    for li in xrange(0, num_lam_bins):
        for zi in xrange(0, num_z_bins):
            in_bin = np.where((lam_clusters > lam_bins_meanr[li])
                              & (lam_clusters <= lam_bins_meanr[li + 1])
                              & (z_clusters > z_bins_meanr[zi])
                              & (z_clusters <= z_bins_meanr[zi + 1]))[0]
            #Only compute mean if there are clusters in bin
            if (len(in_bin) > 0):
                meanr_mat[li, zi] = np.mean(meanr_clusters[in_bin])

    print "Computing average value of meanr as a function of richness and z..."
    # Get average value of meanr for clusters of that richness and redshift, evaluated at richness and redshift of all clusters

    # Use simple interpolation of grid
    avg_meanr_clusters_simple = interp_avg_meanr(lam_clusters, z_clusters,
                                                 lam_bins_meanr, z_bins_meanr,
                                                 meanr_mat)

    # Get avg meanr using ungridded spline fit
    spline = interpolate.SmoothBivariateSpline(lam_clusters, z_clusters,
                                               meanr_clusters)
    avg_meanr_clusters_ungridspline = spline.ev(lam_clusters, z_clusters)

    # Get avg meanr using gridded spline fit
    meanr_mat = np.nan_to_num(meanr_mat)
    rbs = interpolate.RectBivariateSpline(
        np.exp(0.5 *
               (np.log(lam_bins_meanr[1:]) + np.log(lam_bins_meanr[:-1]))),
        0.5 * (z_bins_meanr[1:] + z_bins_meanr[:-1]),
        meanr_mat,
        ky=2)
    avg_meanr_clusters_gridspline = rbs.ev(lam_clusters, z_clusters)

    #Compute Delta for all clusters
    avg_meanr_clusters = np.copy(avg_meanr_clusters_gridspline)
    Delta_clusters = (meanr_clusters - avg_meanr_clusters) / avg_meanr_clusters

    print "Outputting to fits file..."
    #Create columns for fits file
    ra_col = pf.Column(name='RA', format='E', array=ra_clusters)
    dec_col = pf.Column(name='DEC', format='E', array=dec_clusters)
    lam_col = pf.Column(name='lambda_chisq', format='E', array=lam_clusters)
    z_col = pf.Column(name='z_lambda', format='E', array=z_clusters)
    meanr_col = pf.Column(name='R_mem', format='E', array=meanr_clusters)
    avg_meanr_col = pf.Column(name='mean_R_mem',
                              format='E',
                              array=avg_meanr_clusters)
    Delta_col = pf.Column(name='Delta', format='E', array=Delta_clusters)
    #All columns
    cols = pf.ColDefs(
        [ra_col, dec_col, lam_col, z_col, meanr_col, avg_meanr_col, Delta_col])
    #Write to fits
    tbhdu = pf.TableHDU.from_columns(cols)
    try:
        tbhdu.writeto(output_file)
    except:
        print "Unable to write to output file!"
        raise
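A reduced sketch of the two spline fits used above, on synthetic richness/redshift data: SmoothBivariateSpline on the scattered per-cluster values versus RectBivariateSpline on a binned grid.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(7)
lam = rng.uniform(20, 100, 300)
z = rng.uniform(0.1, 0.6, 300)
meanr = 0.01 * lam + z
scattered = interpolate.SmoothBivariateSpline(lam, z, meanr)
lam_grid = np.linspace(20, 100, 10)
z_grid = np.linspace(0.1, 0.6, 8)
grid_vals = 0.01 * lam_grid[:, None] + z_grid[None, :]
gridded = interpolate.RectBivariateSpline(lam_grid, z_grid, grid_vals, ky=2)
print(scattered.ev(50.0, 0.3), gridded.ev(50.0, 0.3))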
Example #20
def make_contour_plot_multrow(Xs,
                              Ys,
                              Cs,
                              elevation=30,
                              azimuthal=30,
                              alpha=0.8,
                              xlabel='',
                              ylabel='',
                              zlabel='',
                              clabel='',
                              title='',
                              titles=[],
                              bounds=None,
                              colors=[],
                              equal_mass=1,
                              colorbartype='simple',
                              label=None,
                              logC=True,
                              cmin=0.8,
                              cmax=1.,
                              savefig='plots/plot.png'):
    # {{{
    if not len(Xs) == len(Ys) == len(Cs):
        raise IOError("Length of lists to be plotted not the same")
    # Known failure mode: when all arrays in Xrows are of the same length
    nrows, ncols = np.shape(Xs)
    print("No of rows = %d, columns = %d" % (nrows, ncols))
    #
    gmean = (5.**0.5 - 1) * 0.5
    if logC:
        bounds = np.log10(bounds)
        print("Using logscale on Z", file=sys.stderr)
        for ridx, C in enumerate(Cs):
            for cidx, R in enumerate(C):
                Cs[ridx][cidx] = np.log10(R)
        if 'FF' in clabel:
            cmin = -2.3
        else:
            Cs = np.array(Cs)
            print("Shape of C = ", np.shape(Cs), "dtype of C = ", type(Cs))
            cmin = np.inf
            for tmpr in Cs:
                for tmpc in tmpr:
                    print(np.shape(tmpc), np.shape(tmpr))
                    if cmin > np.min(tmpc):
                        cmin = np.min(tmpc)
            print("Min = ", cmin)
            cmin = np.round(cmin, decimals=3)
        #clabel = clabel + ' (Log)'
    else:
        print("NOT using logscale on Z", file=sys.stderr)
        if 'FF' in clabel:
            cmin = 10**-2.3
        else:
            Cs = np.array(Cs)
            print("Shape of C = ", np.shape(Cs), "dtype of C = ", type(Cs))
            cmin = np.inf
            for tmpr in Cs:
                for tmpc in tmpr:
                    cmin = min(cmin, np.min(tmpc))
            print("Min = ", cmin)
            cmin = np.round(cmin, decimals=3)
    cmax = -np.inf
    for tmpr in Cs:
        for tmpc in tmpr:
            cmax = max(cmax, np.max(tmpc))
    cmax = np.round(cmax, decimals=3)
    #
    if isinstance(bounds, (np.ndarray, list)):
        print("bounds before insert  : ", bounds)
        bounds = insert_min_max_into_array(bounds, cmin, cmax)
        print("bounds after insert  : ", bounds)
    #
    # Insert default values of bounds
    if bounds is None and logC:
        if 'FF' in clabel or 'mathcal{M}' in clabel:
            bounds = np.log10([0.0001, 0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 1])
        elif '\mathcal{M}_c' in clabel:
            print("CHIRP MASS PLOT")
            bounds = np.log10([0.005, 0.01, 0.02, 0.03, 0.05, 0.1, 0.2])
        elif '\Delta' in clabel:
            bounds = np.log10([0.005, 0.01, 0.03, 0.05, 0.1, 0.2, 0.5, 1.])
        else:
            bounds = np.linspace(cmin, cmax, 10)
            #bounds = np.append( bounds - 0.1, 0 )
    elif bounds is None:
        raise IOError("Non-log default colorbar bounds not supported")
    # sanitise underscores in the per-row titles
    title = [t.replace('_', '-') for t in title]
    fig = plt.figure(int(1e7 * np.random.random()),
                     figsize=(4 * ncols, 4 * nrows))
    # cmap = plt.cm.RdYlGn_r#gist_heat_r##winter#gnuplot#PiYG_r#RdBu_r#jet#rainbow#RdBu_r#Spectral_r
    # cmap = plt.cm.PiYG_r#rainbow#RdBu_r#RdYlGn_r
    #cmap = plt.get_cmap('jet_r', 20)
    cmap = plt.cm.OrRd
    #cmaplist = [cmap(i) for i in range(cmap.N)]
    #cmap = cmap.from_list('Custom map', cmaplist, cmap.N)
    # cmap.set_under('gray')
    print("bounds = ", bounds)
    if type(bounds) == np.ndarray or type(bounds) == list:
        norm = mp.colors.BoundaryNorm(bounds, cmap.N)
    else:
        tmp_bounds = np.linspace(cmin, cmax, 10)
        norm = mp.colors.BoundaryNorm(tmp_bounds, cmap.N)
    #
    # Begin plotting loop
    nplot = 0
    allaxes = []
    for rowid in range(nrows):
        for colid in range(ncols):
            nplot += 1
            ax = fig.add_subplot(nrows, ncols, nplot)
            allaxes.append(ax)
            #
            X, Y, C = Xs[rowid][colid], Ys[rowid][colid], Cs[rowid][colid]
            print(np.shape(X), np.shape(Y), np.shape(C))
            # Add points in the bottom plane marking spins
            if equal_mass == colid + 1:
                tmpX, tmpY = X, Y
                X = np.append(X, tmpY)
                Y = np.append(Y, tmpX)
                C = np.append(C, C)
            #
            Xrange = np.linspace(min(X), max(X), 2000)
            Yrange = np.linspace(min(Y), max(Y), 2000)
            #Xrange = np.linspace( -1, 1, 100 )
            #Yrange = np.linspace( -1, 1, 100)
            Xmap, Ymap = np.meshgrid(Xrange, Yrange)
            print(np.shape(X), np.shape(Y), np.shape(C))
            # matplotlib's mlab.griddata was removed; scipy's griddata is the
            # modern equivalent for linear interpolation onto the mesh
            import scipy.interpolate as si
            colormap = si.griddata((X, Y), C, (Xmap, Ymap), method='linear')
            rbfi = si.SmoothBivariateSpline(X, Y, C, kx=4, ky=4)
            #colormap = rbfi(Xrange, Yrange)
            #
            # New interpolation scheme
            #
            #xyzData = np.append( np.append( [X], [Y], axis=0 ), [C], axis=0 )
            #xyzData = scipy.ndimage.zoom(xyzData, 3)
            #Xmap = xyzData[:,0]
            #Ymap = xyzData[:,1]
            #colormap = xyzData[:,2]
            print("Shape pof Xmap, Ymap, colormap = ", np.shape(Xmap),
                  np.shape(Ymap), np.shape(colormap))
            #Xmap = scipy.ndimage.zoom(Xmap, 3)
            #Ymap = scipy.ndimage.zoom(Ymap, 3)
            #colormap = scipy.ndimage.zoom(colormap, 3)
            if len(colors) == (len(bounds) - 1):
                CS = ax.contourf(Xmap, Ymap, colormap,
                                 levels=bounds,
                                 colors=colors,
                                 alpha=0.75,\
                                 # cmap=plt.cm.spectral,\
                                 linestyles='dashed')
                '''CS = ax.tricontourf(X,Y,C,\
                    levels=bounds,\
                    colors=colors,\
                    alpha=0.9)'''
                '''CS1 = ax.scatter(X, Y, c='k', s=5)#, \
                    #reduce_C_function=np.max)'''
            else:
                ax.contourf(Xmap,
                            Ymap,
                            colormap,
                            bounds,
                            cmap=cmap,
                            linestyles='dashed')
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_xlim([-1, 1])
            ax.set_ylim([-1, 1])
            # ax.zaxis.set_rotate_label(False)
            #ax.set_zlabel(zlabel, rotation=90)
            print("Len(titles) = %d, NCOLS = %d" % (len(titles), ncols))
            if len(titles) == ncols:
                ax.set_title(titles[colid], verticalalignment='bottom')
            elif colid == ncols // 2:
                ax.set_title(title[rowid] + '\n $q=%d$' % (colid + 1),
                             verticalalignment='bottom')
            else:
                ax.set_title('$q=%d$' % (colid + 1),
                             verticalalignment='bottom')
            ax.grid()
    #
    if colorbartype == 'simple':
        ax2 = fig.add_axes([0.92, 0.1, 0.01, 0.7])
        cb = plt.colorbar(CS, cax=ax2, orientation=u'vertical', format='%.3f')
        cb.set_label(clabel)
    else:
        ax2 = fig.add_axes([0.92, 0.1, 0.01, 0.7])
        #ax2 = fig.add_axes([0.2, 0.05, 0.6, 0.02])
        # Make the colorbar
        if type(bounds) == np.ndarray or type(bounds) == list:
            cb = mp.colorbar.ColorbarBase(ax2,
                                          cmap=cmap,
                                          norm=norm,
                                          spacing='uniform',
                                          format='%.2f',
                                          orientation=u'vertical',
                                          ticks=bounds,
                                          boundaries=bounds)
        else:
            # How does this colorbar know what colors to span??
            cb = mp.colorbar.ColorbarBase(ax2,
                                          cmap=cmap,
                                          norm=norm,
                                          spacing='uniform',
                                          format='%.2f',
                                          orientation=u'vertical',
                                          ticks=tmp_bounds)
        # Add tick labels
        if logC and (type(bounds) == np.ndarray or type(bounds) == list):
            cb.set_ticklabels(np.round(10**bounds, decimals=4))
        elif type(bounds) == np.ndarray or type(bounds) == list:
            cb.set_ticklabels(np.round(bounds, decimals=4))
        # cb = fig.colorbar(scat, shrink=0.5, aspect=30, spacing='proportional',\
        #  ticks=[0,-0.5,-1,-1.3,-1.6,-1.9,-2.1,-2.4,-2.7,-3])
        #ax2.set_title(clabel, loc='left')
        # ,labelpad=-0.3,y=1.1,x=-0.5)
        cb.set_label(clabel,
                     verticalalignment='top',
                     horizontalalignment='center')
        #if max(C) < cmax and min(C) > cmin: cb.set_clim([cmin,cmax])
    fig.tight_layout(rect=(0, 0, 0.93, 1))
    if '.png' in savefig:
        savefig = savefig.split('.png')[0] + '_q123.png'
    elif '.pdf' in savefig:
        savefig = savefig.split('.pdf')[0] + '_q123.pdf'
    fig.savefig(savefig)
    return
Example #21
def estimate_alpha_and_beta(input_filepath, quantity_to_compute, boundary,
                            radius, grid_size, value_change, method_angle,
                            method_curv, region_of_interest, region_points):
    """
    Imports a matrix of parameter values corresponding
    to a (alpha,beta) point and perform spline interpolation
    to make a parameter surface.
    Parameter surface is used to find intersection with
    initial parameter value plus / minus one standard deviation.
    Three criteria to find suggested alpha-beta value from intersection.

    Args:
        input_filepath (str): Surface model filename and location where data is stored.
        quantity_to_compute(str): Parameter name.
        boundary (list): Boundary of the search grid.
        radius (float): Minimum radius of circle to search outside of.
        grid_size (int): Size of the search grid (grid_size x grid_size matrix).
        value_change (float): Desired change in curvature / bend angle to achieve.
        method_angle (str): Method for computing angle.
        method_curv (str): Method for computing curvature.
        region_of_interest (str): Method for setting the region of interest ['manual' | 'commandline' | 'landmarking'].
        region_points (list): If region_of_interest is 'commandline', this is a flattened list of the start and end points.
    """
    # Get grid values
    base_path = get_path_names(input_filepath)

    # Get region points

    # Compute discrete values for quantity
    if type(boundary[-1]) is str:
        boundary = np.asarray(boundary, dtype=float)
    data = compute_quantities(input_filepath,
                              boundary,
                              quantity_to_compute,
                              method_curv,
                              method_angle,
                              region_of_interest,
                              region_points,
                              n=grid_size,
                              projection=False)
    # Get grid boundary
    amin, amax, bmin, bmax = float(boundary[0]), float(boundary[1]), float(
        boundary[2]), float(boundary[3])

    # Set standard deviations used to find intersection

    # Defined SD planes for curvature
    # Tolerance added for adjusting SD
    # if there are no intersections found
    def value_plus(tolerance=0.0):
        return initial_value + value_change - tolerance

    def value_minus(tolerance=0.0):
        return initial_value - value_change + tolerance

    n = len(data)
    alpha = np.linspace(amin, amax, n)
    beta = np.linspace(bmin, bmax, n)
    alpha_long = np.linspace(amin, amax, 300)
    beta_long = np.linspace(bmin, bmax, 300)
    xx, yy = np.meshgrid(alpha, beta)

    points = np.zeros((n, 2))
    for i in range(len(xx)):
        points[i] = [alpha[i], beta[i]]

    # Spline interpolation
    f = interpolate.SmoothBivariateSpline(xx.ravel(), yy.ravel(), data.ravel())

    initial_value = f(0, 0)
    methods = [value_plus, value_minus]

    # Find intersecting points
    # Reduces SD if no points are found
    for plane in methods:
        zeros = alpha_beta_intersection(plane, f, alpha_long, beta_long)
        if len(zeros) == 0:
            empty = True

            # Leeway tolerance for matching quantity on interpolated surface
            tol = 0.005 if quantity_to_compute == "curvature" else 0.1
            max_iter = 50
            iterations = 0

            print("-- Found no points..Adjusting SD")
            while empty and iterations < max_iter:
                print("-- Iterations: %i" % (iterations + 1))
                zeros = alpha_beta_intersection(plane, f, alpha_long,
                                                beta_long, tol)
                if len(zeros) > 0:
                    empty = False
                iterations += 1
                if quantity_to_compute == "curvature":
                    tol += 0.001
                elif quantity_to_compute == "angle":
                    tol += 0.2

        # Check points and apply criteria
        # to find suggested values for alpha and beta
        if len(zeros) > 0:
            points = []
            for p in zeros:
                if (plane.__name__ == "value_plus" and quantity_to_compute == "curvature") \
                        or (plane.__name__ == "value_minus" and quantity_to_compute == "angle"):
                    if p[1] < 0:
                        points.append(p)
                else:
                    if p[1] > 0:
                        points.append(p)

            suggested_point = points[0]
            dist = 1e9
            for p in points[1:]:
                dist_tmp = la.norm(np.array(p))
                if radius < dist_tmp < dist:
                    dist = dist_tmp
                    suggested_point = p

            # Write points to file
            write_alpha_beta_point(base_path, suggested_point, plane.__name__,
                                   quantity_to_compute)
Example #22
File: maps.py  Project: webia1/sumopy
    def evaluate_quote(self, longitudes, latitudes, elevations, x_point,
                       y_point):

        dists = np.sqrt(
            np.sum((np.stack(
                (longitudes, latitudes), axis=-1) - [x_point, y_point])**2, 1))

        if is_scipy:
            print('use scipy to interpolate')
            #tck = interpolate.splrep(x, y, s=0)
            #xnew = np.linspace(np.min(x), np.max(x), 200)
            #ynew = interpolate.splev(xnew, tck, der=0)
            #if 1:

            nearest_longitudes = longitudes[(dists <
                                             self.interpolation_radius)]
            nearest_latitudes = latitudes[(dists < self.interpolation_radius)]
            nearest_elevations = elevations[(dists <
                                             self.interpolation_radius)]
            ##            nearest_longitudes = longitudes[(longitudes < x_point + self.interpolation_radius)&(longitudes > x_point - self.interpolation_radius)&(latitudes < y_point + self.interpolation_radius)&(latitudes > y_point - self.interpolation_radius)]
            ##            nearest_latitudes = latitudes[(longitudes < x_point + self.interpolation_radius)&(longitudes > x_point - self.interpolation_radius)&(latitudes < y_point + self.interpolation_radius)&(latitudes > y_point - self.interpolation_radius)]
            ##            nearest_elevations = elevations[(longitudes < x_point + self.interpolation_radius)&(longitudes > x_point - self.interpolation_radius)&(latitudes < y_point + self.interpolation_radius)&(latitudes > y_point - self.interpolation_radius)]
            print([x_point, y_point], nearest_longitudes, nearest_latitudes,
                  nearest_elevations)
            if len(nearest_longitudes) > 15:

                f_inter = interpolate.SmoothBivariateSpline(
                    nearest_longitudes,
                    nearest_latitudes,
                    nearest_elevations,
                )  #kind = self.method_interp )
                ##    #############################################
                ##                xnew = np.linspace(x_point-self.interpolation_radius/2, x_point+self.interpolation_radius/2,200)
                ##                ynew = np.linspace(y_point-self.interpolation_radius/2, y_point+self.interpolation_radius/2,200)
                ##                X, Y = np.meshgrid(xnew, ynew)
                ##                Z = f_inter(xnew, ynew)
                ##
                ##
                ##                fig = plt.figure()
                ##                ax = fig.gca(projection='3d')
                ##
                ##
                ##                # Plot the surface.
                ##                surf = ax.plot_surface(X, Y, Z, cmap=cmp.coolwarm,
                ##                                       linewidth=0, antialiased=False)
                ##
                ##                fig.colorbar(surf, shrink=0.5, aspect=5)
                ##                plt.savefig('/home/cristian/scenarios_cri/Elevation/interpolation_%d'%(xnew[0]))
                ##                plt.show()
                #############################################
                quote = f_inter(x_point, y_point)
            else:
                quote = elevations[np.argmin(dists)]
                print('nearest quote')
        else:

            nearest_quotes = elevations[(dists < 100)]
            nearest_dists = dists[(dists < 100)]
            numerator = 0.0
            denominator = 0.0
            for near_quote, near_dist in zip(nearest_quotes, nearest_dists):
                numerator += near_quote / (10**(near_dist / 10))
                denominator += 1 / (10**(near_dist / 10))

    ##                        print numerator, denominator
            if denominator != 0:
                quote = numerator / denominator
            else:
                quote = elevations[np.argmin(dists)]
                print('nearest quote')

        return quote
Example #23
def interp_hazard_curve(sitelon, sitelat, hazcurvefile):
    from openquake.nrmllib.hazard.parsers import HazardCurveXMLParser
    from numpy import array, where
    from scipy import interpolate
    import warnings
    warnings.filterwarnings("ignore")

    hcm = HazardCurveXMLParser(hazcurvefile).parse()

    #curves = hcc._set_curves_matrix(hcm)

    #extract lat/lons from POES
    curlat = []
    curlon = []
    curves = []
    for loc, poes in hcm:
        curlon.append(loc.x)
        curlat.append(loc.y)
        curves.append((poes))

    curlon = array(curlon)
    curlat = array(curlat)
    curves = array(curves)

    # check to see if site within model
    if sitelat >= min(curlat) and sitelat <= max(curlat) \
       and sitelon >= min(curlon) and sitelon <= max(curlon):

        # find indexes about site
        index1 = where((curlon >= sitelon-1.) & (curlon <= sitelon+1.) \
                       & (curlat >= sitelat-1.) & (curlat <= sitelat+1.))[0]

    lonstrip = array(curlon)
    latstrip = array(curlat)
    curstrip = array(curves)

    # build array of poes for each return period
    interphazcurve = []
    for i in range(0, len(curstrip[0])):
        poearray = []
        for c in curstrip:
            poearray.append(c[i])

        # standard 2D interpolation does not work!
        '''
        # do 2D interpolation across log values
        interpfunc = interpolate.interp2d(lonstrip, latstrip, \
                                          log(array(poearray)), kind='linear')
        interphazcurve.append(exp(interpfunc(sitelon, sitelat)[0]))
        '''
        # use SmoothBivariateSpline instead
        '''
        # smooth ln hazard - changed to linear b/c cannot interp log(0)                              
        interpfunc = interpolate.SmoothBivariateSpline(lonstrip, latstrip, \
                                                       log(array(poearray)))
        # evaluate SmoothBivariateSpline                                  
        interphazcurve.append(exp(interpfunc.ev(sitelon, sitelat)))
        '''
        # smooth hazard
        interpfunc = interpolate.SmoothBivariateSpline(lonstrip, latstrip, \
                                                       array(poearray))
        # evaluate SmoothBivariateSpline
        interphazcurve.append(interpfunc.ev(sitelon, sitelat))

    return array(interphazcurve), hcm.metadata, curstrip
Example #24
def intp_comparison(isotope,option,size,Grid_Setting,Test_type,space_factor,
                    op_mode,X,Y,sorting_index,Kernel_type,Xsobol,Ysobol):
    
    """
    Comparison between Cubic Splines and GP models.
    
    inputs:
        *isotope: nuclide to use for comparison
        *option: 
            -Grid: for comparisons on models trained on a Grid
            -Random: for comparisons on models trained on a random selection
                     of the datasets.
            -Sobol: for comparisons on models trained on a Sobol sequence.
        *size: The number of samples to consider. Only considered when selecting
               the option 'Sobol' or 'Random'.
        *Grid_Setting: Selects the option on the design generator (Even, Odd or
                       Checkerboard).
        *Test_type: Selects the option on the design generator (All, inner or
                    borders).
        *space_factor: Sets the spacing on the design generator.
        *op_mode: Sets the operation mode of the design generator (normal,
                  testing).
        * X,Y, Xsobol, Ysobol: Training / Testing sets
        * sorting_index: Only used for X and Y on a grid. Reorder Ys points to
                         correctly match the points of the sampling grid.
        * Kernel_type: Used to select the kernel type used (Kernels-Mass, 
                       Kernels-Adens. etc)
    outputs:
        * Values_Cubic,Values_GPR: Contain diagnostics such as MAE, RMSE, MSE,
                                   Rsquared, plus mean predictive variance and
                                   fractions of predicted points within 
                                   predictive variance for GPR.
    """
    
    Temperature = np.array(list(set(X[:,0])))
    Burnup = np.array(list(set(X[:,1])))
    Burnup = np.sort(Burnup)
    
    Ys = np.array(Y[isotope])[sorting_index]
    Ys = Ys.reshape((len(Temperature),len(Burnup))).T 
    Ydata = np.array(Y[isotope])
  
    if option == 'Grid':
    
    # =========================================================================
    # Cubic
    # =========================================================================
        Tmesh, Bmesh = np.meshgrid(Temperature,Burnup)
        BtrainCubic,BtestCubic = gen_mask(
                Grid_Setting,Bmesh,Test_type,space_factor,op_mode)
        TtrainCubic,TtestCubic = gen_mask(
                Grid_Setting,Tmesh,Test_type,space_factor,op_mode)
        YtrainCubic,YtestCubic = gen_mask(
                Grid_Setting,Ys,Test_type,space_factor,op_mode)
    
    # =========================================================================
    # GPR
    # =========================================================================
        XtrainGPR = np.vstack((TtrainCubic,BtrainCubic)).T
        YtrainGPR = YtrainCubic
        XtestGPR = np.vstack((TtestCubic,BtestCubic)).T
        YtestGPR = YtestCubic
    
    elif option == 'random':
        Indexes = np.arange(len(X))
        IdxTrain = np.random.choice(Indexes,size = size)
        IdxTest = np.array([idx for idx in Indexes if idx not in IdxTrain])

        XtrainGPR = X[IdxTrain]
        XtestGPR = X[IdxTest]
        BtrainCubic = XtrainGPR[:,0]
        TtrainCubic = XtrainGPR[:,1]
        BtestCubic = XtestGPR[:,0]
        TtestCubic = XtestGPR[:,1]
        YtrainGPR = Ydata[IdxTrain]
        YtestGPR = Ydata[IdxTest]
        YtrainCubic = YtrainGPR
        YtestCubic = YtestGPR
    
    elif option == 'Sobol':
        XtrainGPR = Xsobol[:size]
        YtrainGPR = Ysobol[:size]
        XtestGPR = X
        YtestGPR = Ydata
        TtrainCubic = XtrainGPR[:,0]
        BtrainCubic = XtrainGPR[:,1]
        TtestCubic = XtestGPR[:,0]
        BtestCubic = XtestGPR[:,1]
        YtrainCubic = YtrainGPR
        YtestCubic = YtestGPR

    else:
        raise ValueError("option must be 'Grid', 'Random' or 'Sobol'")

    # =========================================================================
    # Interpolators
    # =========================================================================
    # SmoothBivariateSpline defaults to kx = ky = 3, i.e. a cubic surface
    CubicInt = intp.SmoothBivariateSpline(BtrainCubic,TtrainCubic,YtrainCubic)
    Yintp_Cubic = CubicInt.ev(BtestCubic,TtestCubic)
    Cubic_Errors = get_Error(Yintp_Cubic,YtestCubic)
    
    # =========================================================================
    # GPR:
    # =========================================================================

    try:
        # =====================================================================
        # Load Kernel Params:
        # =====================================================================
        
        Kernel = np.load('Path/'+Kernel_type+\
                         '/{}/{}.npy'.format(option,isotope),
                         allow_pickle=True).item()
        Params = Kernel['Params']
        Lambda_Inv = Kernel['LAMBDA']
        
        # =====================================================================
        # Precalculate alpha_ and K_inv:
        # =====================================================================
        alpha_,K_inv = alpha_calculator(Params,XtrainGPR,YtrainGPR)
        
        # =====================================================================
        # Get predictions:
        # =====================================================================
        
        GPR = np.array([GPR_MODEL_w_Std(Params,Lambda_Inv,XtrainGPR,
                                        YtrainGPR,alpha_,K_inv,x)
                        for x in XtestGPR])
        GPR_pred = GPR[:,0]
        GPR_std = GPR[:,1]
        GPR_Errors = get_Error(GPR_pred,YtestGPR)
    except FileNotFoundError:
        # no stored kernel for this isotope/option: signal with a sentinel
        return 404
     
        
    Mean_GPR_std = np.mean(GPR_std)
    Max_GPR_std = np.max(GPR_std)
    RsquareGPR = Rsquare(GPR_pred,YtestGPR)
    RsquareCubic = Rsquare(Yintp_Cubic,YtestCubic)
    # fractions of test points falling inside the 1- and 2-sigma
    # predictive bands
    Abs_Err = np.abs(YtestGPR - GPR_pred)
    f_1sigma = 100.0*np.mean(Abs_Err < GPR_std)
    f_2sigma = 100.0*np.mean(Abs_Err < 2*GPR_std)
    # =========================================================================
    # Print Summary
    # =========================================================================

    print('Mean Y = ',np.mean(Ys))
    labels = ['MAE','MSE','RMSE','Mean Rel. Error (%)','Max Rel. Error (%)']
    for i,label in enumerate(labels,start=1):
        print(label+' (Cubic | GPR) = ','{:.3e}'.format(Cubic_Errors[i]),
              '|','{:.3e}'.format(GPR_Errors[i]))
    print('R^2 Coeff. of Determination (Cubic | GPR) = ',
          '{:.3e}'.format(RsquareCubic),'|','{:.3e}'.format(RsquareGPR))

    Values_Cubic = list(Cubic_Errors)+[RsquareCubic]
    Values_GPR = list(GPR_Errors)+[RsquareGPR,Mean_GPR_std,Max_GPR_std,
                                   f_1sigma,f_2sigma]
    return Values_Cubic,Values_GPR
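The function above leans on project-specific helpers (gen_mask, get_Error, alpha_calculator, GPR_MODEL_w_Std, Rsquare) that are not shown. A minimal self-contained sketch of the spline half of the comparison, with synthetic data standing in for the temperature/burnup sets (all names and values below are illustrative, not from the original code):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(42)
X = rng.uniform(0.0, 1.0, size=(400, 2))   # stand-in for (Temperature, Burnup)
Y = np.exp(-X[:, 0])*X[:, 1]               # toy nuclide response

# random train/test split, mirroring the 'Random' option
idx = rng.permutation(len(X))
train, test = idx[:300], idx[300:]

spline = interpolate.SmoothBivariateSpline(X[train, 1], X[train, 0], Y[train])
pred = spline.ev(X[test, 1], X[test, 0])

rmse = np.sqrt(np.mean((pred - Y[test])**2))
print('RMSE = {:.3e}'.format(rmse))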
Example #25
    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)
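Example #25 is a single timing method lifted from an airspeed-velocity (asv) benchmark class; the class body and the setup that builds self.x, self.y, self.z are not part of the snippet. A sketch of what the full benchmark might look like (the class name, parameter list and data generation are assumptions):

import numpy as np
from scipy import interpolate


class SmoothBivariateSplineSuite:
    # hypothetical asv parameterization over the sample count
    params = [100, 1000]
    param_names = ['n_samples']

    def setup(self, n_samples):
        rng = np.random.default_rng(0)
        self.x = rng.uniform(0.0, 10.0, n_samples)
        self.y = rng.uniform(0.0, 10.0, n_samples)
        self.z = np.sin(self.x) * np.cos(self.y)

    def time_smooth_bivariate_spline(self, n_samples):
        interpolate.SmoothBivariateSpline(self.x, self.y, self.z)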
Example #26
def extrapolate_emodulus(lut,
                         datax,
                         deform,
                         emod,
                         deform_norm,
                         deform_thresh=.05,
                         inplace=True):
    """Use spline interpolation to fill in nan-values

    When points (`datax`, `deform`) are outside the convex
    hull of the lut, then :func:`scipy.interpolate.griddata` returns
    nan-values.

    With this function, some of these nan-values are extrapolated
    using :class:`scipy.interpolate.SmoothBivariateSpline`. The
    supported extrapolation values are currently limited to those
    where the deformation is above 0.05.

    A warning will be issued, because this is not really
    recommended.

    Parameters
    ----------
    lut: ndarray of shape (N, 3)
        The normalized (!! see :func:`normalize`) LUT (first axis is
        points, second axis enumerates datax, deform, and emodulus)
    datax: ndarray of size N
        The normalized x data (corresponding to `lut[:, 0]`)
    deform: ndarray of size N
        The normalized deform (corresponding to `lut[:, 1]`)
    emod: ndarray of size N
        The emodulus (corresponding to `lut[:, 2]`); If `emod`
        does not contain nan-values, there is nothing to do here.
    deform_norm: float
        The normalization value used to normalize `lut[:, 1]` and
        `deform`.
    deform_thresh: float
        Not the entire LUT is used for bivariate spline interpolation.
        Only the points where `lut[:, 1] > deform_thresh/deform_norm`
        are used. This is necessary, because for small deformations,
        the LUT has an extreme slope that kills any meaningful
        spline interpolation.
    inplace: bool
        If True (default), replace nan values in `emod` in-place;
        if False, leave `emod` untouched and return a modified copy.
    """
    if not inplace:
        emod = np.array(emod, copy=True)
    # unknowns are nans and deformation values above the threshold
    unkn = np.logical_and(np.isnan(emod), deform > deform_thresh / deform_norm)

    if np.sum(unkn) == 0:
        # nothing to do
        return emod

    warnings.warn(
        "LUT extrapolation is barely tested and may yield " +
        "unphysical values!", KnowWhatYouAreDoingWarning)

    lut_crop = lut[lut[:, 1] > deform_thresh / deform_norm, :]

    itp = spint.SmoothBivariateSpline(
        x=lut_crop[:, 0],
        y=lut_crop[:, 1],
        z=lut_crop[:, 2],
    )

    emod[unkn] = itp.ev(datax[unkn], deform[unkn])
    return emod
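extrapolate_emodulus comes from a larger codebase (KnowWhatYouAreDoingWarning, the normalize helper and the spint alias live elsewhere). A standalone sketch of the same pattern on toy data: griddata returns NaN outside the convex hull of the samples, and a SmoothBivariateSpline fitted to the known points fills those NaNs in:

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(1)
# known scattered samples of a smooth surface
xk = rng.uniform(0.2, 0.8, 200)
yk = rng.uniform(0.2, 0.8, 200)
zk = xk**2 + yk

# query points, some outside the convex hull of the samples
xq = np.array([0.5, 0.05, 0.95])
yq = np.array([0.5, 0.05, 0.95])
zq = interpolate.griddata((xk, yk), zk, (xq, yq))  # NaN outside the hull

unkn = np.isnan(zq)
spline = interpolate.SmoothBivariateSpline(xk, yk, zk)
zq[unkn] = spline.ev(xq[unkn], yq[unkn])           # extrapolated values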
Example #27
    default='mean'
)
args = parser.parse_args()
kind = args.kind

start = time.time()

temp_elev_lst = fs.read_all_tsi_val_temp_elev(VAL, tsi_path)
gridded = fs.populate_temp_elev_dict(temp_elev_lst, grid_temp, grid_elev, single_value=True)

fig = plt.figure()
fig.set_size_inches(14, 14)

grid_x, grid_y = np.mgrid[min(gridded['temp']):max(gridded['temp']):200j, 
                          min(gridded['elev']):max(gridded['elev']):200j]
spline = interpolate.SmoothBivariateSpline(gridded['temp'], gridded['elev'], gridded[kind])

print('knots\n', spline.get_knots())
print('coefficients\n', spline.get_coeffs())

grid_z = spline.ev(grid_x, grid_y)
i = len(temp_elev_lst['val'])

"""
grid_z2 = []
for u in np.linspace(min(gridded['temp']), max(gridded['temp']), 200):
    for v in np.linspace(min(gridded['elev']), max(gridded['elev']), 200):
        grid_z2.append(eval_surf_spline(u, v, knots[0], knots[1], coeffs))
        
grid_z2 = np.reshape(grid_z2, (200, 200))
grid_x2, grid_y2 = np.mgrid[min(gridded['temp']):max(gridded['temp']):200j, 
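The snippet above cuts off inside a commented-out block that apparently re-evaluated the surface from the printed knots and coefficients through an eval_surf_spline helper not shown here. A self-contained sketch of the standard way to do that with scipy.interpolate.bisplev, assuming the default cubic orders kx = ky = 3 (the synthetic temperature/elevation data below is an assumption):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(7)
temp = rng.uniform(0.0, 30.0, 300)
elev = rng.uniform(0.0, 3000.0, 300)
val = 0.5*temp - 0.001*elev + rng.normal(0.0, 0.1, 300)

spline = interpolate.SmoothBivariateSpline(temp, elev, val)

# rebuild the tck tuple from the knots/coefficients and evaluate on a grid
tx, ty = spline.get_knots()
c = spline.get_coeffs()
u = np.linspace(temp.min(), temp.max(), 200)
v = np.linspace(elev.min(), elev.max(), 200)
grid_z2 = interpolate.bisplev(u, v, (tx, ty, c, 3, 3))   # shape (200, 200)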
Example #28
    def make_Tau(self):
        tau_ary = np.load("tau_ary.npy")
        # note the argument order: x = tau_ary[0], y = tau_ary[2],
        # z = tau_ary[1]
        self.Tau_ip = ip.SmoothBivariateSpline(tau_ary[0], tau_ary[2],
                                               tau_ary[1])
def sim(S1    = params["sol1nI"],
        S1CH  = params["sol1cH"], 
        S1CV  = params["sol1cV"], 
        S2    = params["sol2nI"],
        S2CH  = params["sol2cH"],
        S2CV  = params["sol2cV"], 
        S3    = params["soltnI"],
        S3CH  = params["soltcH"],
        S3CV  = params["soltcV"], 
        H1    = params["hex1G"],
        S4    = params["csol1nI"],
        S4CH  = params["csol1cH"],
        S4CV  = params["csol1cV"],
        S5    = params["csol2nI"],
        S5CH  = params["csol2cH"],
        S5CV  = params["csol2cV"],
        H2    = params["hex2G"],
        S6    = params["csol3nI"],
        S6CH  = params["csol3cH"],
        S6CV  = params["csol3cV"],
        S7    = params["csol4nI"],
        S7CH  = params["csol4cH"],
        S7CV  = params["csol4cV"],
        Obj   = params["sol3nI"],
        ObjCH = params["sol3cH"],
        ObjCV = params["sol3cV"],
        S9    = params["sol4nI"],
        alpha = params["alpha"],
        theta = params["theta"],
        delta = params["delta"],
        seed  = 0,
        erL   = errorsigmaL,
        erTh  = errorsigmaTheta):
    np.random.seed(seed = seed)
    rs     = [np.random.normal(size = 6) for dummy in range(0, len(eleprefix))]
    errors = [[r[0]*erL, r[1]*erL, r[2]*erL,
               cos(r[3]*erTh)*cos(r[5]*erTh) - cos(r[4]*erTh)*sin(r[3]*erTh)*sin(r[5]*erTh),
              -cos(r[3]*erTh)*sin(r[5]*erTh) - cos(r[4]*erTh)*cos(r[5]*erTh)*sin(r[3]*erTh),
               sin(r[3]*erTh)*sin(r[4]*erTh),
               cos(r[5]*erTh)*sin(r[3]*erTh) + cos(r[3]*erTh)*cos(r[4]*erTh)*sin(r[5]*erTh),
               cos(r[3]*erTh)*cos(r[4]*erTh)*cos(r[5]*erTh) - sin(r[3]*erTh)*sin(r[5]*erTh),
              -cos(r[3]*erTh)*sin(r[4]*erTh)] for r in rs] 
    cmdA = "{} -o {} hexuscope.in {}{}".format(EXE, GDFFILE, 
          "".join(["{}={} ".format(x,y) for x, y in zip(params.keys(), 
          [S1, S1CH, S1CV, S2, S2CH, S2CV, H1, S3, 
           S3CH, S3CV, S4, S4CH, S4CV, 
           S5, S5CH, S5CV, H2, S6, S6CH, S6CV, S7, S7CH, S7CV, Obj, ObjCH, ObjCV, S9, alpha, theta, delta])]), 
          "".join(["{}={} ".format(s, t) for x, y in zip(errornames, errors) for s, t in zip(x, y)]))

    cmdC = "{} -o {} {} time x y z G".format(EXETRANS, TRANSFILE, GDFFILE)

    cmdB = "{} -o {} {}".format(EXETXT, ASCIIFILE, GDFFILE)

    cmdD = "{} -o {} {}".format(EXETXT, TRANSASCII, TRANSFILE)
    
    # cmdA,C,D to track the particles, cmdA,B to run standard screen
    os.system(cmdA)
    # os.system(cmdC)
    os.system(cmdB)
    # os.system(cmdD)
    screen =  np.loadtxt(ASCIIFILE, skiprows=5)
    
    x  = screen[:,0]
    y  = screen[:,1]
    kx = np.divide(screen[:,4], screen[:,6])
    ky = np.divide(screen[:,5], screen[:,6])

    meankx = np.mean(kx)
    sigkx  = np.std(kx)

    meanky = np.mean(ky)
    sigky  = np.std(ky)
    
    N = 40
    # override the measured spread with a fixed kx, ky acceptance limit
    sigkx = 0.040 / maxsig
    sigky = 0.040 / maxsig

    x_bins = [[[] for n in range(0,N)] for m in range(0,N)]
    y_bins = [[[] for n in range(0,N)] for m in range(0,N)]

    x_grid = np.zeros([N, N])
    y_grid = np.zeros([N, N])

    kx_grid, ky_grid = np.meshgrid(sigkx*np.linspace(-maxsig, maxsig, N),
                                   sigky*np.linspace(-maxsig, maxsig, N))


    for xi, yi, kxi, kyi in zip(x, y, kx, ky):
        i = int(0.5*N*((kyi-meanky)/(maxsig*sigky)) + 0.5*N)
        j = int(0.5*N*((kxi-meankx)/(maxsig*sigkx)) + 0.5*N)
        if i < 0 or i > N-1 or j < 0 or j > N-1:
            continue
        x_bins[i][j].append(xi)
        y_bins[i][j].append(yi)

    for i in range(0, N):
        for j in range(0, N):
            x_grid[i,j] = np.mean(x_bins[i][j])
            y_grid[i,j] = np.mean(y_bins[i][j])

    # Remove possible nan points that would make following interpolation step fail
    y_grid[np.isnan(y_grid)]=0
    x_grid[np.isnan(x_grid)]=0
    index = np.where(x_grid != 0)

    xfunc = interpolate.SmoothBivariateSpline(kx_grid[index].flatten(),
                                              ky_grid[index].flatten(),
                                              x_grid[index].flatten(),
                                              kx=5, ky=5)
    yfunc = interpolate.SmoothBivariateSpline(kx_grid[index].flatten(),
                                              ky_grid[index].flatten(),
                                              y_grid[index].flatten(),
                                              kx=5, ky=5)

    ky_fine = np.linspace(-sigky*maxsig, sigky*maxsig, 201)
    kx_fine = np.linspace(-sigkx*maxsig, sigkx*maxsig, 201)

    FILENAME = "trnsmssn_realpotential.pickle"
    FILENAME = 'trnsmssn_new.pickle'
    FILENAME = "/home/chenyu/Desktop/GaussianProcess/GPTrelated/trnsmssn_antialiasing.pickle"

    with open(FILENAME, "rb") as f:
        trnsmssn = pickle.load(f)

    shadow = np.array([[trnsmssn((xfunc(kx, ky)[0][0]/sampleScale + sampleL/2)%sampleL, 
                                 (yfunc(kx, ky)[0][0]/sampleScale + sampleL/2)%sampleL)[0] for kx in kx_fine] for ky in ky_fine])
    return maxsig*sigkx, maxsig*sigky, shadow
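The reusable pattern buried in sim() is: bin the scattered (kx, ky) points on a regular grid, average each bin, and fit quintic SmoothBivariateSpline surfaces to the binned means. A stripped-down sketch of just that step on synthetic data (the grid size, limits and toy response are assumptions):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(0)
kx = rng.normal(0.0, 0.01, 5000)
ky = rng.normal(0.0, 0.01, 5000)
x = kx**2 + 0.1*ky                       # toy screen coordinate

N, lim = 40, 0.04
edges = np.linspace(-lim, lim, N + 1)
centers = 0.5*(edges[:-1] + edges[1:])

# per-bin mean of x; rows follow ky, as in the binning loop above
sums, _, _ = np.histogram2d(ky, kx, bins=[edges, edges], weights=x)
counts, _, _ = np.histogram2d(ky, kx, bins=[edges, edges])
with np.errstate(invalid='ignore'):
    x_grid = sums / counts               # empty bins become NaN

kx_grid, ky_grid = np.meshgrid(centers, centers)
ok = ~np.isnan(x_grid)
xfunc = interpolate.SmoothBivariateSpline(kx_grid[ok], ky_grid[ok],
                                          x_grid[ok], kx=5, ky=5)
print(xfunc.ev(0.0, 0.0))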
Example #30
smooths = {}
for band in bands:
    smooths[band] = 10
for band in bands:
    z = ravel(array(z_mat[band]))
    ez = ravel(array(ez_mat[band]))
    print("fitting band ", band)
    print(inter.__file__)
    try:
        print(x.shape)
        print(y.shape)
        print(z.shape)
        #tck[band] = inter.bisplrep(x, y, z, w=1.0/ez, kx=1, ky=1, s=0.0*len(x))
        tck[band] = inter.SmoothBivariateSpline(x,
                                                y,
                                                z,
                                                w=1.0 / ez,
                                                s=smooths[band] * len(x))
    except Exception:
        print(x, y, z, ez, smooths[band] * len(x))
        print(type(x), x.shape)
        print(type(y), y.shape)
        print(type(z), z.shape)
        print(type(ez), ez.shape)
        sys.exit(1)
    tck["e_" + band] = inter.SmoothBivariateSpline(x,
                                                   y,
                                                   ez,
                                                   kx=1,
                                                   ky=1,
                                                   s=0.0 * len(x))
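The notable details in Example #30 are the weights and the smoothing budget: each point is weighted by the inverse of its error estimate (w=1.0/ez), and the smoothing factor s scales with the number of points. A self-contained sketch of a weighted fit on synthetic data with heteroscedastic noise (the factor 10 mirrors the smooths[band] value above; the data itself is an assumption):

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(3)
x = rng.uniform(0.0, 4.0, 300)
y = rng.uniform(0.0, 4.0, 300)
ez = rng.uniform(0.05, 0.5, 300)             # per-point error estimates
z = np.sin(x) + np.cos(y) + rng.normal(0.0, ez)

# noisier points get proportionally less say in the fit; the smoothing
# budget s grows with the number of points, as in the snippet above
spl = interpolate.SmoothBivariateSpline(x, y, z, w=1.0/ez, s=10*len(x))
print(spl.ev(2.0, 2.0), np.sin(2.0) + np.cos(2.0))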