Example #1
    def assign_interpol_controller(self):
        """ controller from optimal actions """

        # Compute grid of u
        self.u_policy_grid = []

        # for all inputs
        for k in range(self.sys.m):
            self.u_policy_grid.append(
                np.zeros(self.grid_sys.xgriddim, dtype=float))

        # For all state nodes
        for node in range(self.grid_sys.nodes_n):

            # use tuple to get dynamic list of indices
            indices = tuple(self.grid_sys.nodes_index[node, i]
                            for i in range(self.n_dim))

            # If no action is good
            if (self.action_policy[indices] == -1):

                # for all inputs
                for k in range(self.sys.m):
                    self.u_policy_grid[k][indices] = 0

            else:
                # for all inputs
                for k in range(self.sys.m):
                    self.u_policy_grid[k][
                        indices] = self.grid_sys.actions_input[
                            self.action_policy[indices], k]

        # Compute Interpol function
        self.interpol_functions = []

        # for all inputs
        for k in range(self.sys.m):
            if self.n_dim == 2:
                self.interpol_functions.append(
                    interpol2D(
                        self.grid_sys.xd[0],
                        self.grid_sys.xd[1],
                        self.u_policy_grid[k],
                        bbox=[None, None, None, None],
                        kx=1,
                        ky=1,
                    ))
            elif self.n_dim == 3:
                self.interpol_functions.append(
                    rgi([
                        self.grid_sys.xd[0], self.grid_sys.xd[1],
                        self.grid_sys.xd[2]
                    ], self.u_policy_grid[k]))
            else:
                points = tuple(self.grid_sys.xd[i] for i in range(self.n_dim))
                self.interpol_functions.append(
                    rgi(points, self.u_policy_grid[k], method='linear'))

        # Assign Controller
        self.ctl.vi_law = self.vi_law
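
A minimal sketch (not part of the example above; synthetic grid and stand-in names) of how a policy table interpolated with rgi can be queried for a continuous state:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

# Hypothetical stand-ins for grid_sys.xd and u_policy_grid[k] above.
xd = (np.linspace(-1, 1, 11), np.linspace(-2, 2, 21), np.linspace(0, 1, 5))
u_policy_grid_k = np.random.rand(11, 21, 5)

policy_fn = rgi(xd, u_policy_grid_k, method='linear')
u = policy_fn([[0.3, -1.2, 0.5]])[0]   # interpolated control input for one state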
Example #2
    def Interpolators(self):
        """ Add the u and v `Interpolator <https://docs.scipy.org/doc/scipy-0.16.1/reference/generated/scipy.interpolate.RegularGridInterpolator.html>`_ objects to the weather object (two new attributes :
            self.uInterpolator and self.vInterpolator).
            ::

                u = self.uInterpolator([t,lat,lon])
                #with u in m/s, t in days, lat and lon in degrees.
        """
        self.uInterpolator = rgi((self.time, self.lat, self.lon), self.u)
        self.vInterpolator = rgi((self.time, self.lat, self.lon), self.v)
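
For reference, a self-contained sketch (synthetic wind field, made-up resolutions) of the same (time, lat, lon) interpolator and the query pattern shown in the docstring:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

time = np.linspace(0, 10, 11)      # days
lat = np.linspace(-90, 90, 19)     # degrees
lon = np.linspace(0, 355, 72)      # degrees
u = np.random.rand(11, 19, 72)     # m/s, shaped (time, lat, lon)

uInterpolator = rgi((time, lat, lon), u)
print(uInterpolator([[2.5, 45.0, 120.0]]))   # u at t = 2.5 d, 45 deg N, 120 deg E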
Example #3
 def interp_VQ(self, dset, qT=False, V_label='Voltage', q_label='mAh/g', num_pts=100):
     '''Interpolates exp data to get eos'''
     if qT != False:
         #theoretical cap supplied; otherwise use self.theor_Q
         self.theor_Q = qT
     dset['Qf']=dset[q_label]/self.theor_Q
     self.q_grid=dset['Qf']
     self.V_grid=dset[V_label]
     self.V = rgi((self.q_grid.values,), self.V_grid.values)
     self.q = rgi((self.V_grid.values,), self.q_grid.values)
     return()
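
A rough sketch (hypothetical half-cell curve, synthetic values) of the 1-D rgi call used above; the coordinate array passed to rgi must be monotonic:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

q_grid = np.linspace(0.0, 1.0, 50)    # normalized capacity, monotonic
V_grid = 3.5 - 1.2 * q_grid           # made-up voltage curve
V = rgi((q_grid,), V_grid)            # V as a function of q
print(V([[0.25]]))                    # voltage at 25% of theoretical capacity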
Example #4
    def interpfungen(x, y, z, data0, data1, data2):
        fn0 = rgi((x, y, z), data0)
        fn1 = rgi((x, y, z), data1)
        fn2 = rgi((x, y, z), data2)

        def fun(pos, guess):
            postuple = (pos[0], pos[1], pos[2])
            theta = [fn0(postuple), fn1(postuple), fn2(postuple)]
            err = False
            return theta, err

        return fun
def interp3(xx, yy, zz, V, points, shape):
    interp = rgi((xx, yy, zz), V, bounds_error=False, fill_value=0)
    values = interp(points)
    values = values.reshape((int(shape[1]), int(shape[0]), int(shape[2])))
    values = rotate(values, -90)
    values = np.fliplr(values)
    return values
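
A simplified, self-contained sketch of the interp3 pattern above (interpolate a volume at a flat list of points, then reshape), leaving out the rotate/fliplr post-processing:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

xx, yy, zz = np.arange(10.0), np.arange(12.0), np.arange(8.0)
V = np.random.rand(10, 12, 8)
interp = rgi((xx, yy, zz), V, bounds_error=False, fill_value=0)

points = np.random.uniform(0, 7, size=(24, 3))   # 24 arbitrary query points
values = interp(points).reshape(4, 6)            # reshape to the desired output grid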
Example #6
def crustal_model_files(alt = [200, 1000], anomaly = 'Global', lim = [0., 360., -90., 90.], binsize = 0.1):
    """
    Reads the .bin IDL files of the crustal magnetic field model (Langlais) for a range of altitudes and creates a function based on a linear interpolation.

    Parameters:
        alt: 2-element array, optional
            The array containing the altitude range. Default is [200, 1000] km.

        anomaly: string, optional
            The anomaly index, e.g., A1, A2, A6, etc. This string is used to find the directory where the model matrices are located. Default is 'Global'.

        lim: 4-element array, optional
            An array containing the limits for longitude and latitude data, in the order [lon_min, lon_max, lat_min, lat_max]. Default is the whole range of Mars.

        binsize: double, optional
            The size of the lon and lat bins (must be the same size). Default is 0.1 degrees.

    Returns:
        A function and a matrix containing the data.
    """
    longitude = np.linspace(lim[0], lim[1], int((lim[1] - lim[0]) / binsize + 1))
    latitude = np.linspace(lim[2], lim[3], int((lim[3] - lim[2]) / binsize + 1))
    altitude = np.linspace(alt[0], alt[1], int(alt[1] - alt[0] + 1))
    br = np.empty((len(longitude), len(latitude), len(altitude)))
    
    for i in range(len(altitude)):
        h = int(i + alt[0])
        data = sp.io.readsav('/home/oliveira/ccati_mexuser/LANGLAIS_Matrices/'+anomaly+'/LANGLAIS_BR_ALT_' + \
                             str(h) + '_RES_01.bin')
        br[:, :, i] = data['zbins'].T
    
    fn = rgi((longitude, latitude, altitude), br)
       
    return fn, br
Example #7
def GetInterpolater(hdulist, E,species):
    "Returns a regulargridinterpolator object which can be called as interp((r,z),method='linear')."
    
    # Returns Ebin nor nuclei output file assuming E=10*1.2^i in Kin. Energy in MeV
    EBin = int(np.round(np.log(E/10.)/np.log(1.2)))
    
    if species == 'proton':
        "Primary + Secondary Protons."
        vals = np.sum(hdulist[0].data[5:7,EBin],axis=0)
    if species == 'antiproton':
        "Secondary + Tertiary Antiproton"
        vals = np.sum(hdulist[0].data[3:5,EBin],axis=0)
    if species == 'electron':
        "Primary + Secondary Electrons."
        vals = hdulist[0].data[0,EBin]
    if species == 'positron':
        "Secondary + Tertiary Positron"
        vals = np.sum(hdulist[0].data[1:3,EBin],axis=0)
        
    # Build Grid for interpolator based on FITS header
    height = -hdulist[0].header['CRVAL2']
    radius = hdulist[0].header['CDELT1']*hdulist[0].header['NAXIS1']
    grid_r = np.linspace(0,radius,hdulist[0].header['NAXIS1'])
    grid_z = np.linspace(-height, height,hdulist[0].header['NAXIS2'])
    interp = rgi((grid_r,grid_z),np.transpose(vals),fill_value=np.float32(0), method='linear',bounds_error=False)
    return interp
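
A synthetic sketch of the grid construction above (no FITS file needed); the value array is transposed so its axis order matches the (r, z) order of the grid tuple:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

grid_r = np.linspace(0, 20, 41)
grid_z = np.linspace(-4, 4, 81)
vals = np.random.rand(81, 41)       # stored as (z, r), like the FITS cube
interp = rgi((grid_r, grid_z), np.transpose(vals),
             fill_value=np.float32(0), method='linear', bounds_error=False)
print(interp((5.0, 1.5)))           # value at r = 5, z = 1.5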
Example #8
    def bp_smear(im_r):
        im_hat, R = im_r
        '''
        # only necessary on windows
        import numpy as np
        N = im_hat.shape[0]
        from scipy.interpolate import RegularGridInterpolator as rgi
        # set the frequency range
        if (N % 2 == 0):
            freq_range = np.arange(-N/2, N/2)
        else:
            freq_range = np.arange(-(N-1)/2, (N+1)/2)

        # creating the sample grid
        sample_grid = np.array(np.meshgrid(freq_range, freq_range, freq_range)).T
        '''
        '''rotating the sample_grid by multiplying by rotation matrix transpose
        Since R is orthonormal, this is the same as inverting
        Multiplying by R transpose gives us the coordinates in the local basis
        '''
        local_x, local_y, local_z = np.tensordot(R.T, sample_grid, axes=(1, 3))
        # smearing on the local_z coordinates
        local_smear = np.sinc(local_z / N)

        # interpolator for the local_x and local_y coordinates; interpolates on the FT
        local_interpolator = rgi((freq_range, freq_range), im_hat, bounds_error=False, fill_value=0)
        # local back projection
        local_backproj_hat = local_interpolator(np.stack((local_x, local_y), axis=3)) * local_smear
        print('completed smear')
        return local_backproj_hat, local_smear
    def run_sample(self, filepath):
        """
        Run sampling.
        """
        timer = common.Timer()
        Rs = self.get_views()

        # As rendering might be slower, we wait for rendering to finish.
        # This allows to run rendering and fusing in parallel (more or less).

        depths = common.read_hdf5(filepath)

        timer.reset()
        tsdf = self.fusion(depths, Rs)

        xs = np.linspace(-0.5, 0.5, tsdf.shape[0])
        ys = np.linspace(-0.5, 0.5, tsdf.shape[1])
        zs = np.linspace(-0.5, 0.5, tsdf.shape[2])
        tsdf_func = rgi((xs, ys, zs), tsdf)

        modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
        points = self.get_random_points(tsdf)
        values = tsdf_func(points)
        t_loc, t_scale = self.get_transform(modelname)

        occupancy = (values <= 0.)
        out_file = self.get_outpath(filepath)
        np.savez(out_file, points=points, occupancy=occupancy, loc=t_loc, scale=t_scale)

        print('[Data] wrote %s (%f seconds)' % (out_file, timer.elapsed()))
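
A self-contained sketch of the TSDF sampling step above, with a random volume standing in for the fused output:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

tsdf = np.random.randn(32, 32, 32)             # stand-in for the fused TSDF
xs = np.linspace(-0.5, 0.5, tsdf.shape[0])
ys = np.linspace(-0.5, 0.5, tsdf.shape[1])
zs = np.linspace(-0.5, 0.5, tsdf.shape[2])
tsdf_func = rgi((xs, ys, zs), tsdf)

points = np.random.uniform(-0.5, 0.5, size=(1000, 3))
occupancy = tsdf_func(points) <= 0.0           # inside/outside label per point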
Example #10
 def find_best(im_i, im_j):
     im_i_f = rgi((grid_range, grid_range), im_i, bounds_error=False, fill_value=0)
     im_j_f = rgi((grid_range, grid_range), im_j, bounds_error=False, fill_value=0)
     # tracks the best (theta, psi) angle pair and its quality
     best_lines = [None, -np.inf]
     for theta in angles:
         for psi in angles:
             li = im_i_f(grid_range[:, None] * np.array([np.cos(theta), np.sin(theta)], None))
             lj = im_j_f(grid_range[:, None] * np.array([np.cos(psi), np.sin(psi)], None))
             forward = np.dot(li / np.linalg.norm(li), lj / np.linalg.norm(lj))
             if forward > best_lines[1]:
                 best_lines[0] = (psi, theta)
                 best_lines[1] = forward
             backward = np.dot(np.flipud(li), lj)
             if backward > best_lines[1]:
                 best_lines[0] = (np.pi + psi, theta)
                 best_lines[1] = backward
     return best_lines[0]
Example #11
 def get_regular_grid_interpolator(self, rgi_file):
     #SRC
     x, y, band_data = self.get_gsw_information(rgi_file)
     interpolator = rgi(points=(y[::-1], x),
                        values=np.flip(band_data, 0),
                        method='nearest',
                        bounds_error=False,
                        fill_value=255)
     del x, y, band_data
     return interpolator
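
A synthetic sketch of the flipped-axis pattern used above: when a raster's y coordinates come in descending order, both the axis and the value array are reversed so rgi receives ascending points:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

y = np.linspace(10, 0, 11)          # descending, as read from the raster
x = np.linspace(0, 5, 6)
band_data = np.random.randint(0, 255, size=(11, 6)).astype(float)
interpolator = rgi(points=(y[::-1], x),
                   values=np.flip(band_data, 0),
                   method='nearest',
                   bounds_error=False,
                   fill_value=255)
print(interpolator([[3.2, 1.7]]))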
def reinterp_to_grid(data, old_Xgrid, new_Ns, Ls):
    xg, yg, zg = old_Xgrid
    xg, yg, zg = pad_grid(xg), pad_grid(yg), pad_grid(zg)
    data_interp = rgi((zg, yg, xg), pad_array(data))

    Zg_hires, Yg_hires, Xg_hires = reinterp_generate_grid(new_Ns,
                                                          Ls,
                                                          return_mesh=1,
                                                          pad=0)
    pts = np.array([Zg_hires.ravel(), Yg_hires.ravel(), Xg_hires.ravel()]).T
    return data_interp(pts).reshape(*new_Ns)
Example #13
def project_fst_parallel(mol, orientations, return_hat=False, pad=0):
    """
    works like project_fst but takes iterable of rotation matrices and runs in parallel
    does not work on Windows
    """
    # shape of data, apply desired padding and find good size for FFT
    N = next_fast_len(mol.shape[0] + pad)

    # set the frequency range
    if N % 2 == 0:
        w = np.arange(-N / 2, N / 2)
    else:
        w = np.arange(-(N - 1) / 2, (N + 1) / 2)

    # pad roughly evenly
    mol = np.pad(mol, (int(np.ceil((N - mol.shape[0]) / 2)), int(np.floor((N - mol.shape[0]) / 2))),
                 mode='constant')
    # coordinate grid
    omega = np.meshgrid(w, w, w, indexing='ij')

    # Fourier transform
    rho_hat = np.fft.fftshift(np.fft.fftn(mol))
    rho_hat *= np.sign((1 - (np.abs(omega[0] + omega[1] + omega[2]) % 2) * 2)) / N ** 3
    del mol

    # create sampling grid
    eta_x, eta_y = np.meshgrid(w, w, indexing='ij')
    eta_x = eta_x[:, :, None]
    eta_y = eta_y[:, :, None]

    # interpolation function
    rho_hat_f = rgi((w, w, w), rho_hat, bounds_error=False, fill_value=0)

    def project_(R):
        grid = eta_x * R.T[0] + eta_y * R.T[1]

        # values of data's FFT interpolated to points on slice, tweak dimensions to make broadcasting work
        im_hat = rho_hat_f(grid)[:, :, None]

        # scaling factor
        im_hat *= np.sign((1 - (np.abs(eta_x + eta_y) % 2) * 2)) * N ** 2
        # returns im_hat if return_hat argument is true
        if return_hat:
            return im_hat

        # apply inverse FFT to translate back to original space
        im = np.real(np.fft.ifftn(np.fft.ifftshift(im_hat[:, :, 0])))

        # memory saving stuff
        return im

    with Pool() as p:
        images = p.map(project_, orientations)
    return list(images)
Example #14
def init_multigrid(proj, geo, alpha, alg):
    # WARNING: This takes a lot of memory!
    if alg == 'SART':
        italg = tigre.algorithms.sart
    if alg == 'SIRT':
        italg = tigre.algorithms.sirt
    finalsize = geo.nVoxel

    maxval = max(proj.ravel())
    minval = min(proj.ravel())

    # Start with 16 (raise this for larger images)
    geo.nVoxel = np.array([16, 16, 16])
    geo.dVoxel = geo.sVoxel / geo.nVoxel
    if (geo.nVoxel > finalsize).all():
        return np.zeros(finalsize, dtype=np.float32)
    niter = 100
    initres = np.zeros(geo.nVoxel, dtype=np.float32)
    while (geo.nVoxel != finalsize).all():
        geo.dVoxel = geo.sVoxel / geo.nVoxel

        initres = italg(proj, geo, alpha, niter, init=initres, verbose=False)

        # get new dims(should be a way to do this more efficiently).

        geo.nVoxel = geo.nVoxel * 2
        geo.nVoxel[geo.nVoxel > finalsize] = finalsize[geo.nVoxel > finalsize]
        geo.dVoxel = geo.sVoxel / geo.nVoxel
        (x, y, z) = (np.linspace(minval,
                                 maxval,
                                 geo.nVoxel[0] // 2,
                                 dtype=np.float32),
                     np.linspace(minval,
                                 maxval,
                                 geo.nVoxel[1] // 2,
                                 dtype=np.float32),
                     np.linspace(minval,
                                 maxval,
                                 geo.nVoxel[2] // 2,
                                 dtype=np.float32))

        # evaluate the function sart at the points xv, yv, zv

        xv, yv, zv = [
            tile_array(tile_array(x, 2), geo.nVoxel[0]**2),
            tile_array(tile_array(y, 2), geo.nVoxel[0]**2),
            tile_array(tile_array(z, 2), geo.nVoxel[0]**2)
        ]

        initres = rgi((x, y, z), initres)(np.column_stack((xv, yv, zv)))
        initres = initres.reshape(geo.nVoxel)

    return initres
def interp3(xrange, yrange, zrange, v, xi, yi, zi, **kwargs):
    #http://stackoverflow.com/questions/21836067/interpolate-3d-volume-with-numpy-and-or-scipy
    #from numpy import array
    from scipy.interpolate import RegularGridInterpolator as rgi

    x = np.arange(xrange[0],xrange[1])
    y = np.arange(yrange[0],yrange[1])
    z = np.arange(zrange[0],zrange[1])
    interpolator = rgi((x,y,z), v, **kwargs)
    
    pts = np.array([np.reshape(xi,(-1)), np.reshape(yi,(-1)), np.reshape(zi,(-1))]).T    
    Vi = interpolator(pts)
    return np.reshape(Vi, np.shape(xi))        
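
A quick usage sketch for the MATLAB-style interp3 wrapper above (assuming that definition is in scope and numpy is imported as np):

v = np.random.rand(4, 5, 6)
xi, yi, zi = np.meshgrid(np.linspace(0, 3, 7),
                         np.linspace(0, 4, 9),
                         np.linspace(0, 5, 11), indexing='ij')
vi = interp3((0, 4), (0, 5), (0, 6), v, xi, yi, zi)
print(vi.shape)    # (7, 9, 11), matching the shape of xi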
Example #16
def init_multigrid(proj, geo, alpha,alg):
    # WARNING: This takes a lot of memory!
    if alg=='SART':
        italg = tigre.algorithms.sart
    if alg=='SIRT':
        italg = tigre.algorithms.sirt
    finalsize = geo.nVoxel

    maxval= max(proj.ravel())
    minval = min(proj.ravel())


    # Start with 16 (raise this for larger images)
    geo.nVoxel = np.array([16, 16, 16])
    geo.dVoxel = geo.sVoxel / geo.nVoxel
    if (geo.nVoxel > finalsize).all():
        return np.zeros(finalsize, dtype=np.float32)
    niter = 100
    initres = np.zeros(geo.nVoxel, dtype=np.float32)
    while (geo.nVoxel != finalsize).all():
        geo.dVoxel = geo.sVoxel / geo.nVoxel

        initres = italg(proj, geo, alpha, niter, init=initres,verbose=False)

        # get new dims(should be a way to do this more efficiently).

        geo.nVoxel = geo.nVoxel * 2
        geo.nVoxel[geo.nVoxel > finalsize] = finalsize[geo.nVoxel > finalsize]
        geo.dVoxel = geo.sVoxel / geo.nVoxel
        (x, y, z) = (
            np.linspace(minval, maxval, geo.nVoxel[0]//2, dtype=np.float32),
            np.linspace(minval, maxval, geo.nVoxel[1]//2, dtype=np.float32),
            np.linspace(minval, maxval, geo.nVoxel[2]//2, dtype=np.float32)
        )

        # evaluate the function sart at the points xv, yv, zv

        xv, yv, zv = [tile_array(tile_array(x, 2), geo.nVoxel[0]**2),
                      tile_array(tile_array(y, 2), geo.nVoxel[0]**2),
                      tile_array(tile_array(z, 2), geo.nVoxel[0]**2)]

        initres = rgi((x, y, z), initres)(np.column_stack((xv, yv, zv)))
        initres = initres.reshape(geo.nVoxel)

    return initres
Example #17
def transform_by_matrix(voxel, matrix, offset):
    """
    transform a voxel by matrix, then apply an offset
    Note that the offset is applied after the transformation
    """
    sx, sy, sz = voxel.shape
    gridx, gridy, gridz = _get_centeralized_mesh_grid(sx, sy, sz)  # the coordinate grid of the new voxel
    mesh = np.array([gridx.reshape(-1), gridy.reshape(-1), gridz.reshape(-1)])
    mesh_rot = np.dot(np.linalg.inv(matrix), mesh) + np.array([sx / 2, sy / 2, sz / 2]).reshape(3, 1)
    mesh_rot = mesh_rot - np.array(offset).reshape(3, 1)    # grid for new_voxel should get a negative offset

    interp = rgi((np.arange(sx), np.arange(sy), np.arange(sz)), voxel,
                 method='linear', bounds_error=False, fill_value=0)
    new_voxel = interp(mesh_rot.T).reshape(sx, sy, sz)  # todo: move mesh to center
    return new_voxel
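
A self-contained sketch of the same resampling pattern on synthetic data: map a coordinate mesh through the inverse matrix, then sample with rgi (the centering and offset handling of transform_by_matrix are omitted here):

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

voxel = np.random.rand(8, 8, 8)
sx, sy, sz = voxel.shape
gx, gy, gz = np.meshgrid(np.arange(sx), np.arange(sy), np.arange(sz), indexing='ij')
mesh = np.array([gx.ravel(), gy.ravel(), gz.ravel()])

matrix = np.eye(3)                  # identity rotation: output equals input
mesh_rot = np.dot(np.linalg.inv(matrix), mesh)

interp = rgi((np.arange(sx), np.arange(sy), np.arange(sz)), voxel,
             method='linear', bounds_error=False, fill_value=0)
new_voxel = interp(mesh_rot.T).reshape(sx, sy, sz)
assert np.allclose(new_voxel, voxel)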
Example #18
def project_fst(mol, R, return_hat=False, pad=0):
    """
    simulates EM results given molecule mol and rotation matrix R
    if return_hat is True, returns FT of image
    pad specifies the number of zeros to pad edges with, default is 0
    """
    # shape of data
    N = next_fast_len(mol.shape[0] + pad)

    # set the frequency range
    if N % 2 == 0:
        w = np.arange(-N / 2, N / 2)
    else:
        w = np.arange(-(N - 1) / 2, (N + 1) / 2)

    # pad roughly evenly
    mol = np.pad(mol, (int(np.ceil((N - mol.shape[0]) / 2)), int(np.floor((N - mol.shape[0]) / 2))),
                 mode='constant')
    # coordinate grid
    omega = np.meshgrid(w, w, w)

    # Fourier transform
    rho_hat = np.fft.fftshift(np.fft.fftn(mol))
    rho_hat *= np.sign((1 - (np.abs(omega[0] + omega[1] + omega[2]) % 2) * 2)) / N ** 3

    # create sampling grid
    eta_x, eta_y = np.meshgrid(w, w)
    eta_x = eta_x[:, :, None]
    eta_y = eta_y[:, :, None]
    grid = eta_x * R.T[0] + eta_y * R.T[1]

    # interpolation function
    rho_hat_f = rgi((w, w, w), rho_hat, bounds_error=False, fill_value=0)

    # values of data's FFT interpolated to points on slice, tweak dimensions to make broadcasting work
    im_hat = rho_hat_f(grid)[:, :, None]

    # scaling factor
    im_hat *= np.sign((1 - (np.abs(eta_x + eta_y) % 2) * 2)) * N ** 2
    # returns im_hat if return_hat argument is true
    if return_hat:
        return im_hat

    # apply inverse FFT to translate back to original space
    im = np.real(np.fft.ifftn(np.fft.ifftshift(im_hat[:, :, 0])))

    return im
Example #19
def _interp3D(xs, ys, zs, values, shape):
    '''
    3-D interpolation on a non-uniform grid, where z is non-uniform but x, y are uniform
    '''
    from scipy.interpolate import RegularGridInterpolator as rgi

    # First interpolate uniformly in the z-direction
    flipflag = False
    Nfac = 10
    NzLevels = Nfac * shape[2]
    nx, ny = shape[:2]
    zmax = np.nanmax(zs)
    zmin = np.nanmin(zs)
    zshaped = np.reshape(zs, shape)
    # if zs are upside down, flip to interpolate vertically
    if np.nanmean(zshaped[..., 0]) > np.nanmean(zshaped[..., -1]):
        flipflag = True
        zshaped = np.flip(zshaped, axis=2)
        values = np.flip(values, axis=2)

    dz = (zmax - zmin) / NzLevels

    # TODO: change to use mean height levels
    zvalues = zmin + dz * np.arange(NzLevels)

    new_zs = np.tile(zvalues, (nx, ny, 1))
    values = fillna3D(values)

    new_var = interp_along_axis(zshaped,
                                new_zs,
                                values,
                                axis=2,
                                method='linear',
                                pad=False)
    # This assumes that the input data is in the correct projection; i.e.
    # the native weather grid projection
    xvalues = np.unique(xs)
    yvalues = np.unique(ys)

    # TODO: temporarily switch x, y values until can confirm
    interp = rgi((yvalues, xvalues, zvalues),
                 new_var,
                 bounds_error=False,
                 fill_value=np.nan)
    return interp
Example #20
def bguSlice(gamma, IF, EIF):
    ih = IF.shape[0]
    iw = IF.shape[1]
    gh = gamma.shape[0]
    gw = gamma.shape[1]
    gd = gamma.shape[2]
    ao = gamma.shape[3]
    ai = gamma.shape[4]
    x = repmat(np.arange(0, iw), ih, 1)
    y = repmat(np.arange(0, ih).T, iw, 1).T
    bg_coord_x = (x + 0.5) * (gw - 1) / iw
    bg_coord_y = (y + 0.5) * (gh - 1) / ih

    bg_coord_z = EIF * (gd - 1)

    bg_coord_xx = bg_coord_x + 1
    bg_coord_yy = bg_coord_y + 1
    bg_coord_zz = bg_coord_z + 1

    xx = np.linspace(0, gh, gh)
    yy = np.linspace(0, gw, gw)
    zz = np.linspace(0, gd, gd)

    affine_model = get_td_list(ai, ao)
    for j in range(ai):
        for i in range(ao):
            my_inter_fun = rgi((xx, yy, zz),
                               gamma[:, :, :, i, j],
                               bounds_error=False)
            pts = np.array([bg_coord_xx, bg_coord_yy, bg_coord_zz]).T
            # print(np.max(gamma))
            affine_model[i][j] = my_inter_fun(pts).T
            print('[INFO]>>> affine_model[{}][{}].shape:{}'.format(
                i, j, affine_model[i][j].shape))
    ll = np.ones((ih, iw))
    ll_ = np.array([IF[:, :, 0], IF[:, :, 1], IF[:, :, 2], ll])
    input1 = ll_.swapaxes(0, 1).swapaxes(1, 2)
    output = 0
    for i in range(ai):
        new_ = np.array(
            [affine_model[0][i], affine_model[1][i], affine_model[2][i]])
        affine_model2 = new_.swapaxes(0, 1).swapaxes(1, 2)
        print('[INFO]>>> affine_model2.shape:{}'.format(affine_model2.shape))
        output = output + (affine_model2 * input1[:, :, i][:, :, None])
    return output
Example #21
    def InturpEle(self, matrix):
        #interpolates initial elevation data to be of given size
        datasize = matrix.shape
        yres = (datasize[0] - 1) / self.yres
        xres = (datasize[1] - 1) / self.xres
        x, y = np.linspace(0, datasize[1] - 1,
                           datasize[1]), np.linspace(0, datasize[0] - 1,
                                                     datasize[0])
        grid = rgi((y, x), matrix)

        output = np.empty((self.yres, self.xres))
        for i in range(self.yres):
            yloc = i * yres
            for j in range(self.xres):
                xloc = j * xres
                output[i, j] = grid([yloc, xloc])

        return output
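
Since rgi accepts an (N, 2) array of points, the per-pixel double loop above can also be done in one call; a standalone sketch with made-up sizes:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

matrix = np.random.rand(20, 30)     # stand-in elevation data
yres_out, xres_out = 50, 80         # hypothetical target resolution

y = np.linspace(0, matrix.shape[0] - 1, matrix.shape[0])
x = np.linspace(0, matrix.shape[1] - 1, matrix.shape[1])
grid = rgi((y, x), matrix)

yloc = np.arange(yres_out) * (matrix.shape[0] - 1) / yres_out
xloc = np.arange(xres_out) * (matrix.shape[1] - 1) / xres_out
Y, X = np.meshgrid(yloc, xloc, indexing='ij')
output = grid(np.column_stack([Y.ravel(), X.ravel()])).reshape(yres_out, xres_out)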
Example #22
def test_regular_grid_interpolator():
    points = [np.linspace(0, 1, 100) for i in range(3)]
    M = np.random.normal(size=[100] * 3)
    y = tuple([np.random.uniform(size=100) for i in range(3)])
    r = RegularGridInterpolator(tuple([tf.constant(p) for p in points]),
                                tf.constant(M),
                                method='linear')
    from scipy.interpolate import RegularGridInterpolator as rgi
    r_ = rgi(points, M, method='linear', fill_value=None, bounds_error=False)
    sess = tf.Session()
    u = sess.run(r(y))
    sess.close()
    u_ = r_(y)
    #    import pylab as plt
    #    print(u-u_)
    #    plt.hist(u-u_,bins=100)
    #    plt.show()
    #print(u-u_)
    assert np.all(np.isclose(u, u_, atol=1e-4))
Example #23
    def _brgi(self):
        """
        Regular grid interpolator for B.
        """
        from scipy.interpolate import RegularGridInterpolator as rgi
        if self._rgi is not None:
            return self._rgi

        # - (rho,s,phi) coordinates:
        rho = self.grid.rg
        s = self.grid.sg
        phi = self.grid.pg
        br, bth, bph = self.bg

        # Because we need the cartesian grid to stretch just beyond r=rss,
        # add an extra dummy layer of magnetic field pointing radially outwards
        rho = np.append(rho, rho[-1] + 0.01)
        extras = np.ones(br.shape[0:2] + (1, ))
        br = np.concatenate((br, extras), axis=2)
        bth = np.concatenate((bth, 0 * extras), axis=2)
        bph = np.concatenate((bph, 0 * extras), axis=2)

        # - convert to Cartesian components and make interpolator on
        # (rho,s,phi) grid:
        ph3, s3, rh3 = np.meshgrid(phi, s, rho, indexing='ij')
        sin_th = np.sqrt(1 - s3**2)
        cos_th = s3
        sin_ph = np.sin(ph3)
        cos_ph = np.cos(ph3)

        # Directly stack the expressions below, to save a bit of memory
        # bx = (sin_th * cos_ph * br) + (cos_th * cos_ph * bth) - (sin_ph * bph)
        # by = (sin_th * sin_ph * br) + (cos_th * sin_ph * bth) + (cos_ph * bph)
        # bz = (cos_th * br) - (sin_th * bth)
        bstack = np.stack(
            ((sin_th * cos_ph * br) + (cos_th * cos_ph * bth) - (sin_ph * bph),
             (sin_th * sin_ph * br) + (cos_th * sin_ph * bth) + (cos_ph * bph),
             (cos_th * br) - (sin_th * bth)),
            axis=-1)

        self._rgi = rgi((phi, s, rho), bstack, bounds_error=False)
        return self._rgi
Example #24
def radar2model_grid(radar_map, radar_lat, radar_lon, radar_height, model_lats,
                     model_lons, vertical_levels):

    my_interpolating_function = rgi((radar_lat, radar_lon, radar_height),
                                    radar_map,
                                    fill_value=-999.0)

    radar_at_model_space = numpy.empty(
        (len(model_lats), len(model_lons), len(vertical_levels)))
    for i, cmlats in enumerate(model_lats):
        for j, cmlons in enumerate(model_lons):
            for k, cl in enumerate(vertical_levels):
                if cmlats < radar_lat.max() and cmlats > radar_lat.min(
                ) and cmlons < radar_lon.max() and cmlons > radar_lon.min():
                    pts = numpy.array([[cmlats, cmlons, cl]])
                    vi = my_interpolating_function(pts)
                else:
                    vi = -999.0

                radar_at_model_space[i, j, k] = vi

    return radar_at_model_space
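
With bounds_error=False, the manual min/max checks in the triple loop above can be folded into the interpolator itself; a synthetic sketch of that variant:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

radar_lat = np.linspace(40, 45, 51)
radar_lon = np.linspace(10, 15, 51)
radar_height = np.linspace(0, 10, 21)
radar_map = np.random.rand(51, 51, 21)

f = rgi((radar_lat, radar_lon, radar_height), radar_map,
        bounds_error=False, fill_value=-999.0)   # -999 outside the radar domain

model_lats = np.linspace(39, 46, 20)             # partly outside the radar domain
model_lons = np.linspace(11, 14, 20)
levels = np.linspace(0, 8, 10)
La, Lo, Le = np.meshgrid(model_lats, model_lons, levels, indexing='ij')
pts = np.column_stack([La.ravel(), Lo.ravel(), Le.ravel()])
radar_at_model_space = f(pts).reshape(La.shape)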
Example #25
def mask_seafloor(D):
    """
    Use known bathymetry to mask Sv and power

    Bit too chunky without more work
    """

    # Load in gridded bathymetry
    bathy_file = '/home/hugke729/PhD/Data/Penny_Strait/Penny_strait.mat'
    Dz = loadmat(bathy_file)
    lon, lat, z = [Dz['topo' + key] for key in ['lon', 'lat', 'depth']]
    z_interp = rgi((lon[:, 0], lat[0, :]), z)

    z_along_line = z_interp(np.c_[D['lon'], D['lat']])

    Z = D['z'][:, np.newaxis]*np.ones_like(D['Sv'])
    z_below_bot = Z > z_along_line

    D['power'] = ma.masked_where(z_below_bot, D['power'])
    D['Sv'] = ma.masked_where(z_below_bot, D['Sv'])

    return D
Example #26
def remap_data_on_query_points(data1,xyz2):
# this function queries the dataset stored in the data1 dictionary at the 
# locations specified by the grid points xyz2 and stores the data in the
# variable dataq

    print "interpolating data to a uniform grid\n"
    x1 = np.array(data1['x'])
    y1 = np.array(data1['y'])
    z1 = np.array(data1['z'])

    x2 = np.array(xyz2['x'])
    y2 = np.array(xyz2['y'])
    z2 = np.array(xyz2['z'])

    dataq = {}

    # construct a new structured volume with uniform and equal spacing along
    # all the three axes
    xq,yq,zq = np.meshgrid(x2,y2,z2,indexing = 'ij')

    # create array of string to query the data dictionary
#    var = ['rho', 'u', 'v', 'w', 'p', 'T']

    # query data for various flow parameters and store in dataq
#    for current_var in var:        
    current_var = 'rho'
    my_interpolating_function = rgi((x1,y1,z1), np.array(data1[current_var]))
    dataq[current_var] = my_interpolating_function(np.array([xq,yq,zq]).T)

    # perform transpose to maintain shape
    dataq[current_var] = np.transpose(dataq[current_var])
    
    # store the grid points
    dataq['x'] = x2
    dataq['y'] = y2
    dataq['z'] = z2
    
    return dataq
Example #27
def grid_align(V,
               T,
               V_scale=None,
               V_origin=None,
               bounds_error=False,
               fill_value=np.nan):
    assert (len(V.shape) == len(T.shape))
    if V_scale is None:
        V_scale = np.array(T.shape) / np.array(V.shape)
    if V_origin is None:
        V_origin = np.zeros(len(V.shape))
    assert (not np.any(V_scale == 0))
    V_axes = [
        np.linspace(V_origin[ax],
                    V_origin[ax] + (V.shape[ax] - 1) * V_scale[ax],
                    num=V.shape[ax]) for ax in range(len(V.shape))
    ]
    interp_func = rgi(V_axes,
                      V,
                      bounds_error=bounds_error,
                      fill_value=fill_value)
    points = np.argwhere(np.ones_like(T))
    return interp_func(points).reshape(T.shape)
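
A short usage sketch for grid_align (assuming the function above, numpy as np, and rgi are in scope): resample an 8x8 array onto the index grid of a 16x16 target.

V = np.random.rand(8, 8)
T = np.zeros((16, 16))
aligned = grid_align(V, T)   # shape (16, 16); NaN where T extends past V's last sample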
Example #28

npz = np.load(
    'UNSW4_1st withBG TEST ephiQWM UNSW4_1st structure 2nm 800x800 - 10-4-12-4.npz'
)
xs = npz['xs']
ys = npz['ys']
zs = npz['zs']
ephi = 10**6 * npz['ephi']

interpolate = rgi(points=(xs, ys, zs), values=ephi, bounds_error=False)


def V_hrl(point):
    return interpolate(point)[0] + V_step(point[2])


def V_doubledot(point):
    return double_HO(point[0]) + V_HO1d(point[1]) + V_step(point[2])


def dV_doubledot(point):
    return np.add(np.add(dV_double_HO(point[0]), dV_HO1d(point[1])),
                  dV_step(point[2]))

Example #29
def find_reduced_dim(datadir, imgdir):

    print('making figure 3 ...')

    # Set up figure
    fig = plt.figure()

    # Load data
    txtdir = datadir + 'l1_l2/txt_files/'

    TC_R_avg_imp = np.loadtxt(txtdir + "/TC_R_avg_imp.txt")
    TC_R_avg_unalt = np.loadtxt(txtdir + "/TC_R_avg_unalt.txt")
    TC_R_rms_imp = np.loadtxt(txtdir + "/TC_R_rms_imp.txt")
    TC_R_rms_unalt = np.loadtxt(txtdir + "/TC_R_rms_unalt.txt")

    TC_P1_avg_imp = np.loadtxt(txtdir + "/TC_P1_avg_imp.txt")
    TC_P1_avg_unalt = np.loadtxt(txtdir + "/TC_P1_avg_unalt.txt")
    TC_P1_rms_imp = np.loadtxt(txtdir + "/TC_P1_rms_imp.txt")
    TC_P1_rms_unalt = np.loadtxt(txtdir + "/TC_P1_rms_unalt.txt")

    TC_P2_avg_imp = np.loadtxt(txtdir + "/TC_P2_avg_imp.txt")
    TC_P2_avg_unalt = np.loadtxt(txtdir + "/TC_P2_avg_unalt.txt")
    TC_P2_rms_imp = np.loadtxt(txtdir + "/TC_P2_rms_imp.txt")
    TC_P2_rms_unalt = np.loadtxt(txtdir + "/TC_P2_rms_unalt.txt")

    TC_A_avg_imp = np.loadtxt(txtdir + "/TC_A_avg_imp.txt")
    TC_A_avg_unalt = np.loadtxt(txtdir + "/TC_A_avg_unalt.txt")
    TC_A_rms_imp = np.loadtxt(txtdir + "/TC_A_rms_imp.txt")
    TC_A_rms_unalt = np.loadtxt(txtdir + "/TC_A_rms_unalt.txt")

    # Data information
    l2min = 0 / 64  # x min
    l2max = 32 / 64  # x max
    l2inc = 1 / 64  # x increment
    l1min = 0 / 64  # y min
    l1max = 32 / 64  # y max
    l1inc = 1 / 64  # y increment

    l2 = np.arange(l2min, l2max + l2inc, l2inc)
    l1 = np.arange(l1min, l1max + l1inc, l1inc)

    L2, L1 = np.meshgrid(l2, l1, indexing='ij')

    # Make 2D array of compression ratio
    L0 = 1.0 - L1 - L2
    R = 16 * L0 + 4 * L1 + L2
    print(R)

    # R = 16*L0**2 + 4*L1**2 + L2**2 + \
    #      4*L1*L2  + L1*L2   + L0*L2
    # print(R2)

    grid1 = AxesGrid(fig, (0.08, 0.54, 0.80, 0.35),
                     nrows_ncols=(1, 4),
                     axes_pad=0.1,
                     aspect=True,
                     label_mode="L",
                     share_all=True,
                     cbar_location="right",
                     cbar_mode="single",
                     cbar_size="7%",
                     cbar_pad="3%")

    grid2 = AxesGrid(fig, (0.08, 0.12, 0.80, 0.35),
                     nrows_ncols=(1, 4),
                     axes_pad=0.1,
                     aspect=False,
                     label_mode="L",
                     share_all=True,
                     cbar_location="right",
                     cbar_mode="none")

    # Get locations on fixed compression ratio
    ax = grid1[1].axes
    Rim = ax.contour(L2, L1, R, [6,8,10,12], origin='lower', \
        colors=('m','b','g','y'))
    # Rim = ax.contour(L2, L1, L0, [0.2,0.4,0.6,0.8], origin='lower', \
    #     colors=('m','b','g','y'))
    ax.clabel(Rim, Rim.levels, inline=True, inline_spacing=30, \
        fmt=r'$r = %i$', fontsize=8, rightside_up=True, \
        manual=[(0.1,0.1),(0.2,0.2),(0.3,0.3),(0.4,0.4)])
    xr6 = Rim.allsegs[0][0][:, 0]
    yr6 = Rim.allsegs[0][0][:, 1]
    xr8 = Rim.allsegs[1][0][:, 0]
    yr8 = Rim.allsegs[1][0][:, 1]
    xr10 = Rim.allsegs[2][0][:, 0]
    yr10 = Rim.allsegs[2][0][:, 1]
    xr12 = Rim.allsegs[3][0][:, 0]
    yr12 = Rim.allsegs[3][0][:, 1]

    # xr6  = np.linspace(0, 0.5, 9)
    # xr8  = np.linspace(0, 0.5, 9)
    # xr10 = np.linspace(0, 0.5, 9)
    # xr12 = np.linspace(0, 0.5, 9)
    # yr6  = np.linspace(1/8, 1/8, 9)
    # yr8  = np.linspace(2/8, 2/8, 9)
    # yr10 = np.linspace(3/8, 3/8, 9)
    # yr12 = np.linspace(4/8, 4/8, 9)

    xyr6 = np.stack((xr6, yr6), axis=1)
    xyr8 = np.stack((xr8, yr8), axis=1)
    xyr10 = np.stack((xr10, yr10), axis=1)
    xyr12 = np.stack((xr12, yr12), axis=1)

    Xr6, Yr6 = np.meshgrid(xr6, yr6, indexing='ij')
    Xr8, Yr8 = np.meshgrid(xr8, yr8, indexing='ij')
    Xr10, Yr10 = np.meshgrid(xr10, yr10, indexing='ij')
    Xr12, Yr12 = np.meshgrid(xr12, yr12, indexing='ij')

    for i in range(4):

        ax = grid1[i].axes

        if i == 0:
            num = TC_R_avg_imp
            den = TC_R_avg_unalt
            ax.set_title('Computing R')
        elif i == 1:
            num = TC_P1_avg_imp
            den = TC_P1_avg_unalt
            ax.set_title('Computing P1')
        elif i == 2:
            num = TC_P2_avg_imp
            den = TC_P2_avg_unalt
            ax.set_title('Computing P2')
        elif i == 3:
            num = TC_A_avg_imp
            den = TC_A_avg_unalt
            ax.set_title('Computing A')

        img = num / den
        # img = np.exp(num/den)
        # img = np.log10(np.sqrt(num/den))
        print(np.max(img))
        print(np.min(img))

        # line0 = ax.contour(L2, L1, img, [0.75, 0.9], origin='lower', colors=('k'), \
        #     linestyles=(':'))

        # x075 = line0.allsegs[0][0][:,0]
        # y075 = line0.allsegs[0][0][:,1]
        # x09  = line0.allsegs[1][0][:,0]
        # y09  = line0.allsegs[1][0][:,1]

        # xy075 = np.stack((x075, y075), axis=1)
        # xy09  = np.stack((x09,  y09),  axis=1)

        # Rim = ax.contour(L2, L1, L0, [0.2,0.4,0.6,0.8], origin='lower', \
        #     colors=('m','b','g','y'))
        # ax.clabel(line0, line0.levels, inline=True, inline_spacing=30, \
        #     fmt=r'$r = %i$', fontsize=8, rightside_up=True, \
        #     manual=[(0.1,0.1)])

        # xr8  = Rim.allsegs[1][0][:,0]
        # yr8  = Rim.allsegs[1][0][:,1]
        # xr10 = Rim.allsegs[2][0][:,0]
        # yr10 = Rim.allsegs[2][0][:,1]
        # xr12 = Rim.allsegs[3][0][:,0]
        # yr12 = Rim.allsegs[3][0][:,1]

        # im = ax.contourf(L2, L1, img, 100, origin='lower', \
        #     extent=[l2min,l2max,l1min,l1max], cmap='bwr', \
        #     vmin=-0.8, vmax=0.2)
        im = ax.contourf(L2, L1, img, 100, origin='lower', \
            extent=[l2min,l2max,l1min,l1max], cmap='bwr', \
            vmin=0, vmax=2)

        # im.set_clim(-0.8, 0.8)
        im.set_clim(0, 2)
        cbar = ax.cax.colorbar(im)
        # ax.cax.set_ylabel('$\log(TC_{im}/TC_{un})$')
        ax.cax.set_ylabel('$TC_{im}/TC_{un}$')

        # ax.set_xticks([ntmin,(ntmax+ntmin)/2,ntmax])
        # ax.set_yticks([l1min,(l1max+l1min)/2,l1max])
        ax.set_xticks(np.linspace(1 / 8, 3 / 8, 3))
        ax.set_yticks(np.linspace(1 / 8, 3 / 8, 3))
        ax.set_xticklabels([])
        ax.set_yticklabels(['1/8', '2/8', '3/8'])
        ax.set_ylabel(r'$\ell_1$')

        ax = grid2[i].axes

        # Get data along each compression ratio line
        f_l0 = rgi((l2, l1), L0, method='linear')
        f_l1 = rgi((l2, l1), L1, method='linear')
        f_l2 = rgi((l2, l1), L2, method='linear')

        # f075_l0  = f_l0(xy075)
        # f075_l1  = f_l1(xy075)
        # f075_l2  = f_l2(xy075)
        # print(f075_l0)
        # print(f075_l1)
        # print(f075_l2)

        # f09_l0  = f_l0(xy09)
        # f09_l1  = f_l1(xy09)
        # f09_l2  = f_l2(xy09)

        # best_vals, covar = curve_fit(fit_equation, (f075_l0,f075_l1,f075_l2), \
        #     np.linspace(0.75,0.75,len(f075_l0)) )
        # print('best_vals: {}'.format(best_vals))

        l0_1D = np.reshape(L0, (-1))
        l1_1D = np.reshape(L1, (-1))
        l2_1D = np.reshape(L2, (-1))
        img_1D = np.reshape(img, (-1))
        # print(len(l0_1D))

        # l0_1D = l0_1D[0::2]
        # l1_1D = l1_1D[0::2]
        # l2_1D = l2_1D[0::2]
        # img_1D = img_1D[0::2]

        best_vals, covar = curve_fit(fit_equation, (l0_1D, l1_1D, l2_1D),
                                     img_1D)
        print('best_vals: {}'.format(best_vals))
        print('error: {}'.format(np.sqrt(np.diag(covar))))
        print('covar: {}'.format(covar))

        best_vals, covar = curve_fit(fit_equation2, (l0_1D, l1_1D, l2_1D),
                                     img_1D)
        print('best_vals: {}'.format(best_vals))
        print('error: {}'.format(np.sqrt(np.diag(covar))))
        print('covar: {}'.format(covar))

        best_vals, covar = curve_fit(fit_equation3, (l0_1D, l1_1D, l2_1D),
                                     img_1D)
        print('best_vals: {}'.format(best_vals))
        print('error: {}'.format(np.sqrt(np.diag(covar))))
        print('covar: {}'.format(covar))

        # f09  = f(xyr8)
        # fr10 = f(xyr10)
        # fr12 = f(xyr12)

        # ax.plot(f075_l2,  f075_l0,  'm-')
        # ax.plot(f075_l2,  f075_l1,  'b')
        # ax.plot(f09_l2, f09_l0, 'm--')
        # ax.plot(f09_l2, f09_l1, 'b--')
        ax.grid('on')

        # ax.set_xticks([ntmin,(ntmax+ntmin)/2,ntmax])
        # ax.set_yticks([l1min,(l1max+l1min)/2,l1max])
        ax.set_xticks(np.linspace(1 / 8, 3 / 8, 3))
        # ax.set_yticks(np.linspace(1/8,3/8,3))
        ax.set_xticklabels(['1/8', '2/8', '3/8'])
        # ax.set_yticklabels(['1/8','2/8','3/8'])
        ax.set_xlabel(r'$\ell_2$')
        # ax.set_ylabel('$\ell_1$')
        ax.set_xlim(0, 0.5)

    # print('saving image ...')
    fig.set_size_inches(6.5, 3.25,
                        forward=True)  # figure size must be set here
    # plt.savefig(imgdir + 'fig3.png', dpi=300)

    # img = np.transpose(np.divide(num, den))
    # img = np.divide(num, den)
    # Xaxis, Yaxis = np.meshgrid(xaxis, yaxis, copy=False, indexing='ij')
    # cont_levs = 100
    # xmin = np.min(xaxis)
    # xmax = np.max(xaxis)
    # ymin = np.min(yaxis)
    # ymax = np.max(yaxis)

    # fig = plt.figure(clear=True)
    # ax = fig.add_subplot(1,1,1)

    print('\tdone with reduced dim')
Example #30
def __LOS_Gas_Grid(tNum, n_thread, l_max,b_max,res,H2,HI,H2_mult,r_min,r_max,z_step=0.02,H2_map='PEB'):
    """
    LOS Integration Kernel for Pohl, Englmaier, Bissantz 2008  (arXiv: 0712.4264)
    Grid_points are linearly interpolated in 3-dimensions.
    """    
    #==============================================
    # Integration Parameters
    #==============================================
    import tmp
    from importlib import reload
    reload(tmp)
    func = tmp.func
    R_solar = 8.5 # Sun at 8.5 kpc
    kpc2cm  = 3.08568e21
    z_start,z_stop,z_step = max(R_solar-r_max,0),R_solar+r_max,z_step # los start,stop,step-size in kpc
    
    # distances along the LOS
    zz = np.linspace(start=z_start,stop=z_stop,num=int(np.ceil((z_stop-z_start)/z_step)))
    deg2rad = np.pi/180.
    pi = np.pi
    # List of lat/long to loop over.
    bb = np.linspace(-b_max+res/2.,b_max+res/2.,int(np.ceil(2*b_max/res)))*deg2rad
    # divide according to thread
    stride = len(bb) // n_thread
    if tNum == n_thread-1: bb = bb[tNum*(stride):]
    else: bb = bb[tNum*stride:(tNum+1)*stride]
        
    ll = np.linspace(-l_max+res/2.,l_max+res/2.,int(np.ceil(2*l_max/res)))*deg2rad
    
    # Master projected skymap 
    proj_skymap = np.zeros(shape=(len(bb),len(ll)))
    
    #Prepare the interpolator based on the image info
    grid_x = np.linspace(-14.95, 14.95,300)
    grid_z = np.linspace(-.4875, .4875,40)
    H2_PEB_Interp = rgi((grid_z,grid_x,grid_x),H2_PEB,fill_value=np.float32(0), method='linear',bounds_error=False)
    grid_x = np.linspace(-50, 50,501)
    grid_z = np.linspace(-2, 2,51)
    HI_NS_Interp = rgi((grid_z,grid_x,grid_x),HI_NS,fill_value=np.float32(0), method='linear',bounds_error=False)
    X_CO_MAX = 100*np.ones(len(zz))
    
    # Loop latitudes 
    for bi in range(len(bb)):
        # loop longitude
        for lj in range(len(ll)):
            los_sum=0.
            l,b = ll[lj], bb[bi]
            # z in cylindrical coords
            z = zz*sin(b) 
            x,y = -zz*cos(l)*cos(b)+R_solar, +zz*sin(l)*cos(b)
            r_2d = sqrt(x**2+y**2)
            
            if H2==True:
                # Call the interpolator on the current set of points
                if H2_map=='PEB':
                    X_CO = 3e19*np.minimum( exp(r_2d/5.3),X_CO_MAX)
                    los_sum += H2_mult*sum(func(x,y,z)*H2_PEB_Interp((z,-y,x)))*z_step*kpc2cm
                elif H2_map=='NS': 
                    pass 
                else: raise Exception("Invalid H2 map chosen")
                     
            if HI==True:
                los_sum += sum(func(x,y,z)*HI_NS_Interp((z,y,x)))*z_step*kpc2cm
              
            proj_skymap[bi,lj] = los_sum 
    return proj_skymap
Example #31
def ext_calc(l, b, d, npoints=100):
    t = np.linspace(0, 1, npoints)
    star_pos = d * np.array([
        np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
        np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
        np.sin(b * np.pi / 180)
    ])

    if star_pos[0] > 1997.5:
        d = 1997.49 / (np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180))
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])
    if star_pos[0] < -1997.5:
        d = -1997.49 / (np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180))
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])
    if star_pos[1] > 1997.5:
        d = 1997.49 / (np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180))
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])
    if star_pos[1] < -1997.5:
        d = -1997.49 / (np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180))
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])
    if star_pos[2] > 297.5:
        d = 297.49 / np.sin(b * np.pi / 180)
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])
    if star_pos[2] < -297.5:
        d = -297.49 / np.sin(b * np.pi / 180)
        star_pos = d * np.array([
            np.cos(b * np.pi / 180) * np.cos(l * np.pi / 180),
            np.cos(b * np.pi / 180) * np.sin(l * np.pi / 180),
            np.sin(b * np.pi / 180)
        ])

    interpolation_points = np.array(
        [star_pos[0] * t, star_pos[1] * t, star_pos[2] * t]).transpose()

    interpolator = rgi((x, y, z), extmap)

    interpolated_vals = interpolator(interpolation_points)

    extout = sum(interpolated_vals) * (d / npoints)

    return extout
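
A self-contained sketch of the extinction-integration pattern above, with a synthetic cube replacing the global (x, y, z, extmap) arrays that ext_calc relies on:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

x = np.linspace(-2000, 2000, 81)
y = np.linspace(-2000, 2000, 81)
z = np.linspace(-300, 300, 25)
extmap = np.random.rand(81, 81, 25)      # made-up extinction density
interpolator = rgi((x, y, z), extmap)

l, b, d, npoints = 30.0, 10.0, 500.0, 100   # sight line: l, b in deg, d in pc
t = np.linspace(0, 1, npoints)
l_rad, b_rad = np.radians(l), np.radians(b)
ray = d * np.array([np.cos(b_rad) * np.cos(l_rad) * t,
                    np.cos(b_rad) * np.sin(l_rad) * t,
                    np.sin(b_rad) * t]).T
extout = interpolator(ray).sum() * (d / npoints)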
Example #32
import numpy as np
import pyfits
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator as rgi

H2_hdulist = pyfits.open('./mod-total-rev2int.fit')
H2 = H2_hdulist[0].data

grid_x = np.linspace(-14.95, 14.95, 300)
grid_z = np.linspace(-.4875, .4875, 40)

H2_Interp = rgi((grid_z, grid_x, grid_x),
                H2,
                fill_value=np.float32(0),
                method='linear',
                bounds_error=False)

x = np.linspace(-1, 1, 100)
y = []

for i in x:
    y.append(H2_Interp((i, 8, 0)))

plt.yscale('log')
plt.plot(x, y)

plt.show()
def flyby(output_dir,
          flyby_a,
          flyby_n,
          do_rand_start=1,
          l_start=None,
          l_dir=np.array([np.pi / 8, np.sqrt(0.5), 1.]),
          norm_B0=1,
          method='matt',
          output_plot=1):
    # Loading data with the correct method scales the data to the physical variables
    data = diag.load_data(output_dir,
                          flyby_n,
                          prob='from_array',
                          method=method)
    Ns, Ls = get_grid_info(data)
    Ls[:2] *= flyby_a  # stretch perpendicular lengths
    zg, yg, xg = reinterp_generate_grid(Ns, Ls)
    # Zg, Yg, Xg = np.meshgrid(zg, yg, xg, indexing='ij')  # only needed if defining a function on grid

    # extending to general mean fields
    rho_data = data['rho']
    B = np.array((data['Bcc1'], data['Bcc2'], data['Bcc3']))
    B_0 = diag.get_mag(diag.box_avg(B), axis=0)  # single time entry
    v_A = diag.alfven_speed(rho_data, B)
    scale_B_mean = 1 / B_0 if norm_B0 else 1  # 1 / <Bx> = a^2
    scale_v_A = 1 / v_A if norm_B0 else 1  # rho^1/2 / <Bx> = rho^1/2 * a^2
    scale_rho = flyby_a**2 if norm_B0 else 1

    Bx = pad_array(data['Bcc1'] * scale_B_mean)
    By = pad_array(data['Bcc2'] * scale_B_mean)
    Bz = pad_array(data['Bcc3'] * scale_B_mean)
    ux = pad_array(data['vel1'] * scale_v_A)
    uy = pad_array(data['vel2'] * scale_v_A)
    uz = pad_array(data['vel3'] * scale_v_A)
    rho = pad_array(rho_data * scale_rho)
    Bmag = np.sqrt(Bx**2 + By**2 + Bz**2)

    # interpolaters
    Bx_i = rgi((zg, yg, xg), Bx)
    By_i = rgi((zg, yg, xg), By)
    Bz_i = rgi((zg, yg, xg), Bz)
    ux_i = rgi((zg, yg, xg), ux)
    uy_i = rgi((zg, yg, xg), uy)
    uz_i = rgi((zg, yg, xg), uz)
    Bmag_i = rgi((zg, yg, xg), Bmag)
    rho_i = rgi((zg, yg, xg), rho)

    # if plotting, do a short flyby to cut down on space
    # otherwise fly through either 1/10 of the box
    # or 5 million points (for large resolutions) for analysis
    N_points = Ns.prod()
    N_dl = 50000 if output_plot else min(int(5e6), N_points // 10)
    dl = yg[1] - yg[0]  # walk in steps of dy = a*Ly / Ny
    total_length = N_dl * dl
    lvec = np.linspace(0, total_length, N_dl, endpoint=False).reshape(N_dl, 1)

    if do_rand_start:
        # start at random point in box
        l_start = uniform(high=Ls)
    else:
        assert l_start is not None, 'l_start must be a valid numpy array!'
    # direction biased in x direction (z, y, x)
    l_dir /= np.sqrt(np.sum(l_dir**2))
    pts = np.mod(l_start + lvec * l_dir, Ls)

    # Interpolate data along line running through box
    FB = {}
    FB['Bx'], FB['By'], FB['Bz'] = Bx_i(pts), By_i(pts), Bz_i(pts)
    FB['ux'], FB['uy'], FB['uz'] = ux_i(pts), uy_i(pts), uz_i(pts)
    FB['Bmag'], FB['rho'] = Bmag_i(pts), rho_i(pts)
    FB['start_point'], FB['direction'] = l_start, l_dir
    FB['l_param'], FB['points'] = lvec[:, 0], pts
    FB['a'], FB['snapshot_number'] = flyby_a, flyby_n
    FB['normed_to_Bx'] = 'true' if norm_B0 else 'false'
    FB['norms'] = {'B': scale_B_mean, 'u': scale_v_A, 'rho': scale_rho}

    return FB

# Convert cartesian density data into spherical so it can be plotted
x1 = np.linspace(xrange[0], xrange[1], side)
y1 = np.linspace(yrange[0], yrange[1], side)
z1 = np.linspace(zrange[0], zrange[1], side)
intf = rgi((x1, y1, z1), 10.0**model[:].copy())
rad = np.linspace(1.0, 1.25, 21)
lat = np.linspace(-np.pi/2, np.pi/2, 180)
lon = np.linspace(-np.pi, np.pi, 360)
print(lon.shape, lat.shape, rad.shape)
radi, lati, loni = np.array([i for i in product(rad, lat, lon)]).T
print(loni.shape, lati.shape, radi.shape)
yi = radi * np.cos(lati) * np.sin(loni)
xi = np.sin(lati)
zi = -radi * np.cos(lati) * np.cos(loni)
print(xi.shape, yi.shape, zi.shape)
print(np.array([xi, yi, zi]).shape)
modelsph = intf(np.array([xi, yi, zi]).T)
print(modelsph.shape, np.nanmin(modelsph), np.nanmax(modelsph))
modelsph = modelsph.reshape(21, 180, 360)
Example #35
def interpolate_output_to_new_grid(run_dir, last_iter, new_grid,
                                   get_grid_args=dict()):
    """
    Takes outputs U, V, T, S, Eta and interpolates them onto new grid

    Assumes both grids are 3D and Cartesian.

    Results for 2D aren't quite right. Interpolation falls back to nearest
    neighbour where it should be linear.

    Inputs
    ------
    run_dir: str
        Directory containing output
    last_iter: int
        The number in the output filenames that are to be interpolated
        For example, for T.00000360000.data, last_iter is 36000
    new_grid: Grid instance
        Grid from MyGrids.Grid
    get_grid_args: dict
        Arguments to pass to get_grid
        `g_in = get_grid(run_dir, squeeze_hfacs=False, **get_grid_args)`

    Returns
    -------
    all_outputs: dict
        Contains U, V, T, S, and Eta on new grid. Shape of these arrays
        are (Nz × Ny × Nx) or (Ny × Nx) for Eta

    Notes
    -----
    Not set up to work with OBCS (hFacs are not dealt with correctly)
    """
    # filterwarnings('ignore', '.*invalid value encountered in true_divide')
    # filterwarnings('ignore', '.*invalid value encountered in less_equal')

    # Helper function
    def get_coord(grid, coord_str):
        """get_coord(g, 'xc') returns g.xc or None if g has no attribute xc
        Remove extra value for xf and yf, since that's what's need in outer
        function"""
        try:
            out = getattr(grid, coord_str)
            if coord_str in ['xf', 'yf']:
                out = out[:-1]
            return out.copy()
        except TypeError:
            return None

    # Input and output grids
    run_dir = os.path.normpath(run_dir) + os.sep
    g_in = get_grid(run_dir, squeeze_hfacs=False, **get_grid_args)
    g_out = new_grid

    coord_sys = dict(
        U=('xf', 'yc', 'zc', 'hFacW'), V=('xc', 'yf', 'zc', 'hFacS'),
        T=('xc', 'yc', 'zc', 'hFacC'), S=('xc', 'yc', 'zc', 'hFacC'),
        Eta=('xc', 'yc', None, 'hFacC'))

    # Preallocate dict that is returned
    all_outputs = {}

    for k, (x, y, z, h) in coord_sys.items():
        threeD = k != 'Eta'

        # Read in all grids for current quantity
        xi, yi, zi, hi = [get_coord(g_in, q) for q in (x, y, z, h)]
        hi = hi[0, ...] if not threeD else hi

        # Read actual output
        fname = run_dir + k + '*'
        quantity = rdmds(fname, last_iter)

        # Convert zeros to NaN for values that aren't water
        # This is really important for T and S, where the average of say 35
        # and 0 is unphysical. For U and V, it's not as important but still
        # worthwhile
        quantity[hi == 0] = np.nan

        # Smooth at each depth level before interpolation. Helps reduce large
        # divergences
        # Update: 6/3/17. Try without smoothing
        gf_opts = dict(sigma=0, keep_nans=False, gf_kwargs=dict(truncate=8))
        if threeD:
            for i, level in enumerate(quantity):
                quantity[i, ...] = nan_gaussian_filter(level, **gf_opts)
        else:
            quantity = nan_gaussian_filter(quantity, **gf_opts)

        # Add a border around output to avoid problems with regions between
        # the centre of the first and last cells in a given dimension and the
        # edge of the domain
        quantity = add_border_values(quantity)

        # Add in associated values to x, y, z
        xp2 = np.r_[xi[0] - g_in.dx[0], xi, xi[-1] + g_in.dx[-1]]
        yp2 = np.r_[yi[0] - g_in.dy[0], yi, yi[-1] + g_in.dy[-1]]
        zp2 = np.r_[zi[0] - g_in.dz[0], zi, zi[-1] + g_in.dz[-1]] if threeD else None

        # Grid associated with added border
        pts_in = (zp2, yp2, xp2) if threeD else (yp2, xp2)

        # Overall interpolation will be combo of linear and nearest neighbour
        # to get the best of both
        interp_input = dict(points=pts_in, values=quantity,
                            bounds_error=False, fill_value=None)
        f_lin = rgi(method='linear', **interp_input)
        f_near = rgi(method='nearest', **interp_input)

        # Output grids
        xo, yo, zo = [get_coord(g_out, q) for q in (x, y, z)]
        if threeD:
            Zo, Yo, Xo = np.meshgrid(zo, yo, xo, indexing='ij')
        else:
            Yo, Xo = np.meshgrid(yo, xo, indexing='ij')

        pts_out = (Zo, Yo, Xo) if threeD else (Yo, Xo)

        # Linear interpolate to start with, then fill in bits near boundaries
        # with nearest neighbour interpolation, then fill everything else
        lin_out = f_lin(pts_out)
        near_out = f_near(pts_out)

        lin_out_is_nan = np.isnan(lin_out)
        lin_out[lin_out_is_nan] = near_out[lin_out_is_nan]

        # Fill any remaining gaps
        lin_out = remove_nans_laterally(lin_out, inverted_dimensions=True)

        # For completely empty levels, copy the level above
        if threeD:
            levels_to_copy = np.where(np.all(np.isnan(lin_out), axis=(1, 2)))[0]
            for level in levels_to_copy:
                lin_out[level, ...] = lin_out[level - 1, ...]

        all_outputs[k] = lin_out

    return all_outputs
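
A small synthetic sketch of the linear-plus-nearest combination used above: linear interpolation returns NaN wherever a neighbouring node is NaN, and the nearest-neighbour pass fills most of those points back in:

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

vals = np.random.rand(10, 12)
vals[0, :] = np.nan                  # e.g. masked boundary cells
yp, xp = np.arange(10.0), np.arange(12.0)
interp_input = dict(points=(yp, xp), values=vals,
                    bounds_error=False, fill_value=None)
f_lin = rgi(method='linear', **interp_input)
f_near = rgi(method='nearest', **interp_input)

Yo, Xo = np.meshgrid(np.linspace(0, 9, 19), np.linspace(0, 11, 23), indexing='ij')
out = f_lin((Yo, Xo))
nan_mask = np.isnan(out)
out[nan_mask] = f_near((Yo, Xo))[nan_mask]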
def tangent_planes_to_zone_of_interest(cropAx, parametrized_skel, 
                                             s_inx, e_inx, g_radius, g_res, shift_impose, direction, H_th):
    
    p_inx, p_bound, p_shiftX, p_shiftY = s_inx, [], 0 , 0 
    sz = cropAx.shape
    x, y = np.mgrid[-g_radius:g_radius:g_res, -g_radius:g_radius:g_res]
    z = np.zeros_like(x)
    
    c_mesh = (2*g_radius)/(2*g_res)
    
    xyz = np.array([np.ravel(x), np.ravel(y), np.ravel(z)]).T
    
    tangent_vecs = unit_tangent_vector(parametrized_skel)   
    interpolating_func = rgi((range(sz[0]),range(sz[1]),range(sz[2])), 
                                cropAx,bounds_error=False,fill_value=0)
    
    cent_ball = (x**2+y**2)<g_res*1
    count = 1
    while s_inx != e_inx:
        
        #print s_inx
        
        point = parametrized_skel[s_inx]        
        utv = tangent_vecs[s_inx]
        
        if np.array_equal(utv, np.array([0, 0, 0])):
            s_inx = s_inx+direction
            continue

        rot_axis = pr.unit_normal_vector(utv, np.array([0,0,1]))
        theta = pr.angle(utv, np.array([0,0,1]))
        rot_mat = pr.rotation_matrix_3D(rot_axis, theta)
        rotated_plane = np.squeeze(pr.rotate_vector(xyz, rot_mat))        
        cross_section_plane = rotated_plane+point  
        
        cross_section = interpolating_func(cross_section_plane)
        bw_cross_section = cross_section>=0.5
        bw_cross_section = np.reshape(bw_cross_section, x.shape)
        label_cross_section, nn = label(bw_cross_section, connectivity=1, return_num=True)
        main_lbl = np.unique(label_cross_section[cent_ball])
        main_lbl = main_lbl[np.nonzero(main_lbl)]
    
        if len(main_lbl)!=1:
            s_inx = s_inx+direction
            continue
        
        bw_cross_section = label_cross_section==main_lbl
        
        nz_X = np.count_nonzero(np.sum(bw_cross_section, axis=0))
        nz_Y = np.count_nonzero(np.sum(bw_cross_section, axis=1))      
        if (nz_X<4) | (nz_Y<4):
            s_inx = s_inx+direction
            continue
        
        if shift_impose:
            
            props = regionprops(bw_cross_section.astype(int))
            y0, x0 = props[0].centroid
    
            shiftX = np.round(c_mesh-x0).astype(np.int)
            shiftY = np.round(c_mesh-y0).astype(np.int)
            
            p = max(abs(shiftX), abs(shiftY))
            
            if p != 0:
                
                bw_cross_section = np.pad(bw_cross_section, p, mode='constant')
    
                bw_cross_section = np.roll(bw_cross_section,shiftY,axis=0)
                bw_cross_section = np.roll(bw_cross_section,shiftX,axis=1)
            
                bw_cross_section = bw_cross_section[p:-p, p:-p]
                
        
        label_cross_section, nn = label(bw_cross_section, connectivity=1, return_num=True)
        if nn != 1:
            main_lbl = np.unique(label_cross_section[cent_ball])
            main_lbl = main_lbl[np.nonzero(main_lbl)]
        
            if len(main_lbl)!=1:
                s_inx = s_inx+direction
                continue
            bw_cross_section = label_cross_section==main_lbl
        
        
        bound = boundary_parametrization(bw_cross_section)
        
        if not test_boundary_parametrization(bound, c_mesh):
            s_inx = s_inx+direction
            continue
            
        
        #fig, ax=plt.subplots() 
        #ax.plot(bound[:,1], bound[:,0], '-', linewidth=2, color='black')
        if count==1:
            m_curve = mean_curve(bound, bound, 2, c_mesh, 0)            
            max_radius = np.max(np.sum((m_curve-np.array(x.shape)/2)**2, axis=1)**0.5)
            
            p_inx = s_inx
            p_bound =  bound
            p_shiftX = shiftX
            p_shiftY = shiftY
            
            count = count+1
            s_inx = s_inx+direction          
        else:
            H_dist = hausdorff_distance(bound, m_curve, len(m_curve))
            d_ratio = np.true_divide(H_dist, (H_dist+max_radius))     

            #print d_ratio            
                
            if d_ratio<H_th:
                m_curve = mean_curve(bound, m_curve, count, c_mesh, 0)
                max_radius = g_res*np.max(np.sum((m_curve-np.array(x.shape)/2)**2, axis=1)**0.5)
                
                p_inx = s_inx
                p_bound =  bound
                p_shiftX = shiftX
                p_shiftY = shiftY
                    
                count = count+1
                s_inx = s_inx+direction

            else:
                break
    return p_inx, p_bound, p_shiftX, p_shiftY
Example #37
 def import_data(self, fn):
     aoa_lim = 15
     beta_lim = 15
     da_lim = 21.5 / 4.
     de_lim = 25
     dr_lim = 30
     pq_lim = 1.2
     r_lim = 0.3925
     num_pts = 5
     alpha = np.linspace(-aoa_lim, aoa_lim, num_pts)
     beta = np.linspace(-beta_lim, beta_lim, num_pts)
     d_e = np.linspace(-de_lim, de_lim, num_pts)
     d_a = np.linspace(-da_lim, da_lim, num_pts)
     d_r = np.linspace(-dr_lim, dr_lim, num_pts)
     p = np.linspace(-pq_lim, pq_lim, num_pts)
     q = np.linspace(-pq_lim, pq_lim, num_pts)
     r = np.linspace(-r_lim, r_lim, num_pts)
     if fn[-4:] == '.csv':
         self.data = pd.read_csv(fn, delimiter=',')
         self.data.sort_values(
             by=['AOA', 'Beta', 'd_e', 'd_a', 'd_r', 'p', 'q', 'r'],
             inplace=True)
         self.data.to_csv("./TODatabase_linear_sorted.csv")
         self.data_array = np.zeros((5, 5, 5, 5, 5, 5, 5, 5, 6))
         it = 0
         for i in range(num_pts):
             for j in range(num_pts):
                 for k in range(num_pts):
                     for m in range(num_pts):
                         for n in range(num_pts):
                             for pp in range(num_pts):
                                 for qq in range(num_pts):
                                     for rr in range(num_pts):
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             0] = self.data.iat[it, 8]
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             1] = self.data.iat[it, 9]
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             2] = self.data.iat[it, 10]
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             3] = self.data.iat[it, 11]
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             4] = self.data.iat[it, 12]
                                         self.data_array[
                                             i, j, k, m, n, pp, qq, rr,
                                             5] = self.data.iat[it, 13]
                                         it += 1
         np.save("./TODatabase_linear.npy", self.data_array)
     elif fn[-4:] == '.npy':
         self.data_array = np.load(fn)
         mask = np.isnan(self.data_array)
         self.data_array[mask] = np.interp(np.flatnonzero(mask),
                                           np.flatnonzero(~mask),
                                           self.data_array[~mask])
     self.CX_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 0],
                     bounds_error=False,
                     fill_value=None)
     self.CY_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 1],
                     bounds_error=False,
                     fill_value=None)
     self.CZ_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 2],
                     bounds_error=False,
                     fill_value=None)
     self.Cl_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 3],
                     bounds_error=False,
                     fill_value=None)
     self.Cm_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 4],
                     bounds_error=False,
                     fill_value=None)
     self.Cn_s = rgi((alpha, beta, d_e, d_a, d_r, p, q, r),
                     self.data_array[:, :, :, :, :, :, :, :, 5],
                     bounds_error=False,
                     fill_value=None)
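
A tiny 1-D sketch (made-up coefficient table) of the fill_value=None behaviour relied on above: together with bounds_error=False it makes RegularGridInterpolator extrapolate beyond the table limits instead of returning NaN.

import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

alpha = np.linspace(-15, 15, 5)      # deg, like the AOA breakpoints above
CX = 0.1 * alpha                     # made-up coefficient table
CX_s = rgi((alpha,), CX, bounds_error=False, fill_value=None)
print(CX_s([[20.0]]))                # linearly extrapolated past +15 deg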