Example #1
    def undistort_image(self, img, Kundistortion=None):
        """
        Transform grayscale image such that radial distortion is removed.

        :param img: input image
        :type img: np.ndarray, shape=(n, m) or (n, m, 3)
        :param Kundistortion: camera matrix for undistorted view, None for self.K
        :type Kundistortion: array-like, shape=(3, 3)
        :return: transformed image
        :rtype: np.ndarray, shape=(n, m) or (n, m, 3)
        """
        if Kundistortion is None:
            Kundistortion = self.K
        if self.calibration_type == 'opencv':
            return cv2.undistort(img, self.K, self.opencv_dist_coeff, newCameraMatrix=Kundistortion)
        elif self.calibration_type == 'opencv_fisheye':
            return cv2.fisheye.undistortImage(img, self.K, self.opencv_dist_coeff, Knew=Kundistortion)
        else:
            xx, yy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))
            img_coords = np.array([xx.ravel(), yy.ravel()])
            y_l = self.undistort(img_coords, Kundistortion)
            if img.ndim == 2:
                return griddata(y_l.T, img.ravel(), (xx, yy), fill_value=0, method='linear')
            else:
                channels = [griddata(y_l.T, img[:, :, i].ravel(), (xx, yy), fill_value=0, method='linear')
                            for i in range(img.shape[2])]
                return np.dstack(channels)
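A minimal standalone sketch of the griddata-based fallback branch above, using a synthetic grayscale image and a made-up radial displacement in place of self.undistort(); it only illustrates the scatter-to-grid resampling step, not the actual camera model.

import numpy as np
from scipy.interpolate import griddata

img = np.random.rand(64, 64)                      # synthetic grayscale image
xx, yy = np.meshgrid(np.arange(img.shape[1]), np.arange(img.shape[0]))

# hypothetical "undistorted" pixel positions (a tiny radial stretch around the centre)
cx, cy = 31.5, 31.5
r2 = (xx - cx) ** 2 + (yy - cy) ** 2
x_u = cx + (xx - cx) * (1 + 1e-4 * r2)
y_u = cy + (yy - cy) * (1 + 1e-4 * r2)

# resample the image values from the displaced positions back onto the regular grid
undistorted = griddata((x_u.ravel(), y_u.ravel()), img.ravel(),
                       (xx, yy), fill_value=0, method='linear')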
Example #2
def rasterize(geometry, points):
    """ Create array. """
    envelope = geometry.GetEnvelope()
    # px, py, pz = points.transpose()
    x1 = 4 * math.floor(envelope[0] / 4)
    y1 = 4 * math.floor(envelope[2] / 4)
    x2 = 4 * math.ceil(envelope[1] / 4)
    y2 = 4 * math.ceil(envelope[3] / 4)

    # A and D (pixel sizes, D negative), NO_DATA_VALUE, PROJECTION and clip()
    # are presumably module-level constants and helpers
    geo_transform = x1, A, 0, y2, 0, D
    array = np.full((4 * (y2 - y1), 4 * (x2 - x1)), NO_DATA_VALUE, 'f4')
    grid = tuple(np.mgrid[y2 + D / 2:y1 + D / 2:D,
                          x1 + A / 2:x2 + A / 2:A][::-1])

    # interpolate
    args = points[:, :2], points[:, 2], grid
    linear = interpolate.griddata(*args, method='linear')
    nearest = interpolate.griddata(*args, method='nearest')
    array = np.where(np.isnan(linear), nearest, linear).astype('f4')

    # clip and return
    kwargs = {
        'array': array[np.newaxis],
        'projection': PROJECTION,
        'no_data_value': NO_DATA_VALUE,
        'geo_transform': geo_transform,
    }
    clip(kwargs=kwargs, geometry=geometry)
    return kwargs
Example #3
def get_GridFSim(x1, y1, x2, y2, img1):
    ''' Calculate estimated ice drift on first image based on feature tracking vectors'''
    
    # # initial drift inter-/extrapolation
    # linear triangulation
    x1Grid, y1Grid = np.meshgrid(range(img1.shape[1]), range(img1.shape[0]))
    x2GridFSim = griddata(np.array([y1, x1]).T, x2, np.array([y1Grid, x1Grid]).T, method='linear').T
    y2GridFSim = griddata(np.array([y1, x1]).T, y2, np.array([y1Grid, x1Grid]).T, method='linear').T
    # linear fit for entire grid
    A = np.vstack([np.ones(len(x1)), x1, y1 ]).T
    # find B in x2 = B * [x1, y1]
    Bx = np.linalg.lstsq(A, x2, rcond=None)[0]
    By = np.linalg.lstsq(A, y2, rcond=None)[0]
    # calculate simulated x2sim = B * [x1, y1]
    x1GridF = x1Grid.flatten()
    y1GridF = y1Grid.flatten()
    A = np.vstack([np.ones(len(x1GridF)), x1GridF, y1GridF]).T
    x2GridFSim_lf = np.dot(A, Bx).reshape(img1.shape)
    y2GridFSim_lf = np.dot(A, By).reshape(img1.shape)
    # fill NaN with lf
    gpi = np.isnan(x2GridFSim)
    x2GridFSim[gpi] = x2GridFSim_lf[gpi]
    y2GridFSim[gpi] = y2GridFSim_lf[gpi]

    return x2GridFSim, y2GridFSim
Example #4
def bin_confint_lookup(pc, nsamp, ci = .05):
  """Return the confidence interval from the lookup table.
  Inputs:
    pc - array (get back several cis) or single value (get back one ci) of percent corrects
    nsamp - number of trials used to obtain each pc
    ci - confidence level (e.g. 0.01, 0.05)

  Output:
    3xN array - first row is pc
                last two rows are lower and upper ci as expected by pylab.errorbar
  """
  points = ci_table['points']
  values_lo = ci_table['values_lo']
  values_high = ci_table['values_high']

  from scipy.interpolate import griddata
  if pylab.isscalar(pc):
    pc = pylab.array([pc])
    nsamp = pylab.array([nsamp])
  ci_a = pylab.ones(pc.size)*ci
  xi = pylab.array((pc,nsamp,ci_a)).T

  low_ci = griddata(points, values_lo, xi, method='linear')
  high_ci = griddata(points, values_high, xi, method='linear')

  return pylab.array((pc,low_ci,high_ci))
Example #5
def make_grid(points, values, grid, method=None):
    """Abstraction of two different versions of griddata

    points: Nx2 array of points where data is known
    values: corresponding values
    grid: Tuple of X, Y - Regular grid (e.g. obtained from meshgrid)
    """


    if griddata_version == 'scipy':
        if method is None:
            m = 'cubic'
        else:
            m = method

        return griddata(points, values, grid, method=m)
    elif griddata_version == 'pylab':
        if method is None:
            m = 'nn'
        else:
            m = method

        x = points[:,0]
        y = points[:,1]
        z = values
        X, Y = grid
        return griddata(x, y, z, X, Y, interp=m)
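A usage sketch for the scipy branch of make_grid, assuming scattered samples of a known function; griddata_version and the pylab branch are module-specific and not reproduced here.

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(0)
points = rng.uniform(0, 1, size=(500, 2))          # Nx2 scattered sample locations
values = np.sin(6 * points[:, 0]) * np.cos(6 * points[:, 1])
X, Y = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))

Z = griddata(points, values, (X, Y), method='cubic')   # same call as the scipy branch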
Example #6
def plot_QU_gd(x, y, Q, U, irad, Req):
    """ using griddata 
    """
    fig = _plt.figure()
    lins, cols = (1, 2)
    gs = _gridspec.GridSpec(lins, cols)

    axq = _plt.subplot(gs[0, 0])  
    axu = _plt.subplot(gs[0, 1])  

    xmin = _np.min(x)/Req
    xmax = _np.max(x)/Req
    ymin = _np.min(y)/Req
    ymax = _np.max(y)/Req
    xx, yy = _np.meshgrid(_np.linspace(xmin, xmax, 32), 
        _np.linspace(ymin, ymax, 32)[::-1])
    yo = y*_np.cos(irad)
    q = _interpolate.griddata( _np.array([x, yo]).T/Req, Q, 
        _np.array([xx.flatten(), yy.flatten()]).T )
    u = _interpolate.griddata( _np.array([x, yo]).T/Req, U, 
        _np.array([xx.flatten(), yy.flatten()]).T )

    axq.imshow(q.reshape(32, 32), origin='lower', extent=[xmin, xmax, 
        ymin, ymax])
    axu.imshow(u.reshape(32, 32), origin='lower', extent=[xmin, xmax, 
        ymin, ymax])
    return fig, [axq, axu]
Example #7
def mesh2grid(v, mesh):
    """ Interpolates from an unstructured coordinates (mesh) to a structured 
        coordinates (grid)
    """
    x = mesh[:,0]
    z = mesh[:,1]
    lx = x.max() - x.min()
    lz = z.max() - z.min()
    nn = v.size

    nx = int(np.around(np.sqrt(nn*lx/lz)))
    nz = int(np.around(np.sqrt(nn*lz/lx)))
    dx = lx/nx
    dz = lz/nz

    # construct structured grid
    x = np.linspace(x.min(), x.max(), nx)
    z = np.linspace(z.min(), z.max(), nz)
    X, Z = np.meshgrid(x, z)
    # stack() is presumably a module helper equivalent to np.column_stack
    grid = stack(X.flatten(), Z.flatten())

    # interpolate to structured grid
    V = _interp.griddata(mesh, v, grid, 'linear')

    # workaround edge issues
    if np.any(np.isnan(V)):
        W = _interp.griddata(mesh, v, grid, 'nearest')
        for i in np.where(np.isnan(V)):
            V[i] = W[i]

    V = np.reshape(V, (nz, nx))
    return V, grid
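A self-contained sketch of the same unstructured-to-structured resampling, assuming synthetic mesh data; np.column_stack stands in for the module's stack() helper, and the NaN patching mirrors the edge workaround above.

import numpy as np
from scipy import interpolate as _interp

rng = np.random.default_rng(1)
mesh = rng.uniform(0, 100, size=(2000, 2))          # unstructured (x, z) coordinates
v = np.sin(mesh[:, 0] / 10.0) + mesh[:, 1] / 100.0  # values at the mesh points

nx, nz = 80, 60
X, Z = np.meshgrid(np.linspace(0, 100, nx), np.linspace(0, 100, nz))
grid = np.column_stack((X.flatten(), Z.flatten()))

V = _interp.griddata(mesh, v, grid, method='linear')
nan = np.isnan(V)                                   # points outside the convex hull
V[nan] = _interp.griddata(mesh, v, grid[nan], method='nearest')
V = V.reshape(nz, nx)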
Example #8
def test_imshow_heatmap():
    from scipy.interpolate import griddata
    from matplotlib import pyplot as plt

    mesh3D = mesh(200)
    mesh2D = proj_to_2D(mesh3D)

    data = np.zeros((3,3))
    data[0,1] += 2

    vals = np.exp(log_dirichlet_density(mesh3D,2.,data=data.sum(0)))
    temp = log_censored_dirichlet_density(mesh3D,2.,data=data)
    censored_vals = np.exp(temp - temp.max())

    xi = np.linspace(-1,1,1000)
    yi = np.linspace(-0.5,1,1000)

    plt.figure()
    plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),vals,(xi[None,:],yi[:,None]),method='cubic'))
    plt.axis('off')
    plt.title('uncensored likelihood')

    plt.figure()
    plt.imshow(griddata((mesh2D[:,0],mesh2D[:,1]),censored_vals,(xi[None,:],yi[:,None]),method='cubic'))
    plt.axis('off')
    plt.title('censored likelihood')
Example #9
def plot(x,y,field,filename,c=200):
    plt.figure()
    # define grid.
    xi = np.linspace(min(x),max(x),100)
    yi = np.linspace(min(y),max(y),100)
    # grid the data.
    si_lin = griddata((x, y), field, (xi[None,:], yi[:,None]), method='linear')
    si_cub = griddata((x, y), field, (xi[None,:], yi[:,None]), method='cubic')
    print(np.min(field))
    print(np.max(field))
    plt.subplot(211)
    # contour the gridded data, plotting dots at the randomly spaced data points.
    CS = plt.contour(xi,yi,si_lin,c,linewidths=0.5,colors='k')
    CS = plt.contourf(xi,yi,si_lin,c,cmap=plt.cm.jet)
    plt.colorbar() # draw colorbar
    # plot data points.
    #    plt.scatter(x,y,marker='o',c='b',s=5)
    plt.xlim(min(x),max(x))
    plt.ylim(min(y),max(y))
    plt.title('Linear interpolation')
    #plt.tight_layout()
    plt.subplot(212)
    # contour the gridded data, plotting dots at the randomly spaced data points.
    CS = plt.contour(xi,yi,si_cub,c,linewidths=0.5,colors='k')
    CS = plt.contourf(xi,yi,si_cub,c,cmap=plt.cm.jet)
    plt.colorbar() # draw colorbar
    # plot data points.
    #    plt.scatter(x,y,marker='o',c='b',s=5)
    plt.xlim(min(x),max(x))
    plt.ylim(min(y),max(y))
    plt.title('Cubic interpolation')
    plt.savefig(filename)
Example #10
def contourf_interpolate_data(all_points, data, xlabel='', ylabel='', title='', interpolation_numpoints=200, interpolation_method='linear', mask_when_nearest=True, contour_numlevels=20, show_scatter=True, show_colorbar=True, fignum=None, ax_handle=None, mask_x_condition=None, mask_y_condition=None, log_scale=False):
    '''
        Take (x,y) and z tuples, construct an interpolation with them and plot them nicely.

        all_points: Nx2
        data:       Nx1

        mask_when_nearest: trick to hide points outside the convex hull of points even when using 'nearest' method
    '''

    assert all_points.shape[1] == 2, "Give a Nx2 matrix for all_points"

    # Construct the interpolation
    param1_space_int = np.linspace(all_points[:, 0].min(), all_points[:, 0].max(), interpolation_numpoints)
    param2_space_int = np.linspace(all_points[:, 1].min(), all_points[:, 1].max(), interpolation_numpoints)

    data_interpol = spint.griddata(all_points, data, (param1_space_int[None, :], param2_space_int[:, None]), method=interpolation_method)

    if interpolation_method == 'nearest' and mask_when_nearest:
        # Let's mask the points outside of the convex hull

        # The linear interpolation will have nan's on points outside of the convex hull of the all_points
        data_interpol_lin = spint.griddata(all_points, data, (param1_space_int[None, :], param2_space_int[:, None]), method='linear')

        # Mask
        data_interpol[np.isnan(data_interpol_lin)] = np.nan

    # Mask it based on some conditions
    if mask_x_condition is not None:
        data_interpol[mask_x_condition(param1_space_int), :] = 0.0
    if mask_y_condition is not None:
        data_interpol[:, mask_y_condition(param2_space_int)] = 0.0

    # Plot it
    if ax_handle is None:
        f = plt.figure(fignum)
        ax_handle = f.add_subplot(111)
    else:
        f = ax_handle.get_figure()
        f.clf()
        ax_handle = f.add_subplot(111)

    if log_scale:
        cs = ax_handle.contourf(param1_space_int, param2_space_int, data_interpol, contour_numlevels, locator=plttic.LogLocator())   # cmap=plt.cm.jet
    else:
        cs = ax_handle.contourf(param1_space_int, param2_space_int, data_interpol, contour_numlevels)   # cmap=plt.cm.jet
    ax_handle.set_xlabel(xlabel)
    ax_handle.set_ylabel(ylabel)
    ax_handle.set_title(title)

    if show_scatter:
        ax_handle.scatter(all_points[:, 0], all_points[:, 1], marker='o', c='b', s=5)

    ax_handle.set_xlim(param1_space_int.min(), param1_space_int.max())
    ax_handle.set_ylim(param2_space_int.min(), param2_space_int.max())

    if show_colorbar:
        f.colorbar(cs)

    return ax_handle
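A minimal sketch of the mask_when_nearest trick in isolation, assuming synthetic scattered data: the linear interpolation's NaNs mark grid points outside the convex hull, and those are copied into the nearest-neighbour result.

import numpy as np
from scipy import interpolate as spint

rng = np.random.default_rng(2)
all_points = rng.uniform(-1, 1, size=(200, 2))
data = np.hypot(all_points[:, 0], all_points[:, 1])

gx = np.linspace(-1.2, 1.2, 100)
gy = np.linspace(-1.2, 1.2, 100)

nearest = spint.griddata(all_points, data, (gx[None, :], gy[:, None]), method='nearest')
linear = spint.griddata(all_points, data, (gx[None, :], gy[:, None]), method='linear')
nearest[np.isnan(linear)] = np.nan   # hide extrapolated values outside the convex hull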
Example #11
def interpolateData(binaryDataFile):
    file = open(binaryDataFile, 'rb')
    if os.name == 'nt':
        rawTimeHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStressHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStrainHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
    elif os.name == 'posix':
        rawTimeHistory = numpy.array(pickle.load(file)).transpose()
        rawStressHistory = numpy.array(pickle.load(file)).transpose()
        rawStrainHistory = numpy.array(pickle.load(file)).transpose()
    
    timeHistory = numpy.linspace(0, simulationTime, numberOfSteps+1)
    stressHistory = numpy.empty([3, numberOfSteps+1])
    strainHistory = numpy.empty([3, numberOfSteps+1])
    for i in range(3):
        stressHistory[i, :] = griddata(rawTimeHistory, rawStressHistory[i], timeHistory)
        strainHistory[i, :] = griddata(rawTimeHistory, rawStrainHistory[i], timeHistory)
    stressHistory = stressHistory.transpose()
    strainHistory = strainHistory.transpose()
    
    with open('output.dat', 'w') as f:
        f.write('time S11 S22 S12 LE11 LE22 LE12\n')
        for i in range(len(timeHistory)):
            f.write(str(timeHistory[i])+' ')
            for j in range(len(stressHistory[i])):
                f.write(str(stressHistory[i][j])+' ')
            for j in range(len(strainHistory[i])):
                f.write(str(strainHistory[i][j])+' ')
            f.write('\n')
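griddata also accepts purely 1-D points, which is what the time resampling above relies on; a minimal sketch with synthetic solver output (names here are illustrative):

import numpy as np
from scipy.interpolate import griddata

rng = np.random.default_rng(3)
raw_time = np.sort(rng.uniform(0.0, 1.0, 40))        # irregular output times
raw_time[0], raw_time[-1] = 0.0, 1.0                 # make sure the uniform grid is covered
raw_stress = np.sin(2 * np.pi * raw_time)            # one stress component

time = np.linspace(0.0, 1.0, 101)                    # uniform time grid
stress = griddata(raw_time, raw_stress, time)        # 1-D linear interpolation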
Example #12
    def __init__(self, vmec_file, ntheta=None, nzeta=None, nr=32, nz=32):
        # Only needed here
        from scipy.interpolate import griddata, RegularGridInterpolator

        self.read_vmec_file(vmec_file, ntheta, nzeta)

        self.nr = nr
        self.nz = nz

        # Make a new rectangular grid in (R,Z)
        self.r_1D = np.linspace(self.r_stz.min(), self.r_stz.max(), nr)
        self.z_1D = np.linspace(self.z_stz.min(), self.z_stz.max(), nz)
        self.R_2D, self.Z_2D = np.meshgrid(self.r_1D, self.z_1D, indexing='ij')

        # First, interpolate the magnetic field components onto (R,Z)
        self.br_rz = np.zeros( (nr, nz, self.nzeta) )
        self.bz_rz = np.zeros( (nr, nz, self.nzeta) )
        self.bphi_rz = np.zeros( (nr, nz, self.nzeta) )
        # No need to interpolate in zeta, so do this one slice at a time
        for k, (br, bz, bphi, r, z) in enumerate(zip(self.br.T, self.bz.T, self.bphi.T, self.r_stz.T, self.z_stz.T)):
            points = np.column_stack( (r.flatten(), z.flatten()) )
            self.br_rz[...,k] = griddata(points, br.flatten(), (self.R_2D, self.Z_2D),
                                         method='linear', fill_value=0.0)
            self.bz_rz[...,k] = griddata(points, bz.flatten(), (self.R_2D, self.Z_2D),
                                         method='linear', fill_value=0.0)
            self.bphi_rz[...,k] = griddata(points, bphi.flatten(), (self.R_2D, self.Z_2D),
                                           method='linear', fill_value=1.0)

        # Now we have a regular grid in (R,Z,phi) (as zeta==phi), so
        # we can get an interpolation function in 3D
        points = ( self.r_1D, self.z_1D, self.zeta )

        self.br_interp = RegularGridInterpolator(points, self.br_rz, bounds_error=False, fill_value=0.0)
        self.bz_interp = RegularGridInterpolator(points, self.bz_rz, bounds_error=False, fill_value=0.0)
        self.bphi_interp = RegularGridInterpolator(points, self.bphi_rz, bounds_error=False, fill_value=1.0)
Example #13
    def get_apriori(self, latres=0.25, lonres=0.3125):
        '''
        Read GC HCHO sigma shape factor and regrid to lat/lon res.
        temporal resolution is one month
        inputs:
            latres, lonres for resolution of GC 2x2.5 hcho columns to be regridded onto
        '''
        assert False, "Method is old and wrong currently"
        # new latitude longitude we interpolate to.
        newlats= np.arange(-90,90, latres) + latres/2.0
        newlons= np.arange(-180,180, lonres) + lonres/2.0

        # Mesh[lat,lon]
        mlons,mlats = np.meshgrid(self.lons,self.lats)
        mnewlons,mnewlats = np.meshgrid(newlons,newlats)

        ## Get sigma apriori and regrid it
        #
        newS_s = np.zeros([72,len(newlats),len(newlons)])
        newSigma = np.zeros([72,len(newlats),len(newlons)])

        # interpolate at each pressure level...
        for ii in range(72):
            newS_s[ii,:,:] = griddata( (mlats.ravel(), mlons.ravel()),
                                       self.Shape_s[ii,:,:].ravel(),
                                       (mnewlats, mnewlons),
                                       method='nearest')
            newSigma[ii,:,:]=griddata( (mlats.ravel(), mlons.ravel()),
                                     self.sigmas[ii,:,:].ravel(),
                                     (mnewlats, mnewlons),
                                     method='nearest')

        # return the normalised sigma apriori used to recalculate AMF
        return newS_s, newlats, newlons, newSigma
Example #14
def autocorr(
    A, B, pointsA, pointsB, nregrid, rrange=[0.0, 1.5e18], phirange=[0.0, 6.283185307179586], zrange=[-1.5e18, 1.5e18]
):
    """Calculates the angular average of <a(t)b(t+s)>"""

    print "=== Obtaining correlation ==="
    # create r_i, phi_j and z_k arrays:
    ri = np.linspace(rrange[0], rrange[1], nregrid[0])
    phij = np.linspace(phirange[0], phirange[1], nregrid[1])
    zk = np.linspace(zrange[0], zrange[1], nregrid[2])

    (xijk, yijk, zijk) = cylKernel(ri, phij, zk, np.array(nregrid, dtype=np.int32))
    # griddata to points:
    dataA = griddata(
        (pointsA[:, 0], pointsA[:, 1], pointsA[:, 2]),
        np.array(A, dtype=np.float64),
        (xijk, yijk, zijk),
        method="nearest",
    )
    dataB = griddata(
        (pointsB[:, 0], pointsB[:, 1], pointsB[:, 2]),
        np.array(B, dtype=np.float64),
        (xijk, yijk, zijk),
        method="nearest",
    )

    correlation = autocorrKernel(dataA, dataB, np.array(nregrid, dtype=np.int32))

    print "=== Done with correlation ==="
    return np.ma.masked_array(correlation, np.isnan(correlation))
Example #15
def velovect(u1,u2,d,minvel=1e-40,nvect=None,scalevar=None,scale=100,color='k',fig=None):
    '''Plots normalized velocity vectors'''


    if fig is None:
        ax=plt.gca()
    else:
        ax=fig.ax

    CC=d.getCenterPoints()
    n=np.sqrt(u1**2+u2**2)
    # remove zero velocity:
    m=n<minvel
    vr=np.ma.filled(np.ma.masked_array(u1/n,m),0.)
    vz=np.ma.filled(np.ma.masked_array(u2/n,m),0.)
    if scalevar is not None:
        vr = vr*scalevar
        vz = vz*scalevar
    if nvect is None:
        Q=ax.quiver(CC[:,0],CC[:,1],vr,vz,pivot='middle',width=1e-3,minlength=0.,scale=scale,
                    headwidth=6)
    else:
        # regrid the data:
        tmp0 = complex(0, nvect[0])
        tmp1 = complex(0, nvect[1])
        grid_r, grid_z = np.mgrid[ax.get_xlim()[0]:ax.get_xlim()[1]:tmp0, ax.get_ylim()[0]:ax.get_ylim()[1]:tmp1]
        grid_vr = griddata(CC, vr, (grid_r, grid_z), method='nearest')
        grid_vz = griddata(CC, vz, (grid_r, grid_z), method='nearest')
        Q=ax.quiver(grid_r,grid_z,grid_vr,grid_vz,pivot='middle',width=2e-3,minlength=minvel,scale=scale,
                    headwidth=10,headlength=10,color=color,edgecolor=color,rasterized=True)

    plt.draw()
    return Q     
Example #16
def interpolate_data_2d(all_points, data, param1_space_int=None, param2_space_int=None, interpolation_numpoints=200, interpolation_method='linear', mask_when_nearest=True, show_scatter=True, show_colorbar=True, mask_x_condition=None, mask_y_condition=None):

    # Construct the interpolation
    if param1_space_int is None:
        param1_space_int = np.linspace(all_points[:, 0].min(), all_points[:, 0].max(), interpolation_numpoints)
    if param2_space_int is None:
        param2_space_int = np.linspace(all_points[:, 1].min(), all_points[:, 1].max(), interpolation_numpoints)

    data_interpol = spint.griddata(all_points, data, (param1_space_int[None, :], param2_space_int[:, None]), method=interpolation_method)

    if interpolation_method == 'nearest' and mask_when_nearest:
        # Let's mask the points outside of the convex hull

        # The linear interpolation will have nan's on points outside of the convex hull of the all_points
        data_interpol_lin = spint.griddata(all_points, data, (param1_space_int[None, :], param2_space_int[:, None]), method='linear')

        # Mask
        data_interpol[np.isnan(data_interpol_lin)] = np.nan

    # Mask it based on some conditions
    if mask_x_condition is not None:
        data_interpol[mask_x_condition(param1_space_int), :] = 0.0
    if mask_y_condition is not None:
        data_interpol[:, mask_y_condition(param2_space_int)] = 0.0

    return data_interpol
Example #17
    def match_planting_harvest(self, planting_filename, harvest_filename):
        # Load both planting and harvest files
        self.planting_dataframe = pandas.read_csv(planting_filename, delimiter=',')
        self.harvest_dataframe = pandas.read_csv(harvest_filename, delimiter=',')

        # Interpolate planting data for the harvest lat/longs
        # Since we have a 2D grid and continuous values, perform bilinear interpolation,
        # which will look smoother than nearest neighbor interpolation
        # However, the "variety" is categorical and thus can't be bilinearly interpolated,
        # so instead we can use nearest neighbor
        # Interpolation turns out to be a common enough function that scipy provides it
        gd_linear = interpolate.griddata(self.planting_dataframe.values[:,:2],
                                         self.planting_dataframe.values[:,3:],
                                         self.harvest_dataframe.values[:,:2])
        gd_nearest = interpolate.griddata(self.planting_dataframe.values[:,:2],
                                          self.planting_dataframe.values[:,2:3],
                                          self.harvest_dataframe.values[:,:2],
                                          method='nearest')
        interpolated_columns = self.harvest_dataframe.columns.append(self.planting_dataframe.columns[2:])
        interpolated_array = numpy.hstack((self.harvest_dataframe.values, gd_nearest, gd_linear))
        self.interpolated_dataframe = pandas.DataFrame(interpolated_array, columns=interpolated_columns).dropna(how='any')
        # If we just want to interpolate all columns as nearest neighbor, uncomment:
        # gd = interpolate.griddata(self.planting_dataframe.values[:,:2], self.planting_dataframe.values[:,2:], self.harvest_dataframe.values[:,:2], method='nearest')
        # interpolated_array = numpy.hstack((self.harvest_dataframe.values, gd))
        # self.interpolated_dataframe = pandas.DataFrame(interpolated_array, columns=interpolated_columns)

        # Create test and validation sets
        self.train_ylabel, self.test_ylabel, self.train_Xdata, self.test_Xdata = cross_validation.train_test_split(self.interpolated_dataframe.values[:,2:3], self.interpolated_dataframe.values[:,4:-1])
        return self.interpolated_dataframe
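A sketch of the same idea with synthetic data (all names illustrative): 'nearest' for the categorical column, 'linear' for continuous columns, queried at the harvest locations.

import numpy as np
from scipy import interpolate

rng = np.random.default_rng(4)
planting_xy = rng.uniform(0, 10, size=(300, 2))           # known lat/long
variety = rng.integers(0, 4, size=300)                    # categorical column
moisture = np.sin(planting_xy[:, 0]) + planting_xy[:, 1]  # continuous column
harvest_xy = rng.uniform(1, 9, size=(50, 2))              # query lat/long

variety_q = interpolate.griddata(planting_xy, variety, harvest_xy, method='nearest')
moisture_q = interpolate.griddata(planting_xy, moisture, harvest_xy, method='linear')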
Example #18
def get_reference_bim(a, t0=0, x_c=0, x0=15, verbose=True):
    if isinstance(t0, list):
        return np.array([r for r in map(get_reference_bim, repeat(a), t0, repeat(x_c))])
    
    if verbose:
        print('Getting a reference solution for a={} from BIM data'.format(a))
   
    numRefDir = os.path.join(os.environ['HOME'], 'work/soliton/fullPotentialSolution')
    if not(os.path.exists(numRefDir)):
        sys.exit('Numerical reference directory does not exist: '+numRefDir)

    x_c = x_c - solitonVelBIM[a]*t0 - x0
    N=200
    line = (np.ones(N)*x_c, np.linspace(-1, a, N))
    
    u, ext = postprocess.readGphov(os.path.join(numRefDir, str(a), 'u'))
    v, ext = postprocess.readGphov(os.path.join(numRefDir, str(a), 'v'))
    grid_x, grid_y = np.mgrid[ext[0]:ext[1]:u.shape[1]*1j, ext[2]:ext[3]:u.shape[0]*1j]
    
    u = u.transpose()
    v = v.transpose()
    
    ux_sampled = griddata((grid_x.flatten(), grid_y.flatten()), u.flatten(), line, method='linear', fill_value=0)
    uy_sampled = griddata((grid_x.flatten(), grid_y.flatten()), v.flatten(), line, method='linear', fill_value=0)

    return np.array(line).transpose(), np.array([ux_sampled, uy_sampled]).transpose()
Example #19
def interpolateData(binaryDataFile, sName):
    file = open(binaryDataFile, 'rb')
    if os.name == 'nt':
        rawTimeHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStressHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
        rawStrainHistory = numpy.array(pickle.load(file, encoding='latin1')).transpose()
    elif os.name == 'posix':
        rawTimeHistory = numpy.array(pickle.load(file)).transpose()
        rawStressHistory = numpy.array(pickle.load(file)).transpose()
        rawStrainHistory = numpy.array(pickle.load(file)).transpose()
    
    timeHistory = numpy.linspace(0, simulationTime, numberOfSteps+1)
    stressHistory = numpy.empty([3, numberOfSteps+1])
    strainHistory = numpy.empty([3, numberOfSteps+1])
    for i in range(3):
        stressHistory[i, :] = griddata(rawTimeHistory, rawStressHistory[i], timeHistory)
        strainHistory[i, :] = griddata(rawTimeHistory, rawStrainHistory[i], timeHistory)
    stressHistory = stressHistory.transpose()
    strainHistory = strainHistory.transpose()
            
    bundle = [timeHistory, stressHistory, strainHistory]
    bundleFileName = os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'fittedHistory', sName+'_'+abaqusMaterial+'_fittedHistory.pkl')
    with open(bundleFileName, 'ab') as fittedFile:
        pickle.dump(bundle, fittedFile)           
       
    return bundle
Example #20
def interp_exp_f(fname, out_dir):
    """ Used to interpolate data from experiment F. """
    print("  Beginning interpolation of " + fname)
    # The variables from the data
    print("    Reading data....")
    x, y, z_s, v_x, v_y, v_z = np.loadtxt(fname, unpack=True)

    #v_norm = np.sqrt(v_x**2 + v_y**2)
    res = 40 #int(fname.split(os.sep)[-1][5:8])
    
    # The given points
    x_pts = np.asarray(sorted(set(x)))
    y_pts = np.asarray(sorted(set(y)))
    points = (x,y)

    # The points we want
    x_out = np.arange(-50,50.0001,100.0/res)
    y_out = np.arange(-50,50.0001,100.0/res)
    out_points = [[i,j] for i in x_out for j in y_out]
    x_out = np.transpose(out_points)[0]
    y_out = np.transpose(out_points)[1]

    # Interpolate each list separately
    print("    Interpolating data....")
    z_s_i = interpolate.griddata(points, z_s, out_points)
    v_x_i = interpolate.griddata(points, v_x, out_points)
    v_y_i = interpolate.griddata(points, v_y, out_points)
    v_z_i = interpolate.griddata(points, v_z, out_points)
 
    out_file = os.path.join(out_dir, os.path.basename(fname).replace('.txt','_interp.txt'))
    print("    Writing data....")
    np.savetxt(out_file,np.transpose([x_out,y_out,z_s_i,v_x_i,v_y_i,v_z_i]))
Example #21
def scipy_stuff():
  from scipy.interpolate import griddata
  from matplotlib import pylab
  import pickle
  print("loading points")
  points, x_diff, y_diff = pickle.load(open("temp_data.pickle", "rb"))

  y_pts, x_pts = zip(*points)

  print "Creating grid points"
  grid_points = []
  for j in range(2500):
    for i in range(2500):
      grid_points.append((j, i))

  print "Gridding data"
  x_grid = griddata(points, x_diff, grid_points)
  y_grid = griddata(points, y_diff, grid_points)
  x_grid.shape = (2500, 2500)
  y_grid.shape = (2500, 2500)

  print "Plotting"
  pylab.subplot(3, 1, 1)
  pylab.imshow(x_grid)
  pylab.subplot(3, 1, 2)
  pylab.imshow(y_grid)
  pylab.subplot(3, 1, 3)
  pylab.scatter(x_pts, y_pts)
  pylab.show()
Example #22
def read_movie_data(filename):
    filename = "OUTPUT_FILES/" + filename
    x, y, vx = numpy.loadtxt(filename + '.E.xyz', usecols=(0, 1, 2), unpack=True)
    z = numpy.zeros(len(x))
    x, y, vy = numpy.loadtxt(filename + '.N.xyz', usecols=(0, 1, 2), unpack=True)
    x, y, vz = numpy.loadtxt(filename + '.Z.xyz', usecols=(0, 1, 2), unpack=True)
    max_x = numpy.amax(x)
    min_x = numpy.amin(x)
    num_pixels = 1000
    step = (max_x - min_x) / num_pixels
    xs = numpy.arange(min(x), max(x), step)
    ys = numpy.arange(min(y), max(y), step)

    X, Y = numpy.meshgrid(xs, ys)

    vxs = griddata((x, y), vx, (X, Y), method='linear')
    vys = griddata((x, y), vy, (X, Y), method='linear')
    vzs = griddata((x, y), vz, (X, Y), method='linear')
    zs = griddata((x, y), z, (X, Y), method='linear')

    pgv = numpy.maximum(numpy.abs(vxs), numpy.abs(vys))
    pgv = numpy.maximum(pgv, numpy.abs(vzs))
    pgv.shape = vxs.shape
    ext = compute_extreme_val(vx, vy, vz)
    gc.collect()
    return X, Y, zs, vxs, vys, vzs, pgv, step, ext
Example #23
def question6b():
    fname = 'report/Figures/q6.pdf'
#    fname = tempf
    pp = PdfPages(fname)

    plt.figure(figsize = (8,6))

    w = np.sqrt(U[0,:,:]**2 + U[1,:,:]**2)
    xlocs = [1.25, 1.5, 2.0, 3.0, 5.0]
    dx_init = 5.0e-6
    nbp = 150
    yend = 1

    xloc = -10.0
    ystart = 0.0
    xn = xloc * np.ones([nbp * 2 + 1])
    yn = np.empty([nbp * 2 + 1])
    base = ((yend - ystart) / dx_init) ** (1.0/(nbp-1))
    yn[nbp] = 0.
    for j in range(nbp):
        yn[nbp - j - 1] = -(ystart + dx_init * base**j)
        yn[nbp + j + 1] = ystart + dx_init * base**j

    wslice = inter.griddata((x.flat,
                            y.flat),
                            w.flat,
                            (xn, yn),
                            method='nearest')
    for (xi, xloc) in enumerate(xlocs):
        ystart = 0.0
        xn = xloc * np.ones([nbp * 2])
        yn = np.empty([nbp * 2])
        base = ((yend - ystart) / dx_init) ** (1.0/(nbp-1))
        for j in range(nbp):
            yn[j] = -(ystart + dx_init * base**j)
            yn[nbp + j] = ystart + dx_init * base**j

        wslice = inter.griddata((x.flat,
                                y.flat),
                                w.flat,
                                (xn, yn),
                                method='linear')
        plt.plot(wslice[0:2*nbp], yn[0:2*nbp], '-',
                ms=2, label = 'x = %3.2f' %xloc)

    plt.legend(loc=4,prop={'size':6})

    plt.xlabel(r'Velocity')
    plt.ylabel(r'$y$')
    plt.ylim([-1,1])

    plt.title('Momentum Deficit')

    plt.tight_layout()
    pp.savefig(bbox_inches='tight')

    pp.close()

    return
Example #24
def project_bitmap(m, f, args=None, kwargs=None, n_img_pix=(800,400), for_contour=False,
                   healpy=False):
    """
    """
    if args is None:
        args = ()

    if kwargs is None:
        kwargs = {}

    if isinstance(n_img_pix, int):
        n_img_pix = (n_img_pix, n_img_pix)

    l, b = np.meshgrid(np.linspace(-180,180,1000),np.linspace(-90,90,1000))
    x,y = m(l,b)

    xmin, xmax = np.min(x[x < 1e30]), np.max(x[x < 1e30])
    ymin, ymax = np.min(y[y < 1e30]), np.max(y[y < 1e30])
    
    xran = xmax - xmin
    yran = ymax - ymin

    dx = xran / n_img_pix[0]
    dy = yran / n_img_pix[1]
    
    x0, y0 = np.meshgrid(np.linspace(xmin - 0.05 * xran, xmax + 0.05 * xran, n_img_pix[0]),
                         np.linspace(ymin - 0.05 * yran, ymax + 0.05 * yran, n_img_pix[1]))

    l0, b0 = m(x0, y0, inverse=True)
    x1, y1 = m(l0, b0)
    mask = (((x0 - x1) ** 2 + (y0 - y1) ** 2) < 1).flatten()
    #mask = (((x0 - x1) ** 2 + (y0 - y1) ** 2) < 1e30).flatten()

    if not healpy:
        xg, yg = np.meshgrid(np.linspace(x0[0,0] - dx / 2, x0[-1,-1] + dx / 2,
                                         n_img_pix[0] + 1),
                             np.linspace(y0[0,0] - dy / 2, y0[-1,-1] + dy / 2, 
                                         n_img_pix[1] + 1))

        z = np.zeros(l0.shape).flatten()
        z[mask] = f(l0.flatten()[mask], b0.flatten()[mask], *args, **kwargs)
        z[~mask] = np.nan

    if not for_contour:
        zg = z.reshape((n_img_pix[1], n_img_pix[0]))
        zgm = np.ma.array(zg, mask=np.isnan(zg))
        return xg, yg, zgm
    else:
        if healpy:
            xg, yg = m(*healpy_grid(hp.npix2nside(len(args[0])), nest=kwargs))
            zg = griddata((xg, yg), args[0], (x0, y0), method='linear')
            zgm = np.ma.array(zg, mask=~mask.reshape((n_img_pix[1], n_img_pix[0])))
            return x0, y0, zgm
        else:
            zg = griddata((x0.flatten()[mask], y0.flatten()[mask]), z[mask], 
                          (x0, y0), method='cubic')
            zgm = np.ma.array(zg, mask=~mask.reshape((n_img_pix[1], n_img_pix[0])))
            return x0, y0, zgm
Example #25
    def interpolation(self, x0, x1, n=100):
        """Interpolate eta and phi along a line from x0 to x1"""
        X = linspace(x0[0], x1[0], n)   # Initialize x points
        Y = linspace(x0[1], x1[1], n)   # Initialize y points

        # griddata needs array-like point coordinates, so materialise the zip
        points = list(zip(self.x, self.y))
        interpolatedeta = griddata(points, self.eta, (X, Y), method='linear')   # Interpolate eta
        interpolatedphi = griddata(points, self.phi, (X, Y), method='linear')   # Interpolate phi

        return [X, Y, interpolatedeta, interpolatedphi]   # return X, Y and interpolated data
Example #26
    def test_fill_value(self):
        x = [(0,0), (0,1), (1,0)]
        y = [1, 2, 3]

        yi = griddata(x, y, [(1,1), (1,2), (0,0)], fill_value=-1)
        assert_array_equal(yi, [-1., -1, 1])

        yi = griddata(x, y, [(1,1), (1,2), (0,0)])
        assert_array_equal(yi, [np.nan, np.nan, 1])
Example #27
 def eval_points(self, *points, **kwds):
     '''
     Interpolate data at points
     
     Parameters
     ----------
     points :  ndarray of float, shape (..., ndim)
          Points at which to interpolate the data.
      method : {'linear', 'nearest', 'cubic'}
         Method of interpolation. One of
         - ``nearest``: return the value at the data point closest to
           the point of interpolation.  
         - ``linear``: tessellate the input point set to n-dimensional
           simplices, and interpolate linearly on each simplex.  
         - ``cubic`` (1-D): return the value determined from a cubic
           spline.
         - ``cubic`` (2-D): return the value determined from a
           piecewise cubic, continuously differentiable (C1), and
           approximately curvature-minimizing polynomial surface.
     fill_value : float, optional
         Value used to fill in for requested points outside of the
         convex hull of the input points.  If not provided, then the
         default is ``nan``. This option has no effect for the
         'nearest' method.
         
     Examples
     --------
     >>> import numpy as np
     >>> x = np.arange(-2, 2, 0.4)
     >>> xi = np.arange(-2, 2, 0.1)
 
     >>> d = PlotData(np.sin(x), x, xlab='x', ylab='sin', title='sinus', plot_args=['r.'])
     >>> di = PlotData(d.eval_points(xi), xi)
     >>> hi = di.plot()
     >>> h = d.plot()
     
     See also
     --------
     scipy.interpolate.griddata
     '''
     options = dict(method='linear')
     options.update(**kwds)
     if isinstance(self.args, (list, tuple)): # Multidimensional data
         ndim = len(self.args)
         if ndim < 2:
             msg = '''Unable to determine plotter-type, because len(self.args)<2.
             If the data is 1D, then self.args should be a vector!
             If the data is 2D, then length(self.args) should be 2.
             If the data is 3D, then length(self.args) should be 3.
             Unless you fix this, the interpolation will not work!'''
             warnings.warn(msg)
         else:
             xi = np.meshgrid(*self.args)
             return interpolate.griddata(xi, self.data.ravel(), points, **options)
     else: #One dimensional data
         return interpolate.griddata(self.args, self.data, points, **options)
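A sketch of the two griddata call patterns used in eval_points, outside the class (PlotData and self.args are not reproduced here):

import numpy as np
from scipy import interpolate

# one-dimensional data: args is a single vector
x = np.arange(-2, 2, 0.4)
d = np.sin(x)
xi = np.arange(-2, 2, 0.1)
di = interpolate.griddata(x, d, xi, method='linear')

# multidimensional data: args is a tuple of axes, data lives on their meshgrid
ax1 = np.linspace(0, 1, 20)
ax2 = np.linspace(0, 1, 30)
X1, X2 = np.meshgrid(ax1, ax2)
data = np.cos(X1) * X2
pts = (np.array([0.2, 0.7]), np.array([0.3, 0.9]))      # query points
vals = interpolate.griddata((X1.ravel(), X2.ravel()), data.ravel(), pts, method='linear')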
Example #28
def plotdis(IBC, UG, nodes, nn, xmin, xmax, ymin, ymax, savefigs=False):
    """Plot the nodal displacement solution using `griddata()`

    Parameters
    ----------
    IBC : ndarray (int)
      IBC (Indicator of Boundary Conditions) indicates whether each node
      has any type of boundary condition applied to it.
    UG : ndarray (float)
      Array with the computed displacements.
    nodes : ndarray (float)
      Array with number and nodes coordinates:
        `number coordX coordY BCX BCY`
    nn : int
      Number of nodes.
    xmin : float
      Minimum x value for the grid.
    xmax : float
      Maximum x value for the grid.
    ymin : float
      Minimum y value for the grid.
    ymax : float
      Maximum y value for the grid.

    """
    points = nodes[:, 1:3]
    grid_x, grid_y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]

    UC = np.zeros([nn, 2], dtype=float)
    for i in range(nn):
        for j in range(2):
            kk = IBC[i, j]
            if kk == -1:
                UC[i, j] = 0.0
            else:
                UC[i, j] = UG[kk]

    grid_z0 = griddata(points, UC[:, 0], (grid_x, grid_y), method='linear')
    grid_z1 = griddata(points, UC[:, 1], (grid_x, grid_y), method='linear')

    plt.figure("Solution: Horizontal displacement")
    plt.imshow(grid_z0.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$u_x$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numhorizo.pdf')

    plt.figure("Solution: Vertical displacement")
    plt.imshow(grid_z1.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$u_y$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numvertic.pdf')
Example #29
def plot_experiment_data(file_name, axes_list, coil1_abs_array, coil1_angle_array,plot_figures=None):
    expt_data_data = num.loadtxt(file_name)
    expt_data_q95 = expt_data_data[:,5]
    expt_data_betan = expt_data_data[:,3]

    interp_points = num.ones((num.max(expt_data_q95.shape),2),dtype=float)
    interp_points[:,0] = expt_data_q95
    interp_points[:,1] = expt_data_betan

    existing_points = num.ones((num.max(q95_array.shape),2),dtype=float)
    existing_points[:,0] = q95_array
    existing_points[:,1] = Bn_array

    expt_data2_points_abs = griddata(existing_points,coil1_abs_array,interp_points,method='linear')
    expt_data2_points_angle = griddata(existing_points,coil1_angle_array,interp_points,method='linear')
    tmp1, tmp2 = expt_data_data.shape
    output_data = num.ones((tmp1,tmp2+2),dtype=float)
    output_data[:,0:tmp2] = expt_data_data
    output_data[:,tmp2] = expt_data2_points_abs
    output_data[:,tmp2+1] = expt_data2_points_angle
    num.savetxt('expt_data_output.txt',output_data,fmt='%.4f',delimiter = '    ')
    for ax in axes_list:
        ax.plot(expt_data_q95, expt_data_betan,'kx')
    if plot_figures is not None:
        fig_expt_data = pt.figure()
        ax1_expt_data = fig_expt_data.add_subplot(211)
        ax2_expt_data = fig_expt_data.add_subplot(212)
        ax1_expt_data.plot(expt_data_betan,expt_data2_points_abs,'o')
        ax2_expt_data.plot(expt_data_betan,expt_data2_points_angle,'o')
        ax1_expt_data.set_ylim(clim_list[iii])
        ax2_expt_data.set_ylim([-200,200])
        ax1_expt_data.set_title(start_title+ 'Magnitude'+extra_title)
        ax2_expt_data.set_title(start_title+ 'Phase' + extra_title)
        ax2_expt_data.set_xlabel(r'$\beta_N$')
        ax1_expt_data.set_ylabel('G/kA')
        ax2_expt_data.set_ylabel('deg')

        fig_expt_data.canvas.draw()
        fig_expt_data.show()
        fig_expt_data = pt.figure()
        ax1_expt_data = fig_expt_data.add_subplot(211)
        ax2_expt_data = fig_expt_data.add_subplot(212)
        ax1_expt_data.plot(expt_data_q95,expt_data2_points_abs,'o')
        ax2_expt_data.plot(expt_data_q95,expt_data2_points_angle,'o')
        ax1_expt_data.set_ylim(clim_list[iii])
        ax2_expt_data.set_ylim([-200,200])
        ax1_expt_data.set_title(start_title + 'Magnitude'+extra_title)
        ax2_expt_data.set_title(start_title + 'Phase' + extra_title)
        ax2_expt_data.set_xlabel('q95')
        ax1_expt_data.set_ylabel('G/kA')
        ax2_expt_data.set_ylabel('deg')

        fig_expt_data.canvas.draw()
        fig_expt_data.show()
Example #30
def plotstrain(EG, XS, xmin, xmax, ymin, ymax, savefigs=False):
    """Plot the strain solution over the full domain

    Using griddata plots the strain solution over the full
    domain defined by the integration points. The integration
    points' physical coordinates are stored in XS[] while the
    strain solution is stored in EG[].

    Parameters
    ----------
    EG : ndarray (float)
      Array that contains the strain solution for each integration
      point in physical coordinates.
    XS : ndarray (float)
      Array with the coordinates of the integration points.
    xmin : float
      Minimum x value for the grid.
    xmax : float
      Maximum x value for the grid.
    ymin : float
      Minimum y value for the grid.
    ymax : float
      Maximum y value for the grid.

    """
    grid_x, grid_y = np.mgrid[xmin:xmax:20j, ymin:ymax:20j]
    grid_z0 = griddata(XS, EG[:, 0], (grid_x, grid_y), method='linear')
    grid_z1 = griddata(XS, EG[:, 1], (grid_x, grid_y), method='linear')
    grid_z2 = griddata(XS, EG[:, 2], (grid_x, grid_y), method='linear')

    plt.figure("Solution: epsilon-xx strain")
    plt.imshow(grid_z0.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\epsilon_{xx}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numepsixx.pdf')

    plt.figure("Solution: epsilon-yy strain")
    plt.imshow(grid_z1.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\epsilon_{yy}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numepsiyy.pdf')

    plt.figure("Solution: gamma-xy strain")
    plt.imshow(grid_z2.T, aspect='equal', extent=(xmin, xmax, ymin, ymax),
               origin='lower')
    plt.title(r'$\gamma_{xy}$')
    plt.colorbar(orientation='vertical')
    plt.grid()
    if savefigs:
        plt.savefig('numgamaxy.pdf')
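A minimal self-contained sketch of the mgrid + griddata + imshow pattern used above, with synthetic scattered "integration point" data (all names here are illustrative).

import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata

rng = np.random.default_rng(5)
XS = rng.uniform(0, 1, size=(400, 2))            # scattered point coordinates
field = np.sin(4 * XS[:, 0]) * XS[:, 1]          # one solution component at those points

grid_x, grid_y = np.mgrid[0:1:20j, 0:1:20j]
grid_z = griddata(XS, field, (grid_x, grid_y), method='linear')

plt.imshow(grid_z.T, aspect='equal', extent=(0, 1, 0, 1), origin='lower')
plt.colorbar(orientation='vertical')
plt.show()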