Example #1
 def kd_tree(self):
     if self._kd_tree is None:
         self._kd_tree = KDTree(self.coords)
     return self._kd_tree
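A minimal self-contained sketch of the same lazy-initialization pattern (the PointSet class and its coords attribute are assumptions for illustration):

import numpy as np
from scipy.spatial import KDTree

class PointSet:
    """Holds a point cloud and builds its KD-tree only on first use."""

    def __init__(self, coords):
        self.coords = np.asarray(coords)
        self._kd_tree = None  # built lazily

    @property
    def kd_tree(self):
        if self._kd_tree is None:
            self._kd_tree = KDTree(self.coords)
        return self._kd_tree

# usage: the tree is only constructed on the first query
ps = PointSet(np.random.rand(100, 3))
dist, idx = ps.kd_tree.query([0.5, 0.5, 0.5])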
Example #2
    def __init__(self, modelPointCloud, dataPointCloud, **kwargs):
        '''
        Supported Signatures
        modelPointCloud
            The model point cloud is the base to which the data point cloud will be matched
        dataPointCloud
            The data point cloud is transformed so that it matches the model point cloud

        Keyword Arguments:
        maxIterations
            maximum number of iterations to perform, default is 10
            TODO: in the future provide an option to also account for minimum acceptable error
        matchingMethod
            'kdtree'        Use a KD-Tree for nearest neighbor search {default}
            'bruteforce'    Use brute force for nearest neighbor search
        minimizeMethod
            'point'            Use point to point minimization {default}
            'plane'            Use point to plane minimization
        weightMethod
            function that takes indices into the modelPointCloud and returns the weight of those indices
            By default all points are weighted equally
        modelDownsampleFactor
            integer that represents uniform sampling of model point cloud
            1 is no resampling, 2 is every other point, 3 is every third point...
        dataDownsampleFactor
            integer that represents uniform sampling of data point cloud
            1 is no resampling, 2 is every other point, 3 is every third point...

        The ICP process has five steps:
            1: Input Filter
            2: Match
            3: Outlier Filter
            4: Error Minimization
            5: Check if error is less than limits
                yes: we are done
                no: go back to step 2 with new transformation function
        '''
        self.startTime = time.time()

        if 'modelDownsampleFactor' in kwargs and int(kwargs['modelDownsampleFactor']) > 1:
            factor = int(kwargs['modelDownsampleFactor'])
            temp = numpy.zeros(factor, dtype=bool)
            temp[-1] = True
            modelDownSampleIndices = numpy.tile(temp, (modelPointCloud.shape[0] // factor) + 1)[:modelPointCloud.shape[0]]
        else:
            modelDownSampleIndices = numpy.ones(modelPointCloud.shape[0], dtype=bool)
        if 'dataDownsampleFactor' in kwargs and int(kwargs['dataDownsampleFactor']) > 1:
            factor = int(kwargs['dataDownsampleFactor'])
            temp = numpy.zeros(factor, dtype=bool)
            temp[-1] = True
            dataDownSampleIndices = numpy.tile(temp, (dataPointCloud.shape[0] // factor) + 1)[:dataPointCloud.shape[0]]
        else:
            dataDownSampleIndices = numpy.ones(dataPointCloud.shape[0], dtype=bool)

        # TODO: uniform sampling of point clouds
        self.q = modelPointCloud[modelDownSampleIndices]
        self.p = dataPointCloud[dataDownSampleIndices]
        self.matlab = None

        # get kwargs
        if 'maxIterations' in kwargs:
            self.K = int(kwargs['maxIterations'])
        else:
            self.K = 10
        if 'matchingMethod' in kwargs:
            if kwargs['matchingMethod'] == 'bruteforce':
                self.matching = self.matchingBruteForce
            else:
                self.matching = self.matchingKDTree
                self.qKDTree = KDTree(self.q)
        else:
            self.matching = self.matchingKDTree
            self.qKDTree = KDTree(self.q)

        if 'minimizeMethod' in kwargs:
            if kwargs['minimizeMethod'] == 'plane':  # point to plane
                self.minimize = self.minimizePlane
            elif kwargs['minimizeMethod'] == 'fmincon':
                self.minimize = self.minimizeMatlab
                self.matlab = MatlabFmincon()
            elif kwargs['minimizeMethod'] == 'custom':
                self.minimize = self.minimizeCustom
            else:  # point to point
                self.minimize = self.minimizePoint
        else:
            self.minimize = self.minimizePoint

        if 'weightMethod' in kwargs:
            self.weightMethod = kwargs['weightMethod']
        else:
            self.weightMethod = self.weightEqual

        # initialize translation and rotation matrix
        self.transformMatrix = numpy.matrix(numpy.identity(4))
        # initialize list of translations and rotation matrix for each iteration of ICP
        self.totalTransformMatrix = [numpy.matrix(numpy.identity(4))]

        self.pt = self.p.copy()  # transformed point cloud
        self.t = []  # array of times for each iteration of ICP
        self.err = []  # error for each iteration of ICP
        self.Np = self.p.shape[0]  # number of points in data cloud

        # preprocessing finish, log time
        self.t.append(time.time() - self.startTime)
        print('Time for preprocessing:', self.t[-1])
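The match and error-minimization steps above (steps 2 and 4) boil down to a KD-tree nearest-neighbor query followed by a closed-form rigid alignment of the matched pairs. Below is a minimal sketch of one such iteration, independent of the class above; the SVD-based (Kabsch) solver stands in for a point-to-point minimizer like minimizePoint, and all names are assumptions:

import numpy as np
from scipy.spatial import KDTree

def icp_step(q, p):
    """One ICP iteration: match data cloud p against model cloud q,
    then find the rigid transform (R, t) minimizing point-to-point error."""
    # step 2: match every data point to its nearest model point
    tree = KDTree(q)
    _, idx = tree.query(p)
    matched = q[idx]
    # step 4: closed-form point-to-point minimization (Kabsch / SVD)
    mu_p, mu_q = p.mean(axis=0), matched.mean(axis=0)
    H = (p - mu_p).T @ (matched - mu_q)
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    if np.linalg.det(R) < 0:  # guard against reflections
        Vt[-1] *= -1
        R = Vt.T @ U.T
    t = mu_q - R @ mu_p
    return p @ R.T + t, R, t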

Example #3
if __name__ == "__main__":
    ddir = '/Users/mireland/data/veloce/180919/ccd_3/'
    files = glob.glob(ddir + '*3035[678].fits')
    print("Finding a median LFC frame from 19 Sep...")
    medframe1 = get_median_frame(files)

    badpix = default_badpix()
    if os.path.isfile('peaks.txt'):
        peaks = np.loadtxt('peaks.txt', dtype=np.int16)
    else:
        #plt.imshow( (1-badpix)*medframe1, aspect='auto', vmin=0, vmax=50)
        brightpix = np.where((1 - badpix) * medframe1 > 100)
        brightpix = np.array(brightpix).T
        peak_tree = KDTree(brightpix)
        npts = brightpix.shape[0]
        peaks = []
        print("Iterating over bright pixels...")
        npeaks = 0
        for pt in brightpix:
            neighbors = peak_tree.query_ball_point(pt, PEAK_SEARCH_RAD)
            fluxes = [medframe1[tuple(brightpix[n])] for n in neighbors]
            candidate = list(brightpix[neighbors[np.argmax(fluxes)]])
            if candidate not in peaks:
                peaks.append(candidate)
                npeaks += 1
                if npeaks % 100 == 0:
                    print("found {:d} peaks...".format(npeaks))

        peaks = np.array(peaks)
Example #4
 def _rebuild_kdtree(self):
     self.kdtree = KDTree([ind.bd for ind in self.grid.values()])
Example #5
    def __init__(self, model, T=800 * 3600 * 24, NS=14, NT=800 * 24):
        self.model = model
        self.mesh = model.space_mesh(n=NS)
        self.timeline = model.time_mesh(T=T, n=NT)

        self.GD = model.GD
        if self.GD == 2:
            self.vspace = RaviartThomasFiniteElementSpace2d(self.mesh,
                                                            p=0)  # velocity space
        elif self.GD == 3:
            self.vspace = RaviartThomasFiniteElementSpace3d(self.mesh, p=0)

        self.pspace = self.vspace.smspace  # space for pressure and saturation, piecewise constant
        self.cspace = LagrangeFiniteElementSpace(self.mesh, p=1)  # displacement space

        # values of the physical quantities at the previous time level
        self.v = self.vspace.function()  # velocity function
        self.p = self.pspace.function()  # pressure function
        self.s = self.pspace.function()  # water saturation function; defaults to 0, i.e. the initial water saturation in the domain is 0
        self.u = self.cspace.function(dim=self.GD)  # displacement function
        self.phi = self.pspace.function()  # porosity function, piecewise constant

        # values of the physical quantities at the current time level, used to
        # hold intermediate results; the model coefficients are computed from
        # the current values
        self.cv = self.vspace.function()  # velocity function
        self.cp = self.pspace.function()  # pressure function
        self.cs = self.pspace.function()  # water saturation function; defaults to 0, i.e. the initial water saturation in the domain is 0
        self.cu = self.cspace.function(dim=self.GD)  # displacement function
        self.cphi = self.pspace.function()  # porosity function, piecewise constant

        self.isFCell = model.is_fracture_cell(self.mesh)
        # initial values
        self.p[:] = model.rock['initial pressure']  # MPa

        self.phi[~self.isFCell] = model.rock['porosity']  # initial porosity
        self.phi[self.isFCell] = model.fracture['porosity']  # initial porosity

        self.cp[:] = self.p  # initial formation pressure
        self.cphi[:] = self.phi  # current porosity coefficient

        # source terms.  TODO: note this assumes a structured mesh; other meshes require code changes

        node = self.mesh.entity('node')
        tree = KDTree(node)

        _, loc0 = tree.query(self.model.p0)
        _, loc1 = tree.query(self.model.p1)
        print(loc0)
        print(loc1)

        self.fo = self.cspace.function()
        self.fo[825] = -self.model.oil['production rate']  # production (hard-coded node index; cf. loc0 printed above)

        self.fw = self.cspace.function()
        self.fw[loc1] = self.model.water['injection rate']  # injection

        # some constant matrices and vectors

        # velocity divergence matrix for the velocity equation, (\nabla\cdot v, w)
        self.B = self.vspace.div_matrix()

        # displacement divergence matrix for the pressure equation, (\nabla\cdot u, w)
        # * note this exploits the fact that the pressure space is piecewise
        #   constant, and that derivatives of linear functions are piecewise constant too
        c = self.mesh.entity_measure('cell')
        c *= self.model.rock['biot']

        val = self.mesh.grad_lambda()  # (NC, TD+1, GD)
        val *= c[:, None, None]
        pc2d = self.pspace.cell_to_dof()
        cc2d = self.cspace.cell_to_dof()
        pgdof = self.pspace.number_of_global_dofs()
        cgdof = self.cspace.number_of_global_dofs()
        I = np.broadcast_to(pc2d, shape=cc2d.shape)
        J = cc2d
        self.PU0 = csr_matrix((val[..., 0].flat, (I.flat, J.flat)),
                              shape=(pgdof, cgdof))
        self.PU1 = csr_matrix((val[..., 1].flat, (I.flat, J.flat)),
                              shape=(pgdof, cgdof))

        if self.GD == 3:
            self.PU2 = csr_matrix((val[..., 2].flat, (I.flat, J.flat)),
                                  shape=(pgdof, cgdof))

        # right-hand side vector for the linear elasticity matrix
        sigma0 = self.pspace.function()
        sigma0[:] = self.model.rock['initial stress']
        self.FU = np.zeros(self.GD * cgdof, dtype=np.float64)
        self.FU[0 * cgdof:1 * cgdof] -= self.p @ self.PU0
        self.FU[1 * cgdof:2 * cgdof] -= self.p @ self.PU1

        if self.GD == 3:
            self.FU[2 * cgdof:3 * cgdof] -= self.p @ self.PU2

        # initial stress and equivalent stress terms
        self.FU[0 * cgdof:1 * cgdof] -= sigma0 @ self.PU0
        self.FU[1 * cgdof:2 * cgdof] -= sigma0 @ self.PU1
        if self.GD == 3:
            self.FU[2 * cgdof:3 * cgdof] -= sigma0 @ self.PU2

        # vtk file output
        node, cell, cellType, NC = self.mesh.to_vtk()
        self.points = vtk.vtkPoints()
        self.points.SetData(vnp.numpy_to_vtk(node))
        self.cells = vtk.vtkCellArray()
        self.cells.SetCells(NC, vnp.numpy_to_vtkIdTypeArray(cell))
        self.cellType = cellType
Example #6
def interpolate(y,
                z,
                f,
                ynew,
                znew,
                yPeriodic=0.0,
                K=31,
                R=None,
                verbose=True):
    # find zeroth derivatives of f at every point in a point cloud
    # y,z,f are 1D arrays of the known field
    # ynew, znew are 1D arrays of the field to which we interpolate
    # if yPeriodic>0., points will be copied by that distance in y
    # K is the max number of neighbors. The fewest possible will be used, though

    K = int(K)
    Nf = len(f)
    Nnew = len(ynew)

    if yPeriodic > 0.0:
        ycut = np.min(y) + (yPeriodic / 2.)
        sel0 = (y < ycut)
        sel1 = (y > ycut)

        y = np.append(np.append(y, y[sel0] + yPeriodic), y[sel1] - yPeriodic)
        z = np.append(np.append(z, z[sel0]), z[sel1])
        f = np.append(np.append(f, f[sel0]), f[sel1])

    tree = KDTree(np.column_stack((y, z)))  # build the tree
    # query tree and get distances and integers of K points near every point:
    queries = tree.query(np.column_stack((ynew, znew)), K)
    KInds = queries[1]  # the indices; distances are in [0]. Matrix of Nnew by K indices

    # get R
    if R is None:
        t0 = time.time()
        R = np.zeros((Nnew, 3, K))
        maxmaxK = 0
        for ip in range(Nnew):  # only over the points where we want the interpolant
            I = KInds[ip, :]  # indices of the K neighbor points
            A, W = getAnW_firstDerivs(y[I], z[I], ynew[ip], znew[ip])
            A, W, Keff = minimizeWeights(A, W, ynew[ip], znew[ip])
            maxmaxK = np.max([maxmaxK, Keff])
            R[ip, :, :] = np.matmul(
                np.matmul(scipy.linalg.inv(np.matmul(np.matmul(A.T, W), A)), A.T), W)
        t1 = time.time()
        if verbose:
            print(
                'meshlessFish.interpolate: Got R in %.8f seconds. Highest number of neighbors used anywhere: %i'
                % ((t1 - t0), maxmaxK))

    # assemble the known part of the system
    known = np.zeros((Nnew, K, 1))
    for ip in range(Nnew):  # only over the points where we want the interpolant
        I = KInds[ip, :]  # indices of the K neighbor points
        known[ip, :, 0] = f[I]

    t0 = time.time()
    out = np.matmul(R, known)  # batched matrix multiplication; matrices are in the last two dimensions
    fnew = np.squeeze(out[:, 0])

    return fnew
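A hedged usage sketch for the interpolator above, assuming it is importable together with its helpers (getAnW_firstDerivs, minimizeWeights) from the meshlessFish module its log message mentions:

import numpy as np
from meshlessFish import interpolate  # assumed module layout

y = np.random.rand(500)
z = np.random.rand(500)
f = np.sin(2 * np.pi * y) * np.cos(2 * np.pi * z)  # known scattered field

ynew = np.random.rand(50)
znew = np.random.rand(50)
fnew = interpolate(y, z, f, ynew, znew, K=15)  # interpolated values at the new points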
Example #7
def solvePoisson(y,
                 z,
                 s,
                 dyzero,
                 dzzero,
                 yb,
                 zb,
                 fb,
                 f0=None,
                 yPeriodic=0.0,
                 K=31,
                 alpha=0.5,
                 TOL=1e-10,
                 maxit=1e6,
                 R=None,
                 verbose=True):
    '''
    d2fdy2 + d2fdz2 = s ... solve for f
    y,z,s are 1D arrays
    s is the RHS of the poisson eq. A constant source term
    
    Zero-gradient boundaries:
    dyzero and dzzero are bool arrays of f.shape,
    they are true where the respective gradient is to be zero and false elsewhere
    
    Dirichlet boundaries:
    yb,zb,fb are 1D arrays for fixed boundaries. The given fb will not change.
    Points must not coincide exactly with any in y,z;
    i.e. all points in ([y, yb], [z, zb]) must be unique.
    Otherwise the closest neighbor has zero distance and we get NaNs.
    
    if yPeriodic>0., points will be copied by that distance in y
    
    f0 is an initial field, optional
    
    K is the max number of neighbors. The fewest possible will be used, so
    cranking this up only slows down the computation without changing the
    solution. The actually used number maxmaxK is printed; next time around it
    is OK to set K to that number, which can save computational cost. The
    algorithm might run with an even lower number, but then the result would be
    wrong. So always try with very high values and move down from there, not
    up from lower values of K until it works; that could give wrong results.
    
    alpha is the relaxation factor. Must be positive and probably best <1.0
    
    Iteration stops when the residual < TOL or the iteration count reaches maxit.
    
    Could pass R from a previous run on the same mesh to save time
    '''

    K = int(K)
    Nf = len(y)  # number of field points
    Nb = len(fb)  # number of boundary points
    if f0 is None:
        f = np.zeros(Nf)  # initial field values
    else:
        f = f0

    # append Dirichlet boundary, then potentially mirror for periodic boundary in y.
    # Dirichlet boundary points don't have zero gradients and will not be solved for,
    # so whatever value is in fb will stay like that
    y = np.append(y, yb)
    z = np.append(z, zb)
    f = np.append(f, fb)

    # create ghost points for periodic boundary if yPeriodic>0.
    ghostMap = np.arange(
        Nf + Nb, dtype=int)  # the first points are mapped to themselves
    if yPeriodic > 0.0:
        ycut = np.min(y) + (yPeriodic / 2.)
        sel0 = (y < ycut)
        sel1 = (y > ycut)

        y = np.append(np.append(y, y[sel0] + yPeriodic), y[sel1] - yPeriodic)
        z = np.append(np.append(z, z[sel0]), z[sel1])
        f = np.append(np.append(f, f[sel0]), f[sel1])
        ghostMap = np.append(np.append(ghostMap, ghostMap[sel0]),
                             ghostMap[sel1])
        # added points are mapped to original points that will be updated
        # so now f=f[ghostMap] updates the ghost values if f[:Nf] have changed

    t0 = time.time()
    tree = KDTree(np.column_stack((y, z)))  # build the tree
    # query tree and get distances and integers of K points near every point:
    KDists, KInds = tree.query(np.column_stack((y, z)), K)
    t1 = time.time()
    if verbose:
        print('Found neighbors in %.8f seconds.' % (t1 - t0))

    # get R
    if R is None:
        t0 = time.time()
        R = np.zeros((Nf, 6, K + 2))
        maxmaxK = 0
        nUsedNeighbors = np.zeros(Nf)
        for ip in range(Nf):  # only over the points we solve for
            I = KInds[ip, :]  # indices of the K neighbor points
            A, W = getAnW_poisson(y[I], z[I], y[ip], z[ip])
            if dyzero[ip]:  # zero-gradient BC
                A[1, 1] = 1.0  # gradient is whatever we put in the known-array
                A[0, 3] = 0.
                A[0, 4] = 0.  # we define a gradient instead of a rhs
            if dzzero[ip]:  # zero-gradient BC
                A[1, 2] = 1.0
                A[0, 3] = 0.
                A[0, 4] = 0.  # we define a gradient instead of a rhs
            # Keff includes the two constraints, not just neighbors!
            A, W, Keff = minimizeWeights(A, W, y[ip], z[ip])
            maxmaxK = np.max([maxmaxK, Keff])
            nUsedNeighbors[ip] = Keff - 2  # two constraints
            R[ip, :, :] = np.matmul(
                np.matmul(scipy.linalg.inv(np.matmul(np.matmul(A.T, W), A)), A.T), W)
        t1 = time.time()
        if verbose:
            print('Got R in %.8f seconds.' % (t1 - t0))
            print('Highest number of neighbors used anywhere: %i' %
                  np.max(nUsedNeighbors))
            print('Lowest  number of neighbors used anywhere: %i' %
                  np.min(nUsedNeighbors))
            print('Average number of neighbors used: %f' %
                  np.mean(nUsedNeighbors))

    # assemble the part of the known vectors that uses the constant source term
    knownConst = np.zeros((Nf, K + 2, 1))
    for ip in range(Nf):  # only over the points we solve for
        I = KInds[ip, :]  # indices of the K neighbor points
        if (not dyzero[ip]) and (not dzzero[ip]):
            # define RHS for this point instead of gradient BC
            knownConst[ip, 0, 0] = s[ip]
        knownConst[ip, 1, 0] = 0.0  # gradient in z

    t0 = time.time()
    f, res = iteratePoisson(f, knownConst, R, KInds, ghostMap, Nf, K, alpha,
                            maxit, TOL, verbose)
    t1 = time.time()

    if verbose:
        # solve the full system rather than just f:
        knownf = np.zeros((Nf, K + 2, 1))  # the part of the known vector that is updated as f changes
        for ip in range(Nf):  # only over the points we solve for
            I = KInds[ip, :]  # indices of the K neighbor points
            knownf[ip, 2:, 0] = f[I]
        out = np.matmul(R, knownf + knownConst)  # batched matrix multiplication; matrices are in the last two dimensions

        print('Calculated f in %.6f minutes' % ((t1 - t0) / 60.))
        print('Final residual = %.6e' % res)

        d2fdy2 = np.squeeze(out[:, 3])
        d2fdz2 = np.squeeze(out[:, 4])
        err = np.mean((d2fdy2 + d2fdz2 - s)**2.)**0.5
        print('Average error between LHS and RHS = %.6e' % err)

        if np.sum(dyzero) > 0:
            sel, = np.where(dyzero)
            print('Average dfdy=0 = %.6e' % np.mean(
                (np.squeeze(out[:, 1])[sel])**2.)**0.5)
        if np.sum(dzzero) > 0:
            sel, = np.where(dzzero)
            print('Average dfdz=0 = %.6e' % np.mean(
                (np.squeeze(out[:, 2])[sel])**2.)**0.5)

    return f[:Nf], R  # could also return gradients
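A hedged usage sketch for solvePoisson, showing the expected argument shapes (pure Dirichlet case; the circular boundary is an arbitrary choice for illustration):

import numpy as np

N = 400
y = np.random.rand(N)             # interior point cloud
z = np.random.rand(N)
s = np.full(N, -1.0)              # constant source term

dyzero = np.zeros(N, dtype=bool)  # no zero-gradient constraints here
dzzero = np.zeros(N, dtype=bool)

theta = np.linspace(0.0, 2.0 * np.pi, 100, endpoint=False)
yb = 0.5 + 0.6 * np.cos(theta)    # Dirichlet ring around the cloud
zb = 0.5 + 0.6 * np.sin(theta)
fb = np.zeros_like(yb)            # f fixed to 0 on the ring

f, R = solvePoisson(y, z, s, dyzero, dzzero, yb, zb, fb, K=31, alpha=0.5)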
Example #8
 def update(self, new_bd):
     oldsize = len(self.all_bd)
     self.all_bd = self.all_bd + new_bd
     self.kdtree = KDTree(self.all_bd)
Example #9
 def __init__(self, lbd, k=15):
     self.all_bd = lbd
     self.kdtree = KDTree(self.all_bd)
     self.k = k
Example #10
def icp(a, b, initial=np.identity(4), 
        threshold=1e-5, max_iterations=20, **kwargs):

    """
    Apply the iterative closest point algorithm to align a point cloud with 
    another point cloud or mesh. Will only produce reasonable results if the
    initial transformation is roughly correct. Initial transformation can be
    found by applying Procrustes' analysis to a suitable set of landmark
    points (often picked manually).

    Parameters
    ----------
    a              : (n,3) float, list of points in space.
    b              : (m,3) float or Trimesh, list of points in space or mesh.
    initial        : (4,4) float, initial transformation.
    threshold      : float, stop when change in cost is less than threshold
    max_iterations : int, maximum number of iterations
    kwargs         : dict, args to pass to procrustes
    
    Returns
    ----------
    matrix      : (4,4) float, the transformation matrix sending a to b
    transformed : (n,3) float, the image of a under the transformation
    cost        : float, the cost of the transformation
    """
    
    a = np.asanyarray(a, dtype=np.float64)
    if not util.is_shape(a, (-1, 3)):
        raise ValueError('points must be (n,3)!')
                
    is_mesh = isinstance(b, base.Trimesh)
    if not is_mesh:
        b = np.asanyarray(b, dtype=np.float64)
        if not util.is_shape(b, (-1, 3)):
            raise ValueError('points must be (n,3)!')
        btree = KDTree(b)

    # Transform a under initial_transformation
    a = transform_points(a, initial) 
    total_matrix = initial
    
    n_iteration = 0
    old_cost = np.inf
    while n_iteration < max_iterations:
        n_iteration += 1
        
        # Closest point in b to each point in a
        if is_mesh:
            closest, distance, faces = b.nearest.on_surface(a)
        else:
            distances, ix = btree.query(a, 1)
            closest = b[ix]
        
        # Align a with closest points
        matrix, transformed, cost = procrustes(a, closest, **kwargs)

        # Update a 
        a = transformed
        total_matrix =  np.dot(matrix, total_matrix)
        
        if old_cost - cost < threshold:
            break
        else:
            old_cost = cost
         
    return total_matrix, transformed, cost
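This appears to be trimesh's registration routine; a usage sketch that aligns a slightly rotated and shifted copy of a cloud back onto the original, calling the function defined above directly:

import numpy as np

a = np.random.rand(200, 3)
theta = np.deg2rad(5.0)
Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0,            0.0,           1.0]])
b = a @ Rz.T + [0.01, -0.02, 0.0]  # rotated and shifted copy of a

# identity is a close enough initial guess for such a small perturbation
matrix, transformed, cost = icp(a, b, max_iterations=50)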
Example #11
    def get_evaluate_cache(self,
                           cache=None,
                           share_geometry=False,
                           verbose=False):
        """
        Get the evaluate cache for :func:`Variable.evaluate_at()
        <sfepy.discrete.variables.Variable.evaluate_at()>`.

        Parameters
        ----------
        cache : Struct instance, optional
            Optionally, use the provided instance to store the cache data.
        share_geometry : bool
            Set to True to indicate that all the evaluations will work on the
            same region. Certain data are then computed only for the first
            probe and cached.
        verbose : bool
            If False, reduce verbosity.

        Returns
        -------
        cache : Struct instance
            The evaluate cache.
        """
        import time

        try:
            from scipy.spatial import cKDTree as KDTree
        except ImportError:
            from scipy.spatial import KDTree

        from sfepy.discrete.fem.geometry_element import create_geometry_elements

        if cache is None:
            cache = Struct(name='evaluate_cache')

        tt = time.perf_counter()
        if (cache.get('cmesh', None) is None) or not share_geometry:
            mesh = self.create_mesh(extra_nodes=False)
            cache.cmesh = cmesh = mesh.cmesh

            gels = create_geometry_elements()

            cmesh.set_local_entities(gels)
            cmesh.setup_entities()

            cache.centroids = cmesh.get_centroids(cmesh.tdim)

            if self.gel.name != '3_8':
                cache.normals0 = cmesh.get_facet_normals()
                cache.normals1 = None

            else:
                cache.normals0 = cmesh.get_facet_normals(0)
                cache.normals1 = cmesh.get_facet_normals(1)

        output('cmesh setup: %f s' % (time.perf_counter() - tt), verbose=verbose)

        tt = time.perf_counter()
        if (cache.get('kdtree', None) is None) or not share_geometry:
            cache.kdtree = KDTree(cmesh.coors)

        output('kdtree: %f s' % (time.perf_counter() - tt), verbose=verbose)

        return cache
Example #12
data_samples = int(np.shape(obs_data)[0] / num_pts)
wMET = []
hMET = []
subset_time = time_data[::num_pts]
for nSample in range(data_samples):
    subset_W = obs_W[nSample * num_pts:(nSample + 1) * num_pts]
    subset_H = obs_H[nSample * num_pts:(nSample + 1) * num_pts]
    aveVal_W = np.mean(subset_W)
    aveVal_H = np.mean(subset_H)
    wMET.append(aveVal_W)
    hMET.append(aveVal_H)

#get grid location of where CSU is
print('.....finding closest model location')
grid_coord_atm = np.array(list(zip(UTMy.ravel(), UTMx.ravel())))
gridTree = KDTree(grid_coord_atm)
METdist, METgrid_id = gridTree.query([rx.met_lcn[1], rx.met_lcn[0]])
METidx = np.unravel_index(METgrid_id, np.shape(UTMx))

#get windspeed and height vector at the micromet tower location
wrf_W = w[:, :, METidx[0], METidx[1]]
z_vector = np.mean(z[:, :, METidx[0], METidx[1]], 0)

#create timeseries of WRF wind averaged to the same interval
wWRF = []
for nSample in range(rx.run_min):
    set_len = int(ave_int_W / rx.hist_int)
    subset = wrf_W[nSample * set_len:(nSample + 1) * set_len, (0, 1)]
    aveSet = np.mean(subset, 0)
    wWRF.append(aveSet)
Example #13
				vectors.append(vec)
			vocab.append(word)
	return vectors, vocab, word_vector_dim

vec,vocab,dim = read_vector_file(vec_file)
vocab_index=dict()
for i in range(0, len(vocab)):
	vocab_index[vocab[i]] = i
num_users = len(vocab)
print("num users in train sequences", num_users)
# print "users removed from vocab", len(set(users_train)-set(vocab))
# print "users in test sequences but not in vocab", len(users_test-set(vocab))

# building kd-tree
tic = time.perf_counter()
kd = KDTree(vec, leafsize=10)
toc = time.perf_counter()
print("kd-tree built in", (toc - tic) * 1000, "ms")

def get_Nranked_list_kdtree(query_set,N):
	try:
		query_set_ind = [ vocab_index[query] for query in query_set ]
	except KeyError:
		print("query word not present")
		return
	query_vec = [vec[i] for i in query_set_ind]
	#?use distance_upper_bound for set_size queries sequentially
	#?N+1 wrong as N unique elements may not be there after merging lists from which query_set_index have been removed
	d_list,knn_list = kd.query(query_vec,k=N+len(query_set_ind)) #, eps=eps)
	#?use heap of size set_size and push top elements from set_size ranked list until N elements are popped
	index_dist_list = []
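The over-fetching above (k=N+len(query_set_ind)) leaves room to drop the query points themselves afterwards. A minimal sketch of that post-filtering step (the helper and its names are assumptions):

import numpy as np

def drop_self_matches(knn_list, d_list, query_set_ind, N):
	# merge the per-query neighbor lists, sort by distance,
	# drop the query indices, de-duplicate, and keep the N best
	idx = np.asarray(knn_list).ravel()
	dist = np.asarray(d_list).ravel()
	order = np.argsort(dist)
	seen, out = set(query_set_ind), []
	for i in idx[order]:
		if i not in seen:
			seen.add(i)
			out.append(i)
		if len(out) == N:
			break
	return out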
Example #14
def doAll(tiPath, tiFile, oiPath, oiFile, oiImgPath, oiImgFile, savePath,
          origImgName, templateImgName):

    imgW = 800
    imgH = 800
    #imgW=2400
    #imgH=2400
    oiMatch = BlindMatch(imgW, imgH)
    tiMatch = BlindMatch(imgW, imgH)

    oiData = np.loadtxt("%s/%s" % (oiPath, oiFile))
    tiData = np.loadtxt("%s/%s" % (tiPath, tiFile))
    print("oiData=%d" % (oiData.shape[0]))
    print("tiData=%d" % (tiData.shape[0]))

    tiXY, mchIdxsTi = tiMatch.createBlindMatchFeatures(tiData)
    oiXY, mchIdxsOi = oiMatch.createBlindMatchFeatures(oiData)
    #print("mchIdxsTi=%d"%(len(mchIdxsTi)))
    #print("mchIdxsOi=%d"%(len(mchIdxsOi)))
    ''' 
    tpath = "data/tipos%d.reg"%(len(tiXY))
    print(tpath)
    tiMatch.saveReg(tiXY, tpath, radius=18, width=3, color='red')
    tpath = "data/oipos%d.reg"%(len(oiXY))
    print(tpath)
    tiMatch.saveReg(oiXY, tpath, radius=18, width=3, color='red')
    
    tfeature = []
    for td1 in mchIdxsTi:
        for td2 in td1:
            tfeature.append([td2[0],td2[1]])
    tpath = "data/tifeature%d.reg"%(len(tfeature))
    print(tpath)
    tiMatch.saveReg(tfeature, tpath, radius=12, width=3, color='green')
    
    tfeature = []
    for td1 in mchIdxsOi:
        for td2 in td1:
            tfeature.append([td2[0],td2[1]])
    tpath = "data/oifeature%d.reg"%(len(tfeature))
    print(tpath)
    tiMatch.saveReg(tfeature, tpath, radius=12, width=3, color='green')
    '''
    if len(tiXY) == 0:
        #print("%s create feature failure"%(tiFile))
        return (-1, )
    elif len(oiXY) == 0:
        #print("%s create feature failure"%(oiFile))
        return (-1, )
    else:
        tarray = np.array(mchIdxsTi)
        tDist = tarray[:, :, 2]
        tiTree = KDTree(tDist)

        totalMatchNum = 0
        mchList = []
        for i, oIdx in enumerate(mchIdxsOi):
            td = oIdx[:, 2]
            mchIdx = tiTree.query_ball_point(td, 30)  #kdTree match

            if len(mchIdx) > 0:
                for ii, tidx0 in enumerate(mchIdx):
                    tdata00 = tarray[tidx0]
                    dm, isMchOk = oiMatch.blindDistMatch(oIdx, tdata00, 1,
                                                         4)  #blind match 8
                    if isMchOk:
                        #print("query %d KDTree match %d, precisely match %dth with %d point"%(i, len(mchIdx), ii, len(dm)))
                        #print(dm)
                        omIdx = dm[:, 0]
                        tmIdx = dm[:, 1]
                        oxy01 = oiXY[i]
                        txy02 = tiXY[tidx0]
                        totalMatchNum += 1

                        opos = omIdx[:, 0:2]
                        tpos = tmIdx[:, 0:2]
                        oxy1 = np.concatenate([opos, [oxy01]])
                        txy1 = np.concatenate([tpos, [txy02]])
                        mchList.append((oxy1, txy1))
                        '''     
                        ox1 = omIdx[:,0]
                        oy1 = omIdx[:,1]
                        tx2 = tmIdx[:,0]
                        ty2 = tmIdx[:,1]
                        oiMatch.plotBlindMatch(oxy01[0],oxy01[1],ox1,oy1,txy02[0],txy02[1],tx2,ty2)
                        '''

                        break

        if len(mchList) > 1:
            print("total Match key points %d" % (totalMatchNum))
            starOiTi, xshift, yshift, xrotation, yrotation, blindStarNum = tiMatch.posTransPolynomial(
                mchList, oiData, 2)  # posTransPolynomial posTransPerspective
            print(xshift, yshift, xrotation, yrotation, blindStarNum)
            mchRadius = 4
            crossMatch = CrossMatch(imgW, imgH)
            crossMatch.createRegionIdx(tiData)
            mchPosPairs, orgPosIdxs = crossMatch.xyMatch(starOiTi, mchRadius)
            oiDataMch = oiData[orgPosIdxs]
            '''  '''
            oiMchPos = oiDataMch[:, 0:2]
            tiMchPos = mchPosPairs[:, 2:4]
            starOiTiPly2 = tiMatch.posTransPolynomial2(oiMchPos, tiMchPos,
                                                       oiData, oiImgFile,
                                                       oiImgPath, savePath,
                                                       origImgName,
                                                       templateImgName, 3)

            mchPosPairs, orgPosIdxs = crossMatch.xyMatch(starOiTiPly2, 4)

            #print(mchPosPairs.shape)
            #print(mchPosPairs[:3])
            mchRatios2, oiPosJoin2,tiPosJoin2, mchData2, xshift2,yshift2, xrms2, yrms2 \
                = crossMatch.evaluateMatchResult(starOiTiPly2, tiData, mchPosPairs)
            #print("mchRatios2, oiPosJoin2,tiPosJoin2, mchData2, xshift2,yshift2, xrms2, yrms2")
            #print((mchRatios2, oiPosJoin2,tiPosJoin2, mchData2, xshift2,yshift2, xrms2, yrms2))

            return (totalMatchNum, xshift, yshift, xrotation, yrotation,
                    blindStarNum, mchRatios2)

        else:
            print("blindmatch: no feature point match")
            return (0, )
Example #15
    def __init__(self, conf, context=None, **kwargs):
        from sfepy.discrete.state import State
        from sfepy.discrete import Problem
        from sfepy.base.conf import ProblemConf, get_standard_keywords
        from scipy.spatial import cKDTree as KDTree

        ScipyDirect.__init__(self, conf, context=context, **kwargs)

        # init subproblems
        problem = self.context
        pb_vars = problem.get_variables()
        # get "master" DofInfo and last index
        pb_adi_indx = problem.equations.variables.adi.indx
        self.adi_indx = pb_adi_indx.copy()
        last_indx = -1
        for ii in six.itervalues(self.adi_indx):
            last_indx = nm.max([last_indx, ii.stop])

        # coupling variables
        self.cvars_to_pb = {}
        for jj in conf.coupling_variables:
            self.cvars_to_pb[jj] = [None, None]
            if jj in pb_vars.names:
                if pb_vars[jj].dual_var_name is not None:
                    self.cvars_to_pb[jj][0] = -1

                else:
                    self.cvars_to_pb[jj][1] = -1

        # init subproblems
        self.subpb = []
        required, other = get_standard_keywords()
        master_prefix = output.get_output_prefix()
        for ii, ifname in enumerate(conf.others):
            sub_prefix = master_prefix[:-1] + '-sub%d:' % (ii + 1)
            output.set_output_prefix(sub_prefix)
            kwargs['master_problem'] = problem
            confi = ProblemConf.from_file(ifname,
                                          required,
                                          other,
                                          define_args=kwargs)
            pbi = Problem.from_conf(confi, init_equations=True)
            sti = State(pbi.equations.variables)
            pbi.equations.set_data(None, ignore_unknown=True)
            pbi.time_update()
            pbi.update_materials()
            sti.apply_ebc()
            pbi_vars = pbi.get_variables()
            output.set_output_prefix(master_prefix)
            self.subpb.append([pbi, sti, None])

            # append "slave" DofInfo
            for jj in pbi_vars.names:
                if not (pbi_vars[jj].is_state()):
                    continue

                didx = pbi.equations.variables.adi.indx[jj]
                ndof = didx.stop - didx.start
                if jj in self.adi_indx:
                    if ndof != \
                      (self.adi_indx[jj].stop - self.adi_indx[jj].start):
                        raise ValueError('DOFs do not match!')

                else:
                    self.adi_indx.update(
                        {jj: slice(last_indx, last_indx + ndof, None)})
                    last_indx += ndof

            for jj in conf.coupling_variables:
                if jj in pbi_vars.names:
                    if pbi_vars[jj].dual_var_name is not None:
                        self.cvars_to_pb[jj][0] = ii

                    else:
                        self.cvars_to_pb[jj][1] = ii

        self.subpb.append([problem, None, None])

        self.cvars_to_pb_map = {}
        for varname, pbs in six.iteritems(self.cvars_to_pb):
            # match field nodes
            coors = []
            for ii in pbs:
                pbi = self.subpb[ii][0]
                pbi_vars = pbi.get_variables()
                fcoors = pbi_vars[varname].field.coors
                dc = nm.abs(nm.max(fcoors, axis=0)\
                            - nm.min(fcoors, axis=0))
                ax = nm.where(dc > 1e-9)[0]
                coors.append(fcoors[:, ax])

            if len(coors[0]) != len(coors[1]):
                raise ValueError('number of nodes does not match!')

            kdtree = KDTree(coors[0])
            map_12 = kdtree.query(coors[1])[1]

            pbi1 = self.subpb[pbs[0]][0]
            pbi1_vars = pbi1.get_variables()
            eq_map_1 = pbi1_vars[varname].eq_map

            pbi2 = self.subpb[pbs[1]][0]
            pbi2_vars = pbi2.get_variables()
            eq_map_2 = pbi2_vars[varname].eq_map

            dpn = eq_map_2.dpn
            nnd = map_12.shape[0]

            map_12_nd = nm.zeros((nnd * dpn, ), dtype=nm.int32)
            if dpn > 1:
                for ii in range(dpn):
                    map_12_nd[ii::dpn] = map_12 * dpn + ii
            else:
                map_12_nd = map_12

            idx = nm.where(eq_map_2.eq >= 0)[0]
            self.cvars_to_pb_map[varname] = eq_map_1.eq[map_12[idx]]
Example #16
 def setup_method(self):
     Test_small.setup_method(self)
     self.kdtree = KDTree(self.data, leafsize=1)
Example #17
def getDistancesFromAtoB(a, b):
    # Quick nearest-neighbor lookup
    kdTree = KDTree(a, leafsize=100)
    return kdTree.query(b, k=1, eps=0, p=2)[0]
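For reference, the returned array holds one entry per point of b: the Euclidean distance to its nearest neighbor in a. A quick sketch calling the function above:

import numpy as np
from scipy.spatial import KDTree

a = np.random.rand(1000, 3)
b = np.random.rand(10, 3)
nearest_dists = getDistancesFromAtoB(a, b)  # shape (10,)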
Example #18
 def setup_method(self):
     self.data = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
                           [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
     self.kdtree = KDTree(self.data)
Example #19
def getFirstDerivs(y, z, f, yPeriodic=0.0, K=31, R=None, verbose=True):
    # find first derivatives of f at every point in a point cloud
    # y,z,f are 1D arrays
    # if yPeriodic>0., points will be copied by that distance in y
    # K is the max number of neighbors. The fewest possible will be used, though

    K = int(K)
    Nf = len(f)

    if yPeriodic > 0.0:
        ycut = np.min(y) + (yPeriodic / 2.)
        sel0 = (y < ycut)
        sel1 = (y > ycut)

        y = np.append(np.append(y, y[sel0] + yPeriodic), y[sel1] - yPeriodic)
        z = np.append(np.append(z, z[sel0]), z[sel1])
        f = np.append(np.append(f, f[sel0]), f[sel1])

#    plt.plot(y,z,'.g')

    tree = KDTree(np.column_stack((y, z)))  # build the tree
    # query tree and get distances and integers of K points near every point:
    queries = tree.query(np.column_stack((y, z)), K)
    KInds = queries[1]  # the indices; distances are in [0]. Matrix of Nf by K indices

    # get R
    if R is None:
        t0 = time.time()
        R = np.zeros((Nf, 3, K))
        maxmaxK = 0
        for ip in range(Nf):  # only over the points where we want the derivatives
            I = KInds[ip, :]  # indices of the K neighbor points
            A, W = getAnW_firstDerivs(y[I], z[I], y[ip], z[ip])
            A, W, Keff = minimizeWeights(A, W, y[ip], z[ip])
            maxmaxK = np.max([maxmaxK, Keff])
            R[ip, :, :] = np.matmul(
                np.matmul(scipy.linalg.inv(np.matmul(np.matmul(A.T, W), A)), A.T), W)
        t1 = time.time()
        if verbose:
            print(
                'Got R in %.8f seconds. Highest number of neighbors used anywhere: %i'
                % ((t1 - t0), maxmaxK))
        # about maxmaxK: next time around it would be OK to set K to that
        # number, which could save computational cost. The algorithm might work
        # for an even lower number but then the result would be wrong. So
        # always try with very high values and move down from there, not
        # up from lower values of K until it works, that could give wrong results.

    # assemble the known part of the system
    known = np.zeros((Nf, K, 1))
    for ip in range(Nf):  # only over the points where we want the derivatives
        I = KInds[ip, :]  # indices of the K neighbor points
        known[ip, :, 0] = f[I]

    t0 = time.time()
    out = np.matmul(R, known)  # batched matrix multiplication; matrices are in the last two dimensions
    dfdy = np.squeeze(out[:, 1])
    dfdz = np.squeeze(out[:, 2])
    t1 = time.time()
    if verbose:
        print('Elementwise matmul in %.8f seconds' % (t1 - t0))


#    t0=time.time()
#    dfdy=np.zeros(Nf)
#    dfdz=np.zeros(Nf)
#    for ip in range(Nf): # only looping over the non-repeated points
#        # indeces of the K neighbor points
#        I=KInds[ip,:]
#
#        # if I want to find the derivatives, the known LHS is just the field values of the neighbors
#        known=f[I]
#
#        out=np.matmul(R[ip,:,:],known)
#        dfdy[ip]=out[1]
#        dfdz[ip]=out[2]
#    t1=time.time()
#    print('Looped matmul in %.8f seconds' %(t1-t0))

#    Got R in 5.39548397 seconds
#    Elementwise matmul in 0.00095487 seconds
#    Looped matmul in      0.02349186 seconds

    return dfdy, dfdz, R
Example #20
 def setup_method(self):
     n = 50
     m = 2
     np.random.seed(1234)
     self.T1 = KDTree(np.random.randn(n, m), leafsize=2)
     self.T2 = KDTree(np.random.randn(n, m), leafsize=2)
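Holding two trees like this is the setup for scipy's two-tree queries. A short sketch of the typical follow-up calls (count_neighbors and sparse_distance_matrix are standard scipy.spatial.KDTree methods):

import numpy as np
from scipy.spatial import KDTree

np.random.seed(1234)
T1 = KDTree(np.random.randn(50, 2), leafsize=2)
T2 = KDTree(np.random.randn(50, 2), leafsize=2)

n_pairs = T1.count_neighbors(T2, r=0.2)              # number of pairs (i, j) with distance <= 0.2
D = T1.sparse_distance_matrix(T2, max_distance=0.2)  # sparse matrix of those distances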
Example #21
def get_ref_coors(field,
                  coors,
                  strategy='kdtree',
                  close_limit=0.1,
                  cache=None,
                  verbose=True):
    """
    Get reference element coordinates and elements corresponding to given
    physical coordinates.

    Parameters
    ----------
    field : Field instance
        The field defining the approximation.
    coors : array
        The physical coordinates.
    strategy : str, optional
        The strategy for finding the elements that contain the
        coordinates. Only 'kdtree' is supported for the moment.
    close_limit : float, optional
        The maximum limit distance of a point from the closest
        element allowed for extrapolation.
    cache : Struct, optional
        To speed up a sequence of evaluations, the field mesh, the inverse
        connectivity of the field mesh and the KDTree instance can be cached as
        `cache.mesh`, `cache.offsets`, `cache.iconn` and
        `cache.kdtree`. Optionally, the cache can also contain the reference
        element coordinates as `cache.ref_coors`, `cache.cells` and
        `cache.status`, if the evaluation occurs in the same coordinates
        repeatedly. In that case the KDTree related data are ignored.
    verbose : bool
        If False, reduce verbosity.

    Returns
    -------
    ref_coors : array
        The reference coordinates.
    cells : array
        The cell indices corresponding to the reference coordinates.
    status : array
        The status: 0 is success, 1 is extrapolation within `close_limit`, 2 is
        extrapolation outside `close_limit`, 3 is failure.
    """
    ref_coors = get_default_attr(cache, 'ref_coors', None)
    if ref_coors is None:
        mesh = get_default_attr(cache, 'mesh', None)
        if mesh is None:
            mesh = field.create_mesh(extra_nodes=False)

        scoors = mesh.coors
        output('reference field: %d vertices' % scoors.shape[0],
               verbose=verbose)

        iconn = get_default_attr(cache, 'iconn', None)
        if iconn is None:
            offsets, iconn = make_inverse_connectivity(mesh.conns,
                                                       mesh.n_nod,
                                                       ret_offsets=True)

            ii = nm.where(offsets[1:] == offsets[:-1])[0]
            if len(ii):
                raise ValueError('some vertices not in any element! (%s)' % ii)

        else:
            offsets = cache.offsets

        if strategy == 'kdtree':
            kdtree = get_default_attr(cache, 'kdtree', None)
            if kdtree is None:
                from scipy.spatial import cKDTree as KDTree

                tt = time.perf_counter()
                kdtree = KDTree(scoors)
                output('kdtree: %f s' % (time.perf_counter() - tt), verbose=verbose)

            tt = time.perf_counter()
            ics = kdtree.query(coors)[1]
            output('kdtree query: %f s' % (time.perf_counter() - tt), verbose=verbose)

            tt = time.perf_counter()
            ics = nm.asarray(ics, dtype=nm.int32)

            vertex_coorss, nodess, mtx_is = [], [], []
            conns = []
            for ig, ap in field.aps.items():
                ps = ap.interp.gel.interp.poly_spaces['v']

                vertex_coorss.append(ps.geometry.coors)
                nodess.append(ps.nodes)
                mtx_is.append(ps.get_mtx_i())

                conns.append(mesh.conns[ig].copy())

            # Get reference element coordinates corresponding to
            # destination coordinates.
            ref_coors = nm.empty_like(coors)
            cells = nm.empty((coors.shape[0], 2), dtype=nm.int32)
            status = nm.empty((coors.shape[0], ), dtype=nm.int32)

            find_ref_coors(ref_coors, cells, status, coors, ics, offsets,
                           iconn, scoors, conns, vertex_coorss, nodess, mtx_is,
                           1, close_limit, 1e-15, 100, 1e-8)
            output('ref. coordinates: %f s' % (time.perf_counter() - tt),
                   verbose=verbose)

        elif strategy == 'crawl':
            raise NotImplementedError

        else:
            raise ValueError('unknown search strategy! (%s)' % strategy)

    else:
        ref_coors = cache.ref_coors
        cells = cache.cells
        status = cache.status

    return ref_coors, cells, status
Example #22
def test_query_pairs_single_node():
    tree = KDTree([[0, 1]])
    assert_equal(tree.query_pairs(0.5), set())
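query_pairs(r) returns the set of index pairs (i, j), i < j, whose points lie within distance r of each other, so a single-node tree can only ever yield the empty set. For contrast, a sketch with two close points:

from scipy.spatial import KDTree

tree = KDTree([[0, 0], [0, 0.3], [5, 5]])
assert tree.query_pairs(0.5) == {(0, 1)}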
Example #23
 def _rebuild_kdtree(self):
     self.kdtree = KDTree([ind.bd for ind in self.archive])
Example #24
 def waypoints_cb(self, waypoints):
     self.base_waypoints = waypoints
     self.waypoints_tree = KDTree(
         [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
          for waypoint in waypoints.waypoints])
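With the tree in place, finding the waypoint closest to a given pose is a single query. A sketch of the companion lookup (the method name is an assumption; the attributes follow the callback above):

 def closest_waypoint_idx(self, x, y):
     # index into self.base_waypoints.waypoints of the nearest waypoint
     return self.waypoints_tree.query([x, y], 1)[1]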
Example #25
#Iterate through the point list and store the coordinates in a dictionary for future reference
for i in range(0, len(x_t)):
    xval = x_t[i]
    yval = y_t[i]
    final_coords_list.append((xval, yval))
    pointlist['p{}'.format(i)] = [xval, yval]

final_coords_list = np.array(final_coords_list)
x_coords, y_coords = zip(*final_coords_list)
plt.scatter(x_coords, y_coords)
plt.show()

start_s = time.time()
#Scipy KDTree implementation
kdtree = KDTree(final_coords_list)

end_s = time.time()

print(str(end_s - start_s) + " s")

#Input the position of the observer
print("Enter the current position X")
current_posx = int(input())
print("Enter the current position Y")
current_posy = int(input())
print("Press WASD to move or press E to exit\n")
while True:
    start_s = time.time()
    #Query point is the current location with a radius set to 10 unit distance. Change 10 to whatever is desired
    coords = kdtree.query_ball_point([current_posx, current_posy], r=10)
Example #26
    def _align(self, tplt, eps, max_iter):

        # Configuration
        higher = 2001
        lower = 1
        step = 100
        transforms = []
        iters = []

        # Build TriMesh Source
        tplt_tri = TriMesh(tplt).trilist

        # Generate Edge List
        tplt_edge = tplt_tri[:, [0, 1]]
        tplt_edge = np.vstack((tplt_edge, tplt_tri[:, [0, 2]]))
        tplt_edge = np.vstack((tplt_edge, tplt_tri[:, [1, 2]]))
        tplt_edge = np.sort(tplt_edge)

        # Get Unique Edge List
        b = np.ascontiguousarray(tplt_edge).view(
            np.dtype((np.void, tplt_edge.dtype.itemsize * tplt_edge.shape[1])))
        _, idx = np.unique(b, return_index=True)
        tplt_edge = tplt_edge[idx]

        # init
        m = tplt_edge.shape[0]
        n = tplt.shape[0]

        # get node-arc incidence matrix
        M = np.zeros((m, n))
        M[range(m), tplt_edge[:, 0]] = -1
        M[range(m), tplt_edge[:, 1]] = 1

        # weight matrix
        G = np.identity(self.n_dims + 1)

        # build the kD-tree
        target_2d = self.target.points
        kdOBJ = KDTree(target_2d)

        # init transformation
        prev_X = np.zeros((self.n_dims, self.n_dims + 1))
        prev_X = np.tile(prev_X, n).T
        tplt_i = tplt

        # start nicp
        # for each stiffness
        sf = np.logspace(lower, higher, num=step, base=1.005)[-1::-1]
        sf = [10**i for i in range(5, 0, -1)] + list(range(9, 1, -1)) + \
             [1.0 / (i + 1.0) for i in range(10)]
        print(sf)
        sf_kron = np.kron(M, G)
        errs = []

        for alpha in sf:
            # get the term for stiffness
            sf_term = alpha * sf_kron
            # iterate until X converge
            niters = 0
            while niters < max_iter:
                # find nearest neighbour
                _, match = kdOBJ.query(tplt_i)

                # formulate target and template data, and distance term
                U = target_2d[match, :]

                point_size = self.n_dims + 1
                D = np.zeros((n, n * point_size))
                for k in range(n):
                    D[k, k * point_size:k * point_size + 2] = tplt_i[k, :]
                    D[k, k * point_size + 2] = 1

                # correspondence detection for setting weight
                # add distance term
                sA = np.vstack((sf_term, D))
                sB = np.vstack((np.zeros((sf_term.shape[0], self.n_dims)), U))
                sX = np.linalg.pinv(sA).dot(sB)

                # deform template
                tplt_i = D.dot(sX)
                err = np.linalg.norm(prev_X - sX, ord='fro')
                errs.append([alpha, err])
                prev_X = sX

                transforms.append(sX)
                iters.append(tplt_i)

                niters += 1

                if err / np.sqrt(np.size(prev_X)) < eps:
                    break

        # final result
        fit_2d = tplt_i
        _, point_corr = kdOBJ.query(fit_2d)
        return fit_2d, transforms, iters, point_corr
Example #27
 def waypoints_cb(self, waypoints):
     self.base_waypoints = waypoints
     if not self.waypoints_2d:
         self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
         self.waypoint_tree = KDTree(self.waypoints_2d)
Example #28
def nicp(source, target, eps=1e-3, us=101, ls=1, step=5, max_iter=100):
    r"""
    Deforms the source trimesh to align with to optimally the target.
    """
    n_dims = source.n_dims
    # Homogeneous dimension (1 extra for translation effects)
    h_dims = n_dims + 1
    points = source.points
    trilist = source.trilist

    # Configuration
    upper_stiffness = us
    lower_stiffness = ls
    stiffness_step = step

    # Get a sorted list of edge pairs (note there will be many mirrored pairs
    # e.g. [4, 7] and [7, 4])
    edge_pairs = np.sort(
        np.vstack((trilist[:, [0, 1]], trilist[:, [0, 2]], trilist[:,
                                                                   [1, 2]])))

    # We want to remove duplicates - this is a little hairy, but basically we
    # get a view on the array where each pair is considered by numpy to be
    # one item
    edge_pair_view = np.ascontiguousarray(edge_pairs).view(
        np.dtype((np.void, edge_pairs.dtype.itemsize * edge_pairs.shape[1])))
    # Now we can use this view to ask for only unique edges...
    unique_edge_index = np.unique(edge_pair_view, return_index=True)[1]
    # And use that to filter our original list down
    unique_edge_pairs = edge_pairs[unique_edge_index]

    # record the number of unique edges and the number of points
    n = points.shape[0]
    m = unique_edge_pairs.shape[0]

    # Generate a "node-arc" (i.e. vertex-edge) incidence matrix.
    row = np.hstack((np.arange(m), np.arange(m)))
    col = unique_edge_pairs.T.ravel()
    data = np.hstack((-1 * np.ones(m), np.ones(m)))
    M_s = sp.coo_matrix((data, (row, col)))

    # weight matrix
    G = np.identity(n_dims + 1)

    M_kron_G_s = sp.kron(M_s, G)

    # build the kD-tree
    # print('building KD-tree for target...')
    kdtree = KDTree(target.points)

    # init transformation
    X_prev = np.zeros((n_dims, n_dims + 1))
    X_prev = np.tile(X_prev, n).T
    v_i = points

    # start nicp
    # for each stiffness
    # stiffness = range(upper_stiffness, lower_stiffness, -stiffness_step)
    stiffness = np.logspace(lower_stiffness,
                            upper_stiffness,
                            num=stiffness_step,
                            base=1.005)[-1::-1]
    stiffness = [10**i for i in range(5, 0, -1)] + list(range(9, 1, -1)) + \
        [1.0 / (i + 1.0) for i in range(10)]
    errs = []

    # we need to prepare some indices for efficient construction of the D
    # sparse matrix.
    row = np.hstack((np.repeat(np.arange(n)[:, None], n_dims,
                               axis=1).ravel(), np.arange(n)))

    x = np.arange(n * h_dims).reshape((n, h_dims))
    col = np.hstack((x[:, :n_dims].ravel(), x[:, n_dims]))

    o = np.ones(n)
    iterations = [v_i]
    for alpha in stiffness:
        # print(alpha)
        # get the term for stiffness
        alpha_M_kron_G_s = alpha * M_kron_G_s

        # iterate until X converge
        iter = 0
        while iter < max_iter:
            # find nearest neighbour
            match = kdtree.query(v_i)[1]

            # formulate target and template data, and distance term
            U = target.points[match, :]

            data = np.hstack((v_i.ravel(), o))
            D_s = sp.coo_matrix((data, (row, col)))

            # correspondence detection for setting weight
            # add distance term
            A_s = sp.vstack((alpha_M_kron_G_s, D_s)).tocsr()
            B_s = sp.vstack((np.zeros(
                (alpha_M_kron_G_s.shape[0], n_dims)), U)).tocsr()
            X_s = spsolve(A_s.T.dot(A_s), A_s.T.dot(B_s))
            X = X_s.toarray()

            # deform template
            v_i = D_s.dot(X)
            err = np.linalg.norm(X_prev - X, ord='fro')
            errs.append([alpha, err])
            X_prev = X

            iter += 1

            if err / np.sqrt(np.size(X_prev)) < eps:
                iterations.append(v_i)
                break

    # final result
    point_corr = kdtree.query(v_i)[1]
    return (v_i, iterations), point_corr
Example #29
        df.Fueltype != 'Natural Gas',
        df.Technology.replace('Steam Turbine', 'OCGT').fillna('OCGT')))))

    ppl_query = snakemake.config['electricity']['powerplants_filter']
    if isinstance(ppl_query, str):
        ppl.query(ppl_query, inplace=True)

    ppl = add_custom_powerplants(ppl)  # add carriers from own powerplant files

    cntries_without_ppl = [
        c for c in countries if c not in ppl.Country.unique()
    ]

    for c in countries:
        substation_i = n.buses.query('substation_lv and country == @c').index
        kdtree = KDTree(n.buses.loc[substation_i, ['x', 'y']].values)
        ppl_i = ppl.query('Country == @c').index

        tree_i = kdtree.query(ppl.loc[ppl_i, ['lon', 'lat']].values)[1]
        ppl.loc[ppl_i, 'bus'] = substation_i.append(pd.Index([np.nan]))[tree_i]

    if cntries_without_ppl:
        logging.warning(
            f"No powerplants known in: {', '.join(cntries_without_ppl)}")

    bus_null_b = ppl["bus"].isnull()
    if bus_null_b.any():
        logging.warning(
            f"Couldn't find close bus for {bus_null_b.sum()} powerplants")

    ppl.to_csv(snakemake.output[0])