def radius_outlier_filter(input_cloud, r=1, search_eps=0, p=2, sd_cutoff=1):
    output_cloud = []
    print("Constructing kdtree")
    tree = kdtree.KDTree(input_cloud)
    print("kdtree constructed")
    print("finding neighbors")

    # Querying the tree against itself yields, for every point, the indices
    # of all points within radius r in a single call.
    start = time.time()
    neighbor_list = tree.query_ball_tree(tree, r=r, p=p, eps=search_eps)
    end = time.time() - start
    print("Finding neighbors took %s seconds" % end)

    # Keep points whose neighbour count is no more than sd_cutoff standard
    # deviations below the mean neighbour count.
    lengths = [len(x) for x in neighbor_list]
    mean = np.mean(lengths)
    std = np.std(lengths)
    cutoff = mean - sd_cutoff * std
    for i in trange(len(neighbor_list), desc="Removing outliers"):
        if lengths[i] >= cutoff:
            output_cloud.append(input_cloud[i])

    return output_cloud
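
A minimal usage sketch for the filter above; the synthetic cloud and parameter values are assumptions for illustration, and the snippet's own imports (numpy, scipy's kdtree, time, trange) are presumed in scope.

import numpy as np

# Dense cluster plus sparse far-away outliers; the outliers have few
# neighbours within r and fall below the cutoff.
cloud = np.vstack([np.random.rand(1000, 3) * 10,
                   np.random.rand(50, 3) * 100])
filtered = radius_outlier_filter(cloud, r=1.0, sd_cutoff=1)
print("%d of %d points kept" % (len(filtered), len(cloud)))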
Example 2
    def __Debounce(self, xs, ys, radius=4):
        if len(xs) <= 1:
            return xs, ys

        kdt = kdtree.KDTree(np.array([xs, ys]).T)  # scipy.array was removed; use numpy

        xsd = []
        ysd = []

        for xi, yi in zip(xs, ys):
            neigh = kdt.query_ball_point([xi, yi], radius)

            if len(neigh) > 1:
                # keep the point only if it is the brightest within the
                # radius (non-maximum suppression)
                Ii = self.filteredData[xi, yi]

                In = self.filteredData[xs[neigh].astype('i'),
                                       ys[neigh].astype('i')].max()

                if Ii >= In:
                    xsd.append(xi)
                    ysd.append(yi)

            else:
                xsd.append(xi)
                ysd.append(yi)

        return xsd, ysd
Example 3
    def smooth(self, x):
        """
        Apply K Nearest Neighbors density estimator over a grid.

        Arguments:

            1. x: 2D dataset to be smoothed. It is assumed that the rows of
                  the data matrix are the sample points.

        Returns:

            1. Smoothed function over the specified domain.

        Example:

            TODO: Write sample code...
        """
        domx, domy = self._construct_domain()
        dom = np.vstack((domx.ravel(), domy.ravel())).T

        # construct KD tree on data
        tree = kd.KDTree(x)

        # get k^{th} nearest neighbors to each point in the domain
        dist = tree.query(dom, k=self.k, p=2)[0]
        dist_knn = dist[:, self.k - 1].reshape(self.domy_params[2],
                                               self.domx_params[2])
        dist_knn = np.divide(self.k / (x.shape[0] * np.pi), dist_knn**2)

        # KNN density estimator
        return (dist_knn)
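
The method above implements the classic 2D k-NN density estimate f(x) = k / (n * pi * d_k(x)^2), where d_k(x) is the distance from x to its k-th nearest sample. A self-contained sketch of the same computation, free of the class's domain bookkeeping (all names here are assumptions):

import numpy as np
from scipy.spatial import KDTree

def knn_density(data, grid, k=5):
    # distance from each grid point to its k-th nearest sample
    d_k = KDTree(data).query(grid, k=k)[0][:, k - 1]
    # f_hat(x) = k / (n * pi * d_k(x)^2)
    return k / (data.shape[0] * np.pi * d_k**2)

data = np.random.randn(500, 2)
grid = np.random.uniform(-3, 3, size=(100, 2))
density = knn_density(data, grid)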
Example 4
    def iteration(self):
        log.info('initialising clusters ...')
        # initialise clusters
        self.initialise_clusters()

        log.info('moving clusters off high-gradient pixels ...')
        # move clusters based on gradient magnitude value
        self.initial_move_clusters()

        log.info('main process started ...')
        for i in trange(self.max_iteration):
            # assign pixels to closest cluster based on total distance value
            self.assign_pixels_to_clusters()

            # update cluster centers based on new pixel to cluster array
            self.update_cluster_centers()

        log.info('main process done ...')

        if self.post_process:
            log.info('connected components started ...')

            # calculate kd_tree data
            centers = [[cluster.w, cluster.h] for cluster in self.clusters]
            self.kd_tree = kd.KDTree(centers)
            for i in trange(1):
                self.connected_component_post_process()

            log.warning('connected components done ...')

        # compute segmented image
        self.compute_segmented_image()
Example 5
def fitPts(pts, NFits = 0):
    xm, ym, zm = pts.min(0)
    xmx, ymx, zmx = pts.max(0)
    X, Y, Z = np.mgrid[xm:xmx:5, ym:ymx:5, zm:zmx:5]
    im = np.zeros(X.shape)
    
    kdt = kdtree.KDTree(pts)
    
    if NFits == 0:
        NFits = pts.shape[0]
    
    for i in np.arange(NFits):
        ptr = pts[kdt.query_ball_point(pts[i, :], 150),:]
        #po = mean(ptr, 0)
        po = pts[i, :]
        
        if ptr.shape[0] > 10:
            ptr = ptr - po[None, :]
            ps  = []
            mfs = []
            
            for j in range(5):  # a few random restarts; keep the best fit
                op = leastsq(mf, list(1 + .1*np.random.normal(size=9)) + [-1], args=(ptr[:,0],ptr[:,1],ptr[:,2]), full_output=1, epsfcn=1)
                ps.append(op[0])
                mfs.append((op[2]['fvec']**2).sum())

            p3 = ps[np.argmin(mfs)]

            # render only when a fit was made; outside the if, p3 could be undefined
            fv = rend(p3, po, 100, X, Y, Z, im, xm, ym, zm, 5,5,5, 1.0/ptr.shape[0])
        
    return im
Example 6
    def learn(self, learndataset):
        """learn the KNN structure required to evaluate new instances

        :param learndataset: learning instances
        :type learndataset: :class:`~classifip.dataset.arff.ArffFile`
        """
        self.__init__()
        self.classes = learndataset.attribute_data['class'][:]
        # Initialize the average distance for every possible class
        for i in learndataset.attribute_data['class']:
            class_set = learndataset.select_class([i])
            values = [row[0:len(row) - 1] for row in class_set.data]
            if len(values) > 1000:
                valred = np.random.permutation(values)[0:1000]
                class_distances = distance.cdist(valred, valred)
            else:
                class_distances = distance.cdist(values, values)
            # mean over the off-diagonal entries of the distance matrix
            average = class_distances.sum() / (len(class_distances)**2
                                               - len(class_distances))
            self.av_dist.append(average)

        # train on the whole data set
        learndata = [row[0:len(row) - 1] for row in learndataset.data]
        self.truelabels = [row[-1] for row in learndataset.data]
        self.tree = kdtree.KDTree(learndata)
Example 7
    def nearest_boundaries_location(self, x, y, xi, yi):
        """
        Find the nearest mother-grid points to the boundary points of the
        nested grid.
        """
        mdgrd = np.array(list(zip(x.ravel(), y.ravel())))  # list() needed on Python 3
        kdt = kd.KDTree(mdgrd)
        self.mdi_dist, self.mdi = kdt.query(np.array(list(zip(xi, yi))))
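
A standalone sketch of the same nearest-point lookup between two grids; the toy mother grid and boundary points are assumptions:

import numpy as np
from scipy.spatial import KDTree

x, y = np.meshgrid(np.arange(10.0), np.arange(10.0))   # mother grid
xi = np.array([2.3, 5.7])                              # nested-grid boundary xs
yi = np.array([1.1, 8.2])                              # nested-grid boundary ys

mother = np.column_stack([x.ravel(), y.ravel()])
dist, idx = KDTree(mother).query(np.column_stack([xi, yi]))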
Example 8
    def __init__(self, vert):
        self.num_vert = vert
        self.bowlr = 2.1 * pow(
            log(self.num_vert) / float(self.num_vert), 1 / float(NUM_DIM))
        self.delta = 0.001

        self.nodes = []
        self.point = []

        self.tree = []
        self.mydict = []

        self.truth = []
        self.observations = []
        self.estimates = []

        for i in range(self.num_vert):
            self.nodes.append(Node(False))

        # initial density from a Gaussian prior (init_state, init_var)
        for i in range(self.num_vert):
            n1 = self.nodes[i]
            n1.density = normal_val(n1.x, array(init_state), init_var)

        self.normalize_density()

        self.points = [self.key(mynode.x) for mynode in self.nodes]
        self.tree = kdtree.KDTree(self.points)
Example 9
    def learn(self,learndataset,likovo_normalise=True):
        """learn the tree structure required to perform evaluation
        
        :param learndataset: learning instances
        :type learndataset: :class:`~classifip.dataset.arff.ArffFile`
        :param likovo_normalise: normalise the input features or not
        :type likovo_normalise: boolean

        """
        self.classes=learndataset.attribute_data['class'][:]
        learndata=[row[0:len(row)-1] for row in learndataset.data]
        data_array=np.array(learndata).astype(float)
        if likovo_normalise:
            self.normal.append(True)
            span=data_array.max(axis=0)-data_array.min(axis=0)
            self.normal.append(span)
            self.normal.append(data_array.min(axis=0))
            data_array=(data_array-data_array.min(axis=0))/span
        else:
            self.normal.append(False)
            
        #Initialise radius as the average distance between all learning instances
        if len(data_array) > 1000:
            valred = np.random.permutation(data_array)[0:1000]
            distances = distance.cdist(valred, valred)
        else:
            distances = distance.cdist(data_array, data_array)
        self.radius=distances.sum()/(2*(len(distances)**2-len(distances)))
        self.tree=kdtree.KDTree(data_array)
        self.trueclasses=[row[-1] for row in learndataset.data]
Example 10
def lines_distance(lines, r=2.):
    """
    :param lines: sequence of point arrays (e.g. 2 x 3: two 3D points per line)
    :param r: pairing radius
    :return: set of index pairs of points that lie within distance r
    """
    tree = kdtree.KDTree(np.concatenate(lines))
    return tree.query_pairs(r)
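
A hypothetical call, reading the "2 x 3" hint as two 3D points per line; the data are invented:

import numpy as np

lines = [np.array([[0., 0., 0.], [1., 0., 0.]]),
         np.array([[0., 1., 0.], [1., 1., 0.]])]
pairs = lines_distance(lines, r=2.)  # set of index pairs within distance 2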
Example 11
    def kdtree(self):
        if not hasattr(self, '_kdtree'):
            self._kdtree = None

        if self._kdtree is None:
            mat = self.__create_numpy_matrix(self.graph)
            self._kdtree = kdtree.KDTree(mat[:, iLoc.X:iLoc.Z + 1])

        return self._kdtree
Example 12
def hybrid_astar_planning(sx, sy, syaw, gx, gy, gyaw, ox, oy, xyreso, yawreso):
    sxr, syr = round(sx / xyreso), round(sy / xyreso)
    gxr, gyr = round(gx / xyreso), round(gy / xyreso)
    syawr = round(rs.pi_2_pi(syaw) / yawreso)
    gyawr = round(rs.pi_2_pi(gyaw) / yawreso)

    nstart = Node(sxr, syr, syawr, 1, [sx], [sy], [syaw], [1], 0.0, 0.0, -1)
    ngoal = Node(gxr, gyr, gyawr, 1, [gx], [gy], [gyaw], [1], 0.0, 0.0, -1)

    kdtree = kd.KDTree([[x, y] for x, y in zip(ox, oy)])
    P = calc_parameters(ox, oy, xyreso, yawreso, kdtree)

    hmap = astar.calc_holonomic_heuristic_with_obstacle(
        ngoal, P.ox, P.oy, P.xyreso, 1.0)
    steer_set, direc_set = calc_motion_set()
    open_set, closed_set = {calc_index(nstart, P): nstart}, {}

    qp = QueuePrior()
    qp.put(calc_index(nstart, P), calc_hybrid_cost(nstart, hmap, P))

    while True:
        if not open_set:
            return None

        ind = qp.get()
        n_curr = open_set[ind]
        closed_set[ind] = n_curr
        open_set.pop(ind)

        update, fpath = update_node_with_analystic_expantion(n_curr, ngoal, P)

        if update:
            fnode = fpath
            break

        for i in range(len(steer_set)):
            node = calc_next_node(n_curr, ind, steer_set[i], direc_set[i], P)

            if not node:
                continue

            node_ind = calc_index(node, P)

            if node_ind in closed_set:
                continue

            if node_ind not in open_set:
                open_set[node_ind] = node
                qp.put(node_ind, calc_hybrid_cost(node, hmap, P))
            else:
                if open_set[node_ind].cost > node.cost:
                    open_set[node_ind] = node
                    qp.put(node_ind, calc_hybrid_cost(node, hmap, P))

    return extract_path(closed_set, fnode, nstart)
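
A hypothetical invocation of the planner above; the poses, resolutions, and the toy obstacle wall are all made up for illustration:

import numpy as np

ox = [float(i) for i in range(60)]   # a straight obstacle wall along y = 0
oy = [0.0] * 60
path = hybrid_astar_planning(sx=10.0, sy=7.0, syaw=np.deg2rad(120.0),
                             gx=45.0, gy=20.0, gyaw=np.deg2rad(90.0),
                             ox=ox, oy=oy, xyreso=2.0, yawreso=np.deg2rad(15.0))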
Example 13
def guided_filter_kNN(input_cloud, k=50, filter_eps=.05):
    output_cloud = []
    # put all points in kdtree
    print("Constructing kdtree")
    tree = kdtree.KDTree(input_cloud)
    print("kdtree constructed")
    print("finding neighbors")

    neighbor_list = []
    for q in tqdm(input_cloud,
                  total=len(input_cloud),
                  desc="Querying neighbors"):
        _, neighbors = tree.query(
            x=q, k=k, eps=0.5
        )  # this eps allows for a little inaccuracy for speed tradeoff
        neighbor_list.append(neighbors)

    print("neighbors found")

    for i in trange(len(input_cloud), desc="Filtering"):
        # neighbor_list[i] holds indices into input_cloud
        # step 1, as referred to in the paper
        neighbors = neighbor_list[i]
        # step 2: neighbourhood size (kn, to avoid shadowing the parameter k)
        kn = float(len(neighbors))
        # step 3: neighbourhood centroid
        p_bar = np.asarray([0., 0., 0.])
        for n in neighbors:
            p_bar += input_cloud[n]
        p_bar /= kn
        # step 4: mean squared magnitude, then the filter coefficient a
        temp = 0
        for n in neighbors:
            temp += np.dot(input_cloud[n], input_cloud[n])
        temp /= kn
        centroid_dot = np.dot(p_bar, p_bar)
        a = (temp - centroid_dot) / ((temp - centroid_dot) + filter_eps)
        # step 5
        b = p_bar - a * p_bar
        # step 6: blend each point toward its neighbourhood model
        output_cloud.append(a * input_cloud[i] + b)

    return output_cloud
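
A hypothetical call on a noisy synthetic cloud; the data and parameters are assumptions:

import numpy as np

noisy = np.random.rand(500, 3) + 0.01 * np.random.randn(500, 3)
smoothed = guided_filter_kNN(noisy, k=30, filter_eps=0.05)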
Example 14
    def __init__(self, points, step_size=30, tolerance=50):
        self.step_size = step_size
        np.random.seed(0)
        self.unsorted = np.array(points)
        self.n = len(points)
        self.sortedxs = []
        self.sortedys = []
        self.kdtree = kdtree.KDTree(points)
        self.tolerance = tolerance
        initpoint = self.unsorted[self.n // 2]  # integer division for Python 3
        # grow the search radius until enough neighbours are found
        i = self.step_size
        while True:
            i = i * 2
            neighbors = self.kdtree.query_ball_point(initpoint, i)
            if len(neighbors) >= self.tolerance or i > self.step_size * 3:
                break
        # the principal direction of the local neighbourhood seeds the walk
        pca = PCA(n_components=2)
        pca.fit(self.unsorted[neighbors])
        initdirection1 = pca.components_[0]
        initdirection2 = -initdirection1
        # walk in the first direction, prepending points
        p = initpoint
        d = initdirection1
        oldp = p
        while p is not None:
            p, d = self.searchnext(p, d)
            if p is None or (oldp[0] == p[0] and oldp[1] == p[1]):
                break
            oldp = p
            if p is not None:
                arr1 = np.zeros(1)
                arr2 = np.zeros(1)
                arr1[0] = p[0]
                arr2[0] = p[1]
                if len(self.sortedxs) == 0:
                    self.sortedxs = arr1
                    self.sortedys = arr2
                else:
                    self.sortedxs = np.concatenate((arr1, self.sortedxs))
                    self.sortedys = np.concatenate((arr2, self.sortedys))
        self.sortedxs = np.append(self.sortedxs, initpoint[0])
        self.sortedys = np.append(self.sortedys, initpoint[1])
        # walk in the opposite direction, appending points
        p = initpoint
        d = initdirection2
        oldp = p
        while p is not None:
            p, d = self.searchnext(p, d)
            if p is None or (oldp[0] == p[0] and oldp[1] == p[1]):
                break
            oldp = p
            if p is not None:
                self.sortedxs = np.append(self.sortedxs, p[0])
                self.sortedys = np.append(self.sortedys, p[1])
        self.sorted = np.transpose(np.array([self.sortedxs, self.sortedys]))
Example 15
    def __init__(self, data, search_type='brute-force'):
        self.__data = data
        self.__nr, self.__nc = data.shape
        if search_type == 'brute-force':
            self.__search_type = search_type
        elif search_type == 'kdtree':
            self.__search_type = search_type
            self.__kdtree = kdtree.KDTree(data)
        elif search_type == 'mykdtree':
            self.__search_type = search_type
            self.__kdtree = MyKDTree(data)
        else:
            raise Exception('unknown search type: {}'.format(search_type))
Example 16
    def learn(self, learn_data_set, nb_labels, learn_disc_set=None):
        """
            Warning: inputs should be normalised, since this uses Euclidean distances
        """
        self.__init__()

        self.nb_labels = nb_labels
        self.nda_models = dict()
        self.nb_feature = len(learn_data_set.attributes[:-self.nb_labels])

        _np_data = np.array(learn_data_set.data)
        _index_features = np.array(range(self.nb_feature))
        _index_labels = np.array(
            np.arange(self.nb_feature, self.nb_feature + self.nb_labels))
        self.x_learning = np.array(_np_data[:, _index_features],
                                   dtype=float)  # np.float was removed from NumPy
        self.y_learning = _np_data[:, _index_labels].copy()

        # procedure create kd_tree by classifier with missing instances
        self.kd_tree = dict()
        self.radius = np.ones(nb_labels)
        for label_index in range(nb_labels):
            missing_index = []
            # assuming the index learn_disc_set and learn_data_set are same (salmuz)
            for row_index, row_instance in enumerate(learn_disc_set.data):
                if row_instance[self.nb_feature + label_index] == '-1':
                    missing_index.append(row_index)
            x_marginal = np.delete(self.x_learning, missing_index, axis=0)
            _distances = distance.cdist(x_marginal, self.x_learning)
            self.radius[label_index] = np.mean(_distances[np.tril_indices(
                len(x_marginal), k=-1)])
            self.kd_tree[label_index] = kdtree.KDTree(x_marginal)

        self.learn_disc_set = learn_disc_set.make_clone()

        # vacuous skeleton arff file
        self.skeleton_learn_knn = ArffFile()
        self.skeleton_learn_knn.attribute_data = self.learn_disc_set.attribute_data.copy()
        self.skeleton_learn_knn.attribute_types = self.learn_disc_set.attribute_types.copy()
        self.skeleton_learn_knn.attributes = self.learn_disc_set.attributes.copy()
        for label_name in learn_data_set.attributes[-self.nb_labels:]:
            del self.skeleton_learn_knn.attribute_data[label_name]
            del self.skeleton_learn_knn.attribute_types[label_name]
            self.skeleton_learn_knn.attributes.pop()
        self.skeleton_learn_knn.data = list()
        self.skeleton_learn_knn.define_attribute(name="class",
                                                 atype="nominal",
                                                 data=['0', '1'])
Example 17
    def generate(self, n, bounds=DobotModel.limits):
        """
        Generates (or regenerates) the PRM given a target number of samples n 
        """
        self.G = nx.Graph()

        # Sample environment
        ps = self._sample_cs(n, bounds)
        # ps = self._sample_ws(n,np.array([[0,300],[-200,200],[0,200]]))

        self.tree = kdt.KDTree(ps)

        # Connect samples
        for k in range(self.tree.n):  # xrange is Python 2 only
            self._connect(k, self.tree.data[k])
Example 18
    def cleanup(self):
        """Call after accretion; merges failed bins into neighbours"""
        self._valid_bins = np.array(self._valid_bins, dtype=bool)  # np.bool was removed
        self._current_bin_centroids = np.array(self._current_bin_centroids)
        good_bins = np.where(self._valid_bins)[0]
        failed_bins = np.where(~self._valid_bins)[0]
        # build a kdtree of good bins
        tree = kdtree.KDTree(self._current_bin_centroids[good_bins, :])
        for i, failed_idx in enumerate(failed_bins):
            # reassign each failed bin's pixels to the nearest good bin
            pix_idx = np.where(self._seg_image == failed_idx)
            coords = np.vstack(pix_idx).T
            dists, reassignment_indices = tree.query(coords)
            self._seg_image[pix_idx] = good_bins[reassignment_indices]
Example 19
def compute_normals_for_all_points(points, n_size=36):
    """

    :param n_size:
    :param points:
    :return:
    """
    tree = kdtree.KDTree(points)
    normals = []
    for point in points:
        d, i = tree.query(point, k=n_size)
        current_points = points[i] - point
        normals.append(compute_normal_from_points(current_points))

    return np.array(normals)
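
The helper compute_normal_from_points is not shown in this snippet. A common choice, sketched here purely as an assumption, takes the eigenvector of the neighbourhood covariance with the smallest eigenvalue (the PCA normal):

import numpy as np

def compute_normal_from_points(centered_points):
    # covariance of the already-centred neighbourhood
    cov = np.cov(centered_points.T)
    # eigh returns eigenvalues in ascending order, so the first
    # eigenvector (smallest eigenvalue) estimates the surface normal
    eigvals, eigvecs = np.linalg.eigh(cov)
    return eigvecs[:, 0]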
Example 20
    def smooth(self, x):
        k = np.ceil(self.tau * x.shape[0]) if self.tau is not None else self.k
        k = int(k)
        domx, domy = self._construct_domain()
        dom = np.vstack((domx.ravel(), domy.ravel())).T

        tree = kd.KDTree(x)
        knn = tree.query(dom, k=k, p=2)[1]

        # square root of the mean distance to the k nearest neighbours
        Fxy = np.subtract(dom[:, np.newaxis, :], x[knn, :]).reshape(-1, 2)
        Fxy = np.linalg.norm(Fxy, axis=1).reshape(-1, k)
        Fxy = np.sqrt(Fxy.mean(axis=1))

        return (Fxy.reshape(self.domy_params[2], self.domx_params[2]))
Example 21
    def BridgeSubgraphs(self, graph, ANodes, BNodes):
        '''Create edges between the closest locations of isolated subgraphs'''

        matA = self.__create_numpy_matrix(ANodes)
        matB = self.__create_numpy_matrix(BNodes)

        AKDTree = kdtree.KDTree(matA[:, iLoc.X:iLoc.Z + 1])

        # query returns (distances, indices); pick the closest A-B pair
        distances, indices = AKDTree.query(matB[:, iLoc.X:iLoc.Z + 1], k=1)
        index_of_minB = distances.argmin()
        index_of_minA = indices[index_of_minB]

        NodeA = ANodes[index_of_minA]
        NodeB = BNodes[index_of_minB]

        graph.add_edge(NodeA, NodeB)
Example 22
File: decode.py Project: caomw/hmmf
    def __init__(self, vert):
        self.num_vert = vert
        self.bowlr = 2.1 * pow(
            log(self.num_vert) / float(self.num_vert), 1 / float(NUM_DIM))
        self.delta = 0.1

        self.nodes = []
        self.point = []

        self.tree = []
        self.mydict = []

        self.nodes.append(Node(True))
        for i in range(self.num_vert - 1):
            self.nodes.append(Node())
        self.points = [self.key(mynode.x) for mynode in self.nodes]
        self.tree = kdtree.KDTree(self.points)
Example 23
def get_edges_in_sphere(G,
                        centerSphere,
                        radiusSphere,
                        nkinds,
                        radiusSphereMin=0):
    """ Returns a list of edges which lie in the specified sphere. The function 
    uses the tortuous vessel properties (if one of the points of the vessel is 
    located inside the sphere the vessel is considered to be in the sphere)
    INPUT: G: main Graph
           centerSphere: the coordinates of the center of the sphere
           radiusSphere: the radius of the sphere
           nkinds: list of vessel kinds which should be considered
           radiusSphereMin: default = 0, a value between [0,radiusSphere] can be
           given. vessels inside the sphereMin are not considered
    OUTPUT: edges: list of edges where at least one point along the edge is
            located inside the sphere
    """
    #Get tortuous values of all edges of interest
    rAll = []
    edgeAll = []
    for nkind in nkinds:
        for e in G.es(nkind_eq=nkind):
            for i in e['points']:
                rAll.append(i)
                edgeAll.append(e.index)

    Kdt = kdtree.KDTree(rAll, leafsize=10)
    kAll = 100
    nearestAll = Kdt.query(centerSphere, k=kAll)
    nearestDist = nearestAll[0]
    nearestIndex = nearestAll[1]
    largestDist = nearestDist[-1]
    while largestDist < radiusSphere:
        kAll += 100
        nearestAll = Kdt.query(centerSphere, k=kAll)
        nearestDist = nearestAll[0]
        nearestIndex = nearestAll[1]
        largestDist = nearestDist[-1]

    edges = []
    for i, j in zip(nearestIndex, nearestDist):
        if edgeAll[i] not in edges and radiusSphereMin <= j < radiusSphere:
            edges.append(edgeAll[i])

    return edges
Example 24
    def connect_vasculature_with_tissue(self, **kwargs):
        """Connects VascularGraphs G1 (vasculature) and G2 (tissue) with 
        so-called 'soft links'. Each vascular vertex connects to exactly one 
        tissue vertex (its nearest neighbor). A tissue vertex, however, may 
        connect to multiple vascular vertices. Each connection also stores 
        the sum of the products of surface area and exchange coefficient. This
        vertex property is required for the exchange of a given substance 
        between vasculature and tissue (see the Exchange class).
        INPUT: **kwargs
               substance: The name of the substance that is to be exchanged.              
        OUTPUT: None               
        """

        self.vertexG1ToVerticesG2 = {}
        self.vertexG2ToVerticesG1 = {}
        self.connections = []

        G1 = self._G1
        G2 = self._G2
        substance = kwargs['substance']

        Kdt = kdtree.KDTree(G2.vs['r'], leafsize=10)

        for vIndex in range(G1.vcount()):  # xrange is Python 2 only
            edgeIndices = G1.adjacent(vIndex, 'all')
            exchangeFactor = 0.0
            for edge in G1.es(edgeIndices):
                exchangeFactor = exchangeFactor + edge['length'] * np.pi * \
                                                  edge['diameter'] / 2.0 * \
                                                  edge['exchangeCoefficient'][substance]
            if 'kind' in G1.vs.attribute_names():
                if G1.vs[vIndex]['kind'] == 'u':
                    exchangeFactor = exchangeFactor + \
                                     G1.vs[vIndex]['uSurfaceArea'] * \
                                     G1.vs[vIndex]['uExchangeCoefficient'][substance]

            tissueNeighbor = int(Kdt.query(G1.vs[vIndex]['r'])[1])
            self.vertexG1ToVerticesG2[vIndex] = tissueNeighbor
            if tissueNeighbor in self.vertexG2ToVerticesG1:  # dict.has_key is Python 2 only
                self.vertexG2ToVerticesG1[tissueNeighbor].append(vIndex)
            else:
                self.vertexG2ToVerticesG1[tissueNeighbor] = [vIndex]
            self.connections.append(self.Connection(vIndex, tissueNeighbor))
            self.connections[-1].exchangeFactor = exchangeFactor
Example 25
def classify_close_by(points,
                      labels,
                      from_label,
                      to_label,
                      close_to_label,
                      radius,
                      kdTree=None):
    """

    :param radius:
    :param close_to_label:
    :param to_label:
    :param from_label:
    :param labels:
    :param points:
    :param kdTree:
    :return:
    """
    if kdTree is None:
        kdTree = kdtree.KDTree(points)

    for i, point_label in enumerate(zip(points, labels)):
        point = point_label[0]
        label = point_label[1]

        if label != from_label:
            continue

        i = kdTree.query_ball_point(point, r=radius)
        qpoints = points[i]
        qlabels = labels[i]
        npoints = qpoints[qlabels == close_to_label]

        if len(npoints) == 0:
            continue

        labels[i] = to_label

    return labels
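
A hypothetical call; the arrays and label values are assumptions. Note that points and labels must be NumPy arrays for the fancy indexing above to work:

import numpy as np

points = np.random.rand(200, 3)
labels = np.random.choice([0, 1, 2], size=200)
labels = classify_close_by(points, labels, from_label=0, to_label=2,
                           close_to_label=1, radius=0.2)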
Example 26
    def smooth(self, x):
        """
        Apply K Nearest Neighbors adaptive Gaussian smoother to a provided 2D dataset.

        Arguments:

            1. x: 2D dataset to be smoothed. It is assumed that the rows of
                  the data matrix are the sample points.

        Returns:

            1. Smoothed function over the specified domain.

        Example:

            TODO: Write sample code...
        """
        domx, domy = self._construct_domain()
        dom = np.vstack((domx.ravel(), domy.ravel())).T

        # construct KD tree on data
        tree = kd.KDTree(x)

        # get k nearest neighbors
        dist = tree.query(dom, k=self.k)[0]
        tp_knn = dist[:, self.k - 1].reshape(-1, 1)

        ### ADAPTIVE KERNEL SMOOTHING
        # pairwise subtraction between grid points and each data point
        # reshape from tensor to matrix (K x 2)
        Fxy = np.subtract(dom[:, np.newaxis, :],
                          x[np.newaxis, :, :]).reshape(-1, 2)
        Fxy = np.square(np.linalg.norm(Fxy, axis=1)).reshape(dom.shape[0], -1)
        Fxy = np.divide(Fxy, -2 * tp_knn**2)
        Fxy = np.divide(np.exp(Fxy), 2 * np.pi * tp_knn**2)
        Fxy = Fxy.mean(axis=1)

        return (Fxy.reshape(self.domy_params[2], self.domx_params[2]))
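
A self-contained sketch of the same adaptive kernel (all names assumed): each grid point gets a Gaussian whose bandwidth h(x) is its distance to the k-th nearest sample, and the estimate averages those Gaussians over the data.

import numpy as np
from scipy.spatial import KDTree

def adaptive_gaussian_density(data, grid, k=10):
    # per-grid-point bandwidth: distance to the k-th nearest sample
    h = KDTree(data).query(grid, k=k)[0][:, k - 1].reshape(-1, 1)
    sq = np.square(grid[:, None, :] - data[None, :, :]).sum(axis=2)
    # average the Gaussian kernel over all samples at each grid point
    return (np.exp(-sq / (2 * h**2)) / (2 * np.pi * h**2)).mean(axis=1)

data = np.random.randn(400, 2)
grid = np.random.uniform(-3, 3, size=(50, 2))
density = adaptive_gaussian_density(data, grid)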
Example 27
    def learn(self, learndataset, pipp_normalise=True):
        """learn the tree structure required to perform evaluation
        
        :param learndataset: learning instances
        :type learndataset: :class:`~classifip.dataset.arff.ArffFile`
        :param pipp_normalise: normalise the input features or not
        :type pipp_normalise: boolean
        
        .. note::
    
            learndataset should come from an xarff file tailored for label ranking
        """
        self.labels = learndataset.attribute_data['L'][:]
        learndata = [row[0:len(row) - 1] for row in learndataset.data]
        data_array = np.array(learndata).astype(float)
        if pipp_normalise:
            span = data_array.max(axis=0) - data_array.min(axis=0)
            self.normal.append(True)
            self.normal.append(span)
            self.normal.append(data_array.min(axis=0))
            data_array = (data_array - data_array.min(axis=0)) / span
        else:
            self.normal.append(False)

        #Initialise radius as the average distance between all learning instances
        if len(data_array) > 1000:
            data_red = np.random.permutation(data_array)[0:1000]
            distances = distance.cdist(data_red, data_red)
        else:
            distances = distance.cdist(data_array, data_array)
        self.radius = distances.sum() / (2 * (len(distances)**2 - len(distances)))
        self.tree = kdtree.KDTree(data_array)
        self.truerankings = [
            ranking_matrices(row[-1], self.labels) for row in learndataset.data
        ]
Example 28
    def __init__(self, vert):
        self.num_vert = vert
        self.bowlr = 2.1 * pow(
            log(self.num_vert) / float(self.num_vert), 1 / float(NUM_DIM))
        self.delta = 0.01

        self.nodes = []
        self.edges = []
        self.tree = []
        
        #mydict = (key = node, data = [ [in_edge_1, ... ], [ out_edge_1, ...]])
        self.mydict = dict()

        for i in range(self.num_vert):
            n1 = Node()
            self.nodes.append(n1)
        
        # initial density from a Gaussian prior (init_state, init_var)
        for i in range(self.num_vert):
            n1 = self.nodes[i]
            n1.density = normal_val(n1.x, array(init_state), init_var)
        self.normalize_density()

        self.points = [self.key(mynode.x) for mynode in self.nodes]
        self.tree = kdtree.KDTree(self.points)
Example 29
def vertices_from_coordinates(G,
                              coordinates,
                              diameter_ll=0.0,
                              isEndpoint=False):
    """Given a list of x,y,z coordinates, locate the most closely matching set
    of vertices in a vascular graph of iGraph format. 
    INPUT: G:  Vascular graph in iGraph format. 
           coordinates: List of lists specifying the coordinates (i.e.: 
                        [[x1,y1,z1],[x2,y2,z2],...]).
           diameter_ll: (Optional) lower limit of edge diameter, to select only
                        those vertices bordering a sufficiently large diameter
                        edge. Default is 0.0, i.e. all vertices are considered.
           isEndpoint: Boolean whether or not the vertex searched for is 
                       required to be an endpoint. Default is 'False'.
    OUTPUT: vertex_indices: Array of vertex indices that represent the best 
                            matches.
            distances: Array of distances that the best matching vertices are 
                       separated from the supplied coordinates. Units match 
                       those of the graph vertex coordinates.
    """

    # Select vertex indices based on diameter of adjacent edges:
    si = unique(
        flatten([G.es[x].tuple
                 for x in G.es(diameter_ge=diameter_ll).indices])).tolist()
    # Optionally filter for end-points:
    if isEndpoint:
        si = [i for i in si if G.degree(i) == 1]
    # Construct k-dimensional search-tree:
    kdt = kdtree.KDTree(G.vs[si]['r'], leafsize=10)
    search_result = kdt.query(coordinates)
    sr_v = np.ravel([search_result[1]]).tolist()
    vertex_indices = [si[x] for x in sr_v]
    distances = np.ravel([search_result[0]]).tolist()

    return vertex_indices, distances
Example 30
    def get_kdtree(self):
        """Build a mocked KD-Tree."""
        all_foodtrucks, points = [], []
        cur_dir = os.path.dirname(os.path.realpath(__file__))
        path = cur_dir + '/fixtures/foodtrucks.json'  # 'path', not the builtin name 'dir'

        with open(path, 'r') as f:
            all_foodtrucks = json.load(f)

        for foodtruck in all_foodtrucks:
            if 'x' in foodtruck and 'y' in foodtruck:
                try:
                    x = float(foodtruck['x'])
                except ValueError:
                    raise InvalidValueError('x', foodtruck['x'])
                try:
                    y = float(foodtruck['y'])
                except ValueError:
                    raise InvalidValueError('y', foodtruck['y'])
                point = (x, y)
                points.append(point)
        kd_tree = kdtree.KDTree(points)

        return kd_tree
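
A hypothetical use of the mocked tree inside a test of the same class; the query point is made up:

# inside a test method:
kd_tree = self.get_kdtree()
dist, idx = kd_tree.query((5.0, 5.0))  # nearest food truck to a made-up point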