Example #1
    def __init__(self,
                 datadict,
                 maxR,
                 denoise_absmin=None,
                 denoise_delta=None,
                 denoise_min=None,
                 detect_planar=None):
        self.D = datadict  # dict of numpy arrays
        # self.kd_tree = KDTree(self.D['coords'])

        # 'linear' means brute force, i.e. exact nearest neighbours, which we need:
        # approximate nn may cause the shrinking-ball algorithm not to converge
        self.flann = FLANN()
        self.flann.build_index(self.D['coords'],
                               algorithm='linear',
                               target_precision=1,
                               sample_fraction=0.001,
                               log_level="info")
        # print "constructed kd-tree"
        self.m, self.n = datadict['coords'].shape
        self.D['ma_coords_in'] = np.full((self.m, self.n), np.nan)
        self.D['ma_coords_out'] = np.full((self.m, self.n), np.nan)
        self.D['ma_radii_in'] = np.full(self.m, np.nan)
        self.D['ma_radii_out'] = np.full(self.m, np.nan)
        # feature-point indices; stored as floats so they can hold NaN for
        # "not computed" (assigning NaN to an integer array raises in numpy)
        self.D['ma_f1_in'] = np.full(self.m, np.nan)
        self.D['ma_f1_out'] = np.full(self.m, np.nan)
        self.D['ma_f2_in'] = np.full(self.m, np.nan)
        self.D['ma_f2_out'] = np.full(self.m, np.nan)

        # for each point, a list with the indices of the closest points visited during the ball-shrinking process:
        self.D['ma_shrinkhist_in'] = []
        self.D['ma_shrinkhist_out'] = []

        self.SuperR = maxR

        # angle parameters are given in degrees; store them in radians
        self.denoise_absmin = None if denoise_absmin is None else math.radians(denoise_absmin)
        self.denoise_delta = None if denoise_delta is None else math.radians(denoise_delta)
        self.denoise_min = None if denoise_min is None else math.radians(denoise_min)
        self.detect_planar = None if detect_planar is None else math.radians(detect_planar)
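
The constructor insists on exact nearest neighbours. A minimal standalone sketch of that setup (hypothetical random point cloud; with algorithm='linear' every dataset point finds itself first, at squared distance zero):

import numpy as np
from pyflann import FLANN

coords = np.random.rand(1000, 3)               # hypothetical point cloud
flann = FLANN()
flann.build_index(coords, algorithm='linear')  # brute force, hence exact
idx, sqdist = flann.nn_index(coords[:5], num_neighbors=2)
assert (idx[:, 0] == np.arange(5)).all() and (sqdist[:, 0] == 0).all()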
Example #2
def assign_nearest_jobs(agent_idle, agent_job, agent_pos, blocked, jobs, left_jobs, n):
    from pyflann import FLANN
    children = []
    starts = []
    ends = []
    ends_job = []
    for left_job in left_jobs:  # this makes many children ...
        ends.append(left_job[0])
        ends_job.append(jobs.index(left_job))
    for i_a in range(len(agent_pos)):
        if agent_job[i_a]:  # has assignment
            i_j = agent_job[i_a][-1]
            starts.append(jobs[i_j][0])
        else:
            starts.append(agent_pos[i_a])
    flann = FLANN()
    result, dists = flann.nn(
        np.array(ends, dtype=float),
        np.array(starts, dtype=float),
        min(n, len(ends)),
        algorithm="kmeans",
        branching=32,
        iterations=7,
        checks=16)
    assert len(agent_pos) == len(result), "Not the right amount of results"
    if result.ndim == 1:  # pyflann returns a 1-D array when a single neighbour is requested
        result = result.reshape(-1, 1)
    for i_a in range(len(agent_pos)):
        for res in result[i_a]:
            agent_job_new = agent_job.copy()
            agent_job_new[i_a] += (ends_job[res],)
            children.append(comp2state(tuple(agent_job_new),
                                       agent_idle,
                                       blocked))
    return children
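
The reshape guard in the loop above exists because pyflann squeezes the result to a 1-D array when a single neighbour is requested; a quick shape check with made-up coordinates:

import numpy as np
from pyflann import FLANN

ends = np.random.rand(5, 2)
starts = np.random.rand(3, 2)
flann = FLANN()
res1, _ = flann.nn(ends, starts, 1)  # shape (3,)
res2, _ = flann.nn(ends, starts, 2)  # shape (3, 2)
print(res1.shape, res2.shape)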
Example #3
 def __init__(self, kernel, num_neighbors, max_memory, lr):
     self.kernel = kernel
     self.num_neighbors = num_neighbors
     self.max_memory = max_memory
     self.lr = lr
     self.keys = None
     self.values = None
     self.kdtree = FLANN()
 
     # key_cache stores a cache of all keys that exist in the DND
     # This makes DND updates efficient
     self.key_cache = {}
     # stale_index is a flag that indicates whether or not the index in self.kdtree is stale
     # This allows us to only rebuild the kdtree index when necessary
     self.stale_index = True
     # indexes_to_be_updated is the set of indexes to be updated on a call to update_params
     # This allows us to rebuild only the keys of key_cache that need to be rebuilt when necessary
     self.indexes_to_be_updated = set()
 
     # Keys and value to be inserted into self.keys and self.values when commit_insert is called
     self.keys_to_be_inserted = None
     self.values_to_be_inserted = None
 
     # Indexes of recently used lookups
     # These should be moved to the back of self.keys and self.values to preserve the LRU property
     self.move_to_back = set()
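
The comments describe a lazy-rebuild pattern: mutations only mark the FLANN index stale, and the next lookup rebuilds it on demand. A minimal sketch of that pattern (an illustrative class, not the original DND API; key dimension assumed):

import numpy as np
from pyflann import FLANN

class LazyIndex:
    def __init__(self, dim=4):
        self.keys = np.empty((0, dim))
        self.kdtree = FLANN()
        self.stale_index = True   # index must be (re)built before the next lookup

    def insert(self, key):
        self.keys = np.vstack([self.keys, key])
        self.stale_index = True   # mutate the data, defer the rebuild

    def lookup(self, query, k=1):
        if self.stale_index:      # rebuild only when necessary
            self.kdtree.build_index(self.keys)
            self.stale_index = False
        return self.kdtree.nn_index(np.atleast_2d(query), min(k, len(self.keys)))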
Example #4
def get_closest(possible_starts, free_tasks_starts, grid, n):
    flann = FLANN()
    result, dists = flann.nn(possible_starts,
                             free_tasks_starts,
                             n,
                             algorithm="kmeans",
                             branching=32,
                             iterations=7,
                             checks=16)
    lengths = []
    nearestss = []
    paths = []
    INF = 2 * np.max(dists)
    for i in range(n):
        # dists has shape (len(free_tasks_starts), n); unravel against its actual shape
        temp_nearest = np.unravel_index(np.argmin(dists), dists.shape)
        dists[temp_nearest] = INF
        nearestss.append(temp_nearest)

        temp_i_possible_starts = result[temp_nearest]
        temp_i_free_tasks_start = temp_nearest[0]
        p, _ = path(tuple(possible_starts[temp_i_possible_starts]),
                    tuple(free_tasks_starts[temp_i_free_tasks_start]), grid,
                    [])
        if p:
            lengths.append(len(p))
        else:
            lengths.append(float('inf'))  # keep lengths aligned with paths when no path is found
        paths.append(p)
    best_path = np.argmin(lengths)
    nearest = nearestss[best_path]
    i_free_tasks_start = nearest[0]
    i_possible_starts = result[nearest]
    return i_free_tasks_start, i_possible_starts, paths[best_path]
Example #5
 def __init__(self, model, subject_layer, distance_threshold):
     self.model = model
     self.distant_vectors = []
     self.distant_vectors_buffer = []
     self.subject_layer = subject_layer
     self.distance_threshold = distance_threshold
     self.flann = FLANN()
Example #6
 def __init__(self, embedding_file, tokenize=tokenize):
     self.word2vec_file = embedding_file
     self.word2vec = KeyedVectors.load_word2vec_format(self.word2vec_file,
                                                       binary=True)
     self.embedding_dim = self.word2vec.vector_size
     self.tokenize = tokenize
     self.sentence_list = []
     self.sentence_list_tokenized = []
     self.sentence_embedding = np.array([])
     self.flann = FLANN()
Example #7
 def __init__(self, cfg):
     self.mutual_best = cfg['mutual_best']
     self.ratio_test = cfg['ratio_test']
     self.ratio = cfg['ratio']
     self.use_cuda = cfg['cuda']
     self.flann = FLANN()
     if self.use_cuda:
         self.match_fn_1 = lambda desc0, desc1: find_nearest_point_idx(desc1, desc0)
         self.match_fn_2 = lambda desc0, desc1: find_first_and_second_nearest_point(desc1, desc0)
     else:
         self.match_fn_1 = lambda desc0, desc1: self.flann.nn(desc1, desc0, 1, algorithm='linear')
         self.match_fn_2 = lambda desc0, desc1: self.flann.nn(desc1, desc0, 2, algorithm='linear')
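
Because FLANN's nn reports squared Euclidean distances by default, a ratio test built on match_fn_2's output must square the threshold; a sketch of the CPU path (random descriptors; 0.8 stands in for cfg['ratio']):

import numpy as np
from pyflann import FLANN

desc0 = np.random.rand(50, 64)   # query descriptors
desc1 = np.random.rand(60, 64)   # reference descriptors
idx, sqdist = FLANN().nn(desc1, desc0, 2, algorithm='linear')
ratio = 0.8                      # assumed value of cfg['ratio']
keep = sqdist[:, 0] < (ratio ** 2) * sqdist[:, 1]   # ratio test on squared distances
matches = np.stack([np.nonzero(keep)[0], idx[keep, 0]], axis=1)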
Example #8
    def compute_lfs(self):
        self.ma_kd_tree = FLANN()

        # collect all ma_coords that are not NaN
        ma_coords = np.concatenate(
            [self.D['ma_coords_in'], self.D['ma_coords_out']])
        ma_coords = ma_coords[~np.isnan(ma_coords).any(axis=1)]

        self.ma_kd_tree.build_index(ma_coords, algorithm='linear')
        # we can get *squared* distances for free, so take the square root
        self.D['lfs'] = np.sqrt(
            self.ma_kd_tree.nn_index(self.D['coords'], 1)[1])
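
The square root is needed because pyflann returns squared Euclidean distances by default; a toy check:

import numpy as np
from pyflann import FLANN

pts = np.array([[0.0, 0.0], [3.0, 4.0]])
f = FLANN()
f.build_index(pts, algorithm='linear')
_, d = f.nn_index(np.array([[0.0, 0.0]]), num_neighbors=2)
print(d)  # [[ 0. 25.]] -- squared; np.sqrt recovers the true distance 5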
Example #9
def create_affinity(X,
                    knn,
                    scale=None,
                    alg="annoy",
                    savepath=None,
                    W_path=None):
    N, D = X.shape
    if W_path is not None:
        if W_path.endswith('.mat'):
            W = sio.loadmat(W_path)['W']
        elif W_path.endswith('.npz'):
            W = sparse.load_npz(W_path)
    else:

        print('Computing affinity')
        start_time = timeit.default_timer()
        if alg == "flann":
            print('with Flann')
            flann = FLANN()
            knnind, dist = flann.nn(X,
                                    X,
                                    knn,
                                    algorithm="kdtree",
                                    target_precision=0.9,
                                    cores=5)
            # knnind = knnind[:,1:]
        else:
            nbrs = NearestNeighbors(n_neighbors=knn).fit(X)
            dist, knnind = nbrs.kneighbors(X)

        row = np.repeat(range(N), knn - 1)
        col = knnind[:, 1:].flatten()
        if scale is None:
            data = np.ones(N * (knn - 1))
        else:
            if scale is True:
                # use the median k-NN distance as the kernel bandwidth
                scale = np.median(dist[:, 1:])
            data = np.exp((-dist[:, 1:] ** 2) / (2 * scale ** 2)).flatten()

        W = sparse.csc_matrix((data, (row, col)), shape=(N, N), dtype=float)
        W = (W + W.transpose(copy=True)) / 2
        elapsed = timeit.default_timer() - start_time
        print(elapsed)

        if isinstance(savepath, str):
            if savepath.endswith('.npz'):
                sparse.save_npz(savepath, W)
            elif savepath.endswith('.mat'):
                sio.savemat(savepath, {'W': W})

    return W
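
A hedged usage sketch of create_affinity (random data; takes the sklearn NearestNeighbors branch, and assumes sparse, sio, timeit and NearestNeighbors are imported as in the original module):

import numpy as np
X = np.random.rand(100, 8)
W = create_affinity(X, knn=10, scale=True, alg="sklearn")
print(W.shape, abs(W - W.T).max())  # (100, 100), 0.0 -- W is symmetrized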
Example #10
def adaptive_epsilon(loader, target_epsilon, batch_size):
    # split dataset into classes
    class_dict = dict()
    for X, y in loader:
        y = y.item()
        X = X.numpy()
        if y not in class_dict:
            class_dict[y] = [X]
        else:
            class_dict[y].append(X)

    # build flann index for each class
    flann_dict = dict()
    for y in class_dict:
        mflann = FLANN()
        class_examples = np.array(class_dict[y])
        class_size = len(class_examples)
        image_shape = class_examples.shape[1:]
        mflann.build_index(class_examples.reshape(class_size, np.prod(image_shape)))
        flann_dict[y] = mflann

    # for each example input, find distance to the closest example input of other classes
    dataset_with_dist = []

    for X, y in loader:
        y = y.item()
        X = X.numpy()
        smallest_dist = np.inf
        for _y in class_dict:
            if _y != y:
                _, dist = flann_dict[_y].nn_index(X.reshape(1, -1), 1)
                dist = np.sqrt(dist)  # nn_index returns squared distances
                if dist[0] < smallest_dist:
                    smallest_dist = dist[0]

        dataset_with_dist.append(np.array([X, y, smallest_dist], dtype=object))

    # scale the distance to the [target_epsilon/100, target_epsilon] interval
    dataset_with_eps = np.array(dataset_with_dist)
    dataset_with_eps[:, 2] = ((dataset_with_eps[:, 2] - np.mean(dataset_with_eps[:, 2]))
                              / np.std(dataset_with_eps[:, 2]))
    dataset_with_eps[:, 2] = dataset_with_eps[:, 2] * (target_epsilon / 4) + (target_epsilon / 2)
    dataset_with_eps[:, 2] = np.clip(dataset_with_eps[:, 2], target_epsilon / 100, target_epsilon)

    # order by eps (descending)
    new_order = np.argsort(dataset_with_eps[:, 2])[::-1]
    dataset_with_eps = dataset_with_eps[new_order]

    # create and return dataset loader
    X = np.concatenate(dataset_with_eps[:, 0], axis=0)
    Y = dataset_with_eps[:, 1]
    eps = dataset_with_eps[:, 2]

    return DataLoader(AdaptiveEpsilonDataset(X, Y, eps, batch_size), batch_size=1, shuffle=True, pin_memory=True)
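
The affine step maps standardized distances so that two standard deviations span [0, target_epsilon] around a midpoint of target_epsilon / 2, and the clip then enforces [target_epsilon/100, target_epsilon]; a quick check with made-up distances:

import numpy as np
eps = 0.1
d = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
z = (d - d.mean()) / d.std()        # standardized distances
scaled = z * (eps / 4) + (eps / 2)  # two sigma above the mean maps to eps
print(np.clip(scaled, eps / 100, eps))  # [0.0146 0.0323 0.05 0.0677 0.0854]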
Example #11
def match(desc1, desc2, dist_ratio=0.6, num_trees=4):
    flann = FLANN()
#    result, dists = flann.nn(desc2, desc1, 2, algorithm="kmeans",
#                             branching=32, iterations=7, checks=16)
    result, dists = flann.nn(desc2, desc1, 2,
                             algorithm='kdtree', trees=num_trees)

    matchscores = zeros((desc1.shape[0]), 'int')
    for idx1, (idx2, _idx_second_nearest) in enumerate(result):
        nearest, second_nearest = dists[idx1]
        if nearest < dist_ratio * second_nearest:
            matchscores[idx1] = idx2
    return matchscores
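
Usage sketch (random descriptors; matchscores[i] holds the matched index in desc2, or 0 when the ratio test rejects the pair -- note that 0 is also a legal match index, an ambiguity inherited from this return convention):

import numpy as np
desc1 = np.random.rand(200, 128)
desc2 = np.random.rand(250, 128)
scores = match(desc1, desc2, dist_ratio=0.6)
print(int((scores > 0).sum()), "putative matches")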
Example #12
    def __init__(self, n_neighbors=5, weights='uniform'):
        """Hyper-parameters of the FLANN algorithm."""

        self.algrithm_choice = "kmeans"
        self.branching = 32
        self.iterations = 7
        self.checks = 16

        """Basic KNN parameters"""

        self.n_neighbors = n_neighbors
        self.weights = weights
        self.flann = FLANN()
Example #13
    def __init__(self, maxlen, seed=0, cores=4, trees=1):
        self.flann = FLANN(
            algorithm='kdtree',
            random_seed=seed,
            cores=cores,
            trees=trees,
        )

        self.counter = 0

        self.contents_lookup = {}  # {oid: (e, q)}
        self.p_queue = collections.deque()  # priority queue: holds (priority_value, oid) pairs
        self.maxlen = maxlen
Example #14
    def __init__(self, dcel):
        Tk.__init__(self)
        self.sizex = 700
        self.sizey = 700
        self.window_diagonal = math.sqrt(self.sizex**2 + self.sizey**2)
        self.title("DCELvis")
        self.resizable(0, 0)

        self.bind('q', self.exit)
        self.bind('h', self.print_help)
        self.bind('p', self.print_dcel)

        self.bind('e', self.iteratehedge)
        self.bind('v', self.iteratevertex)
        self.bind('f', self.iterateface)
        self.canvas = Canvas(self,
                             bg="white",
                             width=self.sizex,
                             height=self.sizey)
        self.canvas.pack()

        if WITH_FLANN:
            self.bind("<ButtonRelease>", self.remove_closest)
            self.bind("<Motion>", self.report_closest)

        self.coordstext = self.canvas.create_text(self.sizex,
                                                  self.sizey,
                                                  anchor='se',
                                                  text='')
        self.info_text = self.canvas.create_text(10,
                                                 self.sizey,
                                                 anchor='sw',
                                                 text='')

        self.tx = 0
        self.ty = 0

        self.highlight_cache = []
        self.bgdcel_cache = []

        self.draw = draw(self)

        if WITH_FLANN:
            self.kdtree = FLANN()
        self.D = None
        self.bind_dcel(dcel)
        self.print_help(None)
Example #15
def nn_match(descs1, descs2):
    """
    Perform nearest neighbor match, using descriptors.
    
    This function uses pyflann
    
    :param descs1: descriptors from image 1, (N1, D)
    :param descs2: descriptors from image 2, (N2, D)
    :return indices: indices into keypoints from image 2, (N1, D)
    """
    # diff = descs1[:, None, :] - descs2[None, :, :]
    # diff = np.linalg.norm(diff, ord=2, axis=2)
    # indices = np.argmin(diff, axis=1)
    
    # flann = cv2.FlannBasedMatcher_create()
    # matches = flann.match(descs1.astype(np.float32), descs2.astype(np.float32))
    # indices = [x.trainIdx for x in matches]
    flann = FLANN()
    indices, _ = flann.nn(descs2, descs1, algorithm="kdtree", trees=4)
    
    return indices
Example #16
def fit_flann(data, algorithm):
    logger.info('Fitting  FLANN...')
    from pyflann import FLANN
    matcher = FLANN(
        algorithm=algorithm,
        checks=32,
        eps=0.0,
        cb_index=0.5,
        trees=1,
        leaf_max_size=4,
        branching=32,
        iterations=5,
        centers_init='random',
        target_precision=0.9,
        build_weight=0.01,
        memory_weight=0.0,
        sample_fraction=0.1,
        log_level="warning",
        random_seed=-1,
    )
    matcher.build_index(data)
    return matcher
Example #17
    def __init__(self,
                 word2vec,
                 tokenize,
                 target_word_list=[],
                 ngram=[1],
                 window_size=1,
                 min_count=1):
        self.w2v = word2vec
        self.embedding_dim = self.w2v.vector_size
        self.vocab = set(self.w2v.vocab.keys())
        self.target_word_list = set(target_word_list)
        for word in self.target_word_list:
            self.vocab.add(word)
        self.tokenize = tokenize
        self.ngram = ngram
        self.window_size = window_size
        self.min_count = min_count

        self.c2v = {}
        self.target_counts = Counter()
        self.alacarte = {}
        self.flann = FLANN()
Example #18
    def __init__(self, kernel, num_neighbors, max_memory, lr):
        """
        Define the structure of the DND.
        :param kernel:
        :param num_neighbors:
        :param max_memory:
        :param lr:
        """
        self.kernel = kernel
        self.num_neighbors = num_neighbors
        self.max_memory = max_memory
        self.lr = lr
        self.keys = None
        self.values = None
        self.kdtree = FLANN()

        # key_cache stores a cache of all keys that exist in the DND
        # This makes DND updates efficient
        # (i.e. the set of all keys currently stored in the DND)
        self.key_cache = {}
        # stale_index is a flag that indicates whether or not the index in self.kdtree is stale
        # This allows us to only rebuild the kdtree index when necessary
        # (i.e. marks when the kd-tree index must be rebuilt)
        self.stale_index = True
        # indexes_to_be_updated is the set of indexes to be updated on a call to update_params
        # This allows us to rebuild only the keys of key_cache that need to be rebuilt when necessary
        # (i.e. records which indexes to refresh when a rebuild is needed)
        self.indexes_to_be_updated = set()

        # Keys and value to be inserted into self.keys and self.values when commit_insert is called
        # (i.e. staged keys and values applied in bulk when commit_insert runs)
        self.keys_to_be_inserted = None
        self.values_to_be_inserted = None

        # Indexes of recently used lookups
        # These should be moved to the back of self.keys and self.values to preserve the LRU property
        # (i.e. the entries to relocate under the LRU replacement policy)
        self.move_to_back = set()
Example #19
 def add(self, points):
     self.__clouds.append(points)
     flann = FLANN()
     flann.build_index(points, algorithm='kdtree', trees=self.__n_trees)
     self.__FLANNs.append(flann)
Example #20
    def _fit(self, X, skip_num_points=0):
        """Fit the model using X as training data.

        Note that sparse arrays can only be handled by method='exact'.
        It is recommended that you convert your sparse array to dense
        (e.g. `X.toarray()`) if it fits in memory, or otherwise using a
        dimensionality reduction technique (e.g. TruncatedSVD).

        Parameters
        ----------
        X : array, shape (n_samples, n_features) or (n_samples, n_samples)
            If the metric is 'precomputed' X must be a square distance
            matrix. Otherwise it contains a sample per row. Note that when
            method='barnes_hut', X cannot be a sparse array and, if need be,
            will be converted to a 32-bit float array. method='exact' allows
            sparse arrays and 64-bit floating point inputs.

        skip_num_points : int (optional, default:0)
            This does not compute the gradient for points with indices below
            `skip_num_points`. This is useful when computing transforms of new
            data where you'd like to keep the old data fixed.
        """
        if self.method not in ['barnes_hut', 'exact']:
            raise ValueError("'method' must be 'barnes_hut' or 'exact'")
        if self.angle < 0.0 or self.angle > 1.0:
            raise ValueError("'angle' must be between 0.0 - 1.0")
        if self.method == 'barnes_hut' and sp.issparse(X):
            raise TypeError('A sparse matrix was passed, but dense '
                            'data is required for method="barnes_hut". Use '
                            'X.toarray() to convert to a dense numpy array if '
                            'the array is small enough for it to fit in '
                            'memory. Otherwise consider dimensionality '
                            'reduction techniques (e.g. TruncatedSVD)')
        else:
            X = check_array(X,
                            accept_sparse=['csr', 'csc', 'coo'],
                            dtype=np.float64)
        random_state = check_random_state(self.random_state)

        if self.early_exaggeration < 1.0:
            raise ValueError("early_exaggeration must be at least 1, but is "
                             "%f" % self.early_exaggeration)

        if self.n_iter < 200:
            raise ValueError("n_iter should be at least 200")

        if self.metric == "precomputed":
            if isinstance(self.init, string_types) and self.init == 'pca':
                raise ValueError("The parameter init=\"pca\" cannot be used "
                                 "with metric=\"precomputed\".")
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square distance matrix")
            distances = X
        else:
            if self.verbose:
                print("[t-SNE] Computing pairwise distances...")

            if self.metric == "euclidean":
                distances = pairwise_distances(X,
                                               metric=self.metric,
                                               squared=True)
            else:
                distances = pairwise_distances(X, metric=self.metric)

        if not np.all(distances >= 0):
            raise ValueError("All distances should be non-negative; either "
                             "the metric or the precomputed distances given "
                             "as X are incorrect")

        # Degrees of freedom of the Student's t-distribution. The suggestion
        # degrees_of_freedom = n_components - 1 comes from
        # "Learning a Parametric Embedding by Preserving Local Structure"
        # Laurens van der Maaten, 2009.
        degrees_of_freedom = max(self.n_components - 1.0, 1)
        n_samples = X.shape[0]
        # the number of nearest neighbors to find
        k = min(n_samples - 1, int(3. * self.perplexity + 1))

        neighbors_nn = None
        if self.method == 'barnes_hut':
            if self.verbose:
                print("[t-SNE] Computing %i nearest neighbors..." % k)
            if self.metric == 'precomputed':
                # Use the precomputed distances to find
                # the k nearest neighbors and their distances
                neighbors_nn = np.argsort(distances, axis=1)[:, :k]
            elif self.rho >= 1:
                # Find the nearest neighbors for every point
                bt = BallTree(X)
                # LvdM uses 3 * perplexity as the number of neighbors
                # And we add one to not count the data point itself
                # In the event that we have very small # of points
                # set the neighbors to n - 1
                distances_nn, neighbors_nn = bt.query(X, k=k + 1)
                neighbors_nn = neighbors_nn[:, 1:]
            elif self.rho < 1:
                # Use pyFLANN to find the nearest neighbors
                myflann = FLANN()
                testset = X
                params = myflann.build_index(testset,
                                             algorithm="autotuned",
                                             target_precision=self.rho,
                                             log_level='info')
                # use a distinct name here: the full pairwise `distances`
                # matrix is still consumed by _joint_probabilities_nn below
                neighbors_nn, distances_nn = myflann.nn_index(
                    testset, k + 1, checks=params["checks"])
                neighbors_nn = neighbors_nn[:, 1:]

            P = _joint_probabilities_nn(distances, neighbors_nn,
                                        self.perplexity, self.verbose)
        else:
            P = _joint_probabilities(distances, self.perplexity, self.verbose)
        assert np.all(np.isfinite(P)), "All probabilities should be finite"
        assert np.all(P >= 0), "All probabilities should be zero or positive"
        assert np.all(P <= 1), ("All probabilities should be less than "
                                "or equal to one")

        if isinstance(self.init, np.ndarray):
            X_embedded = self.init
        elif self.init == 'pca':
            pca = PCA(n_components=self.n_components,
                      svd_solver='randomized',
                      random_state=random_state)
            X_embedded = pca.fit_transform(X)
        elif self.init == 'random':
            X_embedded = None
        else:
            raise ValueError("Unsupported initialization scheme: %s" %
                             self.init)

        return self._tsne(P,
                          degrees_of_freedom,
                          n_samples,
                          random_state,
                          X_embedded=X_embedded,
                          neighbors=neighbors_nn,
                          skip_num_points=skip_num_points)
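
The rho < 1 branch leans on FLANN's autotuned index: build_index searches for index settings that reach the requested target_precision and returns them, including the checks count to reuse at query time. A standalone sketch (random data; 0.9 is an assumed precision):

import numpy as np
from pyflann import FLANN

X = np.random.rand(2000, 10)
flann = FLANN()
params = flann.build_index(X, algorithm="autotuned",
                           target_precision=0.9, log_level="info")
neighbors, sqdists = flann.nn_index(X, 6, checks=params["checks"])
neighbors = neighbors[:, 1:]  # drop each point's self-match, as the code above does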
Example #21
    def build_index(self, X):
        flann = FLANN()
        params = flann.build_index(X)  # build_index returns the parameter dict FLANN settled on (unused here)

        return flann
Example #22
def calcTSDF_point2plane(smplVerts, smplFaces, DCMVerts):

    # TODO
    # Improve TSDF calculation. (Current code generates noisy TSDF)

    # start = time.time()

    vertFaces = smplVerts[smplFaces]
    vecAB = vertFaces[:, 1] - vertFaces[:, 0]
    vecAC = vertFaces[:, 2] - vertFaces[:, 0]

    # Calculate vertex normals by averaging the normals of adjacent faces
    faceNormals = np.cross(vecAB, vecAC)
    vertNormals = np.zeros((len(smplVerts), 3))
    normalCount = np.zeros(len(smplVerts))
    for vset, facenormal in zip(smplFaces, faceNormals):
        for j in vset:
            vertNormals[j] = (vertNormals[j] * normalCount[j] +
                              facenormal) / (normalCount[j] + 1)
            normalCount[j] += 1
    norms = LA.norm(vertNormals, axis=1)
    vertNormals = vertNormals / norms[:, None]

    convVal = 32767.0
    nu = 0.03

    # Find nearest neighbor
    vertIds_list = []
    Dist_p2p_list = []
    TruncatedDist_list = []
    Mask_list = []
    for i in range(1):
        flann = FLANN()
        flann.build_index(smplVerts)
        vertIds, Dist_p2p = flann.nn_index(DCMVerts, num_neighbors=1)  # distances come back squared
        vertIds_list += [vertIds]
        Dist_p2p_list += [Dist_p2p]

        print(vertIds[0:10])

        Dist_p2p /= nu
        TruncatedDist_p2p = np.minimum(1.0, np.maximum(-1.0, Dist_p2p))

        # calculate TSDF: per-vertex plane offsets, then signed point-to-plane distances
        D = np.sum(vertNormals * smplVerts, axis=1)
        D = -D / LA.norm(vertNormals, axis=1)
        corrNormals = vertNormals[vertIds]
        Dist = np.sum(corrNormals * DCMVerts, axis=1) + D[vertIds]
        Dist /= nu
        TruncatedDist = np.minimum(1.0, np.maximum(-1.0, Dist))

        # Mask = np.where(np.abs(TruncatedDist_p2p)>1.0, TruncatedDist_p2p/np.abs(TruncatedDist_p2p), TruncatedDist_p2p)
        # Mask = np.where(TruncatedDist_p2p>=1.0, 0, 1)
        # Mask = np.where((Dist_p2p / Dist)>1.5, 0, 1) * Mask
        # Mask = (np.where((Dist_p2p / Dist)>1.5, 0, 1) + Mask) - np.where((Dist_p2p / Dist)>1.5, 0, 1) * Mask
        # Mask = np.where((Dist / Dist_p2p)>1.5, 0, 1)
        Mask = TruncatedDist / np.abs(TruncatedDist)  # sign of the truncated distance
        Mask_list += [Mask]

        # TruncatedDist = Mask*TruncatedDist - (1 - Mask)

        TruncatedDist_list += [TruncatedDist]
    # TruncatedDist = np.median(TruncatedDist_list, axis=0)
    # Mask = Mask_list[0]
    # for i in range(1, len(Mask_list)):
    #     Mask = Mask * Mask_list[i]
    # Mask = np.where(np.abs(np.sum(Mask_list, axis=0))<3, 0, 1)

    # TruncatedDist = Mask*TruncatedDist - (1 - Mask)
    # TruncatedDist = np.average(TruncatedDist_list, axis=0)

    # print("Time: {}".format(time.time() - start))
    TruncatedDist = TruncatedDist_list[0]
    return TruncatedDist * convVal
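
The quantity being truncated is a signed point-to-plane distance n·p + d, with the plane offset d = -n·v taken from the matched mesh vertex v; a toy check:

import numpy as np
n = np.array([0.0, 0.0, 1.0])  # unit normal of the plane z = 1
v = np.array([0.0, 0.0, 1.0])  # vertex lying on that plane
d = -n.dot(v)
p = np.array([2.0, 3.0, 4.0])
print(n.dot(p) + d)            # 3.0 -> p sits three units above the plane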
Example #23
 def setUp(self):
     self.nn = FLANN(iterations=11)
Example #24
 def setUp(self):
     self.nn = FLANN()
Example #25
def __pipe_match(desc1, desc2):
    flann_ = FLANN()
    flann_.build_index(desc1, **params.__VSMANY_FLANN_PARAMS__)
    fm, fs = mc2.match_vsone(desc2, flann_, 64)
    return fm, fs
Example #26
 def __init__(self, k: int):
     self.k = k
     self.flann = FLANN()
Example #27
    def __init__(self, sbapp_list, filename, densify, sigma_noise, denoise,
                 **args):
        CanvasApp.__init__(self, **args)
        self.sbapp_list = sbapp_list
        self.sbapp_list.append(self)

        self.window_diagonal = math.sqrt(self.sizex**2 + self.sizey**2)
        self.toplevel.title(
            "Shrink the balls [{}] - densify={}x, noise={}, denoise={} ".
            format(filename, densify, sigma_noise, denoise))

        self.toplevel.bind('h', self.print_help)

        self.toplevel.bind('a', self.ma_auto_stepper)
        self.toplevel.bind('b', self.draw_all_balls)
        self.toplevel.bind('t', self.toggle_inout)
        self.toplevel.bind('h', self.toggle_ma_stage_geom)  # note: overrides the 'h' -> print_help binding above

        self.inner_mode = True
        self.draw_stage_geom_mode = 'normal'

        self.toplevel.bind('i', self.draw_topo)
        self.toplevel.bind('o', self.draw_topo)
        self.toplevel.bind('u', self.draw_topo)
        self.toplevel.bind('p', self.draw_topo)

        self.toplevel.bind('z', self.spawn_mapperapp)
        self.toplevel.bind('f', self.spawn_filterapp)
        self.toplevel.bind('s', self.spawn_shrinkhistapp)

        self.toplevel.bind('1', self.draw_normal_map_lfs)
        self.toplevel.bind('2', self.draw_normal_map_theta)
        self.toplevel.bind('3', self.draw_normal_map_lam)
        self.toplevel.bind('4', self.draw_normal_map_radii)
        self.toplevel.bind('`', self.draw_normal_map_clear)

        self.toplevel.bind('c', self.clear_overlays)
        self.canvas.pack()

        self.toplevel.bind("<Motion>", self.draw_closest_ball)
        self.toplevel.bind("<Key>", self.ma_step)
        self.toplevel.bind("<ButtonRelease>", self.ma_step)
        self.coordstext = self.canvas.create_text(self.sizex,
                                                  self.sizey,
                                                  anchor='se',
                                                  text='')
        self.ball_info_text = self.canvas.create_text(10,
                                                      self.sizey,
                                                      anchor='sw',
                                                      text='')

        self.stage_cache = {1: [], 2: [], 3: []}
        self.topo_cache = []
        self.highlight_point_cache = []
        self.highlight_cache = []
        self.poly_cache = []
        self.normalmap_cache = []

        self.mapper_window = None
        self.plotter_window = None
        self.shrinkhist_window = None

        self.kdtree = FLANN()
Example #28
import json
import pickle

import h5py
from pyflann import FLANN
import pandas as pd

with open("data/jawiki_split_1/dictionary.json") as f:
    dictionary = json.load(f)

with h5py.File("model/jawiki_split_1/embeddings_all_0.v50.h5", "r") as f:
    embeddings = f["embeddings"][:, :]

flann = FLANN()
flann.build_index(embeddings)

title2id = {}
with open("data/jawiki-20190901-page.sql.tsv") as f:
    for line in f:
        l = line.strip().split("\t")
        if l[1] != "0":
            continue
        title2id[l[2]] = l[0]
id2title = {v: k for k, v in title2id.items()}


def search(query_title):
    print(f"query: {query_title}")
    query_index = dictionary["entities"]["all"].index(title2id[query_title])
    for rank, result_index in enumerate(
            flann.nn_index(embeddings[query_index], num_neighbors=10)[0][0]):
        # loop body truncated in the source; a plausible reconstruction using
        # the otherwise-unused id2title mapping built above:
        entity_id = dictionary["entities"]["all"][result_index]
        print(rank, id2title.get(entity_id, entity_id))
Example #29

def stacksize(since=0.0):
    """Return stack size in bytes.
    """
    return _VmB('VmStk:') - since


if __name__ == '__main__':
    print('Profiling Memory usage for pyflann; CTRL-C to stop.')
    print('Increasing total process memory, relative to the python memory, ')
    print('implies a memory leak in the external libs.')
    print('Increasing python memory implies a memory leak in the python code.')

    h = hpy()

    while True:
        s = str(h.heap())

        print('Python: %s;    Process Total: %s' % (s[: s.find('\n')], memory()))

        X1 = rand(50000, 2)
        X2 = rand(50000, 2)
        pf = FLANN()
        nnlist = pf.nn(X1, X2)
        del X1
        del X2
        del nnlist
        del pf
        gc.collect()
Example #30
import os
from apt_importers import *
import numpy as np
from pyflann import FLANN
from pyobb.obb import OBB
from scipy.spatial import ConvexHull
from clusters import community_structure

fl = FLANN()

#import tensorflow as tf
#import matplotlib
#matplotlib.use('Agg')

import matplotlib.pyplot as plt
dpos = get_dpos('R12_Al-Sc.epos', 'ranges.rrng')
obb = OBB.build_from_points(dpos.loc[:, ['x', 'y', 'z']].values)

Sc_pos = dpos.loc[dpos.Element == 'Sc', :]

import time

t = time.time()
pos1, pos2, _ = singleton_removal(Sc_pos, k=10, alpha=0.01)
el_time = time.time() - t

viz = False
if viz:
    cm = plt.get_cmap('gist_rainbow')
    cmap = np.asarray([cm(1. * i / 2) for i in range(2)])
    colors = [np.tile(cmap[0, :], (len(pos1), 1)), np.tile(cmap[1, :], (len(pos2), 1))]