Example #1
def generate_pair(Tij, sid, tid, vi, vj, Ti, Tj, idx1, idx2, gt, suffix):
    width = 640
    height = 480
    #assert width == mtgt.width
    #assert height == mtgt.height
    assert height == 480
    assert width == 640

    idx1 = idx1[0, :]
    idx2 = idx2[0, :]
    tree_tgt = NN(n_neighbors=1, algorithm='kd_tree').fit(vj.T)
    #Tij = icp(vi.T, vj.T, init_pose=Tij, dmax=0.2, tree_tgt = tree_tgt, numSamples=2000)
    Rij, tij = decompose(Tij)
    label = __get_label__(Rij, tij, Ti, Tj)
    vi = (Rij.dot(vi).T + tij).T
    #print(src, tgt, label)
    #""" Compute Dist Image for Image 1 """
    #dist1, index1 = tree[tgt].kneighbors(v1)
    #dist2, index2 = tree[src].kneighbors(v2)
    #print([np.mean(sorted(dist1)[:10000*i]) for i in range(1, 8)])
    #print([np.mean(sorted(dist2)[:10000*i]) for i in range(1, 8)])
    tree_src = NN(n_neighbors=1, algorithm='kd_tree').fit(vi.T)

    #v2 = Rij.T.dot().T - Rij.T.dot(tij); idx2 = mtgt.validIdx
    dist1, index1 = tree_tgt.kneighbors(vi.T)
    dist2, index2 = tree_src.kneighbors(vj.T)

    #import ipdb; ipdb.set_trace()
    image1 = np.zeros(width * height) + dist1.max()
    #print(dist1.max())
    image1[idx1] = dist1[:, 0]
    #image1 = (image1 - image1.min()) / (image1.max() - image1.min()) * 255.0
    image1 = np.power(image1, 0.25)
    image1 = np.reshape(image1, [height, width])
    #print('frac of hole = %f' % fracOfHole)
    """ Compute Dist Image for Image 2 """
    #print(dist2.max())
    image2 = np.zeros(width * height) + dist2.max()
    image2[idx2] = dist2[:, 0]
    #image2 = (image2 - image2.min()) / (image2.max() - image2.min()) * 255.0
    image2 = np.power(image2, 0.25)
    image2 = np.reshape(image2, [height, width])
    #print('frac of hole = %f' % fracOfHole)

    #######################################################################
    #""" Save Figure """
    #image_concat = np.concatenate((image1, image2), axis=1)
    #
    ##pathlib.Path('knn_images_%d_%d_%s' % (sid, tid, gt)).mkdir(parents=True, exist_ok=True)
    #plt.imshow(image_concat, cmap='hot')
    ##plt.colorbar()
    #if gt:
    #    plt.savefig('knn_images/%d_%d_%d_%s_gt.png' % (label, sid, tid, suffix))
    #else:
    #    plt.savefig('knn_images/%d_%d_%d_%s_recover.png' % (label, sid, tid, suffix))
    #######################################################################

    image = np.stack((image1, image2), axis=0)
    assert image.shape == (2, height, width)
    return image, label, Tij
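
The core move in generate_pair, rendering nearest-neighbor residuals back into the depth-image grid, can be reproduced in isolation. A minimal sketch with synthetic data; the 480x640 shape and the 0.25 gamma come from the example above, while the point clouds and valid-pixel indices are made up:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

height, width = 480, 640
rng = np.random.default_rng(0)
src = rng.standard_normal((5000, 3))   # source point cloud
tgt = rng.standard_normal((6000, 3))   # target point cloud
valid_idx = rng.choice(width * height, size=5000, replace=False)  # pixels with depth

# distance from every source point to its nearest target point
dist, _ = NN(n_neighbors=1, algorithm='kd_tree').fit(tgt).kneighbors(src)

# splat the residuals back into the image grid; holes get the max distance
image = np.full(width * height, dist.max())
image[valid_idx] = dist[:, 0]
image = np.power(image, 0.25).reshape(height, width)  # compress dynamic range
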
Example #2
def eval_mesh2mesh(mesh_dataset, num_views, args, RESULT, pairs, PAIR_ERRORS):
    def get_result(mesh_id):
        ori_pos = []
        pred_before_reg = []
        pred_after_reg = []
        mesh = mesh_dataset[mesh_id]
        mesh_points = mesh.pos.cpu().numpy()
        for view_id in range(num_views):
            idx = mesh_id * num_views + view_id
            res_dict = sio.loadmat(RESULT.format(idx))
            ori_pos.append(res_dict['ori_pos'])
            pred_before_reg.append(res_dict['pred_before_reg'])
            pred_after_reg.append(res_dict['pred_after_reg'])
        ori_pos = np.concatenate(ori_pos, axis=0)
        tree = NN(n_neighbors=5, n_jobs=10).fit(ori_pos)
        dists, indices = tree.kneighbors(mesh_points)
        weights = (0.01**2) / (0.01**2 + dists**2)
        weights = weights / weights.sum(-1)[:, np.newaxis]
        pred_before_reg = np.concatenate(pred_before_reg, axis=0)
        pred_after_reg = np.concatenate(pred_after_reg, axis=0)
        mesh_pred_before_reg = (pred_before_reg[indices] *
                                weights[:, :, np.newaxis]).sum(-2)
        mesh_pred_after_reg = (pred_after_reg[indices] *
                               weights[:, :, np.newaxis]).sum(-2)
        return mesh_pred_before_reg, mesh_pred_after_reg

    result_dict = {}
    pairs, gt_indices_list = pairs
    for pair, gt_indices in zip(pairs, gt_indices_list):
        pair_0, pair_1 = pair
        mesh_0 = mesh_dataset[pair_0]
        mesh_1 = mesh_dataset[pair_1]
        if result_dict.get(pair_0, None) is not None:
            pred_before_reg_0, pred_after_reg_0 = result_dict[pair_0]
        else:
            pred_before_reg_0, pred_after_reg_0 = get_result(pair_0)
            result_dict[pair_0] = (pred_before_reg_0, pred_after_reg_0)
        if result_dict.get(pair_1, None) is not None:
            pred_before_reg_1, pred_after_reg_1 = result_dict[pair_1]
        else:
            pred_before_reg_1, pred_after_reg_1 = get_result(pair_1)
            result_dict[pair_1] = (pred_before_reg_1, pred_after_reg_1)
        tree1 = NN(n_neighbors=1, n_jobs=10).fit(pred_before_reg_1)
        _, indices_before_reg = tree1.kneighbors(pred_before_reg_0)
        indices_before_reg = indices_before_reg[:, 0]
        tree1 = NN(n_neighbors=1, n_jobs=10).fit(pred_after_reg_1)
        _, indices_after_reg = tree1.kneighbors(pred_after_reg_0)
        indices_after_reg = indices_after_reg[:, 0]
        mesh_points_1 = mesh_1.pos.cpu().numpy()
        errors_before_reg = np.linalg.norm(mesh_points_1[gt_indices] -
                                           mesh_points_1[indices_before_reg],
                                           2,
                                           axis=-1)
        errors_after_reg = np.linalg.norm(mesh_points_1[gt_indices] -
                                          mesh_points_1[indices_after_reg],
                                          2,
                                          axis=-1)
        errors = np.stack([errors_before_reg, errors_after_reg], axis=-1)
        np.savetxt(PAIR_ERRORS.format(pair_0, pair_1), errors, fmt='%.6f %.6f')
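
The splatting scheme inside get_result is a soft inverse-distance kernel, eps^2 / (eps^2 + d^2) over 5 neighbors with each row normalized to sum to 1. A minimal sketch of the same transfer with made-up arrays:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
scan_points = rng.standard_normal((1000, 3))  # fused per-view positions
scan_preds = rng.standard_normal((1000, 3))   # per-point predictions to transfer
mesh_points = rng.standard_normal((200, 3))

tree = NN(n_neighbors=5).fit(scan_points)
dists, indices = tree.kneighbors(mesh_points)      # both [200, 5]
weights = (0.01**2) / (0.01**2 + dists**2)         # close neighbors dominate
weights /= weights.sum(-1, keepdims=True)          # rows sum to 1
mesh_preds = (scan_preds[indices] * weights[..., np.newaxis]).sum(-2)  # [200, 3]
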
Example #3
    def fit(self, x, y=None):
        x = self._check(x)
        self.d0_max, self.d0_min = x[:, 0].max(), x[:, 0].min()
        self.d1_max, self.d1_min = x[:, 1].max(), x[:, 1].min()
        self.d2_max, self.d2_min = ((x[:, 2].max(), x[:, 2].min())
                                    if self.dim == 3 else (None, None))
        self.feature_names = ['d0_square', 'd1_square']
        self.feature_names += ['d2_square'] if self.dim == 3 else []
        self.feature_names += ['square']

        if self.n_regions:
            from sklearn.cluster import KMeans
            from sklearn.neighbors import NearestNeighbors as NN

            self.cluster = KMeans(n_clusters=self.n_regions).fit(x)
            self.feature_names += ['region']

            if self.distance:
                nn = {}
                clusters = np.c_[self.cluster.predict(x)]
                for i in range(self.n_regions):
                    if np.where(clusters == i)[0].shape[0] > 1:
                        self.feature_names += [
                            'nearest_dist_to_' + str(i) + '_region'
                        ]
                        nn[i] = NN(n_neighbors=2).fit(
                            x[np.where(clusters == i)[0]])
                    else:
                        nn[i] = None
                self.nn = nn

        if y is not None:
            from sklearn.neighbors import NearestNeighbors as NN

            y = self._check(y, y=True)
            if np.unique(y).shape[0] > 15:
                raise ValueError(
                    "y can only be used for classification, where the number "
                    "of unique values is less than 15")

            ys = {}
            for i, val in enumerate(np.unique(y)):
                if np.where(y == val)[0].shape[0] > 1:
                    self.feature_names += [
                        'nearest_dist_to_' + str(val) + '_y'
                    ]
                    ys[i] = NN(n_neighbors=2).fit(x[np.where(y == val)[0]])
                else:
                    ys[i] = None
            self.ys = ys

        return self
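
The transform counterpart of this fit is not shown, so here is a hedged sketch of how the per-region distance feature would plausibly be computed: KMeans labels each sample, then each region's fitted NN yields a distance, with n_neighbors=2 giving a fallback column for a training point querying its own region. All sizes are invented:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
x = rng.standard_normal((300, 2))

cluster = KMeans(n_clusters=4, n_init=10).fit(x)
labels = cluster.predict(x)
region_nn = {i: NN(n_neighbors=2).fit(x[labels == i]) for i in range(4)}

# distance from every sample to its nearest point in region 0;
# a point already inside region 0 would use column 1 (its self-distance is 0)
d, _ = region_nn[0].kneighbors(x)
nearest_dist_to_0 = d[:, 0]
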
Example #4
def get_param_from_hdf5(hdf5, param, cols, all_param):
    '''Searches the hdf5 library for rows of interest, using a binary-like search.'''
    go_on = False
    query = hdf5.read_where('(%s == %f) & (%s == %f)' %
                            (cols[0], param[0], cols[1], param[1]))
    if len(query) > 0:
        #found something check other params
        for i in query:
            for j, col in enumerate(cols):
                if not i[col] == param[j]:
                    go_on = True
                    break
            else:
                #match!
                return i['spec']
    # no match, try interpolation
    if len(query) == 0 or go_on:
        # found nothing; get the nearest neighbors
        Nei_clas = NN(n_neighbors=len(param) * 2).fit(all_param)
        index = nu.ravel(Nei_clas.kneighbors([param])[1])
        #get spec
        spec = []
        for i in index:
            spec.append(hdf5.cols.spec[i][:, 1])
        spec = nu.asarray(spec)
        #get spec and interp
        print('interpolating')
        return nu.vstack((hdf5.cols.spec[i][:, 0],
                          n_dim_interp(all_param[index], param, spec))).T
    return []
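
Note that kneighbors returns a (distances, indices) tuple, which is why the snippet indexes [1] before raveling. A toy sketch of the bracketing lookup, assuming all_param is a 2-D grid of model parameters:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

grid = np.array([[1.0, 0.1], [1.0, 0.2], [2.0, 0.1], [2.0, 0.2]])  # model grid
param = [1.4, 0.17]                                                # query point

# 2 neighbors per dimension bracket the query for interpolation
nn = NN(n_neighbors=len(param) * 2).fit(grid)
index = np.ravel(nn.kneighbors([param])[1])   # kneighbors wants a 2-D query
print(grid[index])                            # grid points to interpolate between
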
Example #5
def skiki_NN(hdf5, col, param):
    '''Uses scikit-learn to find the len(col)*2 nearest neighbors in an hdf5 database.'''
    # rows, batches, h5f and vec are assumed to be module-level globals here
    d = np.empty((rows * batches, ))
    for i in range(batches):
        nbrs = NN(n_neighbors=len(col) * 2, algorithm='ball_tree').fit(
            h5f.root.carray[i * rows:(i + 1) * rows])
        distances, indices = nbrs.kneighbors(vec)  # put in dict?
Example #6
def visualize_correspondence_smoothness(points, correspondence, max_error, min_error):
  """ Visualize smoothness of predicted correspondences
  Args:
    points: o3d.geometry.TriangleMesh, contains N points.
    correspondence: [N]
  """
  pcd = getPointCloud(points)
  model = helper.loadSMPLModels()[0]
  pcd.points = o3d.utility.Vector3dVector(np.array(pcd.points)+np.array([1.0,0,0]).reshape((1, 3)))
  v = np.array(pcd.points)
  N = v.shape[0]
  tree = NN(n_neighbors=20).fit(v)
  dists, indices = tree.kneighbors(v)
  target = model.verts[correspondence, :] # [N, 3]
  centers = target[indices, :].mean(axis=1) # [N, 3]
  diff_norms = np.square(target[indices, :] - centers[:, np.newaxis, :]).sum(axis=-1).mean(axis=1) # [N]
  diff_norms[np.where(diff_norms > max_error)] = max_error
  diff_norms[np.where(diff_norms < min_error)] = min_error
  max_indices = np.argsort(diff_norms)[-1000:]
  edges = getEdges(v[max_indices, :], target[max_indices, :])
  colors = (diff_norms-min_error)/(max_error - min_error)
  r = np.outer(np.ones(N), np.array([1.0, 0, 0])) # [N, 3]
  b = np.outer(np.ones(N), np.array([0., 0, 1.0])) # [N, 3]
  colors = b*(1.0-colors[:, np.newaxis])+r*(colors[:, np.newaxis])
  pcd.colors = o3d.utility.Vector3dVector(colors)
  o3d.draw_geometries([pcd, getTriangleMesh(model.verts, model.faces), edges])
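
The smoothness measure itself, the variance of mapped targets over each point's spatial k-NN neighborhood, is independent of the Open3D and SMPL plumbing. A minimal sketch with random stand-ins for the points and the correspondence targets:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
v = rng.standard_normal((500, 3))        # input points
target = rng.standard_normal((500, 3))   # where the correspondence maps each point

_, nbr = NN(n_neighbors=20).fit(v).kneighbors(v)          # [500, 20] neighborhoods
mapped = target[nbr]                                      # [500, 20, 3]
centers = mapped.mean(axis=1, keepdims=True)              # [500, 1, 3]
smoothness = np.square(mapped - centers).sum(-1).mean(1)  # [500]; low = smooth
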
Example #7
def predict_frame_pixel(data, def_param=(shared_v_data, shared_u_data)):
    y, x = data

    shared_delayed_v_data = create_0d_delay_coordinates(shared_v_data[:, y, x],
                                                        ddim,
                                                        tau=32)

    delayed_patched_v_data_train = shared_delayed_v_data[:trainLength]
    u_data_train = shared_u_data[:trainLength, y, x]

    delayed_patched_v_data_test = shared_delayed_v_data[
        trainLength:trainLength + testLength]
    u_data_test = shared_u_data[trainLength:trainLength + testLength, y, x]

    flat_v_data_train = delayed_patched_v_data_train.reshape(-1, ddim)
    flat_u_data_train = u_data_train.reshape(-1, 1)

    flat_v_data_test = delayed_patched_v_data_test.reshape(-1, ddim)
    flat_u_data_test = u_data_test.reshape(-1, 1)

    neigh = NN(2, n_jobs=1)  #n_jobs=26

    neigh.fit(flat_v_data_train)

    distances, indices = neigh.kneighbors(flat_v_data_test)

    pred = (
        (flat_u_data_train[indices[:, 0]] + flat_u_data_train[indices[:, 1]]) /
        2.0).ravel()

    return pred
Example #8
def masked_feature_averaging(points, features, rotations, target_points):
  """Average point features.

  Args:
    points: [N, N_points, 3], N point clouds.
    features: [N, N_points, d], point-wise features for each point cloud.
    rotations: [N, 3, 3], rotation of each point cloud.
    target_points: [M, 3], points to average features on.

  Returns:
    mask: [X] of range [0, 6890)
    target_features: [M, d] averaged features.
  """
  N = points.shape[0]
  attached_points = []
  for i in range(N):
    points_i = points[i, :, :]
    Ri = rotations[i]
    points_i = Ri.T.dot(points_i.T).T
    attached_points.append(points_i)
  attached_points = np.concatenate(attached_points, axis=0) # [N*N_points, 3]
  attached_features = features.reshape((-1, features.shape[-1])) # [N*N_points, d]
  tree = NN(n_neighbors=5).fit(attached_points)
  dists, indices = tree.kneighbors(target_points) # [M, 5]
  extracted_features = attached_features[indices, :] # [M, 5, d]
  average_features = extracted_features.mean(axis=1) # [M, d]
  mask = np.where(dists[:, 0] < 0.02)[0]
  return mask, average_features[mask, :]
Example #9
def nearestNeighbor():
    patientsArr, patients, geneTags, patientTags, meanGeneVals = makeArray()

    neigh = NN(n_neighbors=5, radius=1.0)

    neigh.fit(patientsArr)
    #    ct = 0
    print('# patients', len(patientTags))
    print('# genes', len(geneTags))
    for i in range(len(patientTags)):
        for j in range(len(geneTags)):
            if (patients[i][j] == 'NaN') or (patients[i][j] == 'NaN\n'):
                #                ct+=1
                #                if ct>2:
                #                    sys.exit()

                #                print 'patient ', i, ' has a missing value '
                nbrs = neigh.kneighbors([patientsArr[i]])
                knbrs = nbrs[1]
                #                print 'nearest neighbors are ', nbrs[0],'  ||||  ' ,nbrs[1]
                #                print knbrs
                #                print 'new value for patients [', i+1, '][', j+1,'] = '
                patients[i][j] = retMissingVal(knbrs, patientsArr, j)
                #calculate the mean of the genevalues of these patients


#                pass
# """fill in missing values"""
#                patients[i][j]=meanGeneVals[j]
            else:
                patients[i][j] = float(patients[i][j])
    fh_pickle = open('imputed_data', 'wb')
    pickle.dump(patients, fh_pickle)
    fh_pickle.close()
    return patients
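
Modern scikit-learn packages this fill-from-neighbors pattern as sklearn.impute.KNNImputer, which handles the NaN masking internally; a minimal sketch:

import numpy as np
from sklearn.impute import KNNImputer

X = np.array([[1.0, 2.0, np.nan],
              [3.0, 4.0, 3.0],
              [np.nan, 6.0, 5.0],
              [8.0, 8.0, 7.0]])

# each missing entry becomes the mean of that column over the 2 nearest rows
print(KNNImputer(n_neighbors=2).fit_transform(X))
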
Example #10
	def fit(self, x, y):
		# calculate sample ratio
		class_numsamples_dict = {}
		class_samples_counts = y.value_counts()
		max_samples = class_samples_counts.iloc[0]
		class_list = y.unique()
		for c in class_list:
			num_samples = class_samples_counts[c]
			class_numsamples_dict[c] = float(max_samples-num_samples)/max_samples
		sample_ratio = []
		for index, value in y.items():
			c = value
			sample_ratio.append(class_numsamples_dict[c])

		# calculate knn ratio
		# drop=True keeps the old index from being added as a feature column
		x = x.reset_index(drop=True)
		y = y.reset_index()
		nn = NN(n_neighbors=int(self.k))
		nn.fit(x)
		knn_ratio = []
		for index, value in x.iterrows():
			if index % 100 == 0:
				print(index)
			num_sameclass = 0
			distance, indices = nn.kneighbors([value])
			relevant_indices = indices[0][1:]
			for ri in relevant_indices:
				if y.loc[ri].values[1] == y.loc[index].values[1]:
					num_sameclass += 1
			knn_ratio.append(num_sameclass / float(self.k))
		plt.hist(knn_ratio)
		plt.show()
Example #11
def update_correspondences(fixed_points,
                           fixed_normals,
                           moving_points,
                           moving_normals,
                           correspondences,
                           weights,
                           sigma=1e-2):
    """ Compute Nearest Neighbor Distances

  """
    fixed = np.concatenate([fixed_points, 0.001 * fixed_normals], axis=-1)
    moving = np.concatenate([moving_points, 0.001 * moving_normals], axis=-1)
    tree = NN(n_neighbors=1).fit(moving)
    dists, indices = tree.kneighbors(fixed)
    dists = dists[:, 0]
    valid_idx = np.where(dists < 0.1)[0]
    dists = dists[valid_idx]
    indices = indices[valid_idx, 0].astype(np.int32)
    new_fixed_points = np.concatenate(
        [fixed_points, fixed_points[valid_idx, :]], axis=0)
    new_fixed_normals = np.concatenate(
        [fixed_normals, fixed_normals[valid_idx, :]], axis=0)
    new_correspondences = np.concatenate([correspondences, indices], axis=0)
    sigma2 = np.ones(dists.shape[0]) * sigma * sigma
    new_weights = np.concatenate(
        [weights, sigma2 / (sigma2 + np.square(dists))], axis=0)
    fixed_dict = {
        'points': new_fixed_points,
        'normals': new_fixed_normals,
        'correspondences': new_correspondences,
        'weights': new_weights
    }

    return fixed_dict
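
The matching trick here is the 6-D search space: positions concatenated with normals scaled by 0.001, so candidates must be close in space and, among near-ties, agree in orientation. The same search isolated with made-up arrays:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
fixed_points, fixed_normals = rng.standard_normal((400, 3)), rng.standard_normal((400, 3))
moving_points, moving_normals = rng.standard_normal((500, 3)), rng.standard_normal((500, 3))

# position dominates; the down-weighted normal only breaks near-ties
fixed = np.concatenate([fixed_points, 0.001 * fixed_normals], axis=-1)
moving = np.concatenate([moving_points, 0.001 * moving_normals], axis=-1)
dists, indices = NN(n_neighbors=1).fit(moving).kneighbors(fixed)
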
Example #12
def smpl_align(points, correspondences, weights=None, max_iter=30):
    """
  Args:
    points: [N, 3] numpy array
    correspondences: [N]
  Returns:
    indices: [N] updated correspondence
  """
    N = points.shape[0]
    model = helper.loadSMPLModels()[0]
    if weights is None:
        weights = np.ones(N)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points)
    pcd.estimate_normals()
    normals = np.array(pcd.normals)
    params = align(points,
                   correspondences,
                   normals,
                   model,
                   weights,
                   max_iter=max_iter)
    model.update_params(params)
    tree = NN(n_neighbors=1, n_jobs=10).fit(model.verts)
    dists, indices = tree.kneighbors(points)
    indices = indices[:, 0]
    return indices
Example #13
def buildGraphSK(MatL,MatU,rbf_sigma=None,knn=0):
	X = np.vstack((MatL, MatU))
	nins = NN(n_neighbors=knn, metric='euclidean').fit(X)
	W = nins.kneighbors_graph(X, knn, mode='distance')
	W.data = np.exp(-W.data / rbf_sigma)
	affinity_UL = W[MatL.shape[0]:, :MatL.shape[0]]  # .toarray()
	affinity_UU = W[MatL.shape[0]:, MatL.shape[0]:]  # .toarray()
	return affinity_UL,affinity_UU
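
kneighbors_graph returns a sparse CSR matrix whose .data holds the hop distances, so the exponential can be applied in place and the labeled/unlabeled blocks sliced out directly. A self-contained sketch with hypothetical sizes:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
MatL, MatU = rng.standard_normal((30, 4)), rng.standard_normal((70, 4))  # labeled / unlabeled
X = np.vstack((MatL, MatU))

nn = NN(n_neighbors=5, metric='euclidean').fit(X)
W = nn.kneighbors_graph(X, n_neighbors=5, mode='distance')  # sparse [100, 100] CSR
W.data = np.exp(-W.data / 0.5)   # distances -> RBF affinities, in place
affinity_UL = W[30:, :30]        # unlabeled-to-labeled block
affinity_UU = W[30:, 30:]        # unlabeled-to-unlabeled block
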
Example #14
def create_smote_vectors(df, duplicate_ratio=1.0, proj_distance=1.0, filename='smote'):
	print('Generating Synthetic Vectors (SMOTE)...')

	# remember df column names to apply to smote df at the end
	df_colnames = df.columns

	# find the number of samples that need to be created for each class
	class_numsamples_dict = {}
	class_samples_counts = df.cl.value_counts()
	max_samples = class_samples_counts.iloc[0]
	class_list = df.cl.unique()
	for c in class_list:
		num_samples = class_samples_counts[c]
		class_numsamples_dict[c] = (max_samples-num_samples) / num_samples

	# fit the knn algorithm
	xtrain = df.drop(['cl','filepath'], axis=1)
	nn = NN(n_neighbors=2)
	nn.fit(xtrain)

	# find vector shape
	vshape = xtrain.loc[0].shape

	smote_vectors = []
	# for each instance in the dataframe
	for index, value in df.iterrows():
		# print status update
		if index % 250 == 0:
			print('%s row out of %s' % (str(index), str(df.shape[0])))

		# grab the class
		cl = value.cl

		# eliminate class and filepath to find nn
		x = value.drop(['cl','filepath'])

		# find the nearest neighbor
		distance, indices = nn.kneighbors([x])
		nn_x = df.loc[indices[0][1]].drop(['cl', 'filepath'])

		# generate class_numsamples_dict[class]*duplicate_ratio samples and write to list
		diff = x - nn_x
		for i in range(int(class_numsamples_dict[cl]*duplicate_ratio)):
			r = np.random.random(size=vshape)*proj_distance
			random_difference = diff*r
			v_smote = list(random_difference + x)
			v_smote.append(cl)
			v_smote.append('synthetic')
			v_smote.append(index)
			smote_vectors.append(v_smote)

	# turn smote list into data frame
	df_smote = pd.DataFrame(smote_vectors)
	df_smote.index = df_smote.iloc[:, 24]
	df_smote = df_smote.drop([24], axis=1)
	df_smote.columns = df_colnames

	# write dataframe to csv
	df_smote.to_csv(DATA_PATH + '%s.csv' % str(filename))
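
One detail worth flagging: with diff = x - nn_x, the example extrapolates away from the neighbor, while textbook SMOTE interpolates toward it. A minimal sketch of the textbook variant on a plain array, keeping the example's one-random-factor-per-coordinate choice:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
X = rng.standard_normal((100, 4))     # minority-class samples only

# n_neighbors=2: column 0 is the sample itself, column 1 its nearest other sample
_, idx = NN(n_neighbors=2).fit(X).kneighbors(X)
nn_x = X[idx[:, 1]]

r = rng.random(size=X.shape)          # one random factor per coordinate
X_smote = X + r * (nn_x - X)          # synthetic points between sample and neighbor
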
Example #15
def nearestN():
    X = [[125, 1], [200, 0], [70, 0], [240, 1], [114, 0], [120, 0], [264, 1],
         [85, 0], [150, 0], [90, 0]]
    #    y = [ 0, 0, 0, 0, 1, 0, 0, 1, 0,1 ]
    model = NN(n_neighbors=1, radius=1)
    model.fit(X)
    y = [98., 0.]
    print(model.kneighbors([y]))
Example #16
    def __init__(self, X, d_max, gan_model=None):
        self.d_max = d_max
        self.gan_model = gan_model
        self.reachability_log = str(
            d_max) if gan_model is None else "Discriminator"
        # Use clean dataset
        print("Fitting node validator")
        self.nb = NN(n_neighbors=2).fit(X)
Example #17
    def predict_heads_by_graph(self, X, Y_nums, label_graph, heads, tail_space, head_space, model_path=None):
        if model_path:
            self.model.load_state_dict(torch.load(model_path))
        self.model.eval()
        self.model.to(self.device)
        tail_nbrs = NN(n_neighbors=5, algorithm='auto').fit(tail_space)
        all_input_ids = torch.tensor(X)
        logits = np.zeros([len(X), tail_space.shape[1]])
        bs = self.hypes.eval_batch_size
        for step in trange(int(len(X)/bs)-1):
            input_ids = all_input_ids[step*bs:(step+1)*bs].to(self.device)
            with torch.no_grad():
                logit = self.model(input_ids)
            logits[step*bs:(step+1)*bs] = logit.cpu().detach().numpy()

        # print('Calculating Accuracy...')
        preds_path = self.ds_path + '/knn_preds'
        if not os.path.exists(preds_path):
            _, preds = tail_nbrs.kneighbors(logits)
            with open(preds_path, 'wb') as f:
                pkl.dump(preds, f)
        else:
            with open(preds_path, 'rb') as f:
                preds = pkl.load(f)

        head_subs = {}
        num_corrects = np.zeros(5)
        for i, pred_tails in tqdm(enumerate(preds)):
            truth = Y_nums[i]
            for pred_tail in pred_tails:
                head_neis = [int(x) for x in list(label_graph.neighbors(str(pred_tail))) if int(x) in heads]
                if i in head_subs:
                    head_subs[i] += head_neis
                else:
                    head_subs[i] = head_neis

            if i in head_subs:
                voted_list = np.array(sorted(Counter(head_subs[i]).items(), reverse=True, key=lambda kv: kv[1]))[:,0]
                head_preds = voted_list[:5]
                # head_embs = head_space[head_subs[i]]
                # head_nbrs = NN(n_neighbors=5, algorithm='auto').fit(head_embs)
                # _, head_preds = head_nbrs.kneighbors([logits[i]])
                # head_preds = head_preds[0]
                if len(set(truth) & set(head_preds)):
                    for j in range(len(head_preds)):
                        if head_preds[j] in truth:
                            num_corrects[j:] += 1
            else:
                print(i, 'does not have heads')

        for i in range(1, 6):
            num_corrects[i-1] /= i

        head_precision = np.round(num_corrects / len(X), 4)
        print('Head Precisions:', head_precision)
        return head_precision
Example #18
def update_nn_correspondences(correspondences, model, raw_pc,
                              max_nn_dist, nc_3d, visualize=False):
  w = 0.001
  pcd = o3d.geometry.PointCloud()
  print(raw_pc.shape)
  pcd.points = o3d.utility.Vector3dVector(raw_pc)
  o3d.estimate_normals(pcd)
  signs = np.sign(np.array(pcd.normals).dot(np.array([1.0,0,0])))
  pcd.normals = o3d.utility.Vector3dVector(signs[:, np.newaxis]*np.array(pcd.normals))
  #o3d.io.write_point_cloud('hey.ply', pcd)
  #dirc = np.array([[a*1.0,0,0] for a in range(1000)])/100.0
  #pcd0 = o3d.geometry.PointCloud()
  #pcd0.points = o3d.utility.Vector3dVector(dirc)
  #o3d.draw_geometries([pcd, pcd0])

  raw = np.concatenate([raw_pc, w*np.array(pcd.normals)], axis=1)
  mesh = o3d.geometry.TriangleMesh()
  mesh.vertices = o3d.utility.Vector3dVector(model.verts)
  mesh.triangles = o3d.utility.Vector3iVector(model.faces)
  mesh.compute_vertex_normals()
  model_v = np.concatenate([model.verts, w*np.array(mesh.vertex_normals)], axis=1)

  """
  Compute nearest neighbor correspondences
  from current mesh vertices to point cloud.
  """
  tree = NN(n_neighbors=1).fit(model_v)
  dists, indices = tree.kneighbors(raw)
  indices = indices[:, 0]
  idx = np.where(dists < max_nn_dist)[0]
  nnmask = idx
  indices = indices[idx]
  dists = np.abs(np.sum(np.multiply(raw_pc[idx, :]-model_v[indices, :3], model_v[indices, 3:]), axis=1))
  dists = np.square(dists)
  argmax = np.argmax(dists)
  nn_vertices = raw_pc[idx, :]
  #nn_vertices = model.verts[indices[idx], :]
  nn_vec = np.array(nn_vertices).reshape(-1)
  sigma2 = np.median(dists)
  #print('sigma2=%f' % sigma2)
  nn_weights=np.ones(dists.shape) # (np.ones(dists.shape)*sigma2/(np.ones(dists.shape)*sigma2+dists)) #np.exp(-dists/(2*sigma2))
  if visualize:
    #print(dists[argmax])
    #print(nn_weights[argmax])
    #import ipdb; ipdb.set_trace()
    visualize_points([model.verts, raw_pc, connection(raw_pc[idx[argmax], :], model_v[indices[argmax], :3])])

  indices3d = correspondences['indices3d'][:nc_3d]
  target3d = correspondences['target3d'][:nc_3d].reshape((-1, 3))
  weights3d = correspondences['weights3d'][:nc_3d]
  indices3d = np.concatenate([indices3d, indices], axis=0)
  target3d = np.concatenate([target3d, raw_pc], axis=0)
  weights3d = np.concatenate([weights3d, nn_weights], axis=0)
  correspondences['indices3d'] = indices3d
  correspondences['target3d'] = target3d
  correspondences['weights3d'] = weights3d
Example #19
def ball_tree(M):
    start = time.time()
    nbrs = NN(n_neighbors=k,
              algorithm='ball_tree',
              metric='pyfunc',
              metric_params={"func": js_distance},
              leaf_size=100)
    nbrs.fit(M)
    print "ball tree elapsed time: ", time.time() - start
    return nbrs
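
metric='pyfunc' plus metric_params was the old scikit-learn spelling; current releases take the callable directly (callable metrics work with the brute and ball_tree backends, at a substantial speed cost). A sketch with a stand-in js_distance, since the snippet does not define it:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

def js_distance(p, q):
    # square root of the Jensen-Shannon divergence, a proper metric on histograms
    p, q = p / p.sum(), q / q.sum()
    m = 0.5 * (p + q)
    kl = lambda a, b: np.sum(a[a > 0] * np.log(a[a > 0] / b[a > 0]))
    return np.sqrt(0.5 * kl(p, m) + 0.5 * kl(q, m))

M = np.random.default_rng(0).random((100, 8)) + 1e-9   # rows act as histograms
nbrs = NN(n_neighbors=5, algorithm='ball_tree', metric=js_distance,
          leaf_size=100).fit(M)
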
Example #20
    def fit(self, X, y):
        self.X = np.array(X)
        self.y = np.array(y)
        self.classes = np.array(list(set(self.y)))
        if self.strategy != 'my_own':
            from sklearn.neighbors import NearestNeighbors as NN
            self.nn = NN(n_neighbors=self.k,
                         algorithm=self.strategy,
                         metric=self.metric)
            self.nn.fit(X, y)
Example #21
def predict_inner_pixel(data, def_param=(shared_v_data, shared_u_data)):
    y, x = data

    shared_delayed_v_data = create_2d_delay_coordinates(
        shared_v_data[:, y - patcv_radius:y + patcv_radius + 1,
                      x - patcv_radius:x + patcv_radius +
                      1][:, ::sigma_skip, ::sigma_skip],
        ddim,
        tau=119)
    shared_delayed_patched_v_data = np.empty(
        (ndata, 1, 1, ddim * eff_sigma * eff_sigma))
    shared_delayed_patched_v_data[:, 0, 0] = shared_delayed_v_data.reshape(
        -1, ddim * eff_sigma * eff_sigma)

    delayed_patched_v_data_train = shared_delayed_patched_v_data[:trainLength,
                                                                 0, 0]
    u_data_train = shared_u_data[:trainLength, y, x]

    delayed_patched_v_data_test = shared_delayed_patched_v_data[
        trainLength:trainLength + testLength, 0, 0]
    u_data_test = shared_u_data[trainLength:trainLength + testLength, y, x]

    flat_v_data_train = delayed_patched_v_data_train.reshape(
        -1, shared_delayed_patched_v_data.shape[3])
    flat_u_data_train = u_data_train.reshape(-1, 1)

    flat_v_data_test = delayed_patched_v_data_test.reshape(
        -1, shared_delayed_patched_v_data.shape[3])
    flat_u_data_test = u_data_test.reshape(-1, 1)

    neigh = NN(k, n_jobs=1, algorithm='kd_tree')  #n_jobs=26

    neigh.fit(flat_v_data_train)

    distances, indices = neigh.kneighbors(flat_v_data_test)

    with np.errstate(divide='ignore'):
        weights = np.divide(1.0, distances)

    infinity_mask = np.isinf(weights)
    infinity_row_mask = np.any(infinity_mask, axis=1)
    weights[infinity_row_mask] = infinity_mask[infinity_row_mask]

    denominator = np.repeat(np.sum(weights, axis=1), k).reshape((-1, k))
    weights /= denominator

    pred = 0
    for i in range(k):
        pred += np.multiply(weights[:, i, np.newaxis],
                            flat_u_data_train[indices[:, i]])
    pred = pred.ravel()

    #pred = ((flat_u_data_train[indices[:, 0]] + flat_u_data_train[indices[:, 1]])/2.0).ravel()

    return pred
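
The weighting block deserves a note: 1/distance becomes inf when a test vector exactly matches a training vector, and the boolean-mask assignment then gives exact matches weight 1 and everything else 0. The same scheme in a self-contained sketch:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

k = 4
rng = np.random.default_rng(0)
X_train = rng.standard_normal((500, 8))
y_train = rng.standard_normal((500, 1))
X_test = np.vstack([rng.standard_normal((20, 8)), X_train[:1]])  # last row matches exactly

dist, idx = NN(n_neighbors=k, algorithm='kd_tree').fit(X_train).kneighbors(X_test)

with np.errstate(divide='ignore'):
    w = 1.0 / dist                        # inf on exact matches
inf_rows = np.any(np.isinf(w), axis=1)
w[inf_rows] = np.isinf(w)[inf_rows]       # exact match -> weight 1, others 0
w /= w.sum(axis=1, keepdims=True)

pred = (w[:, :, np.newaxis] * y_train[idx]).sum(axis=1).ravel()  # [21]
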
Example #22
def check_link_deg(vecs, v, v_id, degrees):
    deg = degrees[str(v_id + 1)]
    # print(deg)
    nh = NN(n_neighbors=deg, algorithm='ball_tree')
    nh.fit(vecs)
    v = [list(v)]
    ng = nh.kneighbors(v, return_distance=False)[0]
    g = open('new_graph_deg.txt', 'a')
    for index in ng:
        g.write(str(v_id + 1) + ' ' + str(index + 1) + '\n')
    g.close()
Example #23
def nearest_kdtree(probes, link_points, n=10, is_filter=True):
    _n = n * 3 if is_filter else n
    nn = NN(n_neighbors=_n, algorithm="kd_tree").fit(link_points)
    knn_candidates = nn.kneighbors(probes, return_distance=False)
    if not is_filter:
        return knn_candidates
    knns = []
    for probe, cands in zip(probes, knn_candidates):
        ret = nearest_probe_force(probe, link_points[cands], n)
        knns.append(cands[ret])
    return np.array(knns)
Example #24
def get_minimum_eps(X, metric='euclidean'):
    """
    Return the maximum value that should be used for minimum samples -- floor(0.15*len(X))
    Return the minimum value such that at least 1 point has K neighbors for K = 1 .. floor(0.15*len(X)) 
    """
    minPts = int(np.floor(0.15 * len(X)))
    nn = NN(minPts + 1)
    nn.fit(X)
    knn, idx = nn.kneighbors(X)
    minEps = [knn[:, i].min() for i in range(1, minPts + 1)]
    return (minPts, minEps)
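
This is the standard k-distance heuristic for picking DBSCAN's eps. A usage sketch; minEps[K-1] is the smallest radius at which some point already has K neighbors, so candidate eps values start there:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 2))

min_pts = int(np.floor(0.15 * len(X)))
knn, _ = NN(n_neighbors=min_pts + 1).fit(X).kneighbors(X)  # column 0 is the point itself
min_eps = knn[:, 1:].min(axis=0)   # min_eps[K-1]: smallest radius giving some point K neighbors
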
Example #25
def parallel_check_link_dis(vecs, v, v_id, threshold):
    g = open('new_graph.txt', 'a')
    nhr = NN(radius=threshold)
    nhr.fit(vecs)
    v = [list(v)]
    rng = nhr.radius_neighbors(v, return_distance=False)[0]
    for index in rng:
        if v_id == index:
            continue
        # assume the graph is undirected.
        g.write(str(v_id + 1) + ' ' + str(index + 1) + '\n')
    g.close()
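
radius_neighbors is the fixed-radius counterpart of kneighbors: it returns one index array per query row, which is why the snippet takes [0]. A compact sketch of building edges for a single node:

import numpy as np
from sklearn.neighbors import NearestNeighbors as NN

rng = np.random.default_rng(0)
vecs = rng.standard_normal((100, 16))   # node embeddings

nhr = NN(radius=4.0).fit(vecs)
neighbors = nhr.radius_neighbors([vecs[0]], return_distance=False)[0]
edges = [(0, int(j)) for j in neighbors if j != 0]   # drop the self-match
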
Example #26
def icp(points1, points2_prime, M_init, threshold=0.021):
    M = M_init
    nn = NN(n_neighbors=1).fit(points1)
    for i in range(1000):
        dist, idx = find_nearest(nn, points2_prime, M)
        avg_dist = np.mean(dist)
        print("Iteration %d, average distance %f" % (i, avg_dist))
        if avg_dist < threshold:
            break
        M = np.dot(np.linalg.pinv(points2_prime), points1[idx, :])
    dist, idx = find_nearest(nn, points2_prime, M)
    print("Final result, average distance %f" % np.mean(dist))
    print("M ", M)
    print("M transpose", M.T)
Example #27
def icp_reweighted(source,
                   target,
                   sigma=0.01,
                   max_iter=100,
                   stopping_threshold=1e-4):
    """ If target has no normals, estimate """
    if np.array(target.normals).shape[0] == 0:
        search_param = o3d.geometry.KDTreeSearchParamHybrid(radius=0.2,
                                                            max_nn=30)
        o3d.estimate_normals(target, search_param=search_param)

    tree = NN(n_neighbors=1, algorithm='kd_tree', n_jobs=10)
    tree = tree.fit(np.array(target.points))
    n = np.array(source.points).shape[0]
    normals = np.array(target.normals)
    points = np.array(target.points)
    weights = np.zeros(n)
    errors = []
    transform = np.eye(4)

    for itr in range(max_iter):
        p = np.array(source.points)
        R, trans = gutil.unpack(transform)
        p = (R.dot(p.T) + trans.reshape((3, 1))).T
        _, indices = tree.kneighbors(p)
        """ (r X pi + pi + t - qi)^T ni """
        """( <r, (pi X ni)> + <t, ni> + <pi-qi, ni> )^2"""
        """ (<(r; t), hi> + di)^2 """
        nor = normals[indices[:, 0], :]
        q = points[indices[:, 0], :]
        d = np.sum(np.multiply(p - q, nor), axis=1)  #[n]
        h = np.zeros((n, 6))
        h[:, :3] = np.cross(p, nor)
        h[:, 3:] = nor
        weight = (sigma**2) / (np.square(d) + sigma**2)
        H = np.multiply(h.T, weight).dot(h)
        g = -h.T.dot(np.multiply(d, weight))
        delta = np.linalg.solve(H, g)
        errors = np.abs(d)
        print('iter=%d, delta=%f, mean error=%f, median error=%f' %
              (itr, np.linalg.norm(delta,
                                   2), np.mean(errors), np.median(errors)))
        if np.linalg.norm(delta, 2) < stopping_threshold:
            break
        trans = delta[3:]
        R = gutil.rodrigues(delta[:3])
        T = gutil.pack(R, trans)
        transform = T.dot(transform)

    return transform
Example #28
    def query_nearest_neighbors(self, query):
        if not hasattr(self, 'nbrs_model'):
            print('No nearest neighbor model detected, running initial model. '
                  'This may take a minute.')
            self.nbrs_model = NN(n_neighbors=10, algorithm='ball_tree').fit(
                self.word_vecs_norm)
        vocab = list(self.tok2indx.keys())
        if query in vocab:
            distances, indices = self.nbrs_model.kneighbors(
                self.word_vecs_norm[self.tok2indx[query], :].reshape(1, -1))
            for dist, indx in zip(distances[0], indices[0]):
                print('{} : {}\n'.format(vocab[indx], dist))
        else:
            print('query token does not exist in vocabulary')
Example #29
def parallel_check_link_deg(V, vecs, degrees, fname):
    # print('3ㅅ3')
    v = array(V[0])
    v_id = V[1][0]
    # print('vid: ', v_id+1)
    deg = degrees[str(v_id + 1)]
    # print(deg)
    nh = NN(n_neighbors=deg, algorithm='ball_tree')
    nh.fit(vecs)
    v = [list(v)]
    ng = nh.kneighbors(v, return_distance=False)[0]
    g = open(fname, 'a')
    for index in ng:
        g.write(str(v_id + 1) + ' ' + str(index + 1) + '\n')
    g.close()
Example #30
    def knn(self, vin, vout, thresh=None):

        nn = NN()
        nn.fit(vin)
        dist, ix = nn.kneighbors(vout, n_neighbors=1)

        if thresh:
            ix_out = np.array(np.where(dist.T[0] < thresh))[0]
            print(type(ix_out))
            ix_in = ix.T[0][ix_out]
        else:
            ix_out = np.arange(len(ix))
            ix_in = ix.T[0]
        print(ix_in.shape, ix_out.shape)
        return ix_in, ix_out