Example #1
    def get_contact(self, i, simulator):
        '''
        Select a proportion of the indices representing contagious particles
        (exposed, infected, severely infected) according to their disease
        transmission rates, and retrieve the particles they contacted by ID.
        Depending on whether the app is installed, the contacted particle IDs
        and the contact times are saved in contact_cell.
        '''

        exposed_id = np.array(np.nonzero(self.states['exposed'])[0])
        ind_end_exp = np.random.randint(
            len(exposed_id),
            size=int(
                np.ceil(simulator.TRANSMISSION_RATE_EXPOSED *
                        len(exposed_id))))

        infected_id = np.array(np.nonzero(self.states['infected'])[0])

        sev_infected_id = np.array(np.nonzero(self.states['severe_inf'])[0])
        ind_end_sev = np.random.randint(
            len(sev_infected_id),
            size=int(
                np.ceil(simulator.TRANSMISSION_RATE_SEVERE_INFECT *
                        len(sev_infected_id))))

        isolated_id = np.array(np.nonzero(self.states['true_iso'])[0])
        ind_end_iso = np.random.randint(
            len(isolated_id),
            size=int(
                np.ceil(simulator.TRANSMISSION_RATE_QUA * len(isolated_id))))

        array = np.hstack(
            (exposed_id[ind_end_exp], infected_id,
             sev_infected_id[ind_end_sev], isolated_id[ind_end_iso])).ravel()
        if len(array) > 1:
            # build the tree once and reuse it for the flat contact list and
            # the per-source neighbor arrays
            tree = KDTree(self.x, leaf_size=2, metric="manhattan")
            contact_ids = tree.query_radius(self.x[array],
                                            r=simulator.init_cont_threshold,
                                            count_only=False)
            contacts = [int(c) for sublist in contact_ids for c in sublist]

            for k in range(len(contact_ids)):
                if len(contact_ids[k]) <= 0:
                    continue
                curr_ind = array[k]
                if ((self.app[curr_ind] == 1) &
                    ((self.epidemic_state[curr_ind] == 1) |
                     (self.epidemic_state[curr_ind] == 0) |
                     (self.epidemic_state[curr_ind] == 2) |
                     (self.epidemic_state[curr_ind] == 7))):
                    temp_cont = []
                    temp_time = []
                    for j in range(1, len(contact_ids[k])):
                        cont_ind = contact_ids[k][j]
                        if ((self.app[cont_ind] == 1) &
                            (((self.epidemic_state[cont_ind] == 1)) |
                             (self.epidemic_state[cont_ind] == 0) |
                             (self.epidemic_state[cont_ind] == 2) |
                             (self.epidemic_state[cont_ind] == 7))):
                            temp_cont.append(cont_ind)
                            temp_time.append(i * simulator.delta_t)
                        self.contact_cell[curr_ind, 0] = temp_cont
                        self.contact_cell[curr_ind, 1] = temp_time
            self.temp[contacts] = 1
            contact_ids = np.where((self.temp == 1)
                                   & (self.epidemic_state == 0))

            return contacts
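# A minimal, self-contained sketch of the query_radius pattern used above, on
# made-up 2D positions (all names and values below are illustrative and not
# part of the original simulator):
import numpy as np
from sklearn.neighbors import KDTree

positions = np.random.rand(100, 2)                # positions of all particles
sources = positions[[3, 17, 42]]                  # positions of contagious particles
tree = KDTree(positions, leaf_size=2, metric="manhattan")
neighbors_per_source = tree.query_radius(sources, r=0.1)
for src, nbrs in zip([3, 17, 42], neighbors_per_source):
    print("particle", src, "contacted", nbrs.tolist())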
Example #2
    KDTree_path_out = join(seq_path_out, 'KDTree')
    os.makedirs(seq_path_out) if not exists(seq_path_out) else None
    os.makedirs(pc_path_out) if not exists(pc_path_out) else None
    os.makedirs(KDTree_path_out) if not exists(KDTree_path_out) else None

    if int(seq_id) < 11:
        label_path = join(seq_path, 'labels')
        label_path_out = join(seq_path_out, 'labels')
        os.makedirs(label_path_out) if not exists(label_path_out) else None
        scan_list = np.sort(os.listdir(pc_path))
        for scan_id in scan_list:
            print(scan_id)
            points = DP.load_pc_kitti(join(pc_path, scan_id))
            labels = DP.load_label_kitti(join(label_path, str(scan_id[:-4]) + '.label'), remap_lut)
            sub_points, sub_labels = DP.grid_sub_sampling(points, labels=labels, grid_size=grid_size)
            search_tree = KDTree(sub_points)
            KDTree_save = join(KDTree_path_out, str(scan_id[:-4]) + '.pkl')
            np.save(join(pc_path_out, scan_id)[:-4], sub_points)
            np.save(join(label_path_out, scan_id)[:-4], sub_labels)
            with open(KDTree_save, 'wb') as f:
                pickle.dump(search_tree, f)
            if seq_id == '08':
                proj_path = join(seq_path_out, 'proj')
                os.makedirs(proj_path) if not exists(proj_path) else None
                proj_inds = np.squeeze(search_tree.query(points, return_distance=False))
                proj_inds = proj_inds.astype(np.int32)
                proj_save = join(proj_path, str(scan_id[:-4]) + '_proj.pkl')
                with open(proj_save, 'wb') as f:
                    pickle.dump([proj_inds], f)
    else:
        proj_path = join(seq_path_out, 'proj')
Example #3
def nearest_neighboors(x, y, z, r):
    D = np.array([x, y, z])
    D = D.T
    tree = KDTree(D, leaf_size=2500)
    dist, ind = tree.query(r, k=33)
    return dist[0], ind[0]
Example #4
     y = [X[h][1], X[n][1]]
     z = [X[h][2], X[n][2]]
     ax.plot_wireframe(x, y, z, color='black')
 
 ax.set_xlim(minX, maxX)
 ax.set_ylim(minY, maxY)
 ax.set_zlim(minZ, maxZ)
 ax.set_title(name + " - NaNE: " + str(num))
 ax.view_init(30, 50)
 
 # Save the figure and show
 plt.tight_layout()
 fig.savefig("tests/exp1/graphs/" + name + '/Graph - NaN: ' + str(num) + '.png')
 print(name + " graph generated! NaN:", num)
 
 tree = KDTree(X)
 fig = plt.figure()
 ax = fig.add_subplot(111, projection='3d')
 for k in ks:
     for h, i in enumerate(X):
         ns = tree.query([i], k=k+1, return_distance=False)
         for n in list(ns[0])[1:]:
             x = [X[h][0], X[n][0]]
             y = [X[h][1], X[n][1]]
             z = [X[h][2], X[n][2]]
             ax.plot_wireframe(x, y, z, color='black')
     ax.set_xlim(minX, maxX)
     ax.set_ylim(minY, maxY)
     ax.set_zlim(minZ, maxZ)
     ax.set_title(name + " - kNN: " + str(k))
     ax.view_init(30, 50)
Example #5
    t2_cpu = time.process_time()
    print('APBS time: {}\n'.format(t2 - t1))
    print('APBS cpu time: {}\n'.format(t2_cpu - t1_cpu))

iface = np.zeros(len(regular_mesh.vertices))
if 'compute_iface' in masif_opts and masif_opts['compute_iface']:
    # Compute the surface of the entire complex and from that compute the interface.
    v3, f3, _, _, _ = computeMSMS(pdb_filename,\
        protonate=True)
    # Regularize the mesh
    mesh = pymesh.form_mesh(v3, f3)
    full_regular_mesh = fix_mesh(mesh, masif_opts['mesh_res'])
    # Find the vertices that are in the iface.
    v3 = full_regular_mesh.vertices
    # Find the distance between every vertex in regular_mesh.vertices and those in the full complex.
    kdt = KDTree(v3)
    d, r = kdt.query(regular_mesh.vertices)
    d = np.square(
        d)  # Square d, because this is how it was in the pyflann version.
    assert (len(d) == len(regular_mesh.vertices))
    iface_v = np.where(d >= 2.0)[0]
    iface[iface_v] = 1.0
    # Convert to ply and save.
    save_ply(out_filename1+".ply", regular_mesh.vertices,\
                        regular_mesh.faces, normals=vertex_normal, charges=vertex_charges,\
                        normalize_charges=True, hbond=vertex_hbond, hphob=vertex_hphobicity,\
                        iface=iface)

else:
    # Convert to ply and save.
    save_ply(out_filename1+".ply", regular_mesh.vertices,\
Example #6
def Extend(data, limit=0.04, num_neighbours=18):
    
    submission = CreateSubmission(0, data)
    hits = data[['hit_id', 'x', 'y', 'z', 'volume_id', 'layer_id', 'module_id']]
    
    df = submission.merge(hits,  on=['hit_id'], how='left')
    df = df.assign(d = np.sqrt( df.x**2 + df.y**2 + df.z**2 ))
    df = df.assign(r = np.sqrt( df.x**2 + df.y**2))
    df = df.assign(arctan2 = np.arctan2(df.z, df.r))

    for angle in range(-90,90,1):

        #df1 = df.loc[(df.arctan2>(angle-0.5)/180*np.pi) & (df.arctan2<(angle+0.5)/180*np.pi)]
        df1 = df.loc[(df.arctan2>(angle-1.5)/180*np.pi) & (df.arctan2<(angle+1.5)/180*np.pi)]

        min_num_neighbours = len(df1)
        if min_num_neighbours<3: continue

        hit_ids = df1.hit_id.values
        x,y,z = df1[['x', 'y', 'z']].values.T
        r  = (x**2 + y**2)**0.5
        r  = r/1000
        a  = np.arctan2(y,x)
        c = np.cos(a)
        s = np.sin(a)
        tree = KDTree(np.column_stack([c, s, r]), metric='euclidean')


        track_ids = list(df1.track_id.unique())
        num_track_ids = len(track_ids)
        min_length=3

        for i in range(num_track_ids):
            p = track_ids[i]
            if p==0: continue

            idx = np.where(df1.track_id==p)[0]
            if len(idx)<min_length: continue

            if angle>0:
                idx = idx[np.argsort( z[idx])]
            else:
                idx = idx[np.argsort(-z[idx])]

            ## start and end points  ##
            idx0,idx1 = idx[0],idx[-1]
            a0 = a[idx0]
            a1 = a[idx1]
            r0 = r[idx0]
            r1 = r[idx1]
            c0 = c[idx0]
            c1 = c[idx1]
            s0 = s[idx0]
            s1 = s[idx1]

            da0 = a[idx[1]] - a[idx[0]]  #direction
            dr0 = r[idx[1]] - r[idx[0]]
            direction0 = np.arctan2(dr0,da0)

            da1 = a[idx[-1]] - a[idx[-2]]
            dr1 = r[idx[-1]] - r[idx[-2]]
            direction1 = np.arctan2(dr1,da1)

            ## extend start point
            ns = tree.query([[c0, s0, r0]], k=min(num_neighbours, min_num_neighbours), return_distance=False)
            ns = np.concatenate(ns)

            direction = np.arctan2(r0 - r[ns], a0 - a[ns])
            diff = 1 - np.cos(direction - direction0)
            ns = ns[(r0 - r[ns] > 0.01) & (diff < (1 - np.cos(limit)))]
            for n in ns: df.loc[df.hit_id == hit_ids[n], 'track_id'] = p

            ## extend end point
            ns = tree.query([[c1, s1, r1]], k=min(num_neighbours, min_num_neighbours), return_distance=False)
            ns = np.concatenate(ns)

            direction = np.arctan2(r[ns] - r1, a[ns] - a1)
            diff = 1 - np.cos(direction - direction1)
            ns = ns[(r[ns] - r1 > 0.01) & (diff < (1 - np.cos(limit)))]
            for n in ns:  df.loc[df.hit_id == hit_ids[n], 'track_id'] = p

    df = df[['hit_id', 'track_id']]
    temp = data.merge(df, how='left', on='hit_id', suffixes=('_old', ''))
    data.update(temp.track_id)
    
    return data
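# A small standalone check of the angular cut used above: requiring
# 1 - cos(direction - direction0) < 1 - cos(limit) keeps only neighbors whose
# direction differs from the track direction by less than `limit` radians
# (the numbers below are made up for illustration):
import numpy as np

limit = 0.04
direction0 = 0.50
candidate_directions = np.array([0.51, 0.56, 0.49, 0.30])
diff = 1 - np.cos(candidate_directions - direction0)
print(diff < (1 - np.cos(limit)))   # -> [ True False  True False]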
Example #7
def test_perigee_period_given_apogee():
    # This test uses KNN to answer two BQL queries.
    # SIMULATE perigee_km, period_minutes GIVEN apogee_km = 500;
    # SIMULATE apogee_km, period_minutes;
    # The outputs of the query are scattered on a plot.

    rng = gu.gen_rng(1)

    # Load the satellites dataset.
    filename = os.path.join(os.path.dirname(__file__),
                            'graphical/resources/satellites.csv')
    satellites = pd.read_csv(filename)

    # Extract target columns of interest.
    D = satellites[['Apogee_km', 'Perigee_km', 'Period_minutes']].dropna()
    X = np.asarray(D)

    # Extract the nearest neighbors given A=500.
    tree = KDTree(X[:, 0].reshape(-1, 1))
    _, neighbors = tree.query([[500]], k=20)
    perigees = X[neighbors[0][:10], 1]
    periods = X[neighbors[0][:10], 2]

    # Learn the joint distribution by assuming P,T|A are independent.
    perigees_ind = rng.normal(np.mean(perigees), np.std(perigees), size=20)
    periods_ind = rng.normal(np.mean(periods), np.std(periods), size=20)

    # Create a KNN.
    distargs = {
        'outputs': {
            'stattypes': ['numerical', 'numerical', 'numerical'],
            'statargs': [{}, {}, {}]
        }
    }
    knn = MultivariateKnn([0, 1, 2], None, distargs=distargs, K=30, rng=rng)
    for i, row in enumerate(X):
        knn.incorporate(i, dict(zip([0, 1, 2], row)))

    # Sample from the dependent KNN.
    samples_dep = knn.simulate(-1, [1, 2], {0: 500}, N=20)
    logpdfs = [knn.logpdf(-1, s, {0: 500}) for s in samples_dep]
    assert all(not np.isinf(l) for l in logpdfs)

    # Create an axis.
    _fig, ax = plt.subplots()

    # Scatter the actual neighborhood.
    ax.scatter(perigees, periods, color='b', label='Actual Satellites')

    # Plot the independent knn.
    ax.scatter(perigees_ind,
               periods_ind,
               color='r',
               alpha=.5,
               label='Independent KNN')

    # Plot the dependent knn.
    ax.scatter([s[1] for s in samples_dep], [s[2] for s in samples_dep],
               color='g',
               alpha=.5,
               label='Dependent KNN')

    # Prepare the axes.
    ax.set_title('SIMULATE Perigee_km, Period_minutes GIVEN Apogee_km = 500',
                 fontweight='bold')
    ax.set_xlabel('Perigee', fontweight='bold')
    ax.set_ylabel('Period', fontweight='bold')
    ax.grid()
    ax.legend(framealpha=0, loc='upper left')

    # Now simulate from the joint distributions of apogee, perigee.
    samples_joint = knn.simulate(-1, [0, 2], N=100)

    # Create an axis.
    _fig, ax = plt.subplots()

    # Scatter the actual data.
    ax.scatter(X[:, 0], X[:, 2], color='b', label='Actual Satellites')

    # Scatter the simulated data.
    ax.scatter([s[0] for s in samples_joint], [s[2] for s in samples_joint],
               color='r',
               label='Dependent KNN')

    # Prepare the axes.
    ax.set_title('SIMULATE period_minutes, apogee_km LIMIT 500',
                 fontweight='bold')
    ax.set_xlabel('Apogee', fontweight='bold')
    ax.set_ylabel('Period', fontweight='bold')
    ax.set_xlim([-500, 50000])
    ax.set_ylim([-100, 1800])
    ax.grid()
    ax.legend(framealpha=0, loc='upper left')

    # Reveal!
    plt.close('all')
Example #8
        else:
            int_features = np.vstack((data['vert_ind'], data['class'])).T
        sub_points, sub_colors, sub_int_features = DP.grid_sub_sampling(
            points,
            features=colors,
            labels=int_features,
            grid_size=subsampling_parameter)
        sub_colors = sub_colors / 255
        if cloud_split == 'test':
            sub_vert_inds = np.squeeze(sub_int_features)
            sub_labels = None
        else:
            sub_vert_inds = sub_int_features[:, 0]
            sub_labels = sub_int_features[:, 1]

        search_tree = KDTree(sub_points, leaf_size=50)

        with open(KDTree_file, 'wb') as f:
            pickle.dump(search_tree, f)

        if cloud_split == 'test':
            write_ply(sub_ply_file, [sub_points, sub_colors, sub_vert_inds],
                      ['x', 'y', 'z', 'red', 'green', 'blue', 'vert_ind'])
        else:
            write_ply(
                sub_ply_file,
                [sub_points, sub_colors, sub_labels, sub_vert_inds],
                ['x', 'y', 'z', 'red', 'green', 'blue', 'class', 'vert_ind'])
    if isfile(proj_file):
        print('{:s} proj_file already exists\n'.format(cloud_name))
    else:
Example #9
    if labels_train[ii] == 1
]

#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color="b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color="r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################

### your code here!  name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
from sklearn.neighbors import KDTree
kdt = KDTree(features_train, leaf_size=30, metric='euclidean')
t0 = time()
kdt.query(features_train, k=2, return_distance=False)
print("training time:", round(time() - t0, 3), "s")

t0 = time()
# KDTree has no score() method; estimate accuracy with a 1-nearest-neighbor vote
ind = kdt.query(features_test, k=1, return_distance=False)
pred = [labels_train[i[0]] for i in ind]
print("accuracy:", sum(int(p == t) for p, t in zip(pred, labels_test)) / float(len(labels_test)))
print("query time:", round(time() - t0, 3), "s")

try:
    #    prettyPicture(clf, features_test, labels_test)
    # a bare KDTree has no predict() method, so this call may raise AttributeError
    prettyPicture(kdt, features_test, labels_test)
except (NameError, AttributeError):
    pass
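# The exercise scaffold above expects a classifier object named clf with a
# predict() method, which a bare KDTree does not provide. A sketch of the
# equivalent classifier route (assuming features_train / labels_train /
# features_test / labels_test are defined as in the exercise):
from sklearn.neighbors import KNeighborsClassifier

clf = KNeighborsClassifier(n_neighbors=2, algorithm='kd_tree', leaf_size=30)
clf.fit(features_train, labels_train)
print("accuracy:", clf.score(features_test, labels_test))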
Example #10
def periodic_voronoi(structure, logfile=sys.stdout):
    """
    :param ASE.Atoms structure:
    """

    pbcc = PBCCalculator(structure.cell)

    # Make a 3x3x3 supercell
    supercell = structure.repeat((3, 3, 3))

    qhull_output = None

    logfile.write("Qvoronoi ---")

    # Run qhull
    with tempfile.NamedTemporaryFile('w',
                                     prefix = 'qvor',
                                     suffix='.in', delete = False) as infile, \
         tempfile.NamedTemporaryFile('r',
                                     prefix = 'qvor',
                                     suffix='.out',
                                     delete=True) as outfile:
        #  -- Write input file --
        infile.write("3\n")  # num of dimensions
        infile.write("%i\n" % len(supercell))  # num of points
        np.savetxt(infile, supercell.get_positions(), fmt='%.16f')
        infile.flush()

        cmdline = [
            "qvoronoi", "TI", infile.name, "FF", "Fv", "TO", outfile.name
        ]
        process = subprocess.Popen(cmdline,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        retcode = process.wait()
        logfile.write(process.stdout.read())
        if retcode != 0:
            raise RuntimeError("qvoronoi returned exit code %i" % retcode)

        qhull_output = outfile.read()

    facets_regex = re.compile(
        """
                -[ \t](?P<facetkey>f[0-9]+)  [\n]
                [ \t]*-[ ]flags: .* [\n]
                [ \t]*-[ ]normal: .* [\n]
                [ \t]*-[ ]offset: .* [\n]
                [ \t]*-[ ]center:(?P<center>([ ][\-]?[0-9]*[\.]?[0-9]*(e[-?[0-9]+)?){3}) [ \t] [\n]
                [ \t]*-[ ]vertices:(?P<vertices>([ ]p[0-9]+\(v[0-9]+\))+) [ \t]? [\n]
                [ \t]*-[ ]neighboring[ ]facets:(?P<neighbors>([ ]f[0-9]+)+)
                """, re.X | re.M)

    vertices_re = re.compile('(?<=p)[0-9]+')

    # Allocate stuff
    centers = []
    vertices = []
    facet_indexes_taken = set()

    facet_index_to_our_index = {}
    all_facets_centers = []

    # ---- Read facets
    facet_index = -1
    next_our_index = 0
    for facet_match in facets_regex.finditer(qhull_output):
        center = np.asarray(map(float, facet_match.group('center').split()))
        facet_index += 1

        all_facets_centers.append(center)

        if not pbcc.is_in_image_of_cell(center, (1, 1, 1)):
            continue

        verts = map(int, vertices_re.findall(facet_match.group('vertices')))
        verts_in_main_cell = tuple(v % len(structure) for v in verts)

        facet_indexes_taken.add(facet_index)

        centers.append(center)
        vertices.append(verts_in_main_cell)

        facet_index_to_our_index[facet_index] = next_our_index

        next_our_index += 1

        end_of_facets = facet_match.end()

    facet_count = facet_index + 1

    logfile.write("  qhull gave %i vertices; kept %i" %
                  (facet_count, len(centers)))

    # ---- Read ridges
    qhull_output_after_facets = qhull_output[end_of_facets:].strip()
    ridge_re = re.compile('^\d+ \d+ \d+(?P<verts>( \d+)+)$', re.M)

    ridges = [[int(v) for v in match.group('verts').split()]
              for match in ridge_re.finditer(qhull_output_after_facets)]
    # only take ridges with at least 1 facet in main unit cell.
    ridges = [r for r in ridges if any(f in facet_indexes_taken for f in r)]

    # shift centers back into normal unit cell
    centers -= np.sum(structure.cell, axis=0)

    nearest_center = KDTree(centers)

    ridges_in_main_cell = set()
    threw_out = 0
    for r in ridges:
        ridge_centers = np.asarray(
            [all_facets_centers[f] for f in r if f < len(all_facets_centers)])
        if not pbcc.all_in_unit_cell(ridge_centers):
            continue

        pbcc.wrap_points(ridge_centers)
        dists, ridge_centers_in_main = nearest_center.query(
            ridge_centers, return_distance=True)

        if np.any(dists > 0.00001):
            threw_out += 1
            continue

        assert ridge_centers_in_main.shape == (
            len(ridge_centers), 1), "%s" % ridge_centers_in_main.shape
        ridge_centers_in_main = ridge_centers_in_main[:, 0]

        ridges_in_main_cell.add(frozenset(ridge_centers_in_main))

    logfile.write("  Threw out %i ridges" % threw_out)

    logfile.flush()

    return centers, vertices, ridges_in_main_cell
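# The "snap to nearest known center" pattern used above, in isolation: query a
# KDTree of kept centers and keep only matches that are essentially exact
# (made-up coordinates, for illustration only):
import numpy as np
from sklearn.neighbors import KDTree

known_centers = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
center_tree = KDTree(known_centers)
queries = np.array([[0.0, 0.0, 1e-6], [0.5, 0.5, 0.5]])
dists, idx = center_tree.query(queries, return_distance=True)
print(idx[dists[:, 0] <= 1e-5, 0])   # -> [0]; the second query is no known center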
Example #11
def compute(dim_map,
            dim_x,
            f,
            n_niches=1000,
            max_evals=1e5,
            params=cm.default_params,
            log_file=None,
            variation_operator=cm.variation):
    """CVT MAP-Elites
       Vassiliades V, Chatzilygeroudis K, Mouret JB. Using centroidal voronoi tessellations to scale up the multidimensional archive of phenotypic elites algorithm. IEEE Transactions on Evolutionary Computation. 2017 Aug 3;22(4):623-30.

       Format of the logfile: evals archive_size max mean median 5%_percentile, 95%_percentile

    """
    # setup the parallel processing pool
    num_cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(num_cores)

    # create the CVT
    c = cm.cvt(n_niches, dim_map, params['cvt_samples'],
               params['cvt_use_cache'])
    kdt = KDTree(c, leaf_size=30, metric='euclidean')
    cm.__write_centroids(c)

    archive = {}  # init archive (empty)
    n_evals = 0  # number of evaluations since the beginning
    b_evals = 0  # number of evaluations since the last dump

    # main loop
    while (n_evals < max_evals):
        to_evaluate = []
        # random initialization
        if len(archive) <= params['random_init'] * n_niches:
            for i in range(0, params['random_init_batch']):
                x = np.random.uniform(low=params['min'],
                                      high=params['max'],
                                      size=dim_x)
                to_evaluate += [(x, f)]
        else:  # variation/selection loop
            keys = list(archive.keys())
            # we select all the parents at the same time because randint is slow
            rand1 = np.random.randint(len(keys), size=params['batch_size'])
            rand2 = np.random.randint(len(keys), size=params['batch_size'])
            for n in range(0, params['batch_size']):
                # parent selection
                x = archive[keys[rand1[n]]]
                y = archive[keys[rand2[n]]]
                # copy & add variation
                z = variation_operator(x.x, y.x, params)
                to_evaluate += [(z, f)]
        # evaluation of the fitness for to_evaluate
        s_list = cm.parallel_eval(__evaluate, to_evaluate, pool, params)
        # natural selection
        for s in s_list:
            __add_to_archive(s, s.desc, archive, kdt)
        # count evals
        n_evals += len(to_evaluate)
        b_evals += len(to_evaluate)

        # write archive
        if b_evals >= params['dump_period'] and params['dump_period'] != -1:
            print("[{}/{}]".format(n_evals, int(max_evals)),
                  end=" ",
                  flush=True)
            cm.__save_archive(archive, n_evals)
            b_evals = 0
        # write log
        if log_file is not None:
            fit_list = np.array([x.fitness for x in archive.values()])
            log_file.write("{} {} {} {} {} {} {}\n".format(
                n_evals, len(archive.keys()), fit_list.max(),
                np.mean(fit_list), np.median(fit_list),
                np.percentile(fit_list, 5), np.percentile(fit_list, 95)))
            log_file.flush()
    cm.__save_archive(archive, n_evals)
    return archive
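# __add_to_archive is not shown in this snippet; a plausible sketch of the
# niche lookup it has to perform with the same KDTree (hypothetical helper,
# for illustration only):
import numpy as np

def _niche_index(kdt, descriptor):
    # the niche of a behavior descriptor is the index of its nearest CVT centroid
    _, ind = kdt.query(np.asarray(descriptor).reshape(1, -1), k=1)
    return int(ind[0][0])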
Example #12
# =============================================================================
# End of Feature Creation
# =============================================================================

#Now combine the feature vectors - use only doc2vec, tfidf for docs, and LDA for KNN
all_txt_features=np.hstack([TSVD_reduced,lda_tx,docvec_array])

# =============================================================================
# Modeling
# =============================================================================
#from sklearn.neighbors import KNeighborsClassifier

#Try K-D Tree instead 
from sklearn.neighbors import KDTree
# row_nb_list - use this for indexing
kdt = KDTree(all_txt_features, leaf_size=30, metric='euclidean')
#TODO: low-priority for now, consider updating to another distance metric (cosine)

#Using KDTree to find articles similar to that of our original documents
# returned_list=[]
# for el in row_nb_list:
dist, idx = kdt.query(all_txt_features,k=50)
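# A short sketch of turning the query result above into per-document lists of
# similar articles: the first neighbor of each row is the document itself at
# distance zero, so it is dropped (the dict name is illustrative):
similar_articles = {row: idx[row, 1:].tolist() for row in range(idx.shape[0])}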

#TODO: if we want to get fancy, since there is document overlap in the retrieved
#results, we could have a function that would dynamically run and retrieve 
# until we get the exact amount the user specifies 

#TODO: consider adding an upfront input so the user can specify the exact
# number of articles they want retrieved

#pickle and save 
Example #13
if (ext == ".nii" or ext == ".nrrd" or ext == ".nhdr"):

    intact_volume = sitk.ReadImage(m_string3)
    intact_array = sitk.GetArrayFromImage(intact_volume)
else:
    intact_volume = RIM.dicom_series_reader(m_string3)
    intact_array = itk.GetArrayFromImage(intact_volume)
# intact_volume=RIM.dicom_series_reader(str(unicode('\\\\samba.cs.ucalgary.ca\\fatemeh.yazdanbakhsh\Documents\Data_Sets\Calgary\TBone-2015\TBoneCBCT-2015-10\L2963L','utf-8')))

intact_array_original = intact_array
##########################################################################
data = intact_array

data = np.where(data == 255.0)
X = np.asarray(data).transpose()
tree = KDTree(X, leaf_size=10)
tree.kernel_density(X[0:15], h=0.1, kernel='gaussian')
# print(tree.query_radius(X[:1], r=0.3, count_only=True))
print(tree.query_radius(X[0:15], r=0.3))
# ax.scatter(data[0], data[1],data[2], c='b', **plot_kwds)
#
# # plt.show()
#
# import matplotlib.pyplot as plt
# import pandas as pd
#
# import numpy as np
# # import scipy.cluster.hierarchy as shc
# #
# # plt.figure(figsize=(10, 7))
# # plt.title("Customer Dendograms")
Example #14
def generate_label_views(kzip_path,
                         ssd_version,
                         gt_type,
                         n_voting=40,
                         nb_views=2,
                         ws=(256, 128),
                         comp_window=8e3,
                         initial_run=False,
                         out_path=None,
                         verbose=False):
    """

    Parameters
    ----------
    kzip_path : str
    gt_type :  str
    ssd_version : str
    n_voting : int
        Number of collected nodes during BFS for majority vote (label smoothing)
    nb_views : int
    ws: Tuple[int]
    comp_window : float
    initial_run : bool
        if True, will copy SSV from default SSD to SSD with version=gt_type
    out_path : str
        If given, export the mesh colored according to the GT labels
    verbose : bool
        Print additional information

    Returns
    -------
    Tuple[np.array]
        raw, label and index views
    """
    assert gt_type in ["axgt",
                       "spgt"], "Currently only spine and axon GT is supported"
    n_labels = 3 if gt_type == "axgt" else 4
    palette = generate_palette(n_labels)
    sso_id = int(re.findall("/(\d+).", kzip_path)[0])
    sso = SuperSegmentationObject(sso_id, version=ssd_version)
    if initial_run:  # use default SSD version
        orig_sso = SuperSegmentationObject(sso_id)
        orig_sso.copy2dir(dest_dir=sso.ssv_dir)
    if not sso.attr_dict_exists:
        msg = 'Attribute dict of original SSV was not copied successfully ' \
              'to target SSD.'
        raise ValueError(msg)
    sso.load_attr_dict()
    indices, vertices, normals = sso.mesh

    # # Load mesh
    vertices = vertices.reshape((-1, 3))

    # load skeleton
    skel = load_skeleton(kzip_path)["skeleton"]
    skel_nodes = list(skel.getNodes())

    node_coords = np.array(
        [n.getCoordinate() * sso.scaling for n in skel_nodes])
    node_labels = np.array(
        [str2intconverter(n.getComment(), gt_type) for n in skel_nodes],
        dtype=np.int)
    node_coords = node_coords[(node_labels != -1)]
    node_labels = node_labels[(node_labels != -1)]

    # create KD tree from skeleton node coordinates
    tree = KDTree(node_coords)
    # transfer labels from skeleton to mesh
    dist, ind = tree.query(vertices, k=1)
    vertex_labels = node_labels[ind]  # retrieving labels of vertices
    if n_voting > 0:
        vertex_labels = bfs_smoothing(vertices,
                                      vertex_labels,
                                      n_voting=n_voting)
    color_array = palette[vertex_labels].astype(np.float32) / 255.

    # for getting vertices of individual SSO
    # np.save("/wholebrain/u/pschuber/spiness_skels/sso_%d_vertlabels.k.zip" % sso.id, vertex_labels)
    # np.save("/wholebrain/u/pschuber/spiness_skels/sso_%d_verts.k.zip" % sso.id, vertices)

    if out_path is not None:
        if gt_type == 'spgt':  #
            colors = [[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                      [0.1, 0.1, 0.1, 1], [0.05, 0.6, 0.6, 1],
                      [0.9, 0.9, 0.9, 1]]
        else:  # dendrite, axon, soma, background
            colors = [[0.6, 0.6, 0.6, 1], [0.9, 0.2, 0.2, 1],
                      [0.1, 0.1, 0.1, 1], [0.9, 0.9, 0.9, 1]]
        colors = (np.array(colors) * 255).astype(np.uint8)
        color_array_mesh = colors[
            vertex_labels][:,
                           0]  # TODO: check why only first element, maybe colors introduces an additional axis
        write_mesh2kzip("{}/sso_{}_gtlabels.k.zip".format(out_path, sso.id),
                        sso.mesh[0],
                        sso.mesh[1],
                        sso.mesh[2],
                        color_array_mesh,
                        ply_fname="gtlabels.ply")

    # Initializing mesh object with ground truth coloring
    mo = MeshObject("neuron", indices, vertices, color=color_array)

    # use downsampled locations for view locations, only if they are close to a
    # labeled skeleton node
    locs = np.concatenate(sso.sample_locations(cache=False))
    dist, ind = tree.query(locs)
    locs = locs[dist[:, 0] < 2000]  #[::3][:5]  # TODO add as parameter

    # # # To get view locations
    # dest_folder = os.path.expanduser("~") + \
    #               "/spiness_skels/{}/view_imgs_{}/".format(sso_id, n_voting)
    # if not os.path.isdir(dest_folder):
    #     os.makedirs(dest_folder)
    # loc_text = ''
    # for i, c in enumerate(locs):
    #     loc_text += str(i) + "\t" + str((c / np.array([10, 10, 20])).astype(np.int)) +'\n' #rescalling to the voxel grid
    # with open("{}/viewcoords.txt".format(dest_folder), "w") as f:
    #     f.write(loc_text)
    # # # DEBUG PART END
    label_views, rot_mat = _render_mesh_coords(locs,
                                               mo,
                                               depth_map=False,
                                               return_rot_matrices=True,
                                               ws=ws,
                                               smooth_shade=False,
                                               nb_views=nb_views,
                                               comp_window=comp_window,
                                               verbose=verbose)
    label_views = remap_rgb_labelviews(label_views, palette)[:, None]
    index_views = render_sso_coords_index_views(sso,
                                                locs,
                                                rot_mat=rot_mat,
                                                verbose=verbose,
                                                nb_views=nb_views,
                                                ws=ws,
                                                comp_window=comp_window)
    raw_views = render_sso_coords(sso,
                                  locs,
                                  nb_views=nb_views,
                                  ws=ws,
                                  comp_window=comp_window,
                                  verbose=verbose,
                                  rot_mat=rot_mat)
    return raw_views, label_views, index_views
Example #15
# a weekday, then False.
# Use weekday() function in pandas, and 0 is Monday, 5 is Saturday, and 6
# is Sunday
df['If_Weekend'] = df['DateTime'].map(
    lambda x: x.weekday() == 5 or x.weekday() == 6)

# Add zip code column
# Use KDTree to map the latitude and longitude to the zipcode areas


def get_zipcode(tree, value, ziplist):
    dist, ind = tree.query(value, k=3)
    return str(int(ziplist[ind[0, 0]][0]))

ziplist = np.loadtxt("ExternalData/USZIPCODE.txt", delimiter=',')
tree = KDTree(ziplist[:, 1:], leaf_size=2)
a = np.concatenate((np.array(df['Latitude']).reshape(-1, 1),
                    np.array(df['Longitude']).reshape(-1, 1)), axis=1)

final_list = []
count = 1
for i in a:
    count += 1
    if count % 10000 == 0:
        print(count)
    final_list.append(get_zipcode(tree, i.reshape(1,-1), ziplist))
df['ZipCode'] = final_list

# Save the cleaned data file
df.to_csv("Cleaned_data_updated_zipcode.csv", index=False)
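# The per-row loop above can also be replaced by a single vectorized query; a
# sketch assuming the same tree, a and ziplist as above (note that plain
# euclidean distance on raw lat/lon is only an approximation):
dist, ind = tree.query(a, k=1)
df['ZipCode'] = [str(int(ziplist[j, 0])) for j in ind[:, 0]]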
Example #16
import numpy as np
import cv2
from sklearn.neighbors import KDTree
from kdTreeNNeighbors import createM

orb = cv2.ORB()
frame = cv2.imread('dp.png')
kp = orb.detect(frame, None)

kp, des1 = orb.compute(frame, kp)

frame = cv2.imread('dp2.png')
kp = orb.detect(frame, None)

kp, des2 = orb.compute(frame, kp)

X = np.array(des2)
tree = KDTree(X, leaf_size=2, metric='euclidean')
for each in des1:
    dist, ind = tree.query([each], k=len(X))  # query expects a 2D array of query points
    print(ind)
Example #17
knnProfileVars = [
    "AGE", "YRS_CLIMBING", "HEIGHT", "APEINDEX", "WEIGHT", "BMI", "B_AVG",
    "S_AVG"
]
knnData = normData[knnProfileVars]
knnData.describe()

# The reason I propose these variables is that they capture the climber's current level along with descriptive variables they cannot change: they simply describe their current physique and experience. The goal is to group climbers with similar physiques, experience and current performance, regardless of how they train, how often they climb, how they approach improvement, what they eat, etc., because those are the variables a climber can actually change to improve their performance.
#
# Now let's use a KDTree (the data structure behind the KNN algorithm) to find the nearest neighbors of a chosen climber (climberID = 155 below).

# In[22]:

climberID = 155
randomClimber = knnData.loc[climberID, :]
tree = KDTree(knnData)
dist, ids = tree.query([randomClimber], k=int(len(knnData.index) / 3))

closestClimbers = knnData.loc[ids[0], :]

comparison = pd.DataFrame()
comparison["SUBJECT_CLIMBER"] = randomClimber
comparison["AVERAGE_CLIMBER"] = knnData.mean()
comparison["KNN_CLOSEST_AVG"] = closestClimbers.mean()

print("After finding the", str(int(len(knnData.index) / 3)),
      "nearest neighbors we see this behavior in the data distribution")
#display(comparison)

# As you can see, the new group consists of climbers whose profiles are much closer to the climber we care about. Learning the effect of particular actions on performance now makes more sense, since climbers with very similar bodies and experience would intuitively benefit from similar actions. So if a climber with a similar profile benefited from an action, it suggests that you probably would too.
#
Example #18
def extend_sample(model,
                  labeled,
                  unlabeled,
                  val,
                  test,
                  augmentation=None,
                  times_augmentation=1,
                  batch_size=32):
    model.train(False)

    labeled_loader = DataLoader(labeled, batch_size=batch_size, shuffle=False)
    labeled_X = create_embeddings(model, labeled_loader)
    labeled_y = labeled.get_targets()
    if augmentation is not None:
        labeled.transform = augmentation
        aug_labeled_X = []
        aug_labeled_y = []
        aug_labeled_loader = DataLoader(labeled,
                                        batch_size=batch_size,
                                        shuffle=False)
        for _ in range(times_augmentation):
            aug_labeled_X.append(create_embeddings(model, aug_labeled_loader))
            aug_labeled_y.append(labeled_y)
        aug_labeled_X = np.vstack(aug_labeled_X)
        aug_labeled_y = np.hstack(aug_labeled_y)
    unlabeled_loader = DataLoader(unlabeled,
                                  batch_size=batch_size,
                                  shuffle=False)
    unlabeled_X = create_embeddings(model, unlabeled_loader)

    extended_samples = {}
    extended_samples[1] = {'X': labeled_X, 'y': labeled_y}
    index = KDTree(unlabeled_X)
    for k_neighbors in K_NEIGHBORS:
        extended_indices = index.query(labeled_X,
                                       return_distance=False,
                                       k=k_neighbors)
        extended_y = np.tile(labeled_y.reshape(-1, 1),
                             (1, k_neighbors)).ravel()
        extended_X = []
        for i in tqdm(extended_indices.ravel()):
            extended_X.append(np.array(index.data[i]))

        extended_X = np.stack(extended_X)
        extended_X = np.vstack([labeled_X, extended_X])
        extended_y = np.hstack([labeled_y, extended_y])
        extended_samples[k_neighbors + 1] = {'X': extended_X, 'y': extended_y}

    val_loader = DataLoader(val, batch_size=batch_size, shuffle=False)
    val_X = create_embeddings(model, val_loader)
    val_y = val.get_targets()

    test_loader = DataLoader(test, batch_size=batch_size, shuffle=False)
    test_X = create_embeddings(model, test_loader)
    test_y = test.targets.cpu().data.numpy()

    result = {
        'extended': extended_samples,
        'val': {
            'X': val_X,
            'y': val_y
        },
        'test': {
            'X': test_X,
            'y': test_y
        }
    }
    return result
Example #19
def icp_point_to_point_stochastic(data,
                                  ref,
                                  max_iter,
                                  RMS_threshold,
                                  sampling_limit,
                                  final_overlap=1.):
    '''
    Iterative closest point algorithm with a point to point strategy.
    Inputs :
        data = (d x N_data) matrix where "N_data" is the number of points and "d" the dimension
        ref = (d x N_ref) matrix where "N_ref" is the number of points and "d" the dimension
        max_iter = stop condition on the number of iterations
        RMS_threshold = stop condition on the distance
        sampling_limit = maximum number of points to use to compute transformations
        final_overlap = overlap parameter
    Returns :
        data_aligned = data aligned on reference cloud
        R_list = list of the (d x d) rotation matrices found at each iteration
        T_list = list of the (d x 1) translation vectors found at each iteration
        neighbors_list = at each iteration, the nearest neighbor of each data point is searched in
        the ref cloud, giving a (1 x N_data) array of indices. This is the list of those
        arrays over the iterations
           
    '''

    # Variable for aligned data
    data_aligned = np.copy(data)

    # Initiate lists
    d, n = data.shape
    R_list = [np.eye(d)]
    T_list = [np.zeros((d, 1))]
    neighbors_list = []
    RMS_list = []

    kdtree = KDTree(ref.T)

    n_samples = min(n, sampling_limit)
    n_overlap = int(final_overlap * n_samples)

    for i in range(max_iter):
        # Sampling points
        data_idx = np.random.choice(n, n_samples, replace=False)

        # Matching points
        dist, neighbors = kdtree.query(data_aligned[:, data_idx].T, k=1)
        neighbors = neighbors.squeeze()
        if n_overlap != n_samples:
            dist = dist.squeeze()
            best_neighbors = np.argpartition(dist, n_overlap)[:n_overlap]
            neighbors = neighbors[best_neighbors]
            data_idx = data_idx[best_neighbors]

        # Estimating the best transform
        R, T = best_rigid_transform(data_aligned[:, data_idx], ref[:,
                                                                   neighbors])

        # Computing the full transform
        T = R @ T_list[-1] + T
        R = R @ R_list[-1]

        # Store everything
        T_list.append(T)
        R_list.append(R)
        neighbors_list.append(neighbors)

        # Align the data
        data_aligned = R @ data + T

        # Check the RMS threshold
        rms = RMS(data_aligned[:, data_idx], ref[:, neighbors].squeeze())
        RMS_list.append(rms)
        if rms < RMS_threshold:
            return data_aligned, R_list[1:], T_list[
                1:], neighbors_list, RMS_list
    return data_aligned, R_list[1:], T_list[1:], neighbors_list, RMS_list
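# best_rigid_transform is not shown above; a standard SVD (Kabsch) sketch of
# what such a helper typically looks like for (d x N) point matrices (this is
# an assumption about the missing function, not the original implementation):
import numpy as np

def best_rigid_transform(data, ref):
    # centroids of both clouds
    data_mean = data.mean(axis=1, keepdims=True)
    ref_mean = ref.mean(axis=1, keepdims=True)
    # cross-covariance between the centered clouds and its SVD
    H = (data - data_mean) @ (ref - ref_mean).T
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T
    # guard against reflections
    if np.linalg.det(R) < 0:
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    T = ref_mean - R @ data_mean
    return R, T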
Example #20
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory (Load KDTree for neighbors searches)
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError('subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = os.path.join(self.path, 'input_{:.3f}'.format(subsampling_parameter))
        if not os.path.exists(tree_path):
            os.makedirs(tree_path)

        # Initiate containers
        self.input_trees = {'training': [], 'validation': []}
        self.input_colors = {'training': [], 'validation': []}
        self.input_labels = {'training': [], 'validation': []}

        for i, file_path in enumerate(self.train_files):

            # Restart timer
            t0 = time.time()

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            if self.all_splits[i] == self.validation_split:
                cloud_split = 'validation'
            else:
                cloud_split = 'training'

            # Name of the input files
            KDTree_file = os.path.join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = os.path.join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if os.path.isfile(KDTree_file):
                print('\nFound KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, subsampling_parameter))

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_colors = np.vstack((data['red'], data['green'], data['blue'])).T
                sub_labels = data['class']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:
                print(
                    '\nPreparing KDTree for cloud {:s}, subsampled at {:.3f}'.format(cloud_name, subsampling_parameter))

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack((data['x'], data['y'], data['z'])).T
                colors = np.vstack((data['red'], data['green'], data['blue'])).T
                labels = data['class']

                # Subsample cloud
                sub_points, sub_colors, sub_labels = grid_subsampling(points,
                                                                      features=colors,
                                                                      labels=labels,
                                                                      sampleDl=subsampling_parameter)

                # Rescale float color and squeeze label
                sub_colors = sub_colors / 255
                sub_labels = np.squeeze(sub_labels)

                # Get chosen neighborhoods
                search_tree = KDTree(sub_points, leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                write_ply(sub_ply_file,
                          [sub_points, sub_colors, sub_labels],
                          ['x', 'y', 'z', 'red', 'green', 'blue', 'class'])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_colors]
            self.input_labels[cloud_split] += [sub_labels]

            size = sub_colors.shape[0] * 4 * 7
            print('{:.1f} MB loaded in {:.1f}s'.format(size * 1e-6, time.time() - t0))

        print('\nPreparing reprojection indices for testing')

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        i_val = 0
        for i, file_path in enumerate(self.train_files):

            # Restart timer
            t0 = time.time()

            # Get info on this cloud
            cloud_name = file_path.split('/')[-1][:-4]

            # Validation projection and labels
            if self.all_splits[i] == self.validation_split:
                proj_file = os.path.join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if os.path.isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = data['class']

                    # Compute projection inds
                    proj_inds = np.squeeze(self.input_trees['validation'][i_val].query(points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                i_val += 1
                print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))

        return
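# The caching pattern above in isolation: the KDTree is pickled next to the
# subsampled cloud and reloaded later to reproject full-resolution points onto
# it (file names and arrays below are illustrative only):
import pickle
import numpy as np
from sklearn.neighbors import KDTree

sub_points = np.random.rand(100, 3)
full_points = np.random.rand(1000, 3)
with open('example_KDTree.pkl', 'wb') as f:
    pickle.dump(KDTree(sub_points, leaf_size=50), f)
with open('example_KDTree.pkl', 'rb') as f:
    search_tree = pickle.load(f)
proj_inds = np.squeeze(search_tree.query(full_points, return_distance=False))
print(proj_inds.shape)   # one subsampled index per full-resolution point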
Example #21
    def _get_local_region(self, X_test_norm):
        """ Get local region for each test instance

        Parameters
        ----------
        X_test_norm : numpy array, shape (n_samples, n_features)
            Normalized test data

        Returns
        -------
        final_local_region_list : List of lists, shape [n_samples, [local_region]]
            Indices of training samples in the local region of each test sample
        """

        # Initialize the local region list
        local_region_list = [[]] * X_test_norm.shape[0]

        if self.local_max_features > 1.0:
            warnings.warn(
                "Local max features greater than 1.0, reducing to 1.0")
            self.local_max_features = 1.0

        if self.X_train_norm_.shape[1] * self.local_min_features < 1:
            warnings.warn(
                "Local min features smaller than 1, increasing to 1.0")
            self.local_min_features = 1.0

        # perform multiple iterations
        for _ in range(self.local_region_iterations):

            # if min and max are the same, then use all features
            if self.local_max_features == self.local_min_features:
                features = range(0, self.X_train_norm_.shape[1])
                warnings.warn("Local min features equals local max features; "
                              "use all features instead.")

            else:
                # randomly generate feature subspaces
                features = generate_bagging_indices(
                    self.random_state,
                    bootstrap_features=False,
                    n_features=self.X_train_norm_.shape[1],
                    min_features=int(self.X_train_norm_.shape[1] *
                                     self.local_min_features),
                    max_features=int(self.X_train_norm_.shape[1] *
                                     self.local_max_features))

            # build KDTree out of training subspace
            tree = KDTree(self.X_train_norm_[:, features])

            # Find neighbors of each test instance
            _, ind_arr = tree.query(X_test_norm[:, features],
                                    k=self.local_region_size)

            # add neighbors to local region list
            for j in range(X_test_norm.shape[0]):
                local_region_list[j] = local_region_list[j] + \
                                       ind_arr[j, :].tolist()

        # keep nearby points which occur at least local_region_threshold times
        final_local_region_list = [[]] * X_test_norm.shape[0]
        for j in range(X_test_norm.shape[0]):
            final_local_region_list[j] = [
                item for item, count in collections.Counter(
                    local_region_list[j]).items()
                if count > self.local_region_threshold
            ]

        return final_local_region_list
Example #22
        return False


f = open("test.ply", "r")
points = []

for line in f:
    words = line.split()
    if (isfloat(words[0])):
        # print(str(words[0]))
        points.append([float(words[0]), float(words[1]), float(words[2])])

f.close()

points = np.array(points)
tree = KDTree(points, leaf_size=50)

distance = []
for x in points:
    # print x[0]
    dist, ind = tree.query([x], k=50)
    distance.append(np.average(dist))
    # print 'the average distance for the first point is', np.average(dist)
distance = np.array(distance)
upper = np.percentile(distance, 80)

indToRemove = []
for x in range(len(distance)):
    if (distance[x] > upper):
        indToRemove.append(x)
# print indToRemove
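# A short follow-up sketch of actually dropping the detected outliers (the
# snippet above only collects their indices):
filtered_points = np.delete(points, indToRemove, axis=0)
print(len(points), "->", len(filtered_points), "points kept")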
Example #23
def prune_pins(pins):
    '''
    prunes stop list, left_turns, right_turns
    
    @param pins: list of points of interest pins to prune
    
    @return [kept_stop_signs,kept_left, kept_right]
    '''
    stops = []
    left = []
    right = []
    for x in pins:
        if x[2] == 0:
            stops.append(x)
        if x[2] == 1:
            left.append(x)
        if x[2] == 2:
            right.append(x)

    #cluster points in each class
    clustered_right = cluster_pins(right, 3)
    clustered_left = cluster_pins(left, 3)
    clustered_stop = cluster_pins(stops, 0, 1.0 / 75000.0)

    #remove turn clusters with speed > 35MPH or other
    kept_left = []
    kept_right = []
    for cluster in clustered_right:
        if cluster[3] < 35:
            kept_right.append(cluster)

    for cluster in clustered_left:
        if cluster[3] < 35:
            kept_left.append(cluster)

    #remove stop signs near turns
    left_array = np.array(kept_left)[:][:, 0:3]  #ignore speed dimension
    #scale the class so that you always get the closest neighbor of the desired class
    left_array[:, 2] = left_array[:, 2] * 100
    right_array = np.array(kept_right)[:][:, 0:3]  #ignore speed dimension
    right_array[:, 2] = right_array[:, 2] * 100
    inp = np.concatenate((left_array, right_array), axis=0)  #input

    kd_tree = KDTree(inp)
    kept_stop_signs = []
    for index, stop in enumerate(clustered_stop):
        cur_loc = np.array(stop)[0:3]
        cur_stop_coords = cur_loc[0:2]

        left_index = cur_loc.copy()
        left_index[2] = 100  #scale the class so that you always get the closest neighbor of the desired class
        right_index = cur_loc.copy()
        right_index[2] = 200

        #get the closest turn in each class by querying the kdtree
        right_dist, close_right_index = kd_tree.query(
            right_index.reshape(1, -1))  #index of the closest right point
        left_dist, close_left_index = kd_tree.query(
            left_index.reshape(1, -1))  #index of the closest left point

        closest_left_coords = inp[close_left_index[0]][0]
        closest_right_coords = inp[close_right_index[0]][0]

        #haversine more accurate than distance generated from tree query
        closest_left = haversine(cur_stop_coords[0], cur_stop_coords[1],
                                 closest_left_coords[0],
                                 closest_left_coords[1])
        closest_right = haversine(cur_stop_coords[0], cur_stop_coords[1],
                                  closest_right_coords[0],
                                  closest_right_coords[1])

        if closest_left > 0.2 and closest_right > 0.2:  #about a tenth of a mile
            kept_stop_signs.append(clustered_stop[index])

    return [kept_stop_signs, kept_left, kept_right]
Example #24
File: NPM3D.py  Project: zkwalt/KPConv
    def load_subsampled_clouds(self, subsampling_parameter):
        """
        Presubsample point clouds and load into memory (Load KDTree for neighbors searches)
        """

        if 0 < subsampling_parameter <= 0.01:
            raise ValueError(
                'subsampling_parameter too low (should be over 1 cm)')

        # Create path for files
        tree_path = join(self.path,
                         'input_{:.3f}'.format(subsampling_parameter))
        if not exists(tree_path):
            makedirs(tree_path)

        # List of training files
        self.train_files = np.sort([
            join(self.train_path, f) for f in listdir(self.train_path)
            if f[-4:] == '.ply'
        ])

        # Add test files
        self.test_files = np.sort([
            join(self.test_path, f) for f in listdir(self.test_path)
            if f[-4:] == '.ply'
        ])
        files = np.hstack((self.train_files, self.test_files))

        # Initiate containers
        self.input_trees = {'training': [], 'validation': [], 'test': []}
        self.input_colors = {'training': [], 'validation': [], 'test': []}
        self.input_labels = {'training': [], 'validation': []}

        # Advanced display
        N = len(files)
        progress_n = 30
        fmt_str = '[{:<' + str(progress_n) + '}] {:5.1f}%'
        print('\nPreparing KDTree for all scenes, subsampled at {:.3f}'.format(
            subsampling_parameter))

        for i, file_path in enumerate(files):

            # Restart timer
            t0 = time.time()

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]
            if 'train' in cloud_folder:
                if self.all_splits[i] == self.validation_split:
                    cloud_split = 'validation'
                else:
                    cloud_split = 'training'
            else:
                cloud_split = 'test'

            if (cloud_split != 'test'
                    and self.load_test) or (cloud_split == 'test'
                                            and not self.load_test):
                continue

            # Name of the input files
            KDTree_file = join(tree_path, '{:s}_KDTree.pkl'.format(cloud_name))
            sub_ply_file = join(tree_path, '{:s}.ply'.format(cloud_name))

            # Check if inputs have already been computed
            if isfile(KDTree_file):

                # read ply with data
                data = read_ply(sub_ply_file)
                sub_reflectance = np.expand_dims(data['reflectance'], 1)
                if cloud_split == 'test':
                    sub_labels = None
                else:
                    sub_labels = data['class']

                # Read pkl with search tree
                with open(KDTree_file, 'rb') as f:
                    search_tree = pickle.load(f)

            else:

                # Read ply file
                data = read_ply(file_path)
                points = np.vstack(
                    (data['x'], data['y'], data['z'])).astype(np.float32).T
                reflectance = np.expand_dims(data['reflectance'],
                                             1).astype(np.float32)
                if cloud_split == 'test':
                    int_features = None
                else:
                    int_features = data['class']

                # Saturate reflectance
                reflectance = np.minimum(reflectance, 50.0)

                # Subsample cloud
                sub_data = grid_subsampling(points,
                                            features=reflectance,
                                            labels=int_features,
                                            sampleDl=subsampling_parameter)

                # Rescale and saturate float reflectance
                sub_reflectance = sub_data[1] / 50.0

                # Get chosen neighborhoods
                search_tree = KDTree(sub_data[0], leaf_size=50)

                # Save KDTree
                with open(KDTree_file, 'wb') as f:
                    pickle.dump(search_tree, f)

                # Save ply
                if cloud_split == 'test':
                    sub_labels = None
                    write_ply(sub_ply_file, [sub_data[0], sub_reflectance],
                              ['x', 'y', 'z', 'reflectance'])
                else:
                    sub_labels = np.squeeze(sub_data[2])
                    write_ply(sub_ply_file,
                              [sub_data[0], sub_reflectance, sub_labels],
                              ['x', 'y', 'z', 'reflectance', 'class'])

            # Fill data containers
            self.input_trees[cloud_split] += [search_tree]
            self.input_colors[cloud_split] += [sub_reflectance]
            if cloud_split in ['training', 'validation']:
                self.input_labels[cloud_split] += [sub_labels]

            print('', end='\r')
            print(fmt_str.format('#' * (((i + 1) * progress_n) // N),
                                 100 * (i + 1) / N),
                  end='',
                  flush=True)

        # Get number of clouds
        self.num_training = len(self.input_trees['training'])
        self.num_validation = len(self.input_trees['validation'])
        self.num_test = len(self.input_trees['test'])

        # Get validation and test reprojection indices
        self.validation_proj = []
        self.validation_labels = []
        self.test_proj = []
        self.test_labels = []
        i_val = 0
        i_test = 0

        # Advanced display
        N = max(self.num_validation + self.num_test, 1)
        print('', end='\r')
        print(fmt_str.format('#' * progress_n, 100), flush=True)
        print('\nPreparing reprojection indices for validation and test')

        for i, file_path in enumerate(files):

            # get cloud name and split
            cloud_name = file_path.split('/')[-1][:-4]
            cloud_folder = file_path.split('/')[-2]

            # Validation projection and labels
            if (not self.load_test and 'train' in cloud_folder
                    and self.all_splits[i] == self.validation_split):
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds, labels = pickle.load(f)
                else:

                    # Get original points
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T
                    labels = data['class']

                    # Compute projection inds
                    proj_inds = np.squeeze(
                        self.input_trees['validation'][i_val].query(
                            points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump([proj_inds, labels], f)

                self.validation_proj += [proj_inds]
                self.validation_labels += [labels]
                i_val += 1

            # Test projection
            if self.load_test and 'test' in cloud_folder:
                proj_file = join(tree_path, '{:s}_proj.pkl'.format(cloud_name))
                if isfile(proj_file):
                    with open(proj_file, 'rb') as f:
                        proj_inds = pickle.load(f)
                else:

                    # Get original points
                    data = read_ply(file_path)
                    points = np.vstack((data['x'], data['y'], data['z'])).T

                    # Compute projection inds
                    proj_inds = np.squeeze(
                        self.input_trees['test'][i_test].query(
                            points, return_distance=False))
                    proj_inds = proj_inds.astype(np.int32)

                    # Save
                    with open(proj_file, 'wb') as f:
                        pickle.dump(proj_inds, f)

                self.test_proj += [proj_inds]
                self.test_labels += [np.zeros(0, dtype=np.int32)]
                i_test += 1

            print('', end='\r')
            print(fmt_str.format('#' * (((i_val + i_test) * progress_n) // N),
                                 100 * (i_val + i_test) / N),
                  end='',
                  flush=True)

        print('\n')

        return
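
        # Note (added, hedged): the reprojection indices computed above hold, for every
        # point of an original cloud, the index of its nearest subsampled point, so
        # predictions made on the subsampled clouds can later be mapped back to full
        # resolution with something like (names are illustrative only):
        #     full_res_preds = sub_preds[self.validation_proj[cloud_idx]]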
Example #25
0
def extrapolate_racmo_firn(base_dir,
                           EPSG,
                           MODEL,
                           tdec,
                           X,
                           Y,
                           SEARCH='BallTree',
                           NN=10,
                           POWER=2.0,
                           SIGMA=1.5,
                           VARIABLE='zs',
                           FILL_VALUE=None,
                           REFERENCE=False):

    #-- set parameters based on input model
    FIRN_FILE = {}
    if (MODEL == 'FGRN11'):
        #-- filename and directory for input FGRN11 file
        FIRN_FILE['zs'] = 'FDM_zs_FGRN11_1960-2016.nc'
        FIRN_FILE['FirnAir'] = 'FDM_FirnAir_FGRN11_1960-2016.nc'
        FIRN_DIRECTORY = ['RACMO', 'FGRN11_1960-2016']
    elif (MODEL == 'FGRN055'):
        #-- filename and directory for input FGRN055 file
        FIRN_FILE['zs'] = 'FDM_zs_FGRN055_1960-2017_interpol.nc'
        FIRN_FILE['FirnAir'] = 'FDM_FirnAir_FGRN055_1960-2017_interpol.nc'
        FIRN_DIRECTORY = ['RACMO', 'FGRN055_1960-2017']
    elif (MODEL == 'XANT27'):
        #-- filename and directory for input XANT27 file
        FIRN_FILE['zs'] = 'FDM_zs_ANT27_1979-2016.nc'
        FIRN_FILE['FirnAir'] = 'FDM_FirnAir_ANT27_1979-2016.nc'
        FIRN_DIRECTORY = ['RACMO', 'XANT27_1979-2016']
    elif (MODEL == 'ASE055'):
        #-- filename and directory for input ASE055 file
        FIRN_FILE['zs'] = 'FDM_zs_ASE055_1979-2015.nc'
        FIRN_FILE['FirnAir'] = 'FDM_FirnAir_ASE055_1979-2015.nc'
        FIRN_DIRECTORY = ['RACMO', 'ASE055_1979-2015']
    elif (MODEL == 'XPEN055'):
        #-- filename and directory for input XPEN055 file
        FIRN_FILE['zs'] = 'FDM_zs_XPEN055_1979-2016.nc'
        FIRN_FILE['FirnAir'] = 'FDM_FirnAir_XPEN055_1979-2016.nc'
        FIRN_DIRECTORY = ['RACMO', 'XPEN055_1979-2016']

    #-- Open the RACMO NetCDF file for reading
    ddir = os.path.join(base_dir, *FIRN_DIRECTORY)
    fileID = netCDF4.Dataset(os.path.join(ddir, FIRN_FILE[VARIABLE]), 'r')
    #-- Get data from each netCDF variable and remove singleton dimensions
    fd = {}
    fd[VARIABLE] = np.squeeze(fileID.variables[VARIABLE][:].copy())
    fd['lon'] = fileID.variables['lon'][:, :].copy()
    fd['lat'] = fileID.variables['lat'][:, :].copy()
    fd['time'] = fileID.variables['time'][:].copy()
    #-- invalid data value
    fv = np.float64(fileID.variables[VARIABLE]._FillValue)
    #-- input shape of RACMO firn data
    nt, ny, nx = np.shape(fd[VARIABLE])
    #-- close the NetCDF files
    fileID.close()

    #-- indices of specified ice mask
    i, j = np.nonzero(fd[VARIABLE][0, :, :] != fv)
    #-- create a binary mask of valid model cells (reconstructed here: the original
    #-- snippet uses fd['mask'] below without defining it)
    fd['mask'] = np.zeros((ny, nx))
    fd['mask'][i, j] = 1.0

    #-- use a gaussian filter to smooth mask
    gs = {}
    gs['mask'] = scipy.ndimage.gaussian_filter(fd['mask'],
                                               SIGMA,
                                               mode='constant',
                                               cval=0)
    #-- indices of smoothed ice mask
    ii, jj = np.nonzero(np.ceil(gs['mask']) == 1.0)
    #-- use a gaussian filter to smooth each firn field
    gs[VARIABLE] = np.ma.zeros((nt, ny, nx), fill_value=fv)
    gs[VARIABLE].mask = np.zeros((nt, ny, nx), dtype=bool)
    for t in range(nt):
        #-- replace fill values before smoothing data
        temp1 = np.zeros((ny, nx))
        #-- reference to first firn field
        if REFERENCE:
            temp1[i, j] = fd[VARIABLE][t, i, j] - fd[VARIABLE][0, i, j]
        else:
            temp1[i, j] = fd[VARIABLE][t, i, j].copy()
        #-- smooth firn field
        temp2 = scipy.ndimage.gaussian_filter(temp1,
                                              SIGMA,
                                              mode='constant',
                                              cval=0)
        #-- scale output smoothed firn field
        gs[VARIABLE][t, ii, jj] = temp2[ii, jj] / gs['mask'][ii, jj]
        #-- replace valid firn values with original
        gs[VARIABLE][t, i, j] = temp1[i, j]
        #-- set mask variables for time
        gs[VARIABLE].mask[t, :, :] = (gs['mask'] == 0.0)

    #-- convert RACMO latitude and longitude to input coordinates (EPSG)
    proj1 = pyproj.Proj("+init={0}".format(EPSG))
    proj2 = pyproj.Proj("+init=EPSG:{0:d}".format(4326))
    xg, yg = pyproj.transform(proj2, proj1, fd['lon'], fd['lat'])

    #-- construct search tree from original points
    #-- can use either BallTree or KDTree algorithms
    xy1 = np.concatenate((xg[ii, jj, None], yg[ii, jj, None]), axis=1)
    tree = BallTree(xy1) if (SEARCH == 'BallTree') else KDTree(xy1)

    #-- output interpolated arrays of firn variable (height or firn air content)
    npts = len(tdec)
    extrap_data = np.ma.zeros((npts), fill_value=fv, dtype=np.float64)
    extrap_data.data[:] = extrap_data.fill_value
    extrap_data.mask = np.zeros((npts), dtype=bool)
    #-- type designating algorithm used (1:interpolate, 2:backward, 3:forward)
    extrap_data.interpolation = np.zeros((npts), dtype=np.uint8)

    #-- find days that can be interpolated
    if np.any((tdec >= fd['time'].min()) & (tdec < fd['time'].max())):
        #-- indices of dates for interpolated days
        ind, = np.nonzero((tdec >= fd['time'].min())
                          & (tdec < fd['time'].max()))
        #-- reduce x, y and t coordinates
        xind, yind, tind = (X[ind], Y[ind], tdec[ind])
        #-- find indices for linearly interpolating in time
        f = scipy.interpolate.interp1d(fd['time'],
                                       np.arange(nt),
                                       kind='linear')
        date_indice = f(tind).astype(np.int64)
        #-- for each unique firn date
        #-- linearly interpolate in time between two firn maps
        #-- then use inverse distance weighting to extrapolate in space
        for k in np.unique(date_indice):
            kk, = np.nonzero(date_indice == k)
            count = np.count_nonzero(date_indice == k)
            #-- query the search tree to find the NN closest points
            xy2 = np.concatenate((xind[kk, None], yind[kk, None]), axis=1)
            dist, indices = tree.query(xy2, k=NN, return_distance=True)
            #-- normalized weights if POWER > 0 (typically between 1 and 3)
            #-- in the inverse distance weighting
            power_inverse_distance = dist**(-POWER)
            s = np.sum(power_inverse_distance, axis=1)
            w = power_inverse_distance / np.broadcast_to(
                s[:, None], (count, NN))
            #-- firn height or air content for times before and after tdec
            firn1 = gs[VARIABLE][k, ii, jj]
            firn2 = gs[VARIABLE][k + 1, ii, jj]
            #-- linearly interpolate to date
            dt = (tind[kk] - fd['time'][k]) / (fd['time'][k + 1] -
                                               fd['time'][k])
            #-- spatially extrapolate using inverse distance weighting
            extrap_data[kk] = (1.0-dt)*np.sum(w*firn1[indices],axis=1) + \
                dt*np.sum(w*firn2[indices], axis=1)
        #-- set interpolation type (1: interpolated in time)
        extrap_data.interpolation[ind] = 1

    #-- check if needing to extrapolate backwards in time
    count = np.count_nonzero(tdec < fd['time'].min())
    if (count > 0):
        #-- indices of dates before firn model
        ind, = np.nonzero(tdec < fd['time'].min())
        #-- query the search tree to find the NN closest points
        xy2 = np.concatenate((X[ind, None], Y[ind, None]), axis=1)
        dist, indices = tree.query(xy2, k=NN, return_distance=True)
        #-- normalized weights if POWER > 0 (typically between 1 and 3)
        #-- in the inverse distance weighting
        power_inverse_distance = dist**(-POWER)
        s = np.sum(power_inverse_distance, axis=1)
        w = power_inverse_distance / np.broadcast_to(s[:, None], (count, NN))
        #-- calculate a regression model for calculating values
        #-- read first 10 years of data to create regression model
        N = 365
        #-- spatially interpolate firn elevation or air content to coordinates
        FIRN = np.zeros((count, N))
        T = np.zeros((N))
        #-- create interpolated time series for calculating regression model
        for k in range(N):
            #-- time at k
            T[k] = fd['time'][k]
            #-- spatially extrapolate firn elevation or air content
            firn1 = gs[VARIABLE][k, ii, jj]
            FIRN[:, k] = np.sum(w * firn1[indices], axis=1)
        #-- calculate regression model
        for n, v in enumerate(ind):
            extrap_data[v] = regress_model(
                T,
                FIRN[n, :],
                tdec[v],
                ORDER=2,
                CYCLES=[0.25, 0.5, 1.0, 2.0, 4.0, 5.0],
                RELATIVE=T[0])
        #-- set interpolation type (2: extrapolated backwards in time)
        extrap_data.interpolation[ind] = 2

    #-- check if needing to extrapolate forward in time
    count = np.count_nonzero(tdec >= fd['time'].max())
    if (count > 0):
        #-- indices of dates after firn model
        ind, = np.nonzero(tdec >= fd['time'].max())
        #-- query the search tree to find the NN closest points
        xy2 = np.concatenate((X[ind, None], Y[ind, None]), axis=1)
        dist, indices = tree.query(xy2, k=NN, return_distance=True)
        #-- normalized weights if POWER > 0 (typically between 1 and 3)
        #-- in the inverse distance weighting
        power_inverse_distance = dist**(-POWER)
        s = np.sum(power_inverse_distance, axis=1)
        w = power_inverse_distance / np.broadcast_to(s[:, None], (count, NN))
        #-- calculate a regression model for calculating values
        #-- read last 10 years of data to create regression model
        N = 365
        #-- spatially interpolate firn elevation or air content to coordinates
        FIRN = np.zeros((count, N))
        T = np.zeros((N))
        #-- create interpolated time series for calculating regression model
        for k in range(N):
            kk = nt - N + k
            #-- time at k
            T[k] = fd['time'][kk]
            #-- spatially extrapolate firn elevation or air content
            firn1 = gs[VARIABLE][kk, ii, jj]
            FIRN[:, k] = np.sum(w * firn1[indices], axis=1)
        #-- calculate regression model
        for n, v in enumerate(ind):
            extrap_data[v] = regress_model(
                T,
                FIRN[n, :],
                tdec[v],
                ORDER=2,
                CYCLES=[0.25, 0.5, 1.0, 2.0, 4.0, 5.0],
                RELATIVE=T[-1])
        #-- set interpolation type (3: extrapolated forward in time)
        extrap_data.interpolation[ind] = 3

    #-- complete mask if any invalid in data
    invalid, = np.nonzero(extrap_data.data == extrap_data.fill_value)
    extrap_data.mask[invalid] = True
    #-- replace fill value if specified
    if FILL_VALUE:
        extrap_data.fill_value = FILL_VALUE
        extrap_data.data[extrap_data.mask] = extrap_data.fill_value

    #-- return the interpolated values
    return extrap_data
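
#-- NOTE (added, hedged): the regress_model() helper called above is not included in this
#-- snippet. The sketch below is an illustrative stand-in, assuming the helper fits a
#-- polynomial of degree ORDER plus a sine/cosine pair for each period in CYCLES by least
#-- squares (with times taken relative to RELATIVE) and evaluates the fit at the output
#-- time(s). It is not the original implementation.
import numpy as np

def regress_model(t_in, d_in, t_out, ORDER=2, CYCLES=None, RELATIVE=0.0):
    #-- illustrative reimplementation (see note above)
    CYCLES = [] if CYCLES is None else CYCLES
    t_in = np.asarray(t_in, dtype=np.float64) - RELATIVE
    t_out = np.atleast_1d(np.asarray(t_out, dtype=np.float64)) - RELATIVE
    #-- design matrix: polynomial terms plus a sine/cosine pair per cycle
    def design(t):
        cols = [t**n for n in range(ORDER + 1)]
        for c in CYCLES:
            cols.append(np.sin(2.0 * np.pi * t / c))
            cols.append(np.cos(2.0 * np.pi * t / c))
        return np.column_stack(cols)
    #-- least-squares coefficients and evaluation at the output time(s)
    beta = np.linalg.lstsq(design(t_in), np.asarray(d_in), rcond=None)[0]
    return design(t_out).dot(beta)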
Example #26
0
    def update_top_activations(self, features, label, l_points, input_points, radius, max_computed=60):

        top_num = self.top_features.shape[0]

        # Compute the index of the top activation for each feature
        max_indices = np.argmax(features, axis=0)

        # get top_point neighborhoods
        for features_i, idx in enumerate(max_indices[:max_computed]):
            if features[idx, features_i] <= self.top_features[-1, features_i]:
                continue
            # if label in self.top_classes[:, features_i]:
            #     ind0 = np.where(self.top_classes[:, features_i] == label)[0][0]
            #     if features[idx, features_i] <= self.top_features[ind0, features_i]:
            #         continue
            #     elif ind0 < top_num - 1:
            #         self.top_features[ind0:-1, features_i] = self.top_features[ind0 + 1:, features_i]
            #         self.top_classes[ind0:-1, features_i] = self.top_classes[ind0 + 1:, features_i]
            #         for next_i in range(ind0 + 1, top_num):
            #             old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
            #             new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
            #             if exists(old_f):
            #                 if exists(new_f):
            #                     remove(new_f)
            #                 rename(old_f, new_f)

            # Find the index where the new top activation should be placed
            top_i = np.where(features[idx, features_i] > self.top_features[:, features_i])[0][0]

            # # Update top features
            # if top_i < top_num - 1:
            #     self.top_features[top_i + 1:, features_i] = self.top_features[top_i:-1, features_i]
            #     self.top_features[top_i, features_i] = features[idx, features_i]
            #     self.top_classes[top_i + 1:, features_i] = self.top_classes[top_i:-1, features_i]
            #     self.top_classes[top_i, features_i] = label

            # Find in which batch the point lies
            if self.saving:

                # Get inputs
                l_features = features[:, features_i]
                point = l_points[idx, :]
                dist = np.linalg.norm(input_points - point, axis=1)
                influence = (radius - dist) / radius

                # Project response on input cloud
                if l_points.shape[0] == input_points.shape[0]:
                    responses = l_features
                else:
                    tree = KDTree(l_points, leaf_size=50)
                    nn_k = min(l_points.shape[0], 10)
                    interp_dists, interp_inds = tree.query(input_points, nn_k, return_distance=True)
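                    # Weight the k neighbors with a Tukey (biweight) window:
                    # w = (1 - (d / radius)^2)^2 inside the radius, 0 beyond it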
                    tukeys = np.square(1 - np.square(interp_dists / radius))
                    tukeys[interp_dists > radius] = 0
                    responses = np.sum(l_features[interp_inds] * tukeys, axis=1)

                # Handle last examples
                for next_i in range(top_num - 1, top_i, -1):
                    old_f = join(self.visu_path, self.fmt_str.format(features_i, next_i))
                    new_f = join(self.visu_path, self.fmt_str.format(features_i, next_i + 1))
                    if exists(old_f):
                        if exists(new_f):
                            remove(new_f)
                        rename(old_f, new_f)

                # Save
                filename = join(self.visu_path, self.fmt_str.format(features_i, top_i + 1))
                write_ply(filename,
                          [input_points, influence, responses],
                          ['x', 'y', 'z', 'influence', 'responses'])
Example #27
0
    """ from DOI 10.1007/s10851-009-0161-2, #4 """

    dist = np.zeros((len(a), len(b)))

    for bi in prange(len(b)):

        dist[:, bi] = 1 - np.abs(np.dot(a, b[bi]))

    return dist


""" use sklearn KDTree for reduction of points for query (euclidean) """
from sklearn.neighbors import KDTree
qgrid_pos = np.copy(qgrid)
qgrid_pos[qgrid_pos[:, 0] < 0] *= -1
tree = KDTree(qgrid_pos)

# rad = ( 1 - np.cos(theta) ) / 2
# euc_rad = 4*np.sin(theta)**2

rad = np.sqrt(2 * (1 - np.cos(0.5 * theta)))
euc_rad = np.sqrt(4 * np.sin(0.25 * theta)**2)
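
# NOTE (added, hedged): assuming qgrid holds unit quaternions, euc_rad above is the
# chordal (Euclidean) distance between two hemisphere-aligned unit quaternions whose
# rotations differ by the angle theta, so grid orientations within that misorientation
# of a query quaternion q (illustrative name) could be retrieved with, e.g.:
#     neighbors = tree.query_radius(q[None, :], r=euc_rad)[0]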

fibre_marc = {}


def calcFibre(symHKL, yset, qgrid, phi, rad, tree, euc_rad, quatSymOps):

    cphi = np.cos(phi / 2)
    sphi = np.sin(phi / 2)
Example #28
0
    def show_deformable_kernels(self, model, dataset, deform_idx=0):

        ##########################################
        # First choose the visualized deformations
        ##########################################

        # List all deformation ops
        all_ops = [op for op in tf.get_default_graph().get_operations() if op.name.startswith('KernelPointNetwork')
                   and op.name.endswith('deformed_KP')]

        if len(all_ops) > 0:
            print('\nPossible deformed indices:')
            for i, t in enumerate(all_ops):
                print(i, ': ', t.name)
        else:
            raise ValueError('No deformable convolution found in this network')

        # Chosen deformations
        deformed_KP_tensor = all_ops[deform_idx].outputs[0]

        # Layer index
        layer_idx = int(all_ops[deform_idx].name.split('/')[1].split('_')[-1])

        # Original kernel point positions
        KP_vars = [v for v in tf.global_variables() if 'kernel_points' in v.name]
        tmp = np.array(all_ops[deform_idx].name.split('/'))
        test = []
        for v in KP_vars:
            cmp = np.array(v.name.split('/'))
            l = min(len(cmp), len(tmp))
            cmp = cmp[:l]
            tmp = tmp[:l]
            test += [np.sum(cmp == tmp)]
        chosen_KP = np.argmax(test)

        print('You chose to visualize the output of operation named: ' + all_ops[deform_idx].name)

        print('\n****************************************************************************')

        # Run model on all test examples
        # ******************************

        # Initialise iterator with test data
        self.sess.run(dataset.test_init_op)
        count = 0

        while True:
            try:

                # Run one step of the model
                t = [time.time()]
                ops = (deformed_KP_tensor,
                       model.inputs['points'],
                       model.inputs['features'],
                       model.inputs['pools'],
                       model.inputs['in_batches'],
                       KP_vars)
                stacked_deformed_KP, \
                all_points, \
                all_colors, \
                all_pools, \
                in_batches, \
                original_KPs = self.sess.run(ops, {model.dropout_prob: 1.0})
                t += [time.time()]
                count += in_batches.shape[0]

                # Stack all batches
                max_ind = np.max(in_batches)
                stacked_batches = []
                for b_i, b in enumerate(in_batches):
                    stacked_batches += [b[b < max_ind - 0.5] * 0 + b_i]
                stacked_batches = np.hstack(stacked_batches)

                # Find batches at wanted layer
                for l in range(model.config.num_layers - 1):
                    if l >= layer_idx:
                        break
                    stacked_batches = stacked_batches[all_pools[l][:, 0]]

                # Get each example and update top_activations
                in_points = []
                in_colors = []
                deformed_KP = []
                points = []
                lookuptrees = []
                for b_i, b in enumerate(in_batches):
                    b = b[b < max_ind - 0.5]
                    in_points += [all_points[0][b]]
                    deformed_KP += [stacked_deformed_KP[stacked_batches == b_i]]
                    points += [all_points[layer_idx][stacked_batches == b_i]]
                    lookuptrees += [KDTree(points[-1])]
                    if all_colors.shape[1] == 4:
                        in_colors += [all_colors[b, 1:]]
                    else:
                        in_colors += [None]

                print('New batch size : ', len(in_batches))

                ###########################
                # Interactive visualization
                ###########################

                # Create figure for features
                fig1 = mlab.figure('Features', bgcolor=(1.0, 1.0, 1.0), size=(1280, 920))
                fig1.scene.parallel_projection = False

                # Indices
                global obj_i, point_i, plots, offsets, p_scale, show_in_p, aim_point
                p_scale = 0.03
                obj_i = 0
                point_i = 0
                plots = {}
                offsets = False
                show_in_p = 2
                aim_point = np.zeros((1, 3))

                def picker_callback(picker):
                    """ Picker callback: this get called when on pick events.
                    """
                    global plots, aim_point

                    if 'in_points' in plots:
                        if plots['in_points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
                            point_rez = \
                            plots['in_points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[0]
                            new_point_i = int(np.floor(picker.point_id / point_rez))
                            if new_point_i < len(plots['in_points'].mlab_source.points):
                                # Get closest point in the layer we are interested in
                                aim_point = plots['in_points'].mlab_source.points[new_point_i:new_point_i + 1]
                                update_scene()

                    if 'points' in plots:
                        if plots['points'].actor.actor._vtk_obj in [o._vtk_obj for o in picker.actors]:
                            point_rez = plots['points'].glyph.glyph_source.glyph_source.output.points.to_array().shape[
                                0]
                            new_point_i = int(np.floor(picker.point_id / point_rez))
                            if new_point_i < len(plots['points'].mlab_source.points):
                                # Get closest point in the layer we are interested in
                                aim_point = plots['points'].mlab_source.points[new_point_i:new_point_i + 1]
                                update_scene()

                def update_scene():
                    global plots, offsets, p_scale, show_in_p, aim_point, point_i

                    # Get the current view
                    v = mlab.view()
                    roll = mlab.roll()

                    #  clear figure
                    for key in plots.keys():
                        plots[key].remove()

                    plots = {}

                    # Plot new data feature
                    p = points[obj_i]

                    # Rescale points for visu
                    p = (p * 1.5 / model.config.in_radius)

                    # Show point cloud
                    if show_in_p <= 1:
                        plots['points'] = mlab.points3d(p[:, 0],
                                                        p[:, 1],
                                                        p[:, 2],
                                                        resolution=8,
                                                        scale_factor=p_scale,
                                                        scale_mode='none',
                                                        color=(0, 1, 1),
                                                        figure=fig1)

                    if show_in_p >= 1:

                        # Get points and colors
                        in_p = in_points[obj_i]
                        in_p = (in_p * 1.5 / model.config.in_radius)

                        # Color point cloud if possible
                        in_c = in_colors[obj_i]
                        if in_c is not None:

                            # Primitives
                            scalars = np.arange(len(in_p))  # Key point: set an integer for each point

                            # Define color table (including alpha), which must be uint8 and [0,255]
                            colors = np.hstack((in_c, np.ones_like(in_c[:, :1])))
                            colors = (colors * 255).astype(np.uint8)

                            plots['in_points'] = mlab.points3d(in_p[:, 0],
                                                               in_p[:, 1],
                                                               in_p[:, 2],
                                                               scalars,
                                                               resolution=8,
                                                               scale_factor=p_scale * 0.8,
                                                               scale_mode='none',
                                                               color=(0.8, 0.8, 0.8),
                                                               figure=fig1)
                            # plots['in_points'].module_manager.scalar_lut_manager.lut.table = colors

                        else:

                            plots['in_points'] = mlab.points3d(in_p[:, 0],
                                                               in_p[:, 1],
                                                               in_p[:, 2],
                                                               resolution=8,
                                                               scale_factor=p_scale * 0.8,
                                                               scale_mode='none',
                                                               figure=fig1)

                    # Get KP locations
                    rescaled_aim_point = aim_point * model.config.in_radius / 1.5
                    point_i = lookuptrees[obj_i].query(rescaled_aim_point, return_distance=False)[0][0]
                    if offsets:
                        KP = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
                        scals = np.ones_like(KP[:, 0])
                    else:
                        KP = points[obj_i][point_i] + original_KPs[chosen_KP]
                        scals = np.zeros_like(KP[:, 0])

                    KP = (KP * 1.5 / model.config.in_radius)

                    plots['KP'] = mlab.points3d(KP[:, 0],
                                                KP[:, 1],
                                                KP[:, 2],
                                                scals,
                                                colormap='autumn',
                                                resolution=8,
                                                scale_factor=1.2 * p_scale,
                                                scale_mode='none',
                                                vmin=0,
                                                vmax=1,
                                                figure=fig1)

                    if True:
                        plots['center'] = mlab.points3d(p[point_i, 0],
                                                        p[point_i, 1],
                                                        p[point_i, 2],
                                                        scale_factor=1.1 * p_scale,
                                                        scale_mode='none',
                                                        color=(0, 1, 0),
                                                        figure=fig1)

                        # New title
                        plots['title'] = mlab.title(str(obj_i), color=(0, 0, 0), size=0.3, height=0.01)
                        text = '<--- (press g for previous)' + 50 * ' ' + '(press h for next) --->'
                        plots['text'] = mlab.text(0.01, 0.01, text, color=(0, 0, 0), width=0.98)
                        plots['orient'] = mlab.orientation_axes()

                    # Set the saved view
                    mlab.view(*v)
                    mlab.roll(roll)

                    return

                def animate_kernel():
                    global plots, offsets, p_scale, show_in_p

                    # Get KP locations

                    KP_def = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
                    KP_def = (KP_def * 1.5 / model.config.in_radius)
                    KP_def_color = (1, 0, 0)

                    KP_rigid = points[obj_i][point_i] + original_KPs[chosen_KP]
                    KP_rigid = (KP_rigid * 1.5 / model.config.in_radius)
                    KP_rigid_color = (1, 0.7, 0)

                    if offsets:
                        t_list = np.linspace(0, 1, 150, dtype=np.float32)
                    else:
                        t_list = np.linspace(1, 0, 150, dtype=np.float32)

                    @mlab.animate(delay=10)
                    def anim():
                        for t in t_list:
                            plots['KP'].mlab_source.set(x=t * KP_def[:, 0] + (1 - t) * KP_rigid[:, 0],
                                                        y=t * KP_def[:, 1] + (1 - t) * KP_rigid[:, 1],
                                                        z=t * KP_def[:, 2] + (1 - t) * KP_rigid[:, 2],
                                                        scalars=t * np.ones_like(KP_def[:, 0]))

                            yield

                    anim()

                    return

                def keyboard_callback(vtk_obj, event):
                    global obj_i, point_i, offsets, p_scale, show_in_p

                    if vtk_obj.GetKeyCode() in ['b', 'B']:
                        p_scale /= 1.5
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['n', 'N']:
                        p_scale *= 1.5
                        update_scene()

                    if vtk_obj.GetKeyCode() in ['g', 'G']:
                        obj_i = (obj_i - 1) % len(deformed_KP)
                        point_i = 0
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['h', 'H']:
                        obj_i = (obj_i + 1) % len(deformed_KP)
                        point_i = 0
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['k', 'K']:
                        offsets = not offsets
                        animate_kernel()

                    elif vtk_obj.GetKeyCode() in ['z', 'Z']:
                        show_in_p = (show_in_p + 1) % 3
                        update_scene()

                    elif vtk_obj.GetKeyCode() in ['0']:

                        print('Saving')

                        # Find a new name
                        file_i = 0
                        file_name = 'KP_{:03d}.ply'.format(file_i)
                        files = [f for f in listdir('KP_clouds') if f.endswith('.ply')]
                        while file_name in files:
                            file_i += 1
                            file_name = 'KP_{:03d}.ply'.format(file_i)

                        KP_deform = points[obj_i][point_i] + deformed_KP[obj_i][point_i]
                        KP_normal = points[obj_i][point_i] + original_KPs[chosen_KP]

                        # Save
                        write_ply(join('KP_clouds', file_name),
                                  [in_points[obj_i], in_colors[obj_i]],
                                  ['x', 'y', 'z', 'red', 'green', 'blue'])
                        write_ply(join('KP_clouds', 'KP_{:03d}_deform.ply'.format(file_i)),
                                  [KP_deform],
                                  ['x', 'y', 'z'])
                        write_ply(join('KP_clouds', 'KP_{:03d}_normal.ply'.format(file_i)),
                                  [KP_normal],
                                  ['x', 'y', 'z'])
                        print('OK')

                    return

                # Draw a first plot
                pick_func = fig1.on_mouse_pick(picker_callback)
                pick_func.tolerance = 0.01
                update_scene()
                fig1.scene.interactor.add_observer('KeyPressEvent', keyboard_callback)
                mlab.show()

            except tf.errors.OutOfRangeError:
                break
Example #29
0
            points_target_dawnsample.normals)
        points_source_iss = iss(data=points_source_dawnsample_numpy,
                                radius=original_voxel_size * 2.5,
                                nms_radius=original_voxel_size * 10)
        points_target_iss = iss(data=points_target_dawnsample_numpy,
                                radius=original_voxel_size * 2.5,
                                nms_radius=original_voxel_size * 10)
        print('source iss shape:', points_source_iss.shape)
        print('target iss shape:', points_target_iss.shape)
        #pointCloudShow(points_source_dawnsample_numpy,points_source_dawnsample_numpy[points_source_iss])
        #pointCloudShow(points_target_dawnsample_numpy,points_target_dawnsample_numpy[points_target_iss])

        # step 4: build KD-trees and find radius nearest neighbours (RNN)
        leaf_size = 4
        radius = original_voxel_size * 4
        source_search_tree = KDTree(points_source_dawnsample_numpy, leaf_size)
        target_search_tree = KDTree(points_target_dawnsample_numpy, leaf_size)
        source_nearest_idx = source_search_tree.query_radius(
            points_source_dawnsample_numpy, radius)  # radius neighbors of each point
        target_nearest_idx = target_search_tree.query_radius(
            points_target_dawnsample_numpy, radius)  # radius neighbors of each point

        # step 5: compute descriptors (FPFH)
        Bin = 5
        points_source_fpfh = np.asarray([
            describe(points_source_dawnsample_numpy,
                     points_source_dawnsample_numpy_normal, source_nearest_idx,
                     keypoint_id, radius, Bin)
            for keypoint_id in points_source_iss
        ])
        points_target_fpfh = np.asarray([
Example #30
0
    rms = np.sqrt(total_sqrms.sum() / len(total_sqrms))

    return rms


def read_pts(file_path, dim=3):

    my_file = read_ply(file_path)
    my_pts = np.vstack(
        (my_file['x'], my_file['y'], my_file['z'])) if dim == 3 else np.vstack(
            (my_file['x'], my_file['y']))

    return my_pts
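

# NOTE (added, hedged): the RMS() function used in the __main__ block below is only
# partially visible above (its last lines take the square root of the mean squared
# residual). The sketch below is an illustrative stand-in, assuming the inputs are
# (dim, N) arrays of matched correspondences and the optional third argument is the
# fraction of best-matching points kept (a trimmed RMS).
def RMS(pts_a, pts_b, keep_fraction=1.0):
    # squared distances between corresponding columns of the two (dim, N) arrays
    sq_dists = np.sum((pts_a - pts_b)**2, axis=0)
    # keep only the requested fraction of smallest squared residuals
    n_keep = max(1, int(np.ceil(keep_fraction * sq_dists.size)))
    total_sqrms = np.sort(sq_dists)[:n_keep]
    return np.sqrt(total_sqrms.sum() / len(total_sqrms))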


if __name__ == '__main__':

    file_path_ref = "../data/Notre_Dame_Des_Champs_1.ply"
    file_path_data = "../data/NDDC_transform.ply"

    pts_ref = read_pts(file_path_ref)
    pts_data = read_pts(file_path_data)

    ref_tree = KDTree(pts_ref.T, leaf_size=10)
    idx = np.squeeze(ref_tree.query(pts_data.T, k=1, return_distance=False))

    print("RMS on whole point cloud: %f" % RMS(pts_data, pts_ref[:, idx]))
    print("RMS on 90%% point cloud: %f" % RMS(pts_data, pts_ref[:, idx], 0.9))
    print("RMS on 70%% point cloud: %f" % RMS(pts_data, pts_ref[:, idx], 0.7))
    print("RMS on 50%% point cloud: %f" % RMS(pts_data, pts_ref[:, idx], 0.5))