Example #1
def create_images_record(dataset_path, scale=1.0):
    cameras, images, _ = read_model(os.path.join(dataset_path, 'dense/sparse'), '.bin')
    pairs = [(cameras[images[i][3]], images[i]) for i in images]
    records = []
    for cam, img in pairs:
        image_path = os.path.join(dataset_path, 'dense/images', img[4])
        if not os.path.exists(image_path):
            raise RuntimeError("Image {} not found in dataset".format(img[4]))
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        height, width, _ = image.shape
        real_scale = [1.0, 1.0]
        if scale != 1.0:
            new_height = int(height * scale)
            new_width = int(width * scale)
            real_scale = [new_width / width, new_height / height]
            image = cv2.resize(image, (new_width, new_height))
        focals, principles = parse_intrinsic(cam, real_scale)
        qvec = img[1]  # COLMAP stores quaternions as (w, x, y, z); scipy expects (x, y, z, w)
        rotation = Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]])
        image = image.astype(np.float32)
        image /= 255.0
        records.append({
            'focals': focals,
            'principles': principles,
            'rotation': rotation.as_matrix(),
            'translation': img[2],
            'pixels': image,
            'width': width,
            'height': height,
            'name': img[4]
        })
    return records   
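The snippet above relies on a parse_intrinsic helper that is not shown. A minimal sketch of what it might look like, assuming COLMAP's PINHOLE and SIMPLE_PINHOLE camera models and per-axis scale factors (the helper itself is a guess based on how focals/principles are used above):

def parse_intrinsic(cam, real_scale):
    # cam follows COLMAP's Camera tuple: (id, model, width, height, params)
    model, params = cam[1], cam[4]
    if model == 'PINHOLE':            # params: fx, fy, cx, cy
        fx, fy, cx, cy = params
    elif model == 'SIMPLE_PINHOLE':   # params: f, cx, cy
        fx = fy = params[0]
        cx, cy = params[1], params[2]
    else:
        raise ValueError('Unsupported camera model: {}'.format(model))
    focals = [fx * real_scale[0], fy * real_scale[1]]
    principles = [cx * real_scale[0], cy * real_scale[1]]
    return focals, principles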
Example #2
def extract_all_to_dir(sparse_dir, out_dir, ext='.bin'):
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    camera_dict_file = os.path.join(out_dir, 'kai_cameras.json')
    xyz_file = os.path.join(out_dir, 'kai_points.txt')
    track_file = os.path.join(out_dir, 'kai_tracks.json')
    keypoints_file = os.path.join(out_dir, 'kai_keypoints.json')

    colmap_cameras, colmap_images, colmap_points3D = read_model(
        sparse_dir, ext)

    camera_dict = parse_camera_dict(colmap_cameras, colmap_images)
    with open(camera_dict_file, 'w') as fp:
        json.dump(camera_dict, fp, indent=2, sort_keys=True)

    all_tracks, all_points, view_keypoints = parse_tracks(
        colmap_images, colmap_points3D)
    all_points = np.array(all_points)
    np.savetxt(xyz_file,
               all_points,
               header='format: x, y, z, reproj_err, track_len, color(RGB)',
               fmt='%.6f')

    mesh = trimesh.Trimesh(vertices=all_points[:, :3].astype(np.float32),
                           vertex_colors=all_points[:, -3:].astype(np.uint8))
    mesh.export(os.path.join(out_dir, 'kai_points.ply'))

    with open(track_file, 'w') as fp:
        json.dump(all_tracks, fp)

    with open(keypoints_file, 'w') as fp:
        json.dump(view_keypoints, fp)
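Hypothetical usage, with placeholder paths (the function only needs a sparse model directory containing the cameras/images/points3D files):

extract_all_to_dir('project/sparse/0', 'project/kai_export', ext='.bin')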
Example #3
    def auto_reconstruct(self, reference_reconstruct=None):
        if reference_reconstruct is not None:
            # list this folder's images up front; the geo-registration loop
            # below needs it even when the combined folder already exists
            images = os.listdir(self.folder.images_path)

            if not (self.folder.path / 'combined').exists():
                shutil.copytree(reference_reconstruct.path,
                                self.folder.path / 'combined')

                combined_reconstruct_folder = ColmapFolder(self.folder.path /
                                                           'combined')

                for image in images:
                    shutil.copy2(self.folder.images_path / image,
                                 combined_reconstruct_folder.images_path)

            combined_reconstruct_folder = ColmapFolder(self.folder.path /
                                                       'combined')
            combined_reconstruct = Reconstruct(self.delegate,
                                               combined_reconstruct_folder)
            combined_reconstruct.sparse_reconstruct()

            _, comb_images, _ = read_write_model.read_model(
                combined_reconstruct_folder.sparse_path / '0', '.bin')

            with self.folder.geo_reg_path.open('w') as f:
                for image in comb_images.values():
                    if image.name in images:
                        f.write(
                            f"{image.name} {' '.join(map(str, image.transformation_matrix[0:3, 3]))}\n"
                        )

        self.sparse_reconstruct()
        self.dense_reconstruct()
Example #4
def main(args):
    cameras, images, points3D = read_model(args.input, '.bin')
    if not os.path.exists(args.image_output):
        os.makedirs(args.image_output)
    if not os.path.exists(args.model_output):
        os.makedirs(args.model_output)
    new_cameras, new_images = crop_image(cameras, images, args)
    write_model(new_cameras, new_images, points3D, args.model_output, '.bin')
Example #5
def main():
    import sys
    if len(sys.argv) != 3:
        print("Usage: python read_model.py "
              "path/to/model/folder/txt path/to/model/folder/bin")
        return

    print("Comparing text and binary models ...")

    path_to_model_txt_folder = sys.argv[1]
    path_to_model_bin_folder = sys.argv[2]
    cameras_txt, images_txt, points3D_txt = \
        read_model(path_to_model_txt_folder, ext=".txt")
    cameras_bin, images_bin, points3D_bin = \
        read_model(path_to_model_bin_folder, ext=".bin")
    compare_cameras(cameras_txt, cameras_bin)
    compare_images(images_txt, images_bin)
    compare_points(points3D_txt, points3D_bin)

    print("... text and binary models are equal.")
    print("Saving text model and reloading it ...")

    tmpdir = mkdtemp()
    write_model(cameras_bin, images_bin, points3D_bin, tmpdir, ext='.txt')
    cameras_txt, images_txt, points3D_txt = \
        read_model(tmpdir, ext=".txt")
    compare_cameras(cameras_txt, cameras_bin)
    compare_images(images_txt, images_bin)
    compare_points(points3D_txt, points3D_bin)

    print("... saved text and loaded models are equal.")
    print("Saving binary model and reloading it ...")

    write_model(cameras_bin, images_bin, points3D_bin, tmpdir, ext='.bin')
    cameras_bin, images_bin, points3D_bin = \
        read_model(tmpdir, ext=".bin")
    compare_cameras(cameras_txt, cameras_bin)
    compare_images(images_txt, images_bin)
    compare_points(points3D_txt, points3D_bin)

    print("... saved binary and loaded models are equal.")
Example #6
def main():
    cameras, images, points3D = read_model(COLMAP_SPARSE_DIRECTORY, '.bin')
    ref_image = [images[i] for i in images
                 if images[i][4] == REFERNCE_IMAGE][0]
    ref_image_pixel = get_image(ref_image[4])
    target_image = [images[i] for i in images
                    if images[i][4] == TARGET_IMAGE][0]
    target_image_pixel = get_image(target_image[4])
    source_images = [images[i] for i in images if images[i][4] in SOURCE_IMAGE]
    source_images_pixel = np.dstack([get_image(s[4]) for s in source_images])
    ref_camera = cameras[ref_image[3]]
    intrinsic = np.eye(3)

    scale = get_scale()
    if ref_camera[1] == 'PINHOLE':  # COLMAP undistorted
        fx, fy, cx, cy = ref_camera[4]
        intrinsic[0, 0] = fx * scale
        intrinsic[1, 1] = fy * scale
        intrinsic[0, 2] = cx * scale
        intrinsic[1, 2] = cy * scale
    elif ref_camera[1] == 'SIMPLE_RADIAL':  # COLMAP default
        f, cx, cy, k1 = ref_camera[4]
        intrinsic[0, 0] = f * scale
        intrinsic[1, 1] = f * scale
        intrinsic[0, 2] = cx * scale
        intrinsic[1, 2] = cy * scale
        # stash k1 in the unused skew slot; the resize preserves aspect
        # ratio, so the distortion coefficient itself needs no update
        intrinsic[0, 1] = k1

    ref_pose = get_extrinsic(ref_image[1], ref_image[2])
    target_pose = get_extrinsic(target_image[1], target_image[2])
    source_poses = np.dstack(
        [get_extrinsic(s[1], s[2]) for s in source_images])
    source_poses = np.moveaxis(source_poses, -1, 0)

    np.savez_compressed(
        OUTPUT_PATH,
        intrinsics=np.array([intrinsic]).astype(np.float32),
        src_poses=np.array([source_poses]).astype(np.float32),
        ref_pose=np.array([ref_pose]).astype(np.float32),
        tgt_pose=np.array([target_pose]).astype(np.float32),
        src_images=np.array([source_images_pixel]).astype(np.float32),
        ref_image=np.array([ref_image_pixel]).astype(np.float32),
        tgt_image=np.array([target_image_pixel]).astype(np.float32))
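The get_extrinsic helper used above is not part of the snippet. A plausible sketch, assuming it builds a 4x4 world-to-camera matrix and accounts for COLMAP storing quaternions as (w, x, y, z) while scipy expects (x, y, z, w):

import numpy as np
from scipy.spatial.transform import Rotation

def get_extrinsic(qvec, tvec):
    pose = np.eye(4)
    # reorder the COLMAP (w, x, y, z) quaternion into scipy's (x, y, z, w)
    pose[:3, :3] = Rotation.from_quat(
        [qvec[1], qvec[2], qvec[3], qvec[0]]).as_matrix()
    pose[:3, 3] = tvec
    return pose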
Example #7
def main(args):
    point2d_lookup = get_keypoint_from_db(args.database)
    cameras, images, points3d = read_model(args.sparse, '.bin')
    point_len = 0
    point3d_ids = []
    point2ds_data = []
    # build point data
    for point_id in points3d:
        pid, xyz, rgb, err, img_ids, kpt_ids = points3d[point_id]
        point3d_ids.append(pid)
        for i in range(len(img_ids)):
            point2ds_data.append({
                'id': point_len,
                'point2d': point2d_lookup[img_ids[i]][kpt_ids[i]]
            })
        point_len = point_len + 1
Example #8
def extract_all_to_dir(mvs_dir, out_dir, ext='.bin'):
    makedirs(out_dir, exist_ok=True)

    sparse_dir = os.path.join(mvs_dir, 'sparse')
    camera_dict_file = os.path.join(out_dir, 'kai_cameras.json')
    xyz_file = os.path.join(out_dir, 'kai_points.txt')
    track_file = os.path.join(out_dir, 'kai_tracks.json')
    keypoints_file = os.path.join(out_dir, 'kai_keypoints.json')

    colmap_cameras, colmap_images, colmap_points3D = read_model(
        sparse_dir, ext)

    undistorted_img_dir = os.path.join(mvs_dir, 'images')
    posed_img_dir_link = os.path.join(out_dir, 'images')
    if os.path.exists(posed_img_dir_link):
        shutil.rmtree(posed_img_dir_link)

    resize_images_cameras(colmap_cameras, colmap_images, (900, 600),
                          undistorted_img_dir, posed_img_dir_link)

    camera_dict = parse_camera_dict(colmap_cameras, colmap_images)
    with open(camera_dict_file, 'w') as fp:
        json.dump(camera_dict, fp, indent=2, sort_keys=True)

    all_tracks, all_points, view_keypoints = parse_tracks(
        colmap_images, colmap_points3D)
    all_points = np.array(all_points)
    np.savetxt(xyz_file,
               all_points,
               header='format: x, y, z, reproj_err, track_len, color(RGB)',
               fmt='%.6f')

    mesh = trimesh.Trimesh(vertices=all_points[:, :3].astype(np.float32),
                           vertex_colors=all_points[:, -3:].astype(np.uint8))
    mesh.export(os.path.join(out_dir, 'kai_points.ply'))

    with open(track_file, 'w') as fp:
        json.dump(all_tracks, fp)

    with open(keypoints_file, 'w') as fp:
        json.dump(view_keypoints, fp)
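resize_images_cameras is not shown in the snippet. A simplified sketch of what it plausibly does, resizing every undistorted image to the target size with OpenCV (the intrinsic rescaling that a full implementation would also apply to colmap_cameras is omitted here):

import os
import cv2

def resize_images_cameras(cameras, images, target_wh, src_dir, dst_dir):
    os.makedirs(dst_dir, exist_ok=True)
    for image in images.values():
        img = cv2.imread(os.path.join(src_dir, image.name))
        img = cv2.resize(img, target_wh)  # target_wh is (width, height)
        cv2.imwrite(os.path.join(dst_dir, image.name), img)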
Example #9
    def __init__(self, dataset_dir, scale=1.0):
        self.dataset_dir = dataset_dir
        self.image_dir = os.path.join(dataset_dir, 'dense/images')
        self.sparse_dir = os.path.join(dataset_dir, 'dense/sparse')
        if not os.path.exists(self.image_dir) or not os.path.exists(self.sparse_dir):
            raise RuntimeError('Dataset directory \'{}\' is malformed!'.format(dataset_dir))
        intrinsics, extrinsics, _ = read_model(self.sparse_dir)
        self.extrinsics_index = []
        self.extrinsics = {}
        self.intrinsics = {}
        self.pixels = None
        self.pixel_loc = None
        scale_x = scale_y = 1.0
        self.image_len = 0
        for i in extrinsics:
            self.extrinsics_index.append(i)  # in case extrinsic ids are not sorted
            extrinsic = extrinsics[i]
            qvec = extrinsic[1]  # COLMAP (w, x, y, z) -> scipy (x, y, z, w)
            self.extrinsics[i] = {
                'rotation': Rotation.from_quat([qvec[1], qvec[2], qvec[3],
                                                qvec[0]]).as_matrix(),
                'translation': extrinsic[2],
                'intrinsic_id': extrinsic[3]
            }
            image, scale_x, scale_y = self.read_image(extrinsic[4])
            grid = self.get_grid(image.shape)
            image = image.reshape(-1,3)
            self.image_len = image.shape[0]
            if self.pixels is None:
                self.pixels = image
                self.pixel_loc = grid
            else:
                self.pixels = np.vstack((self.pixels, image))
                self.pixel_loc = np.vstack((self.pixel_loc, grid))

        for i in intrinsics:
            fx, fy, px, py = self.parse_camera(intrinsics[i]) 
            self.intrinsics[i] = np.array([
                [fx*scale_x,        0.0,    px*scale_x],
                [       0.0, fy*scale_y,    py*scale_y],
                [       0.0,        0.0,            1.0]
            ])
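The read_image, get_grid, and parse_camera helpers are not part of the snippet. As one illustration, get_grid plausibly returns the (x, y) pixel coordinates row-aligned with the flattened image; a sketch under that assumption:

    def get_grid(self, shape):
        # (x, y) coordinates for every pixel, matching image.reshape(-1, 3)
        h, w = shape[0], shape[1]
        xs, ys = np.meshgrid(np.arange(w), np.arange(h))
        return np.stack([xs.ravel(), ys.ravel()], axis=-1)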
Example #10
def convert_COLMAP_to_log(filename, logfile_out, input_images, formatp):
    dirname = os.path.dirname(filename)
    cameras, images, points3D = read_write_model.read_model(dirname, '.bin')
    jpg_list = glob.glob(input_images + '/*.' + formatp)
    jpg_list.sort()
    nr_of_images = len(jpg_list)

    T = []
    i_map = []
    TF = []
    i_mapF = []

    ii = 0
    for key, im in images.items():
        qvec = im[1]
        r = quat2rotmat(qvec)
        translation = im[2]
        w = np.zeros((4, 4))
        w[3, 3] = 1
        w[0:3, 0:3] = r
        w[0:3, 3] = translation
        T.append(np.linalg.inv(w))  # camera-to-world = inverse of world-to-camera
        image_name = im[4]
        matching = [i for i, s in enumerate(jpg_list) if image_name in s]
        ii = im[0]
        i_map.append([ii, matching[0], 0])
    idm = np.identity(4)
    # log file needs an entry for every input image, if image is not part of
    # the SfM bundle it will be assigned to the identity matrix
    for k in range(0, nr_of_images):
        try:
            # find the id of view nr. k
            view_id = [i for i, item in enumerate(i_map) if k == item[1]][0]
            i_mapF.append(np.array([k, k, 0.0], dtype='int'))
            TF.append(T[view_id])
        except IndexError:
            i_mapF.append(np.array([k, -1, 0.0], dtype='int'))
            TF.append(idm)
    write_SfM_log(TF, i_mapF, logfile_out)
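quat2rotmat is assumed by the snippet above. A standard implementation for a unit quaternion in COLMAP's (w, x, y, z) storage order could be:

import numpy as np

def quat2rotmat(q):
    w, x, y, z = q
    return np.array([
        [1 - 2*y*y - 2*z*z, 2*x*y - 2*z*w,     2*x*z + 2*y*w],
        [2*x*y + 2*z*w,     1 - 2*x*x - 2*z*z, 2*y*z - 2*x*w],
        [2*x*z - 2*y*w,     2*y*z + 2*x*w,     1 - 2*x*x - 2*y*y],
    ])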
Example #11
def main():
    cameras, images, points3D = read_model(MODEL_DIR, '.bin')
    focal = 0.0
    width = 0.0
    height = 0.0
    for cam_id in cameras:
        width += cameras[cam_id][2]
        height += cameras[cam_id][3]
        focal += cameras[cam_id][4][0]
    width /= len(cameras)
    height /= len(cameras)
    focal /= len(cameras)
    """
    if height > width:
        focal *= SQUARE_SIZE / height
    else:
        focal *=SQUARE_SIZE / width
    """
    print("FOCAL = ", focal)
    exit()
    images_pixels = np.zeros((len(images), SQUARE_SIZE, SQUARE_SIZE, 3),
                             dtype=np.float32)
    poses = np.zeros((len(images), 4, 4), dtype=np.float32)
    for image_id in images:
        img_path = os.path.join(IMAGE_DIR, images[image_id][4])
        img = plt.imread(img_path)
        img = image_square(img, SQUARE_SIZE)
        img = img.astype(np.float32)
        img /= 255.0
        images_pixels[image_id - 1, :, :] = img
        qvec = images[image_id][1]
        tvec = images[image_id][2]
        rot = Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]])
        poses[image_id - 1, :3, :3] = rot.as_matrix()
        poses[image_id - 1, :3, 3] = tvec
        poses[image_id - 1, 3, 3] = 1.0
    #np.savez(OUTPUT_NPZ, focal=focal, poses=poses, images=images_pixels)
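image_square is not shown; a plausible sketch that center-crops to a square and resizes to size x size (an assumption based on how it is used above):

import cv2

def image_square(img, size):
    h, w = img.shape[:2]
    s = min(h, w)
    top, left = (h - s) // 2, (w - s) // 2
    crop = img[top:top + s, left:left + s]
    return cv2.resize(crop, (size, size))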
    """
    for i in range(n_test_batches):
        cost_batch, output_batch = test_model(i)
        prediction_batch = list(np.argmax(output_batch, axis = 1))
        #cost_batch = test_model(i)
        costs += [cost_batch]
        predictions+= prediction_batch

    return (np.mean(costs), predictions)
    #return (np.mean(costs))
"""


# In[11]:
#get_output = theano.function([sym_batch_index], net_output)

read_model(l_dec_x, model_filename_read)

if do_train_model:
    # Training Loop
    for epoch in range(num_epochs):
        start = time.time()
        
        #shuffle train data, train model and test model
        s1 = np.arange(train_x.shape[0])
        np.random.shuffle(s1)
        sh_x_train.set_value(train_x[s1])
        sh_y_train.set_value(train_y_one_hot[s1])
        #sh_y_target_train.set_value(train_y[s])
        train_cost, predictions_train = train_epoch(lr)
        train_acc = np.sum(predictions_train == train_y[s1]) / train_x.shape[0]
        
Example #13
def main(args):
    start_timer = timer()
    extrinsics = {}
    if detect_model(args.input, '.bin'):
        output_extension = '.bin'
    elif detect_model(args.input, '.txt'):
        output_extension = '.txt'
    else:
        raise RuntimeError(
            'Cannot find COLMAP sparse model; please check the input path')
    cameras, images, points3D = read_model(args.input, output_extension)
    num_arc = 0
    num_ring = 0
    # read colmap images and get rotation and translation
    for image_id in images:
        arc, ring = parse_filename(args.pattern, images[image_id][4])
        if arc not in extrinsics:
            extrinsics[arc] = {}
        qvec = images[image_id][1]
        rotation = Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]])
        extrinsics[arc][ring] = {
            'rotation': rotation.as_matrix(),
            'translation': images[image_id][2].copy()
        }
        # track the number of arcs and rings
        if arc + 1 > num_arc:
            num_arc = arc + 1
        if ring + 1 > num_ring:
            num_ring = ring + 1

    # find the camera positions in the bottom-most arc
    base_ring = np.zeros((num_ring, 3))
    for i in range(num_ring):
        base_ring[i] = camera_position(extrinsics[0][i])
    mean_shift = np.mean(base_ring, axis=0)

    # update extrinsic (translation only)
    for i in range(num_arc):
        for j in range(num_ring):
            extrinsics[i][j]['translation'] -= np.matmul(
                extrinsics[i][j]['rotation'], mean_shift)

    # use only the bottom-most cameras for the optimization
    rotation_matrix = np.zeros((num_ring, 3, 3))
    translation_vector = np.zeros((num_ring, 3))
    for i in range(num_ring):
        rotation_matrix[i, :, :] = extrinsics[0][i]['rotation']
        translation_vector[i, :] = extrinsics[0][i]['translation']

    # optimize to find the rotation corrector
    rotation_corrector = find_rotation_corrector(rotation_matrix,
                                                 translation_vector)

    # update extrinsic (rotation only)
    for i in range(num_arc):
        for j in range(num_ring):
            extrinsics[i][j]['rotation'] = np.matmul(
                extrinsics[i][j]['rotation'], rotation_corrector)

    # update colmap's images
    images_old = images
    images = {}
    for image_id in images_old:
        arc, ring = parse_filename(args.pattern, images_old[image_id][4])
        rotation = Rotation.from_matrix(extrinsics[arc][ring]['rotation'])
        q = rotation.as_quat()
        qvec = np.array([q[3], q[0], q[1], q[2]])
        images[image_id] = Image(id=image_id,
                                 qvec=qvec,
                                 tvec=extrinsics[arc][ring]['translation'],
                                 camera_id=images_old[image_id][3],
                                 name=images_old[image_id][4],
                                 xys=images_old[image_id][5],
                                 point3D_ids=images_old[image_id][6])

    # update COLMAP's points3D
    points3D_old = points3D
    points3D = {}
    for point_id in points3D_old:
        points3D[point_id] = Point3D(
            id=points3D_old[point_id][0],
            xyz=np.matmul(rotation_corrector.T,
                          points3D_old[point_id][1] + mean_shift),
            rgb=points3D_old[point_id][2],
            error=points3D_old[point_id][3],
            image_ids=points3D_old[point_id][4],
            point2D_idxs=points3D_old[point_id][5])

    # write the adjusted model
    if not os.path.exists(args.output):
        os.mkdir(args.output)
    write_model(cameras, images, points3D, args.output, output_extension)
    total_time = timer() - start_timer
    print('Finished in {:.2f} seconds'.format(total_time))
    print('Output written to {}'.format(os.path.abspath(args.output)))
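camera_position is not defined in the snippet. Assuming the stored rotation/translation are world-to-camera, the camera center in world coordinates is C = -R^T t, so a sketch could be:

import numpy as np

def camera_position(extrinsic):
    R = extrinsic['rotation']
    t = extrinsic['translation']
    return -R.T @ t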
Example #14
        sh_x_desired_train.set_value(train_x)
        sh_x_test.set_value(test_x)
        sh_x_desired_test.set_value(test_x)
        train_cost = train_epoch(lr)
        test_cost = test_epoch()

        t = time.time() - start

        line = "*Epoch: %i\tTime: %0.2f\tLR: %0.5f\tLL Train: %0.3f\tLL test: %0.3f\t" % (
            epoch, t, lr, train_cost, test_cost)
        print(line)

    print("Write model data")
    write_model([l_dec_x_mu], model_filename)
else:
    read_model([l_dec_x_mu], model_filename)


def show_mnist(img, i, title=""):  # expects a flattened image of shape (784,)
    img = img.copy().reshape(28, 28)
    img = np.clip(img, 0, 1)
    plt.subplot(2, 3, i)
    plt.imshow(img, cmap='Greys_r')
    plt.title(title)
    plt.axis("off")


def mnist_input(img):
    return np.tile(img, (batch_size, 1, 1, 1)).reshape(batch_size, 784)


def test_epoch():
    costs = []
    predictions = []

    for i in range(n_test_batches):
        cost_batch, output_batch = test_model(i)
        prediction_batch = list(np.argmax(output_batch, axis=1))
        #cost_batch = test_model(i)
        costs += [cost_batch]
        predictions += prediction_batch

    return (np.mean(costs), predictions)


read_model(l_dec_x, model_filename_read)

read_model(l_cls_output, classifier_filename_read)

sh_y_test.set_value(adv_test_y_one_hot_orig)
start = time.time()
test_cost, predictions_test = test_epoch()
test_acc = np.sum(predictions_test == adv_test_y_orig) / adv_test_x.shape[0]

t = time.time() - start
line = "Time: %0.2f\tLL test: %0.3f\tLL test accuracy: %0.3f\t" % (
    t, test_cost, test_acc)
#line =  "*Epoch: %i\tTime: %0.2f\tLL Train: %0.3f\tLL test: %0.3f\t" % ( epoch, t, train_cost, test_cost)
print(line)
Example #15
    def read_model(self, path, ext=""):
        self.cameras, self.images, self.points3D = read_model(path, ext)
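Hypothetical usage of the wrapper above, assuming the module-level read_model from COLMAP's read_write_model script is importable (the class name here is purely illustrative):

class SparseModel:
    def read_model(self, path, ext=""):
        self.cameras, self.images, self.points3D = read_model(path, ext)

model = SparseModel()
model.read_model('path/to/sparse/0', ext='.bin')
print(len(model.cameras), len(model.images), len(model.points3D))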
Example #17
    for i in range(n_train_batches):
        cost_batch = train_model(i, lr)
        costs += [cost_batch]
    return np.mean(costs)
'''


def test_epoch():
    costs = []
    for i in range(n_test_batches):
        cost_batch = test_model(i)
        costs += [cost_batch]
    return np.mean(costs)


read_model(l_dec_x, model_filename)


def show_mnist(img, i, title=""):  # expects a flattened image of shape (784,)
    img = img.copy().reshape(28, 28)
    img = np.clip(img, 0, 1)
    plt.subplot(3, 2, i)
    plt.imshow(img, cmap='Greys_r')
    plt.title(title)
    plt.axis("off")


def mnist_input(img):
    return np.tile(img, (batch_size, 1, 1, 1)).reshape(batch_size, 784)

Example #18
def main(args):
    cameras, images, points3d = read_model(args.input, '.bin')
    observation_info = []  #format: [camera_id, point3d_id, x, y]
    camera_info = []  # format: [c1, ... c9]
    point_info = []  # format: [x,y,z]
    point3d_lookup = {}
    focal_lookup = {}
    # build point data
    point_len = 0
    for point_id in points3d:
        pid, xyz, rgb, err, img_ids, kpt_ids = points3d[point_id]
        point3d_lookup[pid] = point_len
        point_info.append(xyz)
        point_len = point_len + 1
    #build camera lookup
    for cam_id in cameras:
        cid, model, w, h, params = cameras[cam_id]
        focal_lookup[cid] = params[0]
    # build observe data
    for image_id in images:
        img_id, qvec, tvec, camera_id, name, xys, point3d_ids = images[
            image_id]
        for i in range(len(xys)):
            if point3d_ids[i] != -1:
                x, y = xys[i]
                observation_info.append(
                    [camera_id - 1, point3d_lookup[point3d_ids[i]], x, y])
        rotation = Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]])
        rotvec = rotation.as_rotvec()
        camera_feature = [
            rotvec[0],
            rotvec[1],
            rotvec[2],
            tvec[0],
            tvec[1],
            tvec[2],
            focal_lookup[camera_id],
            0,  # assume no distortion
            0,  # assume no distortion
        ]
        camera_info.append(camera_feature)
    total_camera = len(cameras)
    total_point = len(points3d)
    total_observe = len(observation_info)
    # sort by point3d and camera
    observation_info = sorted(observation_info, key=lambda x: (x[1], x[0]))
    #write output
    with open(args.output, 'w') as f:
        f.write("{:d} {:d} {:d}\n".format(total_camera, total_point,
                                          total_observe))
        for o in observation_info:
            f.write('{} {} {} {}\n'.format(o[0], o[1], o[2], o[3]))
        for cam in camera_info:
            for p in cam:
                f.write('{}\n'.format(p))
        for point in point_info:
            for p in point:
                f.write('{}\n'.format(p))
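The file written above matches the Bundle Adjustment in the Large (BAL) layout: a header line with camera/point/observation counts, one observation per line, then nine parameters per camera and three coordinates per point, one value per line. A minimal reader sketch for that layout:

import numpy as np

def read_bal(path):
    with open(path) as f:
        n_cam, n_pts, n_obs = map(int, f.readline().split())
        obs = [f.readline().split() for _ in range(n_obs)]
        cam_idx = np.array([int(o[0]) for o in obs])
        pt_idx = np.array([int(o[1]) for o in obs])
        xy = np.array([[float(o[2]), float(o[3])] for o in obs])
        cams = np.array([float(f.readline())
                         for _ in range(9 * n_cam)]).reshape(n_cam, 9)
        pts = np.array([float(f.readline())
                        for _ in range(3 * n_pts)]).reshape(n_pts, 3)
    return cam_idx, pt_idx, xy, cams, pts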