Example #1
0
def main(args):
    """Crop a COLMAP binary model's images and write the cropped model.

    Args:
        args: parsed CLI namespace with ``input``, ``image_output`` and
            ``model_output`` path attributes (cropping parameters are read
            by ``crop_image`` itself).
    """
    cameras, images, points3D = read_model(args.input, '.bin')
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(args.image_output, exist_ok=True)
    os.makedirs(args.model_output, exist_ok=True)
    new_cameras, new_images = crop_image(cameras, images, args)
    # points3D pass through unchanged; only cameras/images are cropped.
    write_model(new_cameras, new_images, points3D, args.model_output, '.bin')
Example #2
0
def main():
    """Round-trip sanity check for the COLMAP model reader/writer.

    Compares a text and a binary model for equality, then saves the model
    back out in both formats and re-verifies after each reload.

    Usage: python read_model.py <txt_model_dir> <bin_model_dir>
    """
    import sys
    import shutil
    if len(sys.argv) != 3:
        print("Usage: python read_model.py "
              "path/to/model/folder/txt path/to/model/folder/bin")
        return

    print("Comparing text and binary models ...")

    path_to_model_txt_folder = sys.argv[1]
    path_to_model_bin_folder = sys.argv[2]
    cameras_txt, images_txt, points3D_txt = \
        read_model(path_to_model_txt_folder, ext=".txt")
    cameras_bin, images_bin, points3D_bin = \
        read_model(path_to_model_bin_folder, ext=".bin")
    compare_cameras(cameras_txt, cameras_bin)
    compare_images(images_txt, images_bin)
    compare_points(points3D_txt, points3D_bin)

    print("... text and binary models are equal.")
    print("Saving text model and reloading it ...")

    tmpdir = mkdtemp()
    try:
        write_model(cameras_bin, images_bin, points3D_bin, tmpdir, ext='.txt')
        cameras_txt, images_txt, points3D_txt = \
            read_model(tmpdir, ext=".txt")
        compare_cameras(cameras_txt, cameras_bin)
        compare_images(images_txt, images_bin)
        compare_points(points3D_txt, points3D_bin)

        print("... saved text and loaded models are equal.")
        print("Saving binary model and reloading it ...")

        write_model(cameras_bin, images_bin, points3D_bin, tmpdir, ext='.bin')
        cameras_bin, images_bin, points3D_bin = \
            read_model(tmpdir, ext=".bin")
        compare_cameras(cameras_txt, cameras_bin)
        compare_images(images_txt, images_bin)
        compare_points(points3D_txt, points3D_bin)

        print("... saved binary and loaded models are equal.")
    finally:
        # Fix: mkdtemp() left the scratch directory behind on every run.
        shutil.rmtree(tmpdir, ignore_errors=True)
def take_steamvr_images(save_dir, num_images, delay_between_images):
    """Capture frames from the SteamVR HMD cameras and save a COLMAP dataset.

    Grabs ``num_images`` frames from the headset's tracking cameras, saves
    each per-camera crop as a JPEG, records every image's position in a
    geo-registration file, and writes a COLMAP text model plus database
    under ``save_dir``. Also draws the estimated camera poses in a live
    3D plot while capturing.

    Args:
        save_dir: dataset root (wrapped in ColmapFolder) that receives the
            images, database and sparse model.
        num_images: number of frames to capture.
        delay_between_images: seconds to sleep between captures.
    """
    plt.show()

    openvr.init(openvr.VRApplication_Scene)

    # Rotate 180 degrees about X to convert between OpenVR's and the
    # model's camera coordinate conventions.
    convert_coordinate_system = np.identity(4)
    convert_coordinate_system[:3, :3] = Rotation.from_euler(
        'XYZ', (180, 0, 0), degrees=True).as_matrix()

    device = openvr.k_unTrackedDeviceIndex_Hmd

    num_cameras = openvr.VRSystem().getInt32TrackedDeviceProperty(
        device, openvr.Prop_NumCameras_Int32)

    camera_to_head_mat = (openvr.HmdMatrix34_t * num_cameras)()

    # 48 = byte size of one HmdMatrix34_t (3 x 4 floats).
    openvr.VRSystem().getArrayTrackedDeviceProperty(
        device, openvr.Prop_CameraToHeadTransforms_Matrix34_Array,
        openvr.k_unHmdMatrix34PropertyTag, camera_to_head_mat,
        48 * num_cameras)

    cam = openvr.VRTrackedCamera()

    cam_handle = cam.acquireVideoStreamingService(device)

    width, height, buffer_size = cam.getCameraFrameSize(
        device, openvr.VRTrackedCameraFrameType_MaximumUndistorted)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('x axis - metres')
    ax.set_ylabel('z axis - metres')
    ax.set_zlabel('y axis - metres')
    ax.set_xlim(-0.5, 0.5)
    ax.set_ylim(-0.5, 0.5)
    ax.set_zlim(0, 1)

    save_dir = ColmapFolder(save_dir)

    db = COLMAPDatabase.connect(save_dir.database_path)
    db.create_tables()

    # Initial SIMPLE_RADIAL parameters: (f, cx, cy, k).
    init_params = np.array(
        (420.000000, (width / num_cameras) / 2, height / 2, 0.000000))

    camera_model = CAMERA_MODEL_NAMES['SIMPLE_RADIAL']

    cameras = {}
    camera_to_head_transforms = {}

    for i in range(num_cameras):
        # NOTE(review): the DB row uses width / 2 while the Camera below uses
        # width / num_cameras — these agree only for a 2-camera rig; confirm.
        cam_id = db.add_camera(camera_model.model_id, width / 2, height,
                               init_params)
        camera_to_head_transforms[cam_id] = hmd_matrix_to_numpy(
            camera_to_head_mat[i])
        cameras[cam_id] = Camera(id=cam_id,
                                 model=camera_model.model_name,
                                 width=width / num_cameras,
                                 height=height,
                                 params=init_params)

    poses = []  # Let waitGetPoses populate the poses structure the first time
    cam_positions = []

    images = []

    for i in range(num_images):

        poses, game_poses = openvr.VRCompositor().waitGetPoses(poses, None)
        hmd_pose = poses[openvr.k_unTrackedDeviceIndex_Hmd]

        if not hmd_pose.bPoseIsValid:
            print("Pose not valid")
            continue

        world_to_head = hmd_matrix_to_numpy(hmd_pose.mDeviceToAbsoluteTracking)

        world_to_cams = {
            id_: world_to_head @ head_to_cam @ convert_coordinate_system
            for (id_, head_to_cam) in camera_to_head_transforms.items()
        }

        image_buffer = (ctypes.c_ubyte * buffer_size)()
        try:
            cam.getVideoStreamFrameBuffer(
                cam_handle, openvr.VRTrackedCameraFrameType_MaximumUndistorted,
                image_buffer, buffer_size)
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Best-effort skip of the frame is kept.
        except Exception:
            print("Error getting video stream buffer")
            continue

        image_array = np.array(image_buffer)

        # Frames arrive as one RGBA strip holding all cameras side by side;
        # drop alpha and clamp to the valid byte range.
        image_array = image_array.reshape((height, width, 4))

        image_array = image_array[:, :, 0:3]

        image_array = np.clip(image_array, 0, 255)

        for j, (cam_id, world_to_cam) in enumerate(world_to_cams.items()):
            # Crop camera j's horizontal slice out of the combined frame.
            image = Image.fromarray(
                image_array[:,
                            int(width / num_cameras) *
                            j:int(width / num_cameras) * (j + 1), :])

            name = f"{i:03d}_cam{j}.jpg"

            image.save(save_dir.images_path / name)

            image_obj = read_write_model.Image(
                camera_id=cam_id,
                name=name,
                transformation_matrix=world_to_cam)

            images.append(image_obj)

            draw_axes(ax, transform_mat=world_to_cam)

        fig.show()

        fig.canvas.draw()
        fig.canvas.flush_events()
        time.sleep(delay_between_images)
        print(f"Picture taken :{i}")

    image_dict = {}

    print("All pictures taken")

    with open(save_dir.geo_reg_path, 'w') as geo_reg_file:

        for image in images:
            image_id = db.add_image(image=image)
            image.id = image_id
            image_dict[image_id] = image
            # Fix: previously wrote the loop-leaked `name` (always the last
            # captured filename) for every row instead of this image's name.
            geo_reg_file.write(
                f"{image.name} {' '.join(map(str, image.transformation_matrix[0:3, 3]))}\n"
            )

    read_write_model.write_model(cameras, image_dict, {}, save_dir.sparse_path,
                                 '.txt')

    db.commit()
    db.close()

    print("Metadata saved")

    openvr.shutdown()
    plt.show()
        sh_x_train.set_value(train_x[s1])
        sh_y_train.set_value(train_y_one_hot[s1])
        #sh_y_target_train.set_value(train_y[s])
        train_cost, predictions_train = train_epoch(lr)
        train_acc = np.sum(predictions_train==train_y[s1])/train_x.shape[0]
        
        #print("sh_y_target_train shape : ", np.shape(train_y[s1]))
        
        s2 = np.arange(test_x.shape[0])
        np.random.shuffle(s2)
        sh_x_test.set_value(test_x[s2])
        sh_y_test.set_value(test_y_one_hot[s2])
        
        test_cost, predictions_test = test_epoch()
        test_acc = np.sum(predictions_test==test_y[s2])/test_x.shape[0]
        
        t = time.time() - start
        
        line =  "*Epoch: %i\tTime: %0.2f\tLL Train: %0.3f\tLL test: %0.3f\tLL Train accuracy: %0.3f\tLL test accuracy: %0.3f\t" % ( epoch, t, train_cost, test_cost, train_acc, test_acc)
        #line =  "*Epoch: %i\tTime: %0.2f\tLL Train: %0.3f\tLL test: %0.3f\t" % ( epoch, t, train_cost, test_cost)

        print (line)
    
    print ("Write model data")
    write_model(l_cls_output, model_filename_write)
else:
    read_model(l_cls_output, model_filename_read)



def main(args):
    """Re-align a COLMAP sparse model captured on an arc/ring camera rig.

    Reads the model from ``args.input`` (binary or text, auto-detected),
    recenters all translations on the mean bottom-ring camera position,
    applies an optimized rotation corrector, updates the 3D points to
    match, and writes the adjusted model to ``args.output`` in the same
    format as the input.

    Args:
        args: parsed CLI namespace with ``input``, ``output`` and
            ``pattern`` (filename pattern for arc/ring parsing) attributes.

    Raises:
        RuntimeError: if no COLMAP model is found at ``args.input``.
    """
    start_timer = timer()
    extrinsics = {}
    # Accept either binary or text models; keep the same format on output.
    if detect_model(args.input, '.bin'):
        output_extension = '.bin'
    elif detect_model(args.input, '.txt'):
        output_extension = '.txt'
    else:
        raise RuntimeError(
            'Cannot find colmap sparse model. please check input path')
    cameras, images, points3D = read_model(args.input, output_extension)
    num_arc = 0
    num_ring = 0
    # Read colmap images and collect rotation/translation per (arc, ring).
    for image_id in images:
        arc, ring = parse_filename(args.pattern, images[image_id][4])
        if arc not in extrinsics:
            extrinsics[arc] = {}
        qvec = images[image_id][1]
        # COLMAP stores quaternions (w, x, y, z); scipy expects (x, y, z, w).
        rotation = Rotation.from_quat([qvec[1], qvec[2], qvec[3], qvec[0]])
        extrinsics[arc][ring] = {
            'rotation': rotation.as_matrix(),
            # copy(): the translation is mutated in place further down.
            'translation': images[image_id][2].copy()
        }
        # Track the number of arcs and rings (indices are 0-based).
        if arc + 1 > num_arc:
            num_arc = arc + 1
        if ring + 1 > num_ring:
            num_ring = ring + 1

    # Find camera positions in the bottom-most ring (arc 0).
    base_ring = np.zeros((num_ring, 3))
    for i in range(num_ring):
        base_ring[i] = camera_position(extrinsics[0][i])
    mean_shift = np.mean(base_ring, axis=0)

    # Update extrinsics (translation only): recenter on the ring mean.
    for i in range(num_arc):
        for j in range(num_ring):
            extrinsics[i][j]['translation'] -= np.matmul(
                extrinsics[i][j]['rotation'], mean_shift)

    # Only the bottom-most ring is used to optimize the corrector.
    rotation_matrix = np.zeros((num_ring, 3, 3))
    translation_vector = np.zeros((num_ring, 3))
    for i in range(num_ring):
        rotation_matrix[i, :, :] = extrinsics[0][i]['rotation']
        translation_vector[i, :] = extrinsics[0][i]['translation']

    # Optimize to find the rotation corrector.
    rotation_corrector = find_rotation_corrector(rotation_matrix,
                                                 translation_vector)

    # Update extrinsics (rotation only).
    for i in range(num_arc):
        for j in range(num_ring):
            extrinsics[i][j]['rotation'] = np.matmul(
                extrinsics[i][j]['rotation'], rotation_corrector)

    # Rebuild colmap's images with the corrected poses.
    images_old = images
    images = {}
    for image_id in images_old:
        arc, ring = parse_filename(args.pattern, images_old[image_id][4])
        rotation = Rotation.from_matrix(extrinsics[arc][ring]['rotation'])
        # scipy returns (x, y, z, w); convert back to COLMAP's (w, x, y, z).
        q = rotation.as_quat()
        qvec = np.array([q[3], q[0], q[1], q[2]])
        images[image_id] = Image(id=image_id,
                                 qvec=qvec,
                                 tvec=extrinsics[arc][ring]['translation'],
                                 camera_id=images_old[image_id][3],
                                 name=images_old[image_id][4],
                                 xys=images_old[image_id][5],
                                 point3D_ids=images_old[image_id][6])

    # Apply the inverse correction to colmap's 3D points.
    points3D_old = points3D
    points3D = {}
    for point_id in points3D_old:
        points3D[point_id] = Point3D(
            id=points3D_old[point_id][0],
            xyz=np.matmul(rotation_corrector.T,
                          points3D_old[point_id][1] + mean_shift),
            rgb=points3D_old[point_id][2],
            error=points3D_old[point_id][3],
            image_ids=points3D_old[point_id][4],
            point2D_idxs=points3D_old[point_id][5])

    # Write the adjusted model; exist_ok avoids the check-then-create race
    # and, unlike os.mkdir, also creates missing parent directories.
    os.makedirs(args.output, exist_ok=True)
    write_model(cameras, images, points3D, args.output, output_extension)
    total_time = timer() - start_timer
    print('Finished in {:.2f} seconds'.format(total_time))
    print('output are write to {}'.format(os.path.abspath(args.output)))
Example #6
0
        #shuffle train data, train model and test model
        np.random.shuffle(train_x)
        sh_x_train.set_value(train_x)

        train_cost = train_epoch(lr)
        test_cost = test_epoch()

        t = time.time() - start

        line = "*Epoch: %i\tTime: %0.2f\tLR: %0.5f\tLL Train: %0.3f\tLL test: %0.3f\t" % (
            epoch, t, lr, train_cost, test_cost)
        print(line)

    print("Write model data")
    write_model(l_dec_x, model_filename)
else:
    read_model(l_dec_x, model_filename)

# In[12]:

#try:
#   import cPickle as pkl
#except:
#import pickle as pkl
#with open("./params/mnist_model.params", 'rb') as f1:
#   data1 = pkl.load(f1, encoding = 'latin1')

# In[13]:

Example #7
0
def main(args):
    """Convert a DeepView capture to a COLMAP binary model in ``output/``.

    Args:
        args: parsed CLI namespace with an ``input`` path attribute.
    """
    cameras, images = readCameraDeepview(args.input)
    print(len(cameras))
    # Fix: a leftover debug exit() made the write below unreachable,
    # so the function printed a count and never produced any output.
    write_model(cameras, images, {}, 'output/', '.bin')
Example #8
0
        sh_x_train.set_value(train_x)
        sh_x_desired_train.set_value(train_x)
        sh_x_test.set_value(test_x)
        sh_x_desired_test.set_value(test_x)
        train_cost = train_epoch(lr)
        test_cost = test_epoch()

        t = time.time() - start

        line = "*Epoch: %i\tTime: %0.2f\tLR: %0.5f\tLL Train: %0.3f\tLL test: %0.3f\t" % (
            epoch, t, lr, train_cost, test_cost)
        print(line)

    print("Write model data")
    write_model([l_dec_x_mu], model_filename)
else:
    read_model([l_dec_x_mu], model_filename)


def show_mnist(img, i, title=""):
    """Render one flattened 28x28 MNIST image into subplot *i* of a 2x3 grid.

    NOTE(review): the previous inline comment claimed shape (3072,), but the
    reshape below requires exactly 784 values.

    Args:
        img: flattened grayscale image of shape (784,); values clipped to [0, 1].
        i: 1-based subplot index within the 2x3 grid.
        title: optional subplot title.
    """
    pixels = np.clip(img.copy().reshape(28, 28), 0, 1)
    plt.subplot(2, 3, i)
    plt.imshow(pixels, cmap='Greys_r')
    plt.title(title)
    plt.axis("off")


def mnist_input(img):
    """Replicate a single image across the batch dimension.

    Tiles *img* ``batch_size`` times (module-level constant) and flattens
    each copy, returning an array of shape (batch_size, 784).
    """
    batched = np.tile(img, (batch_size, 1, 1, 1))
    return batched.reshape(batch_size, 784)