Code Example #1
File: compute_alignment.py  Project: niopeng/291f
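# Assumed module-level imports and helpers (not shown in this listing):
# numpy as np, scipy.io, tensorflow as tf, plus the project's app_config,
# Dataset3D, compute_alignment_candidates and quatWAvgMarkley.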
def compute_alignment():
    config = tf.ConfigProto(device_count={'GPU': 0})

    cfg = app_config

    exp_dir = cfg.checkpoint_dir

    tf.enable_eager_execution(config=config)

    cfg.num_dataset_samples = 50
    dataset = Dataset3D(cfg)

    num_to_estimate = 15
    num_models = dataset.num_samples()

    all_rotations_file = f"{exp_dir}/reference_rotations.mat"
    compute_alignment_candidates(cfg, dataset, all_rotations_file)

    data = scipy.io.loadmat(all_rotations_file)
    rotations = data["rotations"]
    rmse = data["rmse"]

    # For each model, keep only the num_filtered views with the lowest RMSE
    num_filtered = 2
    rotations_filtered = np.zeros((num_models, num_filtered, 4))
    rmse_filtered = np.zeros((num_models, num_filtered))

    for model_idx in range(num_models):
        rmse_m = rmse[model_idx, :]
        indices = np.argsort(rmse_m)
        indices = indices[0:num_filtered]
        rmse_filtered[model_idx, :] = rmse_m[indices]
        rotations_filtered[model_idx, :, :] = rotations[model_idx, indices, :]

    # Sort models by RMSE and choose num_to_estimate best ones
    model_mean_rmse = np.mean(rmse_filtered, axis=1)
    models_indices = np.argsort(model_mean_rmse)
    models_indices = models_indices[0:num_to_estimate]
    reference_rotations = rotations_filtered[models_indices, :, :]

    reference_rotations = np.reshape(reference_rotations, [-1, 4])
    print(reference_rotations)

    # Somehow NaNs slip into the computation, so filter them out
    nan = np.isnan(reference_rotations)
    good = np.logical_not(np.any(nan, axis=1))
    reference_rotations = reference_rotations[good, :]

    # Average the quaternions with Markley's eigenvector method, which suits
    # rotations better than a plain arithmetic mean
    reference_rotation = quatWAvgMarkley(reference_rotations)
    print("Global rotation:", reference_rotation)

    scipy.io.savemat(f"{exp_dir}/final_reference_rotation.mat",
                     mdict={"rotation": reference_rotation})
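Note: quatWAvgMarkley is a project helper that is not shown in this listing. For reference, a minimal NumPy sketch (hypothetical name quat_avg_markley) of the Markley et al. quaternion averaging it presumably implements: the average is the eigenvector, with the largest eigenvalue, of the weighted sum of quaternion outer products.

import numpy as np

def quat_avg_markley(Q, weights=None):
    """Sketch of a Markley-style average of (N, 4) unit quaternions;
    not the project's actual helper."""
    Q = np.asarray(Q, dtype=np.float64)
    w = np.ones(Q.shape[0]) if weights is None else np.asarray(weights)
    A = np.einsum("i,ij,ik->jk", w, Q, Q)  # weighted sum of outer products q q^T
    eigvals, eigvecs = np.linalg.eigh(A)   # eigenvalues in ascending order
    return eigvecs[:, -1]                  # principal eigenvector = average quaternion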
Code Example #2
def main(_):
    cfg = app_config

    exp_dir = cfg.checkpoint_dir
    out_dir = os.path.join(exp_dir, 'render')
    mkdir_if_missing(out_dir)
    inp_dir = os.path.join(exp_dir, cfg.save_predictions_dir)

    if cfg.models_list:
        models = parse_lines(cfg.models_list)
    else:
        dataset = Dataset3D(cfg)
        models = [sample.name for sample in dataset.data]

    for model_name in models:
        in_file = "{}/{}_pc.mat".format(inp_dir, model_name)
        if not os.path.isfile(in_file):
            in_file = "{}/{}_pc.npz".format(inp_dir, model_name)
            assert os.path.isfile(in_file), \
                "no input file with saved point cloud"

        out_file = "{}/{}.png".format(out_dir, model_name)

        if os.path.isfile(out_file):
            print("{} already rendered".format(model_name))
            continue

        args = build_command_line_args(
            [["in_file", in_file], ["out_file", out_file],
             ["vis_azimuth", cfg.vis_azimuth],
             ["vis_elevation", cfg.vis_elevation], ["vis_dist", cfg.vis_dist],
             ["cycles_samples", cfg.render_cycles_samples], ["voxels", False],
             ["colored_subsets", cfg.render_colored_subsets],
             ["image_size", cfg.render_image_size]])
        render_cmd = "{} --background -P {} -- {}".format(
            blender_exec, python_script, args)

        os.system(render_cmd)
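Note: build_command_line_args is a project helper not shown in this listing. Judging from the call above, it serializes [name, value] pairs into CLI flags for the Blender script; a hypothetical sketch under that assumption:

def build_command_line_args(pairs):
    """Hypothetical sketch: turn [["name", value], ...] into '--name value ...'."""
    parts = []
    for name, value in pairs:
        if isinstance(value, bool):
            if value:  # booleans assumed to act as on/off switches
                parts.append("--{}".format(name))
        else:
            parts.append("--{} {}".format(name, value))
    return " ".join(parts)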
Code Example #3
def compute_predictions():
    cfg = app_config

    setup_environment(cfg)

    exp_dir = cfg.checkpoint_dir

    cfg.batch_size = 1
    cfg.step_size = 1

    pc_num_points = cfg.pc_num_points
    vox_size = cfg.vox_size
    save_pred = cfg.save_predictions
    save_voxels = cfg.save_voxels
    fast_conversion = True

    pose_student = cfg.pose_predictor_student and cfg.predict_pose

    g = tf.Graph()
    with g.as_default():
        model = model_pc.ModelPointCloud(cfg)

        out = build_model(model)
        input_image = out["inputs"]
        cam_matrix = out["camera_extr_src"]
        cam_quaternion = out["cam_quaternion"]
        point_cloud = out["points_1"]
        rgb = out["rgb_1"] if cfg.pc_rgb else tf.no_op()
        projs = out["projs"]
        projs_rgb = out["projs_rgb"]
        projs_depth = out["projs_depth"]
        cam_transform = out["cam_transform"]
        z_latent = out["z_latent"]

        if pose_student:
            proj_student, camera_pose_student = model_student(
                input_image, model)

        input_pc = tf.placeholder(tf.float32, [cfg.batch_size, None, 3])
        if save_voxels:
            if fast_conversion:
                voxels, _ = pointcloud2voxels3d_fast(cfg, input_pc, None)
                voxels = tf.expand_dims(voxels, axis=-1)
                voxels = smoothen_voxels3d(cfg, voxels, model.gauss_kernel())
            else:
                voxels = pointcloud2voxels(cfg, input_pc, model.gauss_sigma())

        q_inp = tf.placeholder(tf.float32, [1, 4])
        q_matrix = as_rotation_matrix(q_inp)

        # Bind to a new name: re-using input_pc here would shadow the
        # placeholder that feeds the voxelisation ops above.
        input_pc_unrot, pred_quat, gt_quat, pc_unrot = model_unrotate_points(cfg)
        pc_rot = quaternion_rotate(input_pc_unrot, pred_quat)

        config = tf.ConfigProto(device_count={'GPU': 1})
        config.gpu_options.per_process_gpu_memory_fraction = cfg.per_process_gpu_memory_fraction

        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        variables_to_restore = slim.get_variables_to_restore(exclude=["meta"])

        # Build the Saver inside the graph context so its restore ops are
        # added to g rather than to the default graph.
        restorer = tf.train.Saver(variables_to_restore)
        checkpoint_file = tf.train.latest_checkpoint(exp_dir)
        print("restoring checkpoint", checkpoint_file)
        restorer.restore(sess, checkpoint_file)

    save_dir = os.path.join(exp_dir,
                            '{}_vis_proj'.format(cfg.save_predictions_dir))
    mkdir_if_missing(save_dir)
    save_pred_dir = os.path.join(exp_dir, cfg.save_predictions_dir)
    mkdir_if_missing(save_pred_dir)

    vis_size = cfg.vis_size

    dataset = Dataset3D(cfg)

    pose_num_candidates = cfg.pose_predict_num_candidates
    num_views = cfg.num_views
    plot_h = 4
    plot_w = 6
    num_views = int(min(num_views, plot_h * plot_w / 2))

    if cfg.models_list:
        model_names = parse_lines(cfg.models_list)
    else:
        model_names = [sample.name for sample in dataset.data]

    num_models = len(model_names)
    for k in range(num_models):
        model_name = model_names[k]
        sample = dataset.sample_by_name(model_name)

        images = sample.image
        masks = sample.mask
        if cfg.saved_camera:
            cameras = sample.camera
            cam_pos = sample.cam_pos
        if cfg.vis_depth_projs:
            depths = sample.depth
        if cfg.variable_num_views:
            num_views = sample.num_views

        print("{}/{} {}".format(k, num_models, model_name))

        if pose_num_candidates == 1:
            grid = np.empty((plot_h, plot_w), dtype=object)
        else:
            plot_w = pose_num_candidates + 1
            if pose_student:
                plot_w += 1
            grid = np.empty((num_views, plot_w), dtype=object)

        if save_pred:
            all_pcs = np.zeros((num_views, pc_num_points, 3))
            all_cameras = np.zeros((num_views, 4))
            all_voxels = np.zeros((num_views, vox_size, vox_size, vox_size))
            all_z_latent = np.zeros((num_views, cfg.fc_dim))

        for view_idx in range(num_views):
            input_image_np = images[[view_idx], :, :, :]
            gt_mask_np = masks[[view_idx], :, :, :]
            if cfg.saved_camera:
                extr_mtr = cameras[view_idx, :, :]
                cam_quaternion_np = quaternion_from_campos(
                    cam_pos[view_idx, :])
                cam_quaternion_np = np.expand_dims(cam_quaternion_np, axis=0)
            else:
                extr_mtr = np.zeros((4, 4))
                # dummy identity quaternion so the feed below stays defined
                # when no saved camera is available
                cam_quaternion_np = np.array([[1.0, 0.0, 0.0, 0.0]])

            if cfg.pc_rgb:
                proj_tensor = projs_rgb
            elif cfg.vis_depth_projs:
                proj_tensor = projs_depth
            else:
                proj_tensor = projs
            (pc_np, rgb_np, proj_np, cam_transf_np, z_latent_np) = sess.run(
                [point_cloud, rgb, proj_tensor, cam_transform, z_latent],
                feed_dict={
                    input_image: input_image_np,
                    cam_matrix: extr_mtr,
                    cam_quaternion: cam_quaternion_np
                })

            if pose_student:
                (proj_student_np, camera_student_np) = sess.run(
                    [proj_student, camera_pose_student],
                    feed_dict={input_image: input_image_np})
                predicted_camera = camera_student_np
            else:
                predicted_camera = cam_transf_np

            if cfg.vis_depth_projs:
                proj_np = normalise_depthmap(proj_np)
                if depths is not None:
                    depth_np = depths[view_idx, :, :, :]
                    depth_np = normalise_depthmap(depth_np)
                else:
                    depth_np = 1.0 - np.squeeze(gt_mask_np)
                if pose_student:
                    proj_student_np = normalise_depthmap(proj_student_np)

            if cfg.predict_pose:
                if cfg.save_rotated_points:
                    ref_rot = scipy.io.loadmat(
                        "{}/final_reference_rotation.mat".format(exp_dir))
                    ref_rot = ref_rot["rotation"]
                    pc_np_unrot = sess.run(pc_rot,
                                           feed_dict={
                                               input_pc_unrot: pc_np,
                                               pred_quat: ref_rot
                                           })
                    pc_np = pc_np_unrot

            if cfg.pc_rgb:
                gt_image = input_image_np
            elif cfg.vis_depth_projs:
                gt_image = depth_np
            else:
                gt_image = gt_mask_np

            if pose_num_candidates == 1:
                view_j = view_idx * 2 // plot_w
                view_i = view_idx * 2 % plot_w

                gt_image = np.squeeze(gt_image)
                grid[view_j, view_i] = mask4vis(cfg, gt_image, vis_size)

                curr_img = np.squeeze(proj_np)
                grid[view_j, view_i + 1] = mask4vis(cfg, curr_img, vis_size)

                if cfg.save_individual_images:
                    curr_dir = os.path.join(save_dir, sample.name)
                    if not os.path.exists(curr_dir):
                        os.makedirs(curr_dir)
                    imageio.imwrite(
                        os.path.join(curr_dir,
                                     '{}_{}.png'.format(view_idx, 'rgb_gt')),
                        mask4vis(cfg, np.squeeze(input_image_np), vis_size))
                    imageio.imwrite(
                        os.path.join(curr_dir,
                                     '{}_{}.png'.format(view_idx,
                                                        'mask_pred')),
                        mask4vis(cfg, np.squeeze(proj_np), vis_size))
            else:
                view_j = view_idx

                gt_image = np.squeeze(gt_image)
                grid[view_j, 0] = mask4vis(cfg, gt_image, vis_size)

                for kk in range(pose_num_candidates):
                    curr_img = np.squeeze(proj_np[kk, :, :, :])
                    grid[view_j, kk + 1] = mask4vis(cfg, curr_img, vis_size)

                    if cfg.save_individual_images:
                        curr_dir = os.path.join(save_dir, sample.name)
                        if not os.path.exists(curr_dir):
                            os.makedirs(curr_dir)
                        imageio.imwrite(
                            os.path.join(
                                curr_dir,
                                '{}_{}_{}.png'.format(view_idx, kk,
                                                      'mask_pred')),
                            mask4vis(cfg, np.squeeze(curr_img), vis_size))

                if cfg.save_individual_images:
                    imageio.imwrite(
                        os.path.join(curr_dir,
                                     '{}_{}.png'.format(view_idx, 'mask_gt')),
                        mask4vis(cfg, np.squeeze(gt_mask_np), vis_size))

                if pose_student:
                    grid[view_j,
                         -1] = mask4vis(cfg, np.squeeze(proj_student_np),
                                        vis_size)

            if save_pred:
                all_pcs[view_idx, :, :] = np.squeeze(pc_np)
                all_z_latent[view_idx] = z_latent_np
                if cfg.predict_pose:
                    all_cameras[view_idx, :] = predicted_camera
                if save_voxels:
                    pc_np_range = pc_np
                    if not fast_conversion:
                        # the slow pc->voxel conversion expects points in the
                        # [-1, 1] range, hence the factor of two; use a fresh
                        # array instead of *= so pc_np is not mutated in place
                        pc_np_range = pc_np * 2.0
                    voxels_np = sess.run(voxels,
                                         feed_dict={input_pc: pc_np_range})
                    all_voxels[view_idx, :, :, :] = np.squeeze(voxels_np)

            vis_view = view_idx == 0 or cfg.vis_all_views
            if cfg.vis_voxels and vis_view:
                rgb_np = np.squeeze(rgb_np) if cfg.pc_rgb else None
                vis_pc(np.squeeze(pc_np), rgb=rgb_np)

        grid_merged = merge_grid(cfg, grid)
        imageio.imwrite("{}/{}_proj.png".format(save_dir, sample.name),
                        grid_merged)

        if save_pred:
            if cfg.save_as_mat:
                save_dict = {"points": all_pcs, "z_latent": all_z_latent}
                if cfg.predict_pose:
                    save_dict["camera_pose"] = all_cameras
                scipy.io.savemat("{}/{}_pc".format(save_pred_dir, sample.name),
                                 mdict=save_dict)
            else:
                np.savez("{}/{}_pc".format(save_pred_dir, sample.name),
                         all_pcs)

            if save_voxels:
                np.savez("{}/{}_vox".format(save_pred_dir, sample.name),
                         all_voxels)

    sess.close()
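Note: quaternion_rotate and as_rotation_matrix come from the project's TensorFlow utilities. For reference, a plain-NumPy sketch of what rotating a point cloud by a unit quaternion q = (w, x, y, z) involves (the project may use a different component ordering or convention):

import numpy as np

def quat_to_matrix(q):
    """Rotation matrix of a unit quaternion q = (w, x, y, z); reference
    sketch, not the project's TF implementation."""
    w, x, y, z = q
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
    ])

def rotate_points(points, q):
    """Rotate an (N, 3) point cloud by quaternion q."""
    return points @ quat_to_matrix(q).T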
Code Example #4
def run_eval():
    config = tf.ConfigProto(
        device_count={'GPU': 1}
    )

    cfg = app_config
    exp_dir = cfg.checkpoint_dir
    num_views = cfg.num_views

    g = tf.Graph()
    with g.as_default():
        quat_inp = tf.placeholder(dtype=tf.float64, shape=[1, 4])
        quat_inp_2 = tf.placeholder(dtype=tf.float64, shape=[1, 4])

        quat_conj = quaternion_conjugate(quat_inp)
        quat_mul = quaternion_multiply(quat_inp, quat_inp_2)

        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

    save_pred_name = "{}_{}".format(cfg.save_predictions_dir, cfg.eval_split)
    save_dir = os.path.join(exp_dir, cfg.save_predictions_dir)

    reference_rotation = scipy.io.loadmat("{}/final_reference_rotation.mat".format(exp_dir))["rotation"]
    ref_conj_np = sess.run(quat_conj, feed_dict={quat_inp: reference_rotation})

    dataset = Dataset3D(cfg)

    num_models = dataset.num_samples()

    model_names = []

    angle_error = np.zeros((num_models, num_views), dtype=np.float64)

    for model_idx in range(num_models):
        sample = dataset.data[model_idx]

        print("{}/{}".format(model_idx, num_models))
        print(sample.name)
        model_names.append(sample.name)

        # scipy.io.loadmat appends the .mat extension automatically
        mat_filename = "{}/{}_pc".format(save_dir, sample.name)
        data = scipy.io.loadmat(mat_filename)
        all_cameras = data["camera_pose"]

        for view_idx in range(num_views):
            cam_pos = sample.cam_pos[view_idx, :]
            gt_quat_np = quaternion_from_campos(cam_pos)
            gt_quat_np = np.expand_dims(gt_quat_np, 0)
            pred_quat_np = all_cameras[view_idx, :]
            pred_quat_np /= np.linalg.norm(pred_quat_np)
            pred_quat_np = np.expand_dims(pred_quat_np, 0)

            pred_quat_aligned_np = sess.run(quat_mul, feed_dict={
                quat_inp: pred_quat_np,
                quat_inp_2: ref_conj_np
            })

            q1 = gt_quat_np
            q2 = pred_quat_aligned_np

            q1_conj = sess.run(quat_conj, feed_dict={quat_inp: q1})
            q_diff = sess.run(quat_mul, feed_dict={quat_inp: q1_conj, quat_inp_2: q2})

            # clamp to [-1, 1] to guard against floating-point overshoot
            ang_diff = 2 * np.arccos(np.clip(q_diff[0, 0], -1.0, 1.0))
            if ang_diff > np.pi:
                ang_diff -= 2 * np.pi

            angle_error[model_idx, view_idx] = np.fabs(ang_diff)

    all_errors = np.reshape(angle_error, (-1))
    angle_thresh_rad = cfg.pose_accuracy_threshold / 180.0 * np.pi
    correct = all_errors < angle_thresh_rad
    num_predictions = correct.shape[0]
    accuracy = np.count_nonzero(correct) / num_predictions
    median_error = np.sort(all_errors)[num_predictions // 2]
    median_error = median_error / np.pi * 180
    print("accuracy:", accuracy, "median angular error:", median_error)

    scipy.io.savemat(os.path.join(exp_dir, "pose_error_{}.mat".format(save_pred_name)),
                     {"angle_error": angle_error,
                      "accuracy": accuracy,
                      "median_error": median_error})

    with open(os.path.join(exp_dir, "pose_error_{}.txt".format(save_pred_name)),
              "w") as f:
        f.write("{} {}\n".format(accuracy, median_error))
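Note: the angular error computed above is the geodesic distance between unit quaternions: for q_diff = conj(q1) * q2 with scalar part w, the rotation angle is 2 * arccos(|w|). An equivalent self-contained NumPy check (hypothetical helper; the absolute dot product folds in the q/-q double cover that the wrap-and-abs logic above handles):

import numpy as np

def quat_angle_error(q_gt, q_pred):
    """Geodesic angle in radians between two unit quaternions (1-D, length 4);
    hypothetical helper, not from the project."""
    d = abs(np.dot(q_gt, q_pred))  # |cos(theta / 2)|
    return 2.0 * np.arccos(np.clip(d, 0.0, 1.0))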
Code Example #5
def run_eval():
    config = tf.ConfigProto(device_count={'GPU': 1})

    cfg = app_config
    exp_dir = cfg.checkpoint_dir
    num_views = cfg.num_views
    eval_unsup = cfg.eval_unsupervised_shape

    gt_dir = os.path.join(cfg.gt_pc_dir, cfg.synth_set)

    g = tf.Graph()
    with g.as_default():
        source_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3])
        target_pc = tf.placeholder(dtype=tf.float64, shape=[None, 3])
        quat_tf = tf.placeholder(dtype=tf.float64, shape=[1, 4])

        _, min_dist, min_idx = point_cloud_distance(source_pc, target_pc)

        source_pc_2 = tf.placeholder(dtype=tf.float64, shape=[1, None, 3])
        rotated_pc = quaternion_rotate(source_pc_2, quat_tf)

        sess = tf.Session(config=config)
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

    save_pred_name = "{}_{}".format(cfg.save_predictions_dir, cfg.eval_split)
    save_dir = os.path.join(exp_dir, cfg.save_predictions_dir)

    if eval_unsup:
        reference_rotation = scipy.io.loadmat(
            "{}/final_reference_rotation.mat".format(exp_dir))["rotation"]

    dataset = Dataset3D(cfg)

    num_models = dataset.num_samples()

    model_names = []
    chamfer_dists = np.zeros((0, num_views, 2), dtype=np.float64)
    for k in range(num_models):
        sample = dataset.data[k]

        print("{}/{}".format(k, num_models))
        print(sample.name)

        gt_filename = "{}/{}.mat".format(gt_dir, sample.name)
        if not os.path.isfile(gt_filename):
            continue

        model_names.append(sample.name)
        mat_filename = "{}/{}_pc.mat".format(save_dir, sample.name)
        if os.path.isfile(mat_filename):
            data = scipy.io.loadmat(mat_filename)
            all_pcs = np.squeeze(data["points"])
            if "num_points" in data:
                all_pcs_nums = np.squeeze(data["num_points"])
                has_number = True
            else:
                has_number = False
        else:
            data = np.load("{}/{}_pc.npz".format(save_dir, sample.name))
            all_pcs = np.squeeze(data["arr_0"])
            if "arr_1" in data.files:
                all_pcs_nums = np.squeeze(data["arr_1"])
                has_number = True
            else:
                has_number = False

        obj = scipy.io.loadmat(gt_filename)
        Vgt = obj["points"]

        chamfer_dists_current = np.zeros((num_views, 2), dtype=np.float64)
        for i in range(num_views):
            pred = all_pcs[i, :, :]
            if has_number:
                pred = pred[0:all_pcs_nums[i], :]

            if eval_unsup:
                pred = np.expand_dims(pred, 0)
                pred = sess.run(rotated_pc,
                                feed_dict={
                                    source_pc_2: pred,
                                    quat_tf: reference_rotation
                                })
                pred = np.squeeze(pred)

            pred_to_gt, idx_np = compute_distance(cfg, sess, min_dist, min_idx,
                                                  source_pc, target_pc, pred,
                                                  Vgt)
            gt_to_pred, _ = compute_distance(cfg, sess, min_dist, min_idx,
                                             source_pc, target_pc, Vgt, pred)
            chamfer_dists_current[i, 0] = np.mean(pred_to_gt)
            chamfer_dists_current[i, 1] = np.mean(gt_to_pred)

            is_nan = np.isnan(pred_to_gt)
            assert not np.any(is_nan)

        current_mean = np.mean(chamfer_dists_current, 0)
        print("total:", current_mean)
        chamfer_dists = np.concatenate(
            (chamfer_dists, np.expand_dims(chamfer_dists_current, 0)))

    final = np.mean(chamfer_dists, axis=(0, 1)) * 100
    print(final)

    scipy.io.savemat(
        os.path.join(exp_dir, "chamfer_{}.mat".format(save_pred_name)), {
            "chamfer": chamfer_dists,
            "model_names": to_np_object(model_names)
        })

    with open(os.path.join(exp_dir, "chamfer_{}.txt".format(save_pred_name)),
              "w") as f:
        f.write("{} {}\n".format(final[0], final[1]))
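Note: point_cloud_distance and compute_distance are project helpers. Assuming they return nearest-neighbour distances in one direction, the two Chamfer components averaged above can be sketched with a KD-tree (the project may use squared distances):

import numpy as np
from scipy.spatial import cKDTree

def chamfer_components(pred, gt):
    """Mean nearest-neighbour distance in each direction for (N, 3) clouds;
    sketch, not the project's helper."""
    pred_to_gt = cKDTree(gt).query(pred)[0]  # distance from each predicted point to closest GT point
    gt_to_pred = cKDTree(pred).query(gt)[0]
    return np.mean(pred_to_gt), np.mean(gt_to_pred)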