        # Depth image is stored in millimeters; convert to meters.
        depth_image = np.array(
            imageio.imread(os.path.join(args.dir, depth_image_name))) / 1000.
        depth_image_drake = Image[PixelType.kDepth32F](depth_image.shape[1],
                                                       depth_image.shape[0])
        depth_image_drake.mutable_data[:, :, 0] = depth_image[:, :]
        points = camera.ConvertDepthImageToPointCloud(
            depth_image_drake, camera.depth_camera_info())
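        # Keep only points with finite coordinates (valid depth returns).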
        good_points_mask = np.all(np.isfinite(points), axis=0)
        points = points[:, good_points_mask]
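        # Homogenize to 4xN so the points can be multiplied by 4x4 transforms.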
        points = np.vstack([points, np.ones([1, points.shape[1]])])
        # Transform them to camera base frame
        points = np.dot(depth_camera_pose, points)
        # and then to world frame
        # Last body = camera floating base
        kinsol = rbt.doKinematics(q0)
        tf = rbt.relativeTransform(kinsol, 0, rbt.get_num_bodies() - 1)
        points = tf.dot(points)[:3, :]

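        # Load the per-pixel label mask and keep only labels for points that
        # passed the finite-depth filter.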
        label_image_name = "%09d_mask.png" % (i)
        labels = np.array(
            imageio.imread(os.path.join(args.dir, label_image_name))).ravel()
        labels = labels[good_points_mask]

        normal_image_name = "%09d_normal.png" % (i)
        # Empirically-derived transposing and memory reordering to get into
        # 3xN format...
        normals = np.array(
            imageio.imread(os.path.join(args.dir, normal_image_name)))
        normals = normals.T.reshape(3, width * height,
                                    order='F')[:, good_points_mask]
        # Rescale from int8 to float values
Example #2
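        # Stop once the simulated state is finite; otherwise halve the
        # timestep and retry.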
        if np.all(np.isfinite(state.CopyToVector())):
            break
        timestep /= 2.
        print("Halving timestep and trying again...")

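    # Optionally export the recovered object poses and camera configuration
    # for this scene.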
    if args.save:
        export_dir = "data/scene_%09d" % (args.seed)

        # Recover the object poses after the fact, since we had to spawn
        # objects at random frames to keep the simulation from crashing.
        kinsol = rbt.doKinematics(
            state.CopyToVector()[0:rbt.get_num_positions()])
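        # Read each object's world pose back from the kinematics and store it
        # as translation + Euler angles.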
        for i, instance in enumerate(instances):
            tf = rbt.relativeTransform(kinsol, 0, i + 1)
            instance["pose"][0:3] = tf[:3, 3]
            instance["pose"][3:6] = transformations.euler_from_matrix(tf)

        config = {
            "objects": objects,
            "with_ground": True,
            "camera_config": {
                "z_near": 0.2,
                "z_far": 3.5,
                "fov_y": .7853,
                "width":