def render_colored_batch(m_v, m_f, m_vc, width, height, camera_f, camera_c, bgcolor=np.zeros(3, dtype=np.float32),
                         num_channels=3, camera_t=np.zeros(3, dtype=np.float32),
                         camera_rt=np.zeros(3, dtype=np.float32), name=None):
    with ops.name_scope(name, "render_batch", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])

        # Tile the background color into a [batch, height, width, channels] image
        bg = tf.tile(bgcolor.astype(np.float32)[np.newaxis, np.newaxis, np.newaxis, :],
                     (tf.shape(m_v)[0], height, width, 1))

        # Broadcast shared per-vertex colors across the batch if needed
        if m_vc.ndim < m_v.ndim:
            m_vc = tf.tile(tf.cast(m_vc, tf.float32)[np.newaxis, ...], (tf.shape(m_v)[0], 1, 1))

        # Project vertices to clip space
        m_v = project_points_perspective(m_v, camera_f, camera_c, camera_t, camera_rt, width, height, near=0.1, far=10)

        # Broadcast the shared face list across the batch
        m_f = tf.tile(tf.cast(m_f, tf.int32)[np.newaxis, ...], (tf.shape(m_v)[0], 1, 1))

        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
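
A minimal usage sketch for the function above (all values are illustrative, and the [fx, fy] / [cx, cy] conventions for camera_f and camera_c are assumptions, since project_points_perspective is defined elsewhere):

import numpy as np
import tensorflow as tf

# One hypothetical triangle, batch of two meshes
m_v = tf.random.uniform([2, 3, 3])            # [batch, n_verts, 3]
m_f = np.array([[0, 1, 2]], dtype=np.int32)   # [n_faces, 3]
m_vc = np.ones([3, 3], dtype=np.float32)      # per-vertex RGB

images = render_colored_batch(
    m_v, m_f, m_vc, width=128, height=128,
    camera_f=np.array([500., 500.], np.float32),   # assumed [fx, fy]
    camera_c=np.array([64., 64.], np.float32))     # assumed [cx, cy]
# images: [2, 128, 128, 3] float32, rendered over the black default background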
Example #2
def render_colored_batch(m_v,
                         m_f,
                         m_vc,
                         width,
                         height,
                         camera_f,
                         camera_c,
                         bgcolor=np.zeros(3, dtype=np.float32),
                         num_channels=3,
                         camera_t=np.zeros(3, dtype=np.float32),
                         camera_rt=np.zeros(3, dtype=np.float32),
                         name=None,
                         batch_size=None,
                         cam_pred=None):
    """ Render a batch of meshes with fixed BG. Supported projection types 1) Perspective, 2) Orthographic. """

    with ops.name_scope(name, "render_batch", [m_v]) as name:
        assert (num_channels == m_vc.shape[-1] == bgcolor.shape[0])

        # Orthographic projection over an im_w x im_h x im_w cube; a perspective
        # matrix (perspective_projection(camera_f, camera_c, width, height, .1, 10))
        # can be substituted here instead
        projection_matrix = orthgraphic_projection(width, height, -(width / 2),
                                                   (width / 2))

        # Camera extrinsics: rotation followed by translation
        view_matrix = matrices.compose(
            matrices.rodrigues(camera_rt.astype(np.float32)),
            matrices.translation(camera_t.astype(np.float32)),
        )
        # Fixed-color background, [batch, height, width, channels]
        bg = tf.tile(bgcolor.astype(np.float32)[tf.newaxis, tf.newaxis, tf.newaxis, ...],
                     [tf.shape(m_v)[0], height, width, 1])

        # Convert vertices to homogeneous coordinates
        m_v = tf.cast(m_v, tf.float32)
        m_v = tf.concat([m_v, tf.ones_like(m_v[:, :, -1:])], axis=2)

        # Apply the extrinsics (world -> camera)
        m_v = tf.matmul(m_v, tf.tile(view_matrix[np.newaxis, ...],
                                     (tf.shape(m_v)[0], 1, 1)))

        # Apply the projection (camera -> clip space)
        m_v = tf.matmul(m_v, tf.tile(projection_matrix[np.newaxis, ...],
                                     (tf.shape(m_v)[0], 1, 1)))

        m_f = tf.tile(
            tf.cast(m_f, tf.int32)[tf.newaxis, ...], (tf.shape(m_v)[0], 1, 1))

        # Rasterize
        return dirt.rasterise_batch(bg, m_v, m_vc, m_f, name=name)
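
For reference, a sketch of the kind of matrix orthgraphic_projection plausibly returns, inferred only from the call site above (not from the helper's source): it maps a cube centered on the origin, width wide, height tall, spanning [near, far] in depth, onto the clip-space cube [-1, 1]^3, in the row-vector convention used by the tf.matmul calls above.

import numpy as np

def orthographic_projection_sketch(w, h, near, far):
    # Row-vector convention: v_clip = v_homogeneous @ P
    # x in [-w/2, w/2] -> [-1, 1]; y in [-h/2, h/2] -> [-1, 1]; z in [near, far] -> [-1, 1]
    return np.array([
        [2.0 / w, 0.0,     0.0,                          0.0],
        [0.0,     2.0 / h, 0.0,                          0.0],
        [0.0,     0.0,     2.0 / (far - near),           0.0],
        [0.0,     0.0,     -(far + near) / (far - near), 1.0],
    ], dtype=np.float32)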
Example #3
    def call(self, x):
        # N: number of vertices, F: number of faces
        vertices = x[0]   # (batch, N, 3), same for each batch element
        uv_map = x[1]     # (batch, N, 2), different for each batch element
        faces = tf.cast(x[2], tf.int32)  # (batch, F, 3), same for each batch element
        texture = x[3]    # (batch, W, H, CH), different for each batch element
        poses = x[4]      # (batch, n_target_poses, 4, 4), same for each batch element
        bboxes = x[5]     # (batch, n_target_poses, 4), normalized by the full resolution

        # Replicate the mesh once per target pose (the batch dimension of poses is shared)
        vertices_mult = tf.tile(vertices, [tf.shape(poses)[1], 1, 1])
        vert_uvs_mult = tf.tile(uv_map, [tf.shape(poses)[1], 1, 1])
        faces_multi = tf.tile(faces, [tf.shape(poses)[1], 1, 1])
        texture_multi = tf.tile(texture, [tf.shape(poses)[1], 1, 1, 1])
        poses_t = tf.transpose(poses, [1, 0, 2, 3])  # (pose, batch, 4, 4)
        poses_t = tf.reshape(poses_t, [-1, 4, 4])
        bboxes_t = tf.transpose(bboxes, [1, 0, 2])
        bboxes_t = tf.reshape(bboxes_t, [-1, 4])     # (pose*batch, 4)

        # Transform vertices from object space through camera to clip space, per pose
        vertices_objects, vertices_cameras, vertices_clips, vertices_normals, view_matrices = tf.map_fn(
            self.transform_vertices, (vertices_mult, poses_t, faces_multi),
            dtype=(tf.float32, tf.float32, tf.float32, tf.float32, tf.float32))

        gbuffer_temp = dirt.rasterise_batch(
            background=tf.zeros(
                [tf.shape(vertices_mult)[0], self.img_h, self.img_w, 3]),
            vertices=vertices_clips,
            vertex_colors=tf.concat(
                [
                    tf.ones_like(vertices_objects[:, :, :1]),  #1 mask
                    vert_uvs_mult
                ],
                axis=2),
            faces=faces_multi,
            height=self.img_h,
            width=self.img_w,
            channels=3)
        rendered_feature_raw = tf.map_fn(
            self.sample_texture, (texture_multi, gbuffer_temp[:, :, :, 1:3]),
            dtype=tf.float32)
        # A pixel is treated as background when both of its UV values are zero
        uv_projection = gbuffer_temp[:, :, :, 1:3]
        # Rasterized mask, regardless of whether each pixel was seen in the input images
        mask_old = gbuffer_temp[:, :, :, :1]

        if not (self.target_h == self.img_h and self.target_w == self.img_w):
            # For the same pose, the same crop-and-resize area is used
            mask_old = tf.image.crop_and_resize(
                mask_old,
                bboxes_t,
                crop_size=(self.target_h, self.target_w),
                box_indices=tf.range(0,
                                     tf.shape(rendered_feature_raw)[0]))
            mask_old = tf.cast(tf.greater(mask_old, 0.5), tf.float32)
            mask_rend = tf.cast(
                tf.greater(
                    tf.reduce_sum(gbuffer_temp[:, :, :, 1:3],
                                  axis=3,
                                  keepdims=True), 0), tf.float32)
            mask_crop = tf.image.crop_and_resize(
                mask_rend,
                bboxes_t,
                crop_size=(self.target_h, self.target_w),
                box_indices=tf.range(0,
                                     tf.shape(rendered_feature_raw)[0]))
            mask_new = tf.cast(tf.greater(mask_crop, 0.5), tf.float32)
            rendered_feature = tf.image.crop_and_resize(
                rendered_feature_raw,
                bboxes_t,
                crop_size=(self.target_h, self.target_w),
                box_indices=tf.range(0,
                                     tf.shape(rendered_feature_raw)[0]))

            uv_projection = tf.image.crop_and_resize(
                uv_projection,
                bboxes_t,
                crop_size=(self.target_h, self.target_w),
                box_indices=tf.range(0,
                                     tf.shape(rendered_feature_raw)[0]))
        else:
            mask_new = tf.cast(
                tf.greater(
                    tf.reduce_sum(gbuffer_temp[:, :, :, 1:3],
                                  axis=3,
                                  keepdims=True), 0), tf.float32)
            rendered_feature = mask_new * rendered_feature_raw  #remove backgrounds

        concated_out = tf.concat(
            [mask_new, rendered_feature, mask_old, uv_projection],
            axis=3)
        # (pose*batch) x H x W x (ch+4)  ->  n_poses x batch x H x W x (ch+4):
        # pack the images belonging to each pose together
        final_out = tf.reshape(concated_out, [
            tf.shape(poses)[1], -1, self.target_h, self.target_w,
            self.ch_dim + 4
        ])
        return final_out
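
A hedged usage sketch for the method above; the enclosing class is not shown, so `layer` stands for an instance whose constructor is assumed to set img_h, img_w, target_h, target_w and ch_dim:

import tensorflow as tf

batch, n_verts, n_faces, n_poses = 2, 100, 196, 4
inputs = [
    tf.zeros([batch, n_verts, 3]),                  # vertices
    tf.zeros([batch, n_verts, 2]),                  # per-vertex UVs
    tf.zeros([batch, n_faces, 3], dtype=tf.int32),  # faces
    tf.zeros([batch, 128, 128, 3]),                 # texture
    tf.eye(4, batch_shape=[batch, n_poses]),        # identity poses
    tf.tile(tf.constant([[[0., 0., 1., 1.]]]), [batch, n_poses, 1]),  # (y1, x1, y2, x2) boxes
]
out = layer(inputs)
# out: [n_poses, batch, target_h, target_w, ch_dim + 4]
# channels: [mask_new, rendered feature, mask_old, uv_projection]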
Example #4
def render_batch(pose_param, shape_param, exp_param, tex_param, color_param,
                 illum_param, frame_width: int, frame_height: int,
                 tf_bfm: TfMorphableModel, batch_size: int):
    """
    render faces in batch
    :param: pose_param: [batch, n_pose_para] or (batch, 1, n_pose_param)
    :param: shape_param: [batch, n_shape_para, 1] or [batch, n_shape_para]
    :param: exp_param:   [batch, n_exp_para, 1] or [batch, n_exp_para]
    :param: tex_param: [batch, n_tex_para, 1] or [batch, n_tex_para]
    :param: color_param: [batch, 1, n_color_para] or [batch, n_color_para]
    :param: illum_param: [batch, 1, n_illum_para] or [batch, n_illum_para]
    :param: frame_width: rendered image width
    :param: frame_height: rendered image height
    :param: tf_bfm: basel face model
    :param: batch_size: batch size
    :return: images, [batch, frame_height, frame_width, 3]
    """
    assert is_tf_expression(pose_param)

    pose_shape = tf.shape(pose_param)
    if pose_shape.shape[0] == 2:
        tf.debugging.assert_shapes(
            [(pose_param, (batch_size, tf_bfm.get_num_pose_param()))],
            message='pose_param shape wrong, dim != ({batch}, {dim})'.format(
                batch=batch_size, dim=tf_bfm.get_num_pose_param()))
        pose_param = tf.expand_dims(pose_param, 1)
    elif pose_shape.shape[0] == 3:
        tf.debugging.assert_shapes(
            [(pose_param, (batch_size, 1, tf_bfm.get_num_pose_param()))],
            message='pose_param shape wrong, dim != ({batch}, 1, {dim})'.
            format(batch=batch_size, dim=tf_bfm.get_num_pose_param()))
    else:
        raise ValueError(
            'pose_param shape wrong, dim != ({batch}, 1, {dim}) or ({batch}, {dim})'
            .format(batch=batch_size, dim=tf_bfm.get_num_pose_param()))

    vertices = tf_bfm.get_vertices(shape_param=shape_param,
                                   exp_param=exp_param,
                                   batch_size=batch_size)
    # Per-vertex normals via tensorflow-graphics (an alternative to dirt's
    # lighting.vertex_normals)
    vertex_norm = tfg.geometry.representation.mesh.normals.vertex_normals(
        vertices=vertices,
        indices=tf.repeat(tf.expand_dims(tf_bfm.triangles, 0),
                          batch_size,
                          axis=0),
        clockwise=True)

    colors = tf_bfm.get_vertex_colors(tex_param=tex_param,
                                      color_param=color_param,
                                      illum_param=illum_param,
                                      vertex_norm=-vertex_norm,
                                      batch_size=batch_size)

    colors = tf.clip_by_value(colors / 255., 0., 1.)

    transformed_vertices = affine_transform(vertices=vertices,
                                            scaling=pose_param[:, 0, 6:],
                                            angles_rad=pose_param[:, 0, 0:3],
                                            t3d=pose_param[:, 0:, 3:6])
    # Map x and y from pixel coordinates to NDC in [-1, 1]; negate z and
    # normalize it by the maximum absolute depth so it lies in [-1, 1]
    transformed_vertices_x = transformed_vertices[:, :, 0] * 2 / frame_width - 1
    transformed_vertices_y = transformed_vertices[:, :, 1] * 2 / frame_height - 1
    transformed_vertices_z = -transformed_vertices[:, :, 2] / tf.reduce_max(
        tf.abs(transformed_vertices[:, :, 2]))

    # Convert vertices to homogeneous coordinates
    transformed_vertices = tf.concat([
        tf.expand_dims(transformed_vertices_x, axis=2),
        tf.expand_dims(transformed_vertices_y, axis=2),
        tf.expand_dims(transformed_vertices_z, axis=2),
        tf.ones_like(transformed_vertices[:, :, -1:])
    ],
                                     axis=2)

    # Render the G-buffer
    image = dirt.rasterise_batch(
        vertices=transformed_vertices,
        faces=tf.tile(tf.expand_dims(tf_bfm.triangles, axis=0),
                      (batch_size, 1, 1)),
        vertex_colors=colors,
        background=tf.zeros([batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    return image * 255
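
A hedged usage sketch; the TfMorphableModel constructor argument and the per-parameter sizes below are illustrative assumptions, not values taken from an actual model:

import tensorflow as tf

tf_bfm = TfMorphableModel('BFM.mat')   # hypothetical model path
batch = 4
images = render_batch(
    pose_param=tf.zeros([batch, tf_bfm.get_num_pose_param()]),
    shape_param=tf.zeros([batch, 199, 1]),   # illustrative n_shape_para
    exp_param=tf.zeros([batch, 29, 1]),      # illustrative n_exp_para
    tex_param=tf.zeros([batch, 199, 1]),     # illustrative n_tex_para
    color_param=tf.zeros([batch, 1, 7]),     # illustrative n_color_para
    illum_param=tf.zeros([batch, 1, 10]),    # illustrative n_illum_para
    frame_width=256, frame_height=256,
    tf_bfm=tf_bfm, batch_size=batch)
# images: [4, 256, 256, 3], values in [0, 255]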
Example #5
def dirt_rendering_orthographic(vertices, faces, reflectances,
                                frame_width, frame_height, cx, cy,
                                vertices_to_keep_track=None):

    # Pad 2-D vertices with z = 0
    if vertices.shape[-1] == 2:
        vertices = tf.concat(
            [vertices, tf.zeros_like(vertices[..., -1:])], axis=-1)

    # A negative frame height requests a vertically flipped rendering
    vertical_flip = False
    if frame_height < 0:
        frame_height = -frame_height
        vertical_flip = True

    if reflectances is None:
        reflectances = tf.ones_like(vertices)

    rendering_batch_size = tf.shape(vertices)[0]
    ones = tf.ones([rendering_batch_size], dtype=tf.float32)
    normals = lighting.vertex_normals(vertices, faces)

    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    if vertices_to_keep_track is None:
        landmark_vertices = vertices
        landmark_normals = normals
    else:
        landmark_vertices, landmark_normals = get_landmarks(
            vertices, normals, vertices_to_keep_track)

    translation_parameters = np.array([[0, 0, 100]], dtype=np.float32)
    translation_parameters = tf.tile(
        tf.constant(translation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    rotation_parameters = np.array([[1e-10, 0, 0]], dtype=np.float32)
    rotation_parameters = tf.tile(
        tf.constant(rotation_parameters, dtype=tf.float32),
        [rendering_batch_size, 1])
    cx = tf.ones([rendering_batch_size], dtype=tf.float32) * cx
    cy = tf.ones([rendering_batch_size], dtype=tf.float32) * (frame_height - cy)

    # Transform vertices from world to camera space; note that the camera
    # points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation(
            translation_parameters),  # translate it away from the camera
        matrices.rodrigues(rotation_parameters)  # tilt the view downwards
    )

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)

    # Viewing direction: straight down the negative-z axis, one copy per batch element
    viewpoint_direction = np.array([0, 0, -1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Object space coincides with world space here (no extra rotation is applied)
    bz_facemodel_vertices_world = bz_facemodel_vertices_object
    landmark_vertices_world = landmark_vertices

    # Broadcast the pre-split face list across the batch; pre_split implies
    # that no faces share a vertex, so vertex normals double as face normals
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))
    bz_facemodel_normals_world = bz_facemodel_vertex_normals
    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)

    # Transform vertices from camera to clip space
    near = 10.0 * ones
    far = 200.0 * ones

    projection_matrix = orthographic_projection(near=near,
                                                far=far,
                                                w=frame_width,
                                                ones=ones,
                                                h=frame_height,
                                                cx=cx,
                                                cy=cy)
    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_world,
                                                projection_matrix)
    """
    with tf.Session() as sess: 
        
        _v0 = sess.run( bz_facemodel_vertices_world )
        _v1 = sess.run( bz_facemodel_vertices_camera )
        _v2 = sess.run( bz_facemodel_vertices_clip )


        print("vertices\n", (_v0[0]))
        print("vertices\n", (_v1[0]))
        print("vertices\n", (_v2[0]))
    """

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=bz_facemodel_vertex_colors,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    # Landmarks: perspective divide, then map NDC to pixel coordinates
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 + landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)

    output_dictionary = {}
    output_dictionary["rendering_results"] = full_model_pixels
    output_dictionary["vertices"] = landmark_points
    output_dictionary["landmark_normals"] = landmark_normals
    """
    with tf.Session() as sess:
        _vertices = sess.run( bz_facemodel_vertices_clip )
        print( _vertices.shape )
        print( _vertices )
    """

    if vertical_flip:
        output_dictionary["rendering_results"] = output_dictionary[
            "rendering_results"][:, ::-1, :, :]

    return output_dictionary
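
A hedged usage sketch (the mesh, faces and principal point are illustrative; random faces are degenerate but enough to trace the shapes through the pipeline):

import tensorflow as tf

verts = tf.random.uniform([1, 500, 3], -1.0, 1.0)                         # hypothetical mesh
tris = tf.random.uniform([900, 3], minval=0, maxval=500, dtype=tf.int32)  # hypothetical faces

out = dirt_rendering_orthographic(
    verts, tris, reflectances=None,
    frame_width=256, frame_height=256, cx=128.0, cy=128.0)
# out["rendering_results"]: [1, 256, 256, 3] rendered image
# out["vertices"]:          tracked vertex positions in pixel coordinates
# out["landmark_normals"]:  the corresponding vertex normals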
Example #6
def dirt_rendering(vertices, faces, reflectances,
                   spherical_harmonics_parameters, translation_parameters,
                   camera_rotation_parameters, base_rotation, rotation_parameters,
                   frame_width, frame_height, focal_length, cx, cy,
                   vertices_indices):

    rendering_batch_size = tf.shape(vertices)[0]
    # Apply the object rotation
    vertices = tf.matmul(vertices,
                         matrices.rodrigues(rotation_parameters)[..., :3, :3])

    vertex_normals = normals = lighting.vertex_normals(vertices, faces)

    landmark_vertices, landmark_normals = get_landmarks(
        vertices, normals, vertices_indices)
    bz_facemodel_vertices_object, bz_facemodel_vertex_colors, bz_facemodel_vertex_normals, bz_facemodel_faces = split_vertices_by_color_normal_face(
        vertices, reflectances, normals, faces)

    # Transform vertices from world to camera space; note that the camera
    # points along the negative-z axis in camera space
    view_matrix = matrices.compose(
        matrices.translation(-translation_parameters
                             ),  # translate it away from the camera
        matrices.rodrigues(
            camera_rotation_parameters),  # tilt the view downwards
        matrices.rodrigues(base_rotation))

    # Convert vertices to homogeneous coordinates
    bz_facemodel_vertices_object = tf.concat([
        bz_facemodel_vertices_object,
        tf.ones_like(bz_facemodel_vertices_object[..., -1:])
    ],
                                             axis=-1)

    landmark_vertices = tf.concat(
        [landmark_vertices,
         tf.ones_like(landmark_vertices[..., -1:])],
        axis=-1)
    """
    with tf.Session() as sess:
        _view_matrix = sess.run( view_matrix )
        print( _view_matrix )
    """

    viewpoint_direction = -tf.ones_like(translation_parameters)
    norms = tf.norm(viewpoint_direction, axis=-1,
                    keep_dims=True)  # indexed by *, singleton
    viewpoint_direction /= (norms + 1e-5)

    viewpoint_direction = np.array([0, 0, 1], np.float32)
    viewpoint_direction = np.expand_dims(viewpoint_direction, axis=0)
    viewpoint_direction = tf.constant(viewpoint_direction)
    viewpoint_direction = tf.tile(viewpoint_direction,
                                  [rendering_batch_size, 1])

    # Object space coincides with world space here (no extra rotation is applied)
    bz_facemodel_vertices_world = bz_facemodel_vertices_object
    landmark_vertices_world = landmark_vertices

    # Broadcast the pre-split face list across the batch; pre_split implies
    # that no faces share a vertex
    bz_facemodel_faces = tf.expand_dims(bz_facemodel_faces, axis=0)
    bz_facemodel_faces = tf.tile(bz_facemodel_faces,
                                 (rendering_batch_size, 1, 1))

    bz_facemodel_normals_world = bz_facemodel_vertex_normals
    landmark_vertices_normals = landmark_normals

    bz_facemodel_vertices_camera = tf.matmul(bz_facemodel_vertices_world,
                                             view_matrix)
    landmark_vertices_camera = tf.matmul(landmark_vertices_world, view_matrix)

    # Transform vertices from camera to clip space; the near and far planes are
    # fixed multiples of the focal length
    near = focal_length * 0.1
    far = focal_length * 100
    projection_matrix = matrices.perspective_projection(
        near=near, far=far, fx=focal_length, fy=focal_length,
        w=frame_width, h=frame_height, cx=cx, cy=cy)

    bz_facemodel_vertices_clip = tf.matmul(bz_facemodel_vertices_camera,
                                           projection_matrix)
    landmark_vertices_vertices_clip = tf.matmul(landmark_vertices_camera,
                                                projection_matrix)

    # Geometry shading: either spherical harmonics or a single directional light
    use_spherical_harmonics = False

    if use_spherical_harmonics:
        vertex_colors_lit = spherical_harmonics(
            bz_facemodel_normals_world,
            bz_facemodel_vertex_colors,
            spherical_harmonics_parameters,
            viewpoint_direction=viewpoint_direction)

        geometry_visualization_spherical_harmonics_parameters = np.zeros(
            [27], dtype=np.float32)
        geometry_visualization_spherical_harmonics_parameters[3:3 + 9] = 1.0
        geometry_visualization_spherical_harmonics_parameters = tf.constant(
            geometry_visualization_spherical_harmonics_parameters,
            dtype=tf.float32)
        geometry_visualization_vertex_colors_lit = spherical_harmonics(
            bz_facemodel_normals_world,
            tf.ones_like(bz_facemodel_vertex_colors),
            geometry_visualization_spherical_harmonics_parameters,
            viewpoint_direction=viewpoint_direction)
    else:
        light_directions = np.array([[0, 0, 1]])
        norm = np.linalg.norm(light_directions, axis=-1)
        light_directions = light_directions / norm[:, np.newaxis]
        light_directions = tf.constant(light_directions, dtype=tf.float32)
        light_directions = tf.tile(light_directions, (rendering_batch_size, 1))

        light_colors = tf.constant([1., 1., 1.], dtype=tf.float32)
        light_colors = tf.expand_dims(light_colors, axis=0)
        light_colors = tf.tile(light_colors, (rendering_batch_size, 1))

        geometry_visualization_vertex_colors_lit = diffuse_directional_lights(
            bz_facemodel_normals_world,
            tf.ones_like(bz_facemodel_vertex_colors),
            light_direction=light_directions,
            viewpoint_direction=viewpoint_direction,
            light_color=light_colors,
            double_sided=False)
        vertex_colors_lit = diffuse_directional_lights(
            bz_facemodel_normals_world,
            bz_facemodel_vertex_colors,
            light_direction=light_directions,
            viewpoint_direction=viewpoint_direction,
            light_color=light_colors)

    # Reflectance visualization: constant (order-0) SH coefficients only
    reflectance_visualization_spherical_harmonics_parameters = np.zeros(
        [27], dtype=np.float32)
    reflectance_visualization_spherical_harmonics_parameters[0:3] = 3.0
    reflectance_visualization_spherical_harmonics_parameters = tf.constant(
        reflectance_visualization_spherical_harmonics_parameters,
        dtype=tf.float32)
    reflectance_visualization_vertex_colors_lit = spherical_harmonics(
        bz_facemodel_normals_world, bz_facemodel_vertex_colors,
        reflectance_visualization_spherical_harmonics_parameters,
        viewpoint_direction=viewpoint_direction)

    # Illumination visualization: estimated SH lighting over a white albedo
    illumination_visualization_vertex_colors_lit = spherical_harmonics(
        bz_facemodel_normals_world, tf.ones_like(bz_facemodel_vertex_colors),
        spherical_harmonics_parameters,
        viewpoint_direction=viewpoint_direction)

    # Pack geometry shading, the z component of the normals, and camera-space
    # depth into a single three-channel buffer
    depth_vertex_colors_lit = tf.concat([
        geometry_visualization_vertex_colors_lit[:, :, 0:1],
        bz_facemodel_normals_world[:, :, 2:3],
        bz_facemodel_vertices_camera[:, :, 2:3]
    ],
                                        axis=-1)

    # Landmarks: perspective divide, then map NDC to pixel coordinates
    denom = landmark_vertices_vertices_clip[:, :, 3]
    landmark_points = landmark_vertices_vertices_clip[:, :, 0:2] / (
        denom[..., tf.newaxis])
    landmark_xpoint = (1.0 + landmark_points[:, :, 0:1]) * frame_width * 0.5
    landmark_ypoint = (1.0 - landmark_points[:, :, 1:2]) * frame_height * 0.5
    landmark_points = tf.concat([landmark_xpoint, landmark_ypoint], axis=-1)

    # Landmark visibility: dot product of each landmark normal with the view direction
    landmark_visibility = tf.matmul(landmark_vertices_normals,
                                    viewpoint_direction[..., tf.newaxis])

    full_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    geometry_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=geometry_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    reflectance_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=tf.ones_like(
            reflectance_visualization_vertex_colors_lit),
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    illumination_model_pixels = dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=illumination_visualization_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    depth_maps = -dirt.rasterise_batch(
        vertices=bz_facemodel_vertices_clip,
        faces=bz_facemodel_faces,
        vertex_colors=depth_vertex_colors_lit,
        background=tf.zeros(
            [rendering_batch_size, frame_height, frame_width, 3]),
        width=frame_width,
        height=frame_height,
        channels=3)

    output1_dict = {}
    output1_dict["full_model_pixels"] = full_model_pixels
    output1_dict["vertices_clip"] = bz_facemodel_vertices_clip
    output1_dict["landmark_points"] = landmark_points
    output1_dict["landmark_visibility"] = landmark_visibility
    output1_dict["depth_maps"] = depth_maps[:, :, :, 2]
    output1_dict["geometry_model_pixels"] = geometry_model_pixels
    output1_dict["reflectance_model_pixels"] = reflectance_model_pixels
    output1_dict["illumination_model_pixels"] = illumination_model_pixels
    output1_dict["surface_normals"] = (1 + depth_maps[:, :, :, 1]) / 2.0
    output1_dict["vertex_normals"] = vertex_normals

    return output1_dict
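
A hedged usage sketch of the full pipeline above; every shape and value below is inferred from the code itself, not from an original call site, and the random faces serve only to trace shapes:

import numpy as np
import tensorflow as tf

batch = 1
out = dirt_rendering(
    vertices=tf.random.uniform([batch, 500, 3], -1., 1.),                     # hypothetical mesh
    faces=tf.random.uniform([900, 3], minval=0, maxval=500, dtype=tf.int32),  # hypothetical faces
    reflectances=tf.ones([batch, 500, 3]),            # per-vertex albedo
    spherical_harmonics_parameters=tf.zeros([27]),    # used only when SH shading is enabled
    translation_parameters=tf.constant([[0., 0., 100.]]),
    camera_rotation_parameters=tf.constant([[1e-10, 0., 0.]]),
    base_rotation=tf.constant([[1e-10, 0., 0.]]),
    rotation_parameters=tf.constant([[1e-10, 0., 0.]]),
    frame_width=256, frame_height=256,
    focal_length=500.0, cx=128.0, cy=128.0,
    vertices_indices=np.arange(68))                   # hypothetical landmark indices
# out["full_model_pixels"]: [1, 256, 256, 3] lit rendering
# out["landmark_points"]:   [1, 68, 2] landmark pixel coordinates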