Example #1
def example_render_batch3(pic_names: list, tf_bfm: TfMorphableModel,
                          n_tex_para: int, save_to_folder: str,
                          resolution: int):
    batch_size = len(pic_names)

    images_original = load_images(pic_names, '/opt/project/examples/Data/80k/')

    shape_param_batch, exp_param_batch, pose_param_batch = load_params_80k(
        pic_names=pic_names)
    shape_param = tf.squeeze(shape_param_batch)
    exp_param = tf.squeeze(exp_param_batch)
    pose_param = tf.squeeze(pose_param_batch)
    pose_param = tf.concat([
        pose_param[:, :-1],
        tf.constant(0.0, shape=(batch_size, 1), dtype=tf.float32),
        pose_param[:, -1:]
    ], axis=1)
    lm = tf_bfm.get_landmarks(shape_param,
                              exp_param,
                              pose_param,
                              batch_size,
                              resolution,
                              is_2d=True,
                              is_plot=True)

    images_rendered = render_batch(
        pose_param=pose_param,
        shape_param=shape_param,
        exp_param=exp_param,
        tex_param=tf.constant(0.0,
                              shape=(len(pic_names), n_tex_para),
                              dtype=tf.float32),
        color_param=None,
        illum_param=None,
        frame_height=resolution,
        frame_width=resolution,
        tf_bfm=tf_bfm,
        batch_size=batch_size).numpy().astype(np.uint8)

    for i, pic_name in enumerate(pic_names):
        fig = plt.figure()
        ax = fig.add_subplot(1, 2, 1)
        plot_image_w_lm(ax, resolution, images_original[i], lm[i])
        ax = fig.add_subplot(1, 2, 2)
        plot_image_w_lm(ax, resolution, images_rendered[i], lm[i])
        plt.savefig(os.path.join(save_to_folder, pic_name))
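
plot_image_w_lm is used throughout these examples but is not part of the listing. Below is a minimal sketch of what such a helper might look like, assuming lm is a [2, 68] array of pixel landmark coordinates; the overlay style and axis handling are assumptions, not the repository's actual implementation.

def plot_image_w_lm(ax, resolution, image, lm):
    # Assumed helper: show the image and overlay the 68 landmark points.
    ax.imshow(image)
    ax.plot(lm[0, :], lm[1, :], marker='o', markersize=2, linestyle='',
            color='w')
    ax.set_xlim(0, resolution)
    ax.set_ylim(resolution, 0)  # image coordinates: y axis points down
    ax.axis('off')
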
Example #2
def example_render_batch2(pic_names: list, tf_bfm: TfMorphableModel,
                          save_to_folder: str, n_tex_para: int):
    batch_size = len(pic_names)

    images_original = load_images(pic_names, '/opt/project/examples/Data/300W_LP/')

    shape_param_batch, exp_param_batch, tex_param_batch, color_param_batch, illum_param_batch, pose_param_batch, lm_batch = \
        load_params(pic_names=pic_names, n_tex_para=n_tex_para)

    # pose_param: [batch, n_pose_param]
    # shape_param: [batch, n_shape_para]
    # exp_param:   [batch, n_exp_para]
    # tex_param: [batch, n_tex_para]
    # color_param: [batch, n_color_para]
    # illum_param: [batch, n_illum_para]

    shape_param_batch = tf.squeeze(shape_param_batch)
    exp_param_batch = tf.squeeze(exp_param_batch)
    tex_param_batch = tf.squeeze(tex_param_batch)
    color_param_batch = tf.squeeze(color_param_batch)
    illum_param_batch = tf.squeeze(illum_param_batch)
    pose_param_batch = tf.squeeze(pose_param_batch)
    lm_rended = tf_bfm.get_landmarks(shape_param_batch, exp_param_batch,
                                     pose_param_batch, batch_size, 450,
                                     is_2d=True, is_plot=True)

    images_rendered = render_batch(
        pose_param=pose_param_batch,
        shape_param=shape_param_batch,
        exp_param=exp_param_batch,
        tex_param=tex_param_batch,
        color_param=color_param_batch,
        illum_param=illum_param_batch,
        frame_height=450,
        frame_width=450,
        tf_bfm=tf_bfm,
        batch_size=batch_size
    ).numpy().astype(np.uint8)

    for i, pic_name in enumerate(pic_names):
        fig = plt.figure()
        ax = fig.add_subplot(1, 2, 1)
        plot_image_w_lm(ax, 450, images_original[i], lm_batch[i])
        ax = fig.add_subplot(1, 2, 2)
        plot_image_w_lm(ax, 450, images_rendered[i], lm_rended[i])
        plt.savefig(os.path.join(save_to_folder, pic_name))
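
For reference, a hedged usage sketch of example_render_batch2, reusing the BFM model path and 300W_LP picture names that appear later in this listing; the output folder is only illustrative.

n_tex_para = 40
tf_bfm = TfMorphableModel(
    model_path='/opt/project/examples/Data/BFM/Out/BFM.mat',
    n_tex_para=n_tex_para)
example_render_batch2(
    pic_names=['IBUG_image_014_01_2', 'AFW_134212_1_0'],
    tf_bfm=tf_bfm,
    save_to_folder='/opt/project/output/render_batch2',  # illustrative path
    n_tex_para=n_tex_para)
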
Example #3
def save_landmarks(images, landmarks, output_folder, resolution):
    # The loop, figure creation and filename handling here are assumed;
    # only the landmark plot calls (nose, eyes, mouth) come from the
    # original helper, whose setup was truncated.
    for i, (image, lm) in enumerate(zip(images, landmarks)):
        filename = os.path.join(output_folder, 'landmarks_{0}.png'.format(i))
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.imshow(image)
        ax.set_xlim(0, resolution)
        ax.set_ylim(resolution, 0)
        ax.plot(lm[0, 31:36], lm[1, 31:36], marker='o', markersize=2, linestyle='-',
                color='w', lw=2)
        ax.plot(lm[0, 36:42], lm[1, 36:42], marker='o', markersize=2, linestyle='-',
                color='w', lw=2)
        ax.plot(lm[0, 42:48], lm[1, 42:48], marker='o', markersize=2, linestyle='-',
                color='w', lw=2)
        ax.plot(lm[0, 48:60], lm[1, 48:60], marker='o', markersize=2, linestyle='-',
                color='w', lw=2)
        ax.plot(lm[0, 60:68], lm[1, 60:68], marker='o', markersize=2, linestyle='-',
                color='w', lw=2)
        plt.savefig(filename)


if __name__ == '__main__':
    n_tex_para = 40
    bfm = TfMorphableModel(model_path='/opt/project/examples/Data/BFM/Out/BFM.mat', n_tex_para=n_tex_para)
    output_folder = '/opt/project/output/landmarks/landmark2'
    pic_names = ['image00002', 'IBUG_image_014_01_2', 'AFW_134212_1_0', 'IBUG_image_008_1_0']
    # pic_names = ['IBUG_image_008_1_0']
    batch_size = len(pic_names)
    images = load_images(pic_names, '/opt/project/examples/Data')
    resolution = 450
    shape_param, exp_param, _, _, _, pose_param = load_params(pic_names=pic_names, n_tex_para=n_tex_para,
                                                              data_folder='/opt/project/examples/Data/')
    landmarks = compute_landmarks(
        bfm=bfm, shape_param=shape_param, exp_param=exp_param, pose_param=pose_param, batch_size=batch_size,
        resolution=resolution)

    save_landmarks(images=images, landmarks=landmarks, output_folder=output_folder, resolution=resolution)
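
compute_landmarks is called above but its body is not shown. A minimal sketch of what it presumably does, assuming it simply mirrors the squeeze-and-get_landmarks pattern from Examples #1 and #2; the actual implementation may differ.

import tensorflow as tf

def compute_landmarks(bfm, shape_param, exp_param, pose_param, batch_size,
                      resolution):
    # Assumed wrapper around TfMorphableModel.get_landmarks.
    shape_param = tf.squeeze(shape_param)
    exp_param = tf.squeeze(exp_param)
    pose_param = tf.squeeze(pose_param)
    return bfm.get_landmarks(shape_param, exp_param, pose_param, batch_size,
                             resolution, is_2d=True, is_plot=True)
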
Example #4
for im in image_filenames:
    assert os.path.exists(im)

# Create a network from the model file
net_id, graph_id, parser, runtime = eu.create_tflite_network(model_filename)

# Load input information from the model
# tflite has all the needed information in the model, unlike other formats
input_names = parser.GetSubgraphInputTensorNames(graph_id)
assert len(input_names) == 1  # there should be 1 input tensor in mobilenet

input_binding_info = parser.GetNetworkInputBindingInfo(graph_id,
                                                       input_names[0])
input_width = input_binding_info[1].GetShape()[1]
input_height = input_binding_info[1].GetShape()[2]

# Load output information from the model and create output tensors
output_names = parser.GetSubgraphOutputTensorNames(graph_id)
assert len(output_names) == 1  # and only one output tensor
output_binding_info = parser.GetNetworkOutputBindingInfo(
    graph_id, output_names[0])

# Load labels file
labels = eu.load_labels(labels_filename)

# Load images and resize to expected size
images = eu.load_images(image_filenames, input_width, input_height)

eu.run_inference(runtime, net_id, images, labels, input_binding_info,
                 output_binding_info)
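
eu.run_inference hides the actual inference loop. Below is a rough sketch of the kind of loop it presumably performs, pieced together only from the pyarmnn calls that are visible in Example #5; the real example_utils helper may differ.

import numpy as np
import pyarmnn as ann

def run_inference_sketch(runtime, net_id, images, labels,
                         input_binding_info, output_binding_info):
    # Assumed behaviour: one EnqueueWorkload per image, then print top-5.
    # eu refers to the same example_utils module used in the listing above.
    output_tensors = ann.make_output_tensors([output_binding_info])
    for im in images:
        input_tensors = ann.make_input_tensors([input_binding_info], [im])
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)
        out_tensor = ann.workload_tensors_to_ndarray(output_tensors)[0][0]
        results = np.argsort(out_tensor)[::-1]
        eu.print_top_n(5, results, labels, out_tensor)
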
Example #5
    # Load input information from the model and create input tensors
    input_binding_info = parser.GetNetworkInputBindingInfo("data")

    # Load output information from the model and create output tensors
    output_binding_info = parser.GetNetworkOutputBindingInfo(
        "mobilenetv20_output_flatten0_reshape0")
    output_tensors = ann.make_output_tensors([output_binding_info])

    # Load labels
    labels = eu.load_labels(labels_filename)

    # Load images and resize to expected size
    image_names = [kitten_filename]
    images = eu.load_images(image_names, 224, 224, np.float32, 255.0,
                            [0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
                            preprocess_onnx)

    for idx, im in enumerate(images):
        # Create input tensors
        input_tensors = ann.make_input_tensors([input_binding_info], [im])

        # Run inference
        print("Running inference on '{0}' ...".format(image_names[idx]))
        runtime.EnqueueWorkload(net_id, input_tensors, output_tensors)

        # Process output
        out_tensor = ann.workload_tensors_to_ndarray(output_tensors)[0][0]
        results = np.argsort(out_tensor)[::-1]
        eu.print_top_n(5, results, labels, out_tensor)
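
preprocess_onnx is passed to eu.load_images above but is not shown in the listing. A minimal sketch of the ImageNet-style preprocessing it presumably applies, given the arguments in the call (scale 255.0, per-channel mean and stddev); the signature and the NCHW transpose are assumptions.

import numpy as np

def preprocess_onnx(img, width, height, data_type, scale, mean, stddev):
    # Hypothetical helper: img is assumed to be an HWC array already
    # resized to (height, width).
    img = np.asarray(img, dtype=data_type) / scale
    img = img - np.array(mean, dtype=data_type)
    img = img / np.array(stddev, dtype=data_type)
    return np.transpose(img, (2, 0, 1))  # HWC -> CHW for ONNX MobileNet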