Esempio n. 1
0
  def forward(self, x):
    """Pass `x` through fc1→relu1, fc2→relu2, then fc3, returning the logits."""
    # NOTE(review): dropout layers appear in the commented-out variant but
    # are bypassed in the active path — confirm this is intentional.
    hidden = self.relu1(self.fc1(x))
    hidden = self.relu2(self.fc2(hidden))
    return self.fc3(hidden)

# if __name__ == '__main__':
    # NOTE(review): the __main__ guard above is commented out, leaving these
    # indented statements syntactically orphaned — this fragment cannot run
    # as-is.  Presumably get_config/Mesh/vis_util/main come from the
    # surrounding script; confirm before re-enabling the guard.
    config = get_config()
    # FLAME template mesh supplies the fixed face topology for the renderer.
    template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')
    renderer = vis_util.SMPLRenderer(faces=template_mesh.f)

    # Create the output directory tree before main() writes into it.
    if not os.path.exists(config.out_folder):
        os.makedirs(config.out_folder)

    if not os.path.exists(config.out_folder + '/images'):
        os.mkdir(config.out_folder + '/images')

    main(config, template_mesh)
Esempio n. 2
0
def main(config):
    """Run RingNet inference on every image in config.in_folder.

    Depending on config flags, per-image outputs are written under
    config.out_folder: rendered visualizations ('/images'), predicted
    meshes ('/mesh'), FLAME parameter files ('/params') and
    expression-neutralized meshes ('/neutral_mesh').  Pre-processing /
    inference timing statistics are printed at the end.
    """
    print('Tensorflow version {}'.format(tf.__version__))

    print("Input Dir: <{}>".format(config.in_folder))
    print("Output Dir: <{}>".format(config.out_folder))

    img_paths = glob.glob(os.path.join(config.in_folder, '*'))

    # The template mesh supplies the fixed FLAME face topology used both
    # for rendering and for writing predicted vertices out as .obj files.
    if (config.save_viz or config.save_obj_file):
        template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')

    sess = tf.Session()
    model = RingNet_inference(config, sess=sess)

    # Hoisted out of the per-image loop: the renderer and the output
    # directories are invariant across images, so build/create them once
    # instead of on every iteration (the original re-created the renderer
    # and re-checked the directories per image).
    if config.save_viz:
        renderer = vis_util.SMPLRenderer(faces=template_mesh.f)
        if not os.path.exists(config.out_folder + '/images'):
            os.mkdir(config.out_folder + '/images')
    if config.save_obj_file:
        if not os.path.exists(config.out_folder + '/mesh'):
            os.mkdir(config.out_folder + '/mesh')
    if config.save_flame_parameters:
        if not os.path.exists(config.out_folder + '/params'):
            os.mkdir(config.out_folder + '/params')
    if config.neutralize_expression:
        if not os.path.exists(config.out_folder + '/neutral_mesh'):
            os.mkdir(config.out_folder + '/neutral_mesh')

    pre_process_times = []
    inference_times = []
    start = time.time()
    for img_path in img_paths:
        # Output file stem: basename minus the last 4 characters.
        # NOTE(review): this assumes a 3-character extension (.jpg/.png);
        # a '.jpeg' input keeps its trailing dot in output names — confirm.
        stem = img_path.split('/')[-1][:-4]

        pre_start = time.time()
        input_img, proc_param, img = preprocess_image(img_path,
                                                      config.img_size)
        pre_process_times.append(time.time() - pre_start)

        inf_start = time.time()
        vertices, flame_parameters = model.predict(np.expand_dims(input_img,
                                                                  axis=0),
                                                   get_parameters=True)
        inference_times.append(time.time() - inf_start)

        if config.save_viz:
            # First 3 entries of the parameter vector are the camera.
            cams = flame_parameters[0][:3]
            visualize_single_row(img,
                                 proc_param,
                                 vertices[0],
                                 cams,
                                 renderer,
                                 img_name=config.out_folder + '/images/' +
                                 stem)

        if config.save_obj_file:
            mesh = Mesh(v=vertices[0], f=template_mesh.f)
            mesh.write_obj(config.out_folder + '/mesh/' + stem + '.obj')

        if config.save_flame_parameters:
            # Parameter vector layout: [cam(3) | pose | shape | expression].
            flame_parameters_ = {
                'cam': flame_parameters[0][:3],
                'pose': flame_parameters[0][3:3 + config.pose_params],
                'shape': flame_parameters[0][3 + config.pose_params:
                                             3 + config.pose_params +
                                             config.shape_params],
                'expression': flame_parameters[0][3 + config.pose_params +
                                                  config.shape_params:],
            }
            np.save(config.out_folder + '/params/' + stem + '.npy',
                    flame_parameters_)

        if config.neutralize_expression:
            # NOTE(review): this reads the .npy written in the branch above,
            # so it only works when save_flame_parameters is also enabled.
            from util.using_flame_parameters import make_prdicted_mesh_neutral
            neutral_mesh = make_prdicted_mesh_neutral(
                config.out_folder + '/params/' + stem + '.npy',
                config.flame_model_path)
            neutral_mesh.write_obj(config.out_folder + '/neutral_mesh/' +
                                   stem + '.obj')
    overall_duration = time.time() - start

    mean_pre_process_time = np.mean(pre_process_times)
    mean_inference_time = np.mean(inference_times)

    print('mean_pre_process_time = {}'.format(mean_pre_process_time))
    print('mean_inference_time = {}'.format(mean_inference_time))

    n_images = len(img_paths)
    # Throughput over measured work only (excludes save/visualization time).
    throughput = n_images / (np.sum(pre_process_times) +
                             np.sum(inference_times))
    print('total images = {} throughput = {}/s'.format(n_images, throughput))

    # Throughput over the whole wall-clock loop.
    throughput = n_images / overall_duration
    print('total images = {} duration {} throughput = {}/s'.format(
        n_images, overall_duration, throughput))
Esempio n. 3
0
        y = i[1].int()
        plt.annotate(str(count), xy=(x, y))
        plt.scatter(x, y, s=50, c='red', marker='o')
        count = count + 1
    count = 0
    #openpose[0] *= scale
    for i in openpose[0]:
        x = i[0]
        y = i[1]
        plt.annotate(str(count), xy=(x, y))
        plt.scatter(x, y, s=50, c='blue', marker='o')
        count = count + 1
    plt.show()


    renderer = vis_util.SMPLRenderer(faces=mesh_faces)

    print(img.shape[:2])
    cam_for_render, vert_shifted = vis_util.get_original(
        #proc_param, mesh_vertices, new_cam.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
        proc_param, mesh_vertices, cam_params.detach().cpu().numpy().squeeze(), img_size=img.shape[:2]
    )
    print(cam_params, new_cam, cam_for_render)
    #exit(0)

    # rend_img_overlay = renderer(
    #     #vert_shifted * 1.0, cam=new_cam.squeeze().detach().cpu().numpy(), img=img, do_alpha=True
    #     #vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
    #     vert_shifted * 1.0, cam=cam_for_render, img=img, do_alpha=True
    # )
    rend_img_vp1 = renderer.rotated(
Esempio n. 4
0
def main(config):
    """Run batched RingNet inference on config.in_folder via a DataLoader.

    FaceDataset pre-processes each image in the DataLoader workers (and
    reports its own per-image pre-processing time); the model is then fed
    one image at a time.  Depending on config flags, rendered
    visualizations, .obj meshes and FLAME parameter files are written
    under config.out_folder.  Timing statistics are printed at the end.
    """
    print('Tensorflow version {}'.format(tf.__version__))
    print("Input Dir: <{}>".format(config.in_folder))
    print("Output Dir: <{}>".format(config.out_folder))

    transformed_dataset = FaceDataset(config.in_folder, config.img_size)

    # Dataloader: batching only parallelizes pre-processing — prediction
    # below still happens one image at a time.
    dataloader = DataLoader(transformed_dataset, batch_size=4,
                            shuffle=False, num_workers=4)

    # Fixed FLAME face topology shared by the renderer and .obj export.
    template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')

    tf_config = tf.ConfigProto()
    # dynamically grow the memory used on the GPU
    tf_config.gpu_options.allow_growth = True
    sess = tf.Session(config=tf_config)

    model = RingNet_inference(config, sess=sess)

    # Hoisted out of the loop: the renderer and output directories are
    # invariant across images, so build/create them once up front (the
    # original re-created the renderer and re-checked directories per image).
    if config.save_viz:
        renderer = vis_util.SMPLRenderer(faces=template_mesh.f)
        if not os.path.exists(config.out_folder + '/images'):
            os.mkdir(config.out_folder + '/images')
    if config.save_obj_file:
        if not os.path.exists(config.out_folder + '/mesh'):
            os.mkdir(config.out_folder + '/mesh')
    if config.save_flame_parameters:
        if not os.path.exists(config.out_folder + '/params'):
            os.mkdir(config.out_folder + '/params')

    pre_process_times = []
    inference_times = []
    start = time.time()
    for i_batch, sample_batched in enumerate(dataloader):
        batch_size = len(sample_batched['image'])

        for idx in range(batch_size):
            img = sample_batched['image'][idx]
            img_name = sample_batched['name'][idx]
            proc_param = sample_batched['proc_param'][idx]
            # Pre-processing already happened in the DataLoader workers;
            # the dataset reports how long it took for this image.
            pre_process_times.append(sample_batched['pre_process_time'][idx])

            inf_start = time.time()
            vertices, flame_parameters = model.predict(
                np.expand_dims(img, axis=0), get_parameters=True)
            inference_times.append(time.time() - inf_start)

            if config.save_viz:
                # First 3 entries of the parameter vector are the camera.
                cams = flame_parameters[0][:3]
                visualize_single_row(img, proc_param, vertices[0], cams,
                                     renderer,
                                     img_name=config.out_folder + '/images/' +
                                     img_name)

            if config.save_obj_file:
                mesh = Mesh(v=vertices[0], f=template_mesh.f)
                mesh.write_obj(config.out_folder + '/mesh/' +
                               '{}.obj'.format(img_name))

            if config.save_flame_parameters:
                # Parameter layout: [cam(3) | pose | shape | expression].
                flame_parameters_ = {
                    'cam': flame_parameters[0][:3],
                    'pose': flame_parameters[0][3:3 + config.pose_params],
                    'shape': flame_parameters[0][3 + config.pose_params:
                                                 3 + config.pose_params +
                                                 config.shape_params],
                    'expression': flame_parameters[0][3 + config.pose_params +
                                                      config.shape_params:],
                }
                np.save(config.out_folder + '/params/' +
                        '{}.npy'.format(img_name), flame_parameters_)

    overall_duration = time.time() - start

    mean_pre_process_time = np.mean(pre_process_times)
    print('mean_pre_process_time = {}'.format(mean_pre_process_time))

    mean_inference_time = np.mean(inference_times)
    print('mean_inference_time = {}'.format(mean_inference_time))

    n_images = len(transformed_dataset)
    # Throughput over measured work only (excludes save/visualization time).
    throughput = n_images / (np.sum(pre_process_times) + np.sum(inference_times))
    print('total images = {} throughput = {}/s using pre proccess and inference times'.format(n_images, throughput))

    # Throughput over the whole wall-clock loop.
    throughput = n_images / overall_duration
    print('total images = {} overall duration {} throughput = {}/s'.format(n_images, overall_duration, throughput))
Esempio n. 5
0
            flame_parameters[0][3 + config.pose_params + config.shape_params:]
        }
        np.save(
            config.out_folder + '/params/' +
            config.img_path.split('/')[-1][:-4] + '.npy', flame_parameters_)

    if config.neutralize_expression:
        from util.using_flame_parameters import make_prdicted_mesh_neutral
        if not os.path.exists(config.out_folder + '/neutral_mesh'):
            os.mkdir(config.out_folder + '/neutral_mesh')
        neutral_mesh = make_prdicted_mesh_neutral(
            config.out_folder + '/params/' +
            config.img_path.split('/')[-1][:-4] + '.npy',
            config.flame_model_path)
        neutral_mesh.write_obj(config.out_folder + '/neutral_mesh/' +
                               config.img_path.split('/')[-1][:-4] + '.obj')


if __name__ == '__main__':
    config = get_config()
    # FLAME template mesh: fixed face topology used by the renderer.
    template_mesh = Mesh(filename='./flame_model/FLAME_sample.ply')
    # NOTE(review): 'renderer' is not passed to main() — presumably read as
    # a module-level global inside main(); confirm before removing it.
    renderer = vis_util.SMPLRenderer(faces=template_mesh.f)

    # Ensure the output directory tree exists before main() writes into it.
    if not os.path.exists(config.out_folder):
        os.makedirs(config.out_folder)

    if not os.path.exists(config.out_folder + '/images'):
        os.mkdir(config.out_folder + '/images')

    main(config, template_mesh)
Esempio n. 6
0
    def train(self):
        """Run the training loop until max_epoch is exceeded.

        Each iteration runs the encoder update (and, unless
        encoder_only, the discriminator update), writes scalar summaries
        every step and image/occasional summaries every log_img_step
        steps, and prints per-iteration losses.
        """
        # For rendering! (presumably consumed by draw_results — confirm)
        self.renderer = vis_util.SMPLRenderer(
            img_size=self.img_size, face_path=self.config.smpl_face_path)

        step = 0

        # The supervisor owns session creation/checkpointing; the loop
        # exits once request_stop() is called below.
        with self.sv.managed_session(config=self.sess_config) as sess:
            while not self.sv.should_stop():
                # Fetched every step: summaries, global step, encoder
                # loss, and the encoder optimization op itself.
                fetch_dict = {
                    "summary": self.summary_op_always,
                    "step": self.global_step,
                    "e_loss": self.e_loss,
                    # The meat
                    "e_opt": self.e_opt,
                    "loss_kp": self.e_loss_kp
                }
                if not self.encoder_only:
                    fetch_dict.update({
                        # For D:
                        "d_opt": self.d_opt,
                        "d_loss": self.d_loss,
                        "loss_disc": self.e_loss_disc,
                    })
                if self.use_3d_label:
                    fetch_dict.update({
                        "loss_3d_params": self.e_loss_3d,
                        "loss_3d_joints": self.e_loss_3d_joints
                    })

                # Every log_img_step iterations also fetch the tensors
                # needed to render qualitative results.
                if step % self.log_img_step == 0:
                    fetch_dict.update({
                        "input_img": self.show_imgs,
                        "gt_kp": self.show_kps,
                        "e_verts": self.all_verts,
                        "joints": self.all_pred_kps,
                        "cam": self.all_pred_cams,
                    })
                    if not self.encoder_only:
                        fetch_dict.update(
                            {"summary_occasional": self.summary_op_occ})

                t0 = time()
                result = sess.run(fetch_dict)
                t1 = time()

                self.summary_writer.add_summary(result['summary'],
                                                global_step=result['step'])

                e_loss = result['e_loss']
                # Adopt the graph's global step so logging stays correct
                # after a checkpoint restore.
                step = result['step']

                epoch = float(step) / self.num_itr_per_epoch
                if self.encoder_only:
                    print("itr %d/(epoch %.1f): time %g, Enc_loss: %.4f" %
                          (step, epoch, t1 - t0, e_loss))
                else:
                    d_loss = result['d_loss']
                    print(
                        "itr %d/(epoch %.1f): time %g, Enc_loss: %.4f, Disc_loss: %.4f"
                        % (step, epoch, t1 - t0, e_loss, d_loss))

                if step % self.log_img_step == 0:
                    if not self.encoder_only:
                        self.summary_writer.add_summary(
                            result['summary_occasional'],
                            global_step=result['step'])
                    self.draw_results(result)

                self.summary_writer.flush()
                # Ask the supervisor to stop once we pass max_epoch; the
                # while condition exits on the next check.
                if epoch > self.max_epoch:
                    self.sv.request_stop()

                step += 1

        print('Finish training on %s' % self.model_dir)