Example 1
0
def main(_):
    """Run the CMR mesh predictor on a single image and visualize the mesh.

    Reads the image path, image size and GPU id from the global ``opts``
    flags (absl passes ``argv`` as the unused ``_`` argument).
    """
    # Load and resize the input image; channels-first float array,
    # presumably (3, 257, 257) for the default img_size -- TODO confirm.
    img = preprocess_image(opts.img_path, img_size=opts.img_size)
    print("opts:", opts.gpu_id)  # debug: show which GPU the flags selected

    # Wrap the image as a one-element batch: a (1, 3, H, W) tensor.
    batch = {'img': torch.Tensor(np.expand_dims(img, 0))}
    predictor = pred_util.MeshPredictor(opts)

    # outputs['verts'] holds the predicted mesh vertices.
    outputs = predictor.predict(batch)

    # Configure lighting on the predictor's visualization renderer.
    renderer = predictor.vis_rend
    renderer.set_light_dir([0, 1, -1], 0.4)

    visualize(img, outputs, predictor.vis_rend)
Example 2
0
def main(_):
    """Build the dataset loader, then predict and visualize a mesh.

    Reads all configuration from the global ``opts`` flags. Only the CUB
    dataset is supported.
    """
    # BUG FIX: the original assigned to `self.data_module` / `self.dataloader`
    # inside a plain function (no `self` in scope -> NameError); use locals.
    if opts.dataset == 'cub':
        data_module = cub_data
    else:
        raise NotImplementedError
    print('opts.split', opts.split)
    dataloader = data_module.data_loader(opts)
    # BUG FIX: removed `import ipdb` + bare `pdb.set_trace` -- `pdb` was never
    # imported (NameError), and the attribute access without `()` would have
    # been a no-op anyway.

    # Preprocessed image array, channels-first (3, H, W) --
    # presumably (3, 257, 257) for the default flags.
    img = preprocess_image(opts.img_path, img_size=opts.img_size)

    # One-element batch: (1, 3, H, W) tensor under the 'img' key.
    batch = {'img': torch.Tensor(np.expand_dims(img, 0))}
    # Predictor is configured from flags (opts.texture, opts.use_sfm_ms, ...).
    predictor = pred_util.MeshPredictor(opts)
    # Prediction dict keys: kp_pred, verts, kp_verts, cam_pred, mask_pred,
    # texture, texture_pred, uv_image, uv_flow.
    outputs = predictor.predict(batch)

    # Configure lighting on the visualization renderer before drawing.
    renderer = predictor.vis_rend
    renderer.set_light_dir([0, 1, -1], 0.4)

    visualize(img, outputs, predictor.vis_rend)
Example 3
0
    def define_model(self):
        """Instantiate the mesh predictor and configure its renderer.

        Stores the predictor on ``self.predictor`` and its visualization
        renderer (white background, 512px output, fixed light direction)
        on ``self.renderer``.
        """
        self.predictor = pred_utils.MeshPredictor(self.opts)

        # Visualization renderer setup: white background, higher-resolution
        # output, and a fixed light direction.
        rend = self.predictor.vis_rend
        rend.set_bgcolor([1., 1., 1.])
        rend.renderer.renderer.image_size = 512
        rend.set_light_dir([0, 1, -1], 0.38)
        self.renderer = rend
Example 4
0
File: demo.py Project: neka-nat/cmr
def main(_):
    """Run mesh prediction on every image in ``opts.img_paths``.

    Each result is visualized and written to ``demo_<i>.png``.
    """
    # PERF FIX: the predictor (which loads the network weights) and the
    # renderer light direction do not depend on the image, so build/set
    # them once instead of on every loop iteration.
    predictor = pred_util.MeshPredictor(opts)
    renderer = predictor.vis_rend
    renderer.set_light_dir([0, 1, -1], 0.4)

    for i, img_path in enumerate(opts.img_paths):
        # Channels-first image array resized to opts.img_size.
        img = preprocess_image(img_path, img_size=opts.img_size)

        # One-element batch: (1, 3, H, W) tensor.
        batch = {'img': torch.Tensor(np.expand_dims(img, 0))}
        outputs = predictor.predict(batch)

        visualize(img, outputs, predictor.vis_rend, f"demo_{i}.png")
Example 5
0
def main(_):
    """Predict a mesh for the image at ``opts.img_path`` and visualize it.

    All configuration comes from the global ``opts`` flags; absl passes
    ``argv`` as the unused ``_`` argument.
    """
    # Preprocessed image, channels-first (3, H, W) array.
    image = preprocess_image(opts.img_path, img_size=opts.img_size)

    # Single-image batch: a (1, 3, H, W) tensor under the 'img' key.
    input_batch = {'img': torch.Tensor(np.expand_dims(image, 0))}

    # Predictor configured from flags (opts.texture, opts.use_sfm_ms, ...).
    mesh_predictor = pred_util.MeshPredictor(opts)

    # Prediction dict keys: kp_pred, verts, kp_verts, cam_pred, mask_pred,
    # texture, texture_pred, uv_image, uv_flow.
    prediction = mesh_predictor.predict(input_batch)

    # Set the visualization renderer's light direction before drawing.
    vis_renderer = mesh_predictor.vis_rend
    vis_renderer.set_light_dir([0, 1, -1], 0.4)

    visualize(image, prediction, mesh_predictor.vis_rend)
Example 6
0
def main(_, obj_path='/content/test.obj'):
    """Predict a mesh for ``opts.img_path``, export it as OBJ, and visualize.

    Args:
        _: unused; absl's ``app.run`` passes ``argv`` here.
        obj_path: destination for the textured OBJ export. The default keeps
            the previously hard-coded Colab path, so existing callers are
            unaffected; new callers can choose where the mesh is written.
    """
    # Channels-first image array resized to opts.img_size.
    img = preprocess_image(opts.img_path, img_size=opts.img_size)

    # One-element batch: (1, 3, H, W) tensor under the 'img' key.
    batch = {'img': torch.Tensor(np.expand_dims(img, 0))}

    predictor = pred_util.MeshPredictor(opts)
    outputs = predictor.predict(batch)

    # debug: shape of the face index tensor -- first dim is batch, confirm.
    print(predictor.faces.shape)

    # Export the predicted, textured mesh so it can be inspected offline.
    save_obj(obj_path,
             outputs['verts'][0],
             predictor.faces[0],
             textures=outputs['texture'][0])

    # Configure lighting on the visualization renderer before drawing.
    renderer = predictor.vis_rend
    renderer.set_light_dir([0, 1, -1], 0.4)

    visualize(img, outputs, predictor.vis_rend)