Example #1
    def __init__(self, gc, gender):
        data_dir = osp.join(global_var.ROOT, 'pca')
        style_model = np.load(
            osp.join(data_dir, 'style_model_{}.npz'.format(gc)))

        self.renderer = Renderer(512)
        self.img = None
        self.body = trimesh.load(osp.join(global_var.ROOT, 'smpl',
                                          'hres_{}.obj'.format(gender)),
                                 process=False)

        with open(osp.join(global_var.ROOT, 'garment_class_info.pkl'),
                  'rb') as f:
            garment_meta = pickle.load(f)
        # self.vert_indices = garment_meta[gc]['vert_indices']
        self.f = garment_meta[gc]['f']

        self.gamma = np.zeros([1, 4], dtype=np.float32)
        self.pca = PCA(n_components=4)
        self.pca.components_ = style_model['pca_w']
        self.pca.mean_ = style_model['mean']
        win_name = '{}_{}'.format(gc, gender)
        cv2.namedWindow(win_name)
        cv2.createTrackbar('0', win_name, 100, 200, self.value_change)
        cv2.createTrackbar('1', win_name, 100, 200, self.value_change)
        cv2.createTrackbar('2', win_name, 100, 200, self.value_change)
        cv2.createTrackbar('3', win_name, 100, 200, self.value_change)
        cv2.createTrackbar('trans', win_name, 100, 200, self.value_change)
        self.trans = 0
        self.win_name = win_name

        self.render()
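
The four sliders map positions in [0, 200] (centered at 100) to the first four PCA style coefficients. The value_change callback is not part of this excerpt; a hedged sketch of what it plausibly does:

    def value_change(self, _=None):
        # Hypothetical callback (not in the excerpt): read each slider and
        # map its position in [0, 200] to a signed PCA coefficient.
        self.gamma = np.array([
            (cv2.getTrackbarPos(str(i), self.win_name) - 100) / 50.0
            for i in range(4)
        ], dtype=np.float32)[None]  # shape [1, 4], matching self.gamma
        self.trans = (cv2.getTrackbarPos('trans', self.win_name) - 100) / 100.0
        self.render()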
Example #2
    def __init__(self):
        super(UrbanEnv, self).__init__()

        # Initialize environment parameters
        self.town = carla_config.town
        self.state_y = carla_config.grid_height
        self.state_x = carla_config.grid_width
        self.channel = carla_config.features

        # Sensors
        self.rgb_sensor = None
        self.semantic_sensor = None

        # Planners
        self.planner = None

        # States and actions
        self.observation_space = spaces.Box(low=0,
                                            high=255,
                                            shape=(carla_config.grid_height,
                                                   carla_config.grid_width,
                                                   carla_config.features),
                                            dtype=np.uint8)
        self.action_space = spaces.Discrete(carla_config.N_DISCRETE_ACTIONS)

        self.ego_vehicle = None
        self.current_speed = 0.0

        # Rendering related
        self.renderer = None
        self.is_render_enabled = carla_config.render

        if self.is_render_enabled:
            self.renderer = Renderer()
            self.init_renderer()
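
As a quick sanity check, the observation and action spaces defined above can be exercised on their own; a minimal sketch with placeholder values standing in for carla_config:

import numpy as np
from gym import spaces

grid_height, grid_width, features = 84, 84, 3  # placeholders for carla_config
N_DISCRETE_ACTIONS = 5

observation_space = spaces.Box(low=0, high=255,
                               shape=(grid_height, grid_width, features),
                               dtype=np.uint8)
action_space = spaces.Discrete(N_DISCRETE_ACTIONS)

obs = observation_space.sample()  # random uint8 grid of shape (84, 84, 3)
act = action_space.sample()       # random int in [0, 5)
assert observation_space.contains(obs) and action_space.contains(act)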
Example #3
    def init_fn(self):
        self.train_ds = MixedDataset(self.options, ignore_3d=self.options.ignore_3d, is_train=True)
        self.model = hmr(config.SMPL_MEAN_PARAMS, pretrained=True).to(self.device)  # feature extraction model
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.options.lr,
                                          weight_decay=0)
        self.smpl = SMPL(config.SMPL_MODEL_DIR,
                         batch_size=16,
                         create_transl=False).to(self.device)
        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint loss, covering both 2D and 3D keypoints
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss on SMPL parameters, when ground-truth parameters are available
        self.criterion_regr = nn.MSELoss().to(self.device)

        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH
        # Initialize MVSMPLify
        self.mvsmplify = MVSMPLify(step_size=1e-2, batch_size=16, num_iters=100,
                                   focal_length=self.focal_length)
        print(self.options.pretrained_checkpoint)
        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(checkpoint_file=self.options.pretrained_checkpoint)
        # Load dictionary of fits
        self.fits_dict = FitsDict(self.options, self.train_ds)
        # Create renderer
        self.renderer = Renderer(focal_length=self.focal_length, img_res=224,
                                 faces=self.smpl.faces)
Example #4
    def init_fn(self):
        self.train_ds = MixedDataset(self.options, ignore_3d=self.options.ignore_3d, is_train=True)

        self.model = hmr(config.SMPL_MEAN_PARAMS, pretrained=True).to(self.device)
        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.options.lr,
                                          weight_decay=0)
        self.smpl = SMPL(config.SMPL_MODEL_DIR,
                         batch_size=self.options.batch_size,
                         create_transl=False).to(self.device)
        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint (2D and 3D) loss
        # No reduction because confidence weighting needs to be applied
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss for SMPL parameter regression
        self.criterion_regr = nn.MSELoss().to(self.device)
        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH
        self.conf_thresh = self.options.conf_thresh

        # Initialize SMPLify fitting module
        self.smplify = SMPLify(step_size=1e-2,
                               batch_size=self.options.batch_size,
                               num_iters=self.options.num_smplify_iters,
                               focal_length=self.focal_length,
                               prior_mul=0.1,
                               conf_thresh=self.conf_thresh)
        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(checkpoint_file=self.options.pretrained_checkpoint)

        # Load dictionary of fits
        self.fits_dict = FitsDict(self.options, self.train_ds)

        # Create renderer
        self.renderer = Renderer(focal_length=self.focal_length, img_res=self.options.img_res, faces=self.smpl.faces)
Example #5
    def init_fn(self):
        # create training dataset
        self.train_ds = create_dataset(self.options.dataset, self.options)

        # create Mesh object
        self.mesh = Mesh()
        self.faces = self.mesh.faces.to(self.device)

        # create GraphCNN
        self.graph_cnn = GraphCNN(self.mesh.adjmat,
                                  self.mesh.ref_vertices.t(),
                                  num_channels=self.options.num_channels,
                                  num_layers=self.options.num_layers).to(
                                      self.device)

        # SMPL Parameter regressor
        self.smpl_param_regressor = SMPLParamRegressor().to(self.device)

        # Setup a joint optimizer for the 2 models
        self.optimizer = torch.optim.Adam(
            params=list(self.graph_cnn.parameters()) +
            list(self.smpl_param_regressor.parameters()),
            lr=self.options.lr,
            betas=(self.options.adam_beta1, 0.999),
            weight_decay=self.options.wd)

        # SMPL model
        self.smpl = SMPL().to(self.device)

        # Create loss functions
        self.criterion_shape = nn.L1Loss().to(self.device)
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        self.criterion_regr = nn.MSELoss().to(self.device)

        # Pack models and optimizers in a dict - necessary for checkpointing
        self.models_dict = {
            'graph_cnn': self.graph_cnn,
            'smpl_param_regressor': self.smpl_param_regressor
        }
        self.optimizers_dict = {'optimizer': self.optimizer}

        # Renderer for visualization
        self.renderer = Renderer(faces=self.smpl.faces.cpu().numpy())

        # LSP indices from full list of keypoints
        self.to_lsp = list(range(14))

        # Optionally start training from a pretrained checkpoint
        # Note that this is different from resuming training
        # For the latter use --resume
        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)
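
The models_dict / optimizers_dict packing above exists so a generic checkpointing routine can iterate over every stateful object. A minimal sketch of such a saver (a hypothetical helper, not from this codebase):

import torch

def save_checkpoint(models_dict, optimizers_dict, path):
    # Hypothetical saver mirroring the dict convention used above.
    state = {name: m.state_dict() for name, m in models_dict.items()}
    state.update({name: o.state_dict() for name, o in optimizers_dict.items()})
    torch.save(state, path)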
Example #6
    def __init__(self, light_variability=(20, 8), gridlines_on=None,
                 gridlines_width=None, gridlines_spacing=None):
        if gridlines_on or gridlines_width or gridlines_spacing:
            assert not (gridlines_on is None
                        or gridlines_width is None
                        or gridlines_spacing is None), \
                "All gridlines variables must be set if any are"

        self.rend = Renderer()

        self.shapes = []
        self.grid_shapes = []

        self.center = np.array((0, 140, 300))

        self.light_variability = light_variability

        self.background_prims = []
        background_lower_bound = -1e3
        background_upper_bound = 1e3
        wall_bound = 1e3
        self.background_prims.append(
            Tri([(-wall_bound, 0, wall_bound),
                (wall_bound, 0, wall_bound),
                (-wall_bound, 0, -wall_bound)]))
        self.background_prims.append(
            Tri([(-wall_bound, 0, -wall_bound),
                (wall_bound, 0, wall_bound),
                (wall_bound, 0, -wall_bound)]))
        self.background_prims.append(
            Tri([(-wall_bound, -50, wall_bound),
                (0, wall_bound, wall_bound),
                (wall_bound, -50, wall_bound)]))

        if gridlines_on:
            num_lines = int((background_upper_bound - background_lower_bound)
                            / (gridlines_width + gridlines_spacing))
            for i in range(num_lines):
                offset = i * (gridlines_width + gridlines_spacing)
                self.grid_shapes.append(
                    Tri([(background_lower_bound + offset, 0.01, background_lower_bound),
                         (background_lower_bound + offset, 0.01, background_upper_bound),
                         (background_lower_bound + gridlines_width + offset, 0.01, background_lower_bound)]))
                self.grid_shapes.append(
                    Tri([(background_lower_bound + offset, 0.01, background_upper_bound),
                         (background_lower_bound + gridlines_width + offset, 0.01, background_upper_bound),
                         (background_lower_bound + gridlines_width + offset, 0.01, background_lower_bound)]))
                self.grid_shapes.append(
                    Tri([(background_lower_bound, 0.01, background_lower_bound + gridlines_width + offset),
                         (background_upper_bound, 0.01, background_lower_bound + offset),
                         (background_lower_bound, 0.01, background_lower_bound + offset)]))
                self.grid_shapes.append(
                    Tri([(background_upper_bound, 0.01, background_lower_bound + offset),
                         (background_lower_bound, 0.01, background_lower_bound + gridlines_width + offset),
                         (background_upper_bound, 0.01, background_lower_bound + gridlines_width + offset)]))

        self.default_light = np.array((400, 300, -800))
        self.default_intensity = 1000000
        self.camera = Cam((0, 140, 300), (128, 128))
Example #7
def bbox_from_json(bbox_file):
    """Get center and scale of bounding box from bounding box annotations.
    The expected format is [top_left(x), top_left(y), width, height].
    """
    with open(bbox_file, 'r') as f:
        bbox = np.array(json.load(f)['bbox']).astype(np.float32)
    ul_corner = bbox[:2]
    center = ul_corner + 0.5 * bbox[2:]
    width = max(bbox[2], bbox[3])
    scale = width / 200.0
    # make sure the bounding box is rectangular
    return center, scale


# Load pretrained model
model = hmr(config.SMPL_MEAN_PARAMS).to(device)
checkpoint = torch.load(args.checkpoint)
model.load_state_dict(checkpoint['model'], strict=False)

# Load SMPL model
smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1,
            create_transl=False).to(device)
model.eval()

# Setup renderer for visualization
renderer = Renderer(focal_length=constants.FOCAL_LENGTH,
                    img_res=constants.IMG_RES,
                    faces=smpl.faces)

# Preprocess input image and generate predictions
img, norm_img = process_image(args.img,
                              args.bbox,
                              args.openpose,
                              input_res=constants.IMG_RES)
with torch.no_grad():
    pred_rotmat, pred_betas, pred_camera = model(norm_img.to(device))
    pred_output = smpl(betas=pred_betas,
                       body_pose=pred_rotmat[:, 1:],
                       global_orient=pred_rotmat[:, 0].unsqueeze(1),
                       pose2rot=False)
    pred_vertices = pred_output.vertices

# Calculate camera parameters for rendering
camera_translation = torch.stack([
    pred_camera[:, 1], pred_camera[:, 2],
    2 * constants.FOCAL_LENGTH / (constants.IMG_RES * pred_camera[:, 0] + 1e-9)
], dim=-1)
camera_translation = camera_translation[0].cpu().numpy()
pred_vertices = pred_vertices[0].cpu().numpy()
img = img.permute(1, 2, 0).cpu().numpy()
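
The camera_translation computation above converts the network's weak-perspective camera (s, tx, ty) into a 3D translation for the renderer: the depth is tz = 2 * f / (IMG_RES * s), with a small epsilon guarding against division by zero. A standalone numpy sketch of the same conversion:

import numpy as np

FOCAL_LENGTH = 5000.0  # same role as constants.FOCAL_LENGTH (value assumed)
IMG_RES = 224          # same role as constants.IMG_RES (value assumed)

def weak_perspective_to_translation(s, tx, ty):
    # Depth from the scale: a larger s means the person fills more of the
    # crop, i.e. the camera is closer, so tz shrinks as s grows.
    tz = 2 * FOCAL_LENGTH / (IMG_RES * s + 1e-9)
    return np.array([tx, ty, tz], dtype=np.float32)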
Example #8
def render_plot(img, poses, bboxes):
    renderer = Renderer(vertices_path="./pose_references/vertices_trans.npy",
                        triangles_path="./pose_references/triangles.npy")

    (w, h) = img.size
    image_intrinsics = np.array([[w + h, 0, w // 2], [0, w + h, h // 2],
                                 [0, 0, 1]])

    trans_vertices = renderer.transform_vertices(img, poses)
    img = renderer.render(img, trans_vertices, alpha=1)
    # for bbox in bboxes:
    #     bbox = bbox.astype(np.uint8)
    #     print(bbox)
    #     img = cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255,0,0), 2)
    return img
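
The intrinsics above follow a common heuristic when calibration is unknown: approximate the focal length as w + h and put the principal point at the image center. A quick standalone check of the projection this matrix induces:

import numpy as np

w, h = 640, 480  # illustrative image size
K = np.array([[w + h, 0, w // 2],
              [0, w + h, h // 2],
              [0, 0, 1]], dtype=np.float64)

point = np.array([0.1, -0.05, 2.0])  # a 3D point two units in front of the camera
u, v, z = K @ point
print(u / z, v / z)  # pixel coordinates, near the image center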
Example #9
    def init_fn(self):
        self.train_ds = MixedDataset(self.options,
                                     ignore_3d=self.options.ignore_3d,
                                     is_train=True)

        self.model = hmr(config.SMPL_MEAN_PARAMS,
                         pretrained=True).to(self.device)

        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=self.options.lr)
        self.lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, gamma=0.95)

        self.smpl = SMPL(config.SMPL_MODEL_DIR,
                         batch_size=self.options.batch_size,
                         create_transl=False).to(self.device)

        # consistency loss
        self.criterion_consistency_contrastive = NTXent(
            tau=self.options.tau, kernel=self.options.kernel).to(self.device)
        self.criterion_consistency_mse = nn.MSELoss().to(self.device)
        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint (2D and 3D) loss
        # No reduction because confidence weighting needs to be applied
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss for SMPL parameter regression
        self.criterion_regr = nn.MSELoss().to(self.device)
        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH

        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)

        # Create renderer
        self.renderer = Renderer(focal_length=self.focal_length,
                                 img_res=self.options.img_res,
                                 faces=self.smpl.faces)

        # Create input image flag
        self.input_img = self.options.input_img

        # initialize queue
        self.feat_queue = FeatQueue(max_queue_size=self.options.max_queue_size)
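
FeatQueue is not included in this excerpt; what follows is a hedged sketch of the bounded FIFO feature queue such consistency/contrastive setups typically use (the class name is reused for illustration, and the behavior is an assumption):

import torch

class FeatQueue:
    # Hypothetical stand-in: a bounded FIFO of past feature vectors.
    def __init__(self, max_queue_size=1024):
        self.max_queue_size = max_queue_size
        self._feats = []

    @torch.no_grad()
    def enqueue(self, batch_feats):
        # Detach so queued features never receive gradients.
        self._feats.extend(batch_feats.detach().cpu())
        self._feats = self._feats[-self.max_queue_size:]  # drop the oldest

    def get(self):
        return torch.stack(self._feats) if self._feats else None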
Example #10
    def init_fn(self):
        self.options.img_res = cfg.DANET.INIMG_SIZE
        self.options.heatmap_size = cfg.DANET.HEATMAP_SIZE
        self.train_ds = MixedDataset(self.options,
                                     ignore_3d=self.options.ignore_3d,
                                     is_train=True)

        self.model = DaNet(options=self.options,
                           smpl_mean_params=path_config.SMPL_MEAN_PARAMS).to(
                               self.device)
        self.smpl = self.model.iuv2smpl.smpl

        self.optimizer = torch.optim.Adam(params=self.model.parameters(),
                                          lr=cfg.SOLVER.BASE_LR,
                                          weight_decay=0)

        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH

        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)

        # Load dictionary of fits of SPIN
        self.fits_dict = FitsDict(self.options, self.train_ds)

        # Create renderer
        try:
            self.renderer = Renderer(focal_length=self.focal_length,
                                     img_res=self.options.img_res,
                                     faces=self.smpl.faces)
        except Exception:
            # Renderer creation can fail on headless machines; warn and disable.
            warnings.warn('No renderer for visualization.')  # needs `import warnings`
            self.renderer = None

        self.decay_steps_ind = 1
Example #11
    # Load model
    mesh = Mesh(device=device)
    # Our pretrained networks have 5 residual blocks with 256 channels.
    # You might want to change this if you use a different architecture.
    model = CMR(mesh,
                5,
                256,
                pretrained_checkpoint=args.checkpoint,
                device=device)

    model.to(device)
    model.eval()

    # Setup renderer for visualization
    renderer = Renderer()

    # Preprocess input image and generate predictions
    img, norm_img = process_image(args.img,
                                  args.bbox,
                                  args.openpose,
                                  input_res=cfg.INPUT_RES)
    norm_img = norm_img.to(device)
    with torch.no_grad():
        pred_vertices, pred_vertices_smpl, pred_camera, _, _ = model(norm_img)

    # Calculate camera parameters for rendering
    camera_translation = torch.stack([
        pred_camera[:, 1], pred_camera[:, 2],
        2 * cfg.FOCAL_LENGTH / (cfg.INPUT_RES * pred_camera[:, 0] + 1e-9)
    ], dim=-1)
Example #12
from utils.shapes import Sphere, Cuboid, Tetrahedron
from utils.renderer import Renderer, Cam, Lit, Tri
import cv2

rend = Renderer()

sp = Sphere((0, 0, 0), 25)
shapes = sp.render()
cuboid = Cuboid((0, 0, -100))
cuboid.scale(25, axis=0)
cuboid.scale(50, axis=1)
cuboid.scale(75, axis=2)
cuboid.rotate(90, 90)
shapes.extend(cuboid.render())
tetrahedron = Tetrahedron((100, 0, 0))
tetrahedron.scale(25, axis=0)
tetrahedron.scale(50, axis=1)
tetrahedron.scale(20, axis=2)
tetrahedron.rotate(45, 180)
shapes.extend(tetrahedron.render())

background_prims = []
background_prims.append(Tri([(-1000.00, -40.00, 1000.00),
                             (1000.00, -40.00, 1000.00),
                             (-1000.00, -40.00, -1000.00)]))
background_prims.append(Tri([(-1000.00, -40.00, -1000.00),
                             (1000.00, -40.00, 1000.00),
                             (1000.00, -40.00, -1000.00)]))


light = Lit((125, 300, 35), 79000)
camera = [Cam((200, 222, 83), (-.5, -.7, -.5), (640, 480))]
shadow, noshadow = rend.render(camera, light, shapes, background_prims)
cv2.imwrite("shadow.png", shadow)
cv2.imwrite("noshadow.png", noshadow)
Example #13
    def _setup_renderer(self):
        self.render = Renderer(cfg.image_size,
                               obj_filename=cfg.mesh_file).to(self.device)
Example #14
# import matplotlib
# matplotlib.use('MACOSX')
import matplotlib.pyplot as plt
from smplx import SMPL

from utils.renderer import Renderer
from utils.cam_utils import perspective_project_torch
from data.ssp3d_dataset import SSP3DDataset
import config

# SMPL models in torch
smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')
smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')

# Pyrender renderer
renderer = Renderer(faces=smpl_male.faces, img_res=512)

# SSP-3D dataset class
ssp3d_dataset = SSP3DDataset(config.SSP_3D_PATH)

indices_to_plot = [11, 60, 199]  # Visualising 3 examples from SSP-3D

for i in indices_to_plot:
    data = ssp3d_dataset[i]

    fname = data['fname']
    image = data['image']
    cropped_image = data['cropped_image']
    silhouette = data['silhouette']
    joints2D = data['joints2D']
Example #15
import os
import os.path as osp
import cv2
import numpy as np
import pickle
from utils.renderer import Renderer
from smpl_torch import SMPLNP
from global_var import ROOT


if __name__ == '__main__':
    garment_class = 't-shirt'
    gender = 'female'
    img_size = 512
    renderer = Renderer(img_size)
    smpl = SMPLNP(gender=gender, cuda=False)

    base_dir = osp.join(ROOT, '{}_{}'.format(garment_class, gender))
    pose_dir = osp.join(base_dir, 'pose')
    shape_dir = osp.join(base_dir, 'shape')
    ss_dir = osp.join(base_dir, 'style_shape')
    pose_vis_dir = osp.join(base_dir, 'pose_vis')
    ss_vis_dir = osp.join(base_dir, 'style_shape_vis')
    pivots_path = osp.join(base_dir, 'pivots.txt')
    avail_path = osp.join(base_dir, 'avail.txt')
    os.makedirs(pose_vis_dir, exist_ok=True)
    os.makedirs(ss_vis_dir, exist_ok=True)

    with open(os.path.join(ROOT, 'garment_class_info.pkl'), 'rb') as f:
        class_info = pickle.load(f, encoding='latin-1')
    body_f = smpl.base.faces
    garment_f = class_info[garment_class]['f']
Example #16
    def _setup_renderer(self):
        mesh_file = './data/head_template_mesh.obj'
        self.render = Renderer(self.image_size,
                               obj_filename=mesh_file).to(self.device)
Example #17
    # Load model
    if args.config is None:
        tmp = args.checkpoint.split('/')[:-2]
        tmp.append('config.json')
        args.config = '/' + join(*tmp)

    with open(args.config, 'r') as f:
        options = json.load(f)
        options = namedtuple('options', options.keys())(**options)

    model = DMR(options, args.checkpoint)
    model.eval()

    # Setup renderer for visualization
    _, faces = read_obj('data/reference_mesh.obj')
    renderer = Renderer(faces=np.array(faces) - 1)

    # Preprocess input image and generate predictions
    img, norm_img = process_image(args.img, args.bbox, args.openpose,
                                  input_res=cfg.INPUT_RES)
    with torch.no_grad():
        out_dict = model(norm_img.to(model.device))
        pred_vertices = out_dict['pred_vertices']
        pred_camera = out_dict['camera']

    # Calculate camera parameters for rendering
    camera_translation = torch.stack([
        pred_camera[:, 1], pred_camera[:, 2],
        2 * cfg.FOCAL_LENGTH / (cfg.INPUT_RES * pred_camera[:, 0] + 1e-9)
    ], dim=-1)
    camera_translation = camera_translation[0].cpu().numpy()
    pred_vertices = pred_vertices[0].cpu().numpy()
    img = img.permute(1, 2, 0).cpu().numpy()

    # Render non-parametric shape
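
The namedtuple construction in this example is a compact way to turn a JSON config dict into an attribute-style options object; a minimal standalone demonstration of the same trick:

import json
from collections import namedtuple

options = json.loads('{"lr": 0.0003, "batch_size": 16}')
options = namedtuple('options', options.keys())(**options)
print(options.lr, options.batch_size)  # 0.0003 16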
Example #18
    def init_fn(self):
        # create training dataset
        self.train_ds = create_dataset(self.options.dataset,
                                       self.options,
                                       use_IUV=True)
        self.dp_res = int(self.options.img_res // (2**self.options.warp_level))

        self.CNet = DPNet(warp_lv=self.options.warp_level,
                          norm_type=self.options.norm_type).to(self.device)

        self.LNet = get_LNet(self.options).to(self.device)
        self.smpl = SMPL().to(self.device)
        self.female_smpl = SMPL(cfg.FEMALE_SMPL_FILE).to(self.device)
        self.male_smpl = SMPL(cfg.MALE_SMPL_FILE).to(self.device)

        uv_res = self.options.uv_res
        self.uv_type = self.options.uv_type
        self.sampler = Index_UV_Generator(UV_height=uv_res,
                                          UV_width=-1,
                                          uv_type=self.uv_type).to(self.device)

        weight_file = 'data/weight_p24_h{:04d}_w{:04d}_{}.npy'.format(
            uv_res, uv_res, self.uv_type)
        if not os.path.exists(weight_file):
            cal_uv_weight(self.sampler, weight_file)

        uv_weight = torch.from_numpy(np.load(weight_file)).to(
            self.device).float()
        uv_weight = uv_weight * self.sampler.mask.to(uv_weight.device).float()
        uv_weight = uv_weight / uv_weight.mean()
        self.uv_weight = uv_weight[None, :, :, None]
        self.tv_factor = (uv_res - 1) * (uv_res - 1)

        # Setup an optimizer
        if self.options.stage == 'dp':
            self.optimizer = torch.optim.Adam(
                params=list(self.CNet.parameters()),
                lr=self.options.lr,
                betas=(self.options.adam_beta1, 0.999),
                weight_decay=self.options.wd)
            self.models_dict = {'CNet': self.CNet}
            self.optimizers_dict = {'optimizer': self.optimizer}

        else:
            self.optimizer = torch.optim.Adam(
                params=list(self.LNet.parameters()) +
                list(self.CNet.parameters()),
                lr=self.options.lr,
                betas=(self.options.adam_beta1, 0.999),
                weight_decay=self.options.wd)
            self.models_dict = {'CNet': self.CNet, 'LNet': self.LNet}
            self.optimizers_dict = {'optimizer': self.optimizer}

        # Create loss functions
        self.criterion_shape = nn.L1Loss().to(self.device)
        self.criterion_uv = nn.L1Loss().to(self.device)
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        self.criterion_keypoints_3d = nn.L1Loss(reduction='none').to(
            self.device)
        self.criterion_regr = nn.MSELoss().to(self.device)

        # LSP indices from full list of keypoints
        self.to_lsp = list(range(14))
        self.renderer = Renderer(faces=self.smpl.faces.cpu().numpy())

        # Optionally start training from a pretrained checkpoint
        # Note that this is different from resuming training
        # For the latter use --resume
        if self.options.pretrained_checkpoint is not None:
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)
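
The uv_weight preparation above is a three-step recipe: mask out texels that fall outside the UV map, normalize to unit mean so the loss scale stays stable, then add singleton dimensions for broadcasting over an (N, H, W, C) tensor. The same steps in isolation (shapes illustrative):

import torch

uv_res = 128
uv_weight = torch.rand(uv_res, uv_res)             # per-texel weights (illustrative)
mask = (torch.rand(uv_res, uv_res) > 0.2).float()  # stand-in for sampler.mask

uv_weight = uv_weight * mask              # zero out texels off the UV map
uv_weight = uv_weight / uv_weight.mean()  # unit mean keeps the loss scale stable
uv_weight = uv_weight[None, :, :, None]   # broadcastable over (N, H, W, C)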
Example #19
if __name__ == '__main__':
    args = parser.parse_args()
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # Load trained model
    model = hmr(config.SMPL_MEAN_PARAMS).to(device)
    checkpoint = torch.load(args.trained_model)
    model.load_state_dict(checkpoint['model'], strict=False)
    smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1,
                create_transl=False).to(device)
    model.eval()
    # Generate rendered image
    renderer = Renderer(focal_length=constants.FOCAL_LENGTH,
                        img_res=constants.IMG_RES,
                        faces=smpl.faces)
    # Process the image and predict the parameters
    img, norm_img = process_image(args.test_image,
                                  args.bbox,
                                  input_res=constants.IMG_RES)
    with torch.no_grad():
        pred_rotmat, pred_betas, pred_camera = model(norm_img.to(device))
        pred_output = smpl(betas=pred_betas,
                           body_pose=pred_rotmat[:, 1:],
                           global_orient=pred_rotmat[:, 0].unsqueeze(1),
                           pose2rot=False)
        pred_vertices = pred_output.vertices
    camera_translation = torch.stack([
        pred_camera[:, 1], pred_camera[:, 2],
        2 * constants.FOCAL_LENGTH / (constants.IMG_RES * pred_camera[:, 0] + 1e-9)
    ], dim=-1)
Example #20
dist = np.sqrt(np.sum((skirt_v[:, None] - body_v[None]) ** 2,
                      2))  # n_skirt, n_body
body_ind = np.argsort(dist, 1)[:, :K]
body_dist = np.sort(dist, 1)[:, :K]
# Inverse distance weighting
w = 1 / (body_dist**p)
w = w / np.sum(w, 1, keepdims=True)
n_skirt = len(skirt_v)
n_body = len(body_v)
skirt_weight = np.zeros([n_skirt, n_body], dtype=np.float32)
skirt_weight[np.tile(np.arange(n_skirt)[:, None], (1, K)), body_ind] = w
np.savez_compressed('C:/data/v3/skirt_weight.npz', w=skirt_weight)

exit()

# test
renderer = Renderer(512)
smpl = SMPLNP(gender='female', skirt=True)
smpl_torch = TorchSMPL4Garment('female')

import torch
disp = smpl_torch.forward_unpose_deformation(
    torch.from_numpy(np.zeros([1, 72])).float(),
    torch.from_numpy(np.zeros([1, 300])).float(),
    torch.from_numpy(skirt_v)[None].float())
disp = disp.detach().cpu().numpy()[0]

for t in np.linspace(0, 1, 20):
    theta = np.zeros([72])
    theta[5] = t
    theta[8] = -t
    body_v, gar_v = smpl(np.zeros([300]), theta, disp, 'skirt')
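
The skirt_weight block above is plain inverse distance weighting: keep the K nearest body vertices for every skirt vertex, weight them by 1/d^p, and normalize rows to sum to one. A self-contained sketch of the same scheme (K, p, and the point sets are illustrative):

import numpy as np

K, p = 4, 2.0                     # neighbour count and IDW exponent (illustrative)
skirt_v = np.random.rand(100, 3)  # stand-ins for the real meshes
body_v = np.random.rand(500, 3)

dist = np.linalg.norm(skirt_v[:, None] - body_v[None], axis=2)  # (n_skirt, n_body)
body_ind = np.argsort(dist, 1)[:, :K]
body_dist = np.sort(dist, 1)[:, :K]

w = 1.0 / (body_dist ** p)           # closer body vertices weigh more
w = w / np.sum(w, 1, keepdims=True)  # each row sums to one
skirt_weight = np.zeros((len(skirt_v), len(body_v)), dtype=np.float32)
skirt_weight[np.arange(len(skirt_v))[:, None], body_ind] = w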
Example #21
def run_evaluation(model, args, dataset, mesh):
    """Run evaluation on the datasets and metrics we report in the paper. """

    # Create SMPL model
    smpl = SMPL().cuda()

    # Regressor for H36m joints
    J_regressor = torch.from_numpy(np.load(cfg.JOINT_REGRESSOR_H36M)).float()

    # Create dataloader for the dataset
    data_loader = DataLoader(dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    # Transfer model to the GPU
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    model.to(device)
    model.eval()

    # predictions
    all_kps = {}

    # Iterate over the entire dataset
    for step, batch in enumerate(
            tqdm(data_loader, desc='Eval', total=args.num_imgs)):

        # Get ground truth annotations from the batch
        images = batch['img'].to(device)
        curr_batch_size = images.shape[0]

        # Run inference
        with torch.no_grad():
            pred_vertices, pred_vertices_smpl, camera, pred_rotmat, pred_betas = model(
                images)
            pred_keypoints_3d_smpl = smpl.get_joints(pred_vertices_smpl)
            pred_keypoints_2d_smpl = orthographic_projection(
                pred_keypoints_3d_smpl,
                camera.detach())[:, :, :2].cpu().data.numpy()

        eval_part = np.zeros((1, 19, 2))

        # we use custom keypoints for evaluation: MPII + COCO face joints
        # see paper / supplementary for details
        eval_part[0, :14, :] = pred_keypoints_2d_smpl[0][:14]
        eval_part[0, 14:, :] = pred_keypoints_2d_smpl[0][19:]

        all_kps[step] = eval_part
        if args.write_imgs == 'True':
            renderer = Renderer(faces=smpl.faces.cpu().numpy())
            write_imgs(batch, pred_keypoints_2d_smpl, pred_vertices_smpl,
                       camera, args, renderer)

    if args.eval_pck == 'True':
        gt_kp_path = os.path.join(cfg.BASE_DATA_DIR, args.dataset,
                                  args.crop_setting, 'keypoints.pkl')
        log_dir = os.path.join(cfg.BASE_DATA_DIR, 'cmr_pck_results.txt')
        with open(gt_kp_path, 'rb') as f:
            gt = pkl.load(f)

        calc = CalcPCK(
            all_kps,
            gt,
            num_imgs=cfg.DATASET_SIZES[args.dataset][args.crop_setting],
            log_dir=log_dir,
            dataset=args.dataset,
            crop_setting=args.crop_setting,
            pck_eval_threshold=args.pck_eval_threshold)
        calc.eval()