Code example #1
    def __init__(self,
                 regressor_checkpoint,
                 smpl_dir,
                 device=torch.device('cuda'),
                 use_smplx=False):

        self.device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')

        # Load parametric model (SMPLX or SMPL)
        if use_smplx:
            smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
            self.smpl = SMPLX(smpl_dir,
                              batch_size=1,
                              num_betas=10,
                              use_pca=False,
                              create_transl=False).to(self.device)
            self.use_smplx = True
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1,
                             create_transl=False).to(self.device)
            self.use_smplx = False

        #Load pre-trained neural network
        SMPL_MEAN_PARAMS = './extra_data/body_module/data_from_spin/smpl_mean_params.npz'
        self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
        checkpoint = torch.load(regressor_checkpoint)
        self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
        self.model_regressor.eval()
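
For reference, a minimal inference sketch that could follow this constructor, assuming the SPIN-style hmr forward pass (predicted rotation matrices, SMPL betas, weak-perspective camera); predict_smpl_params and the 224x224 crop size are illustrative assumptions, not part of the original code.

import torch

def predict_smpl_params(model_regressor, img_crop, device):
    """Run the HMR regressor on a normalized (1, 3, 224, 224) image crop.

    Assumes the SPIN-style forward pass, which returns predicted rotation
    matrices, SMPL shape coefficients (betas), and a weak-perspective camera.
    """
    with torch.no_grad():
        pred_rotmat, pred_betas, pred_camera = model_regressor(img_crop.to(device))
    return pred_rotmat, pred_betas, pred_camera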
Code example #2
    def __init__(self,
                 regressor_checkpoint,
                 smpl_dir,
                 device=torch.device('cuda'),
                 bUseSMPLX=False):

        self.device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')

        #Load parametric model (SMPLX or SMPL)
        if bUseSMPLX:
            self.smpl = SMPLX(smpl_dir, batch_size=1,
                              create_transl=False).to(self.device)
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1,
                             create_transl=False).to(self.device)

        #Load pre-trained neural network
        self.model_regressor = hmr(config.SMPL_MEAN_PARAMS).to(self.device)
        checkpoint = torch.load(regressor_checkpoint)
        self.model_regressor.load_state_dict(checkpoint['model'], strict=False)
        self.model_regressor.eval()

        self.normalize_img = Normalize(mean=constants.IMG_NORM_MEAN,
                                       std=constants.IMG_NORM_STD)
        self.de_normalize_img = Normalize(mean=[
            -constants.IMG_NORM_MEAN[0] / constants.IMG_NORM_STD[0],
            -constants.IMG_NORM_MEAN[1] / constants.IMG_NORM_STD[1],
            -constants.IMG_NORM_MEAN[2] / constants.IMG_NORM_STD[2]
        ],
                                          std=[
                                              1 / constants.IMG_NORM_STD[0],
                                              1 / constants.IMG_NORM_STD[1],
                                              1 / constants.IMG_NORM_STD[2]
                                          ])
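
The de_normalize_img transform above is the exact algebraic inverse of normalize_img: (x - m) / s followed by Normalize(-m/s, 1/s) recovers x. A small self-contained check, assuming the usual ImageNet statistics (which is what constants.IMG_NORM_MEAN / constants.IMG_NORM_STD are typically set to):

import torch
from torchvision.transforms import Normalize

# Assumed values; in the snippet they come from constants.IMG_NORM_MEAN / IMG_NORM_STD.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]

normalize = Normalize(mean=mean, std=std)
de_normalize = Normalize(mean=[-m / s for m, s in zip(mean, std)],
                         std=[1.0 / s for s in std])

img = torch.rand(3, 224, 224)
assert torch.allclose(de_normalize(normalize(img)), img, atol=1e-5)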
Code example #3
    def init_fn(self):
        self.train_ds = MixedDataset(self.options,
                                     ignore_3d=self.options.ignore_3d,
                                     is_train=True)

        self.model = hmr(config.SMPL_MEAN_PARAMS,
                         pretrained=True).to(self.device)

        if self.options.bExemplarMode:
            lr = 5e-5 * 0.2
        else:
            lr = self.options.lr
        self.optimizer = torch.optim.Adam(
            params=self.model.parameters(),
            #   lr=self.options.lr,
            lr=lr,
            weight_decay=0)

        if self.options.bUseSMPLX:  #SMPL-X model. No change is required for HMR training; SMPL-X simply ignores the hands and other extra parts.
            #SMPL uses 23 body joints while SMPL-X uses 21, so the last two SMPL joints are ignored automatically.
            self.smpl = SMPLX(config.SMPL_MODEL_DIR,
                              batch_size=self.options.batch_size,
                              create_transl=False).to(self.device)
        else:  #Original SMPL
            self.smpl = SMPL(config.SMPL_MODEL_DIR,
                             batch_size=self.options.batch_size,
                             create_transl=False).to(self.device)

        # Per-vertex loss on the shape
        self.criterion_shape = nn.L1Loss().to(self.device)
        # Keypoint (2D and 3D) loss
        # No reduction because confidence weighting needs to be applied
        self.criterion_keypoints = nn.MSELoss(reduction='none').to(self.device)
        # Loss for SMPL parameter regression
        self.criterion_regr = nn.MSELoss().to(self.device)
        self.models_dict = {'model': self.model}
        self.optimizers_dict = {'optimizer': self.optimizer}
        self.focal_length = constants.FOCAL_LENGTH

        # Initialize SMPLify fitting module
        self.smplify = SMPLify(step_size=1e-2,
                               batch_size=self.options.batch_size,
                               num_iters=self.options.num_smplify_iters,
                               focal_length=self.focal_length)
        if self.options.pretrained_checkpoint is not None:
            print(">>> Load pretrained model: {}".format(
                self.options.pretrained_checkpoint))
            self.load_pretrained(
                checkpoint_file=self.options.pretrained_checkpoint)
            self.backupModel()

        #This should be called here, after the model has been loaded
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self.model = torch.nn.DataParallel(self.model)  #Failed...

        # Load dictionary of fits
        self.fits_dict = FitsDict(self.options, self.train_ds)

        # Create renderer
        self.renderer = None  # Renderer(focal_length=self.focal_length, img_res=self.options.img_res, faces=self.smpl.faces)

        #debug
        from torchvision.transforms import Normalize
        self.de_normalize_img = Normalize(mean=[
            -constants.IMG_NORM_MEAN[0] / constants.IMG_NORM_STD[0],
            -constants.IMG_NORM_MEAN[1] / constants.IMG_NORM_STD[1],
            -constants.IMG_NORM_MEAN[2] / constants.IMG_NORM_STD[2]
        ],
                                          std=[
                                              1 / constants.IMG_NORM_STD[0],
                                              1 / constants.IMG_NORM_STD[1],
                                              1 / constants.IMG_NORM_STD[2]
                                          ])
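
The keypoint criterion above is created with reduction='none' so that each joint's squared error can be weighted by its annotation confidence before averaging. A minimal sketch of that weighting, assuming the SPIN-style ground-truth layout where the last channel of each keypoint is its confidence:

import torch
import torch.nn as nn

criterion_keypoints = nn.MSELoss(reduction='none')

def keypoint_loss(pred_keypoints, gt_keypoints):
    """Confidence-weighted keypoint loss.

    Assumes pred_keypoints is (B, K, 2) and gt_keypoints is (B, K, 3),
    with the last channel of gt_keypoints holding the per-joint confidence.
    """
    conf = gt_keypoints[:, :, -1].unsqueeze(-1)
    loss = conf * criterion_keypoints(pred_keypoints, gt_keypoints[:, :, :-1])
    return loss.mean()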
Code example #4
File: evalfrompkl_iter.py  Project: zhly0/eft
            'spinmodel_shared/11-13-78679-bab_spin_mlc3d_fter60_ag-9589/checkpoints/2019_11_14-02_14_28-best-55.79321086406708.pt'
        ]  #Ours 3D + augmentation
        params = [
            '--checkpoint',
            'logs/11-13-78679-bab_spin_mlc3d_fter60-7183/checkpoints/2019_11_14-08_12_35-best-56.12510070204735.pt'
        ]  #Ours 3D  (no Aug!)
        params = ['--checkpoint', 'data/model_checkpoint.pt']  #Original

        params += ['--dataset', '3dpw']
        # params +=['--num_workers',0]

        args = parser.parse_args(params)
        args.batch_size = 64
        args.num_workers = 4

    model = hmr(config.SMPL_MEAN_PARAMS)
    checkpoint = torch.load(args.checkpoint)
    model.load_state_dict(checkpoint['model'], strict=False)
    model.cuda()
    model.eval()

    # # Setup evaluation dataset
    # # dataset = BaseDataset(None, '3dpw', is_train=False, bMiniTest=False)
    # dataset = BaseDataset(None, '3dpw', is_train=False, bMiniTest=False, bEnforceUpperOnly=False)
    # # dataset = BaseDataset(None, '3dpw-crop', is_train=False, bMiniTest=False, bEnforceUpperOnly=True)
    # # # Run evaluation
    # run_evaluation(model, '3dpw',dataset , args.result_file,
    #                batch_size=args.batch_size,
    #                shuffle=args.shuffle,
    #                log_freq=args.log_freq, num_workers=args.num_workers)
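
One detail worth noting about the checkpoint loading above: torch.load restores tensors onto the device they were saved from, so a GPU-saved checkpoint fails on a CPU-only machine unless the load is redirected. A minor, hedged variation of the same two lines:

import torch

# Redirect GPU-saved tensors to the CPU when no CUDA device is available.
checkpoint = torch.load(args.checkpoint, map_location='cpu')
model.load_state_dict(checkpoint['model'], strict=False)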
Code example #5
    def __init__(self, body_regressor_checkpoint, hand_regressor_checkpoint, smpl_dir, device=torch.device('cuda'), use_smplx=True):
        super().__init__('BodyMocap')
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        print("Loading Body Pose Estimator")
        self.__load_body_estimator()
        self.visualizer = Visualizer('opengl')
        self.frame_id = 0	#count frames

        parser = argparse.ArgumentParser()
        parser.add_argument("--rot90", default=False, type=bool, help="rotate the input clockwise by 90 degrees")
        #parser.add_argument("--camera_topic", default="/logi_c922_2/image_rect_color", help="choose a topic as input image")
        parser.add_argument("--body_only", default=False, type=bool, help="detect only the body and save its result")
        parser.add_argument("--result_path", default="/home/student/result/", help="directory to save results")
        parser.add_argument("--save_result", default=False, help="save the result or not")
        args = parser.parse_args()
        self.rot90 = args.rot90
        #self.camera_topic = args.camera_topic
        self.body_only = args.body_only
        self.result_path = args.result_path
        self.save_result = args.save_result
        self.load = [0,0]
        self.angle_leg = 0
        self.angle_trunk = 0
        self.start = 0
        self.angles =  np.empty((1,20),dtype = float)
        self.body_side =  np.empty((25,3),dtype = float)
        # Load parametric model (SMPLX or SMPL)
        if use_smplx:
            smplModelPath = smpl_dir + '/SMPLX_NEUTRAL.pkl'
            self.smpl = SMPLX(smpl_dir,
                    batch_size=1,
                    num_betas = 10,
                    use_pca = False,
                    create_transl=False).to(self.device)
            self.use_smplx = True
        else:
            smplModelPath = smpl_dir + '/basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
            self.smpl = SMPL(smplModelPath, batch_size=1, create_transl=False).to(self.device)
            self.use_smplx = False
            
        #Load pre-trained neural network 
        SMPL_MEAN_PARAMS = '/home/student/frankmocap/extra_data/body_module/data_from_spin/smpl_mean_params.npz'
        self.model_regressor = hmr(SMPL_MEAN_PARAMS).to(self.device)
        body_checkpoint = torch.load(body_regressor_checkpoint)
        self.model_regressor.load_state_dict(body_checkpoint['model'], strict=False)
        self.model_regressor.eval()

        #hand module init
        
        transform_list = [ transforms.ToTensor(),
                          transforms.Normalize((0.5, 0.5, 0.5),
                                               (0.5, 0.5, 0.5))]
        self.normalize_transform = transforms.Compose(transform_list)

        #Load Hand network 
        self.opt = TestOptions().parse([])

        #Default options
        self.opt.single_branch = True
        self.opt.main_encoder = "resnet50"
        # self.opt.data_root = "/home/hjoo/dropbox/hand_yu/data/"
        self.opt.model_root = "/home/student/frankmocap/extra_data"
        self.opt.smplx_model_file = os.path.join(smpl_dir,'SMPLX_NEUTRAL.pkl')
      
        self.opt.batchSize = 1
        self.opt.phase = "test"
        self.opt.nThreads = 0
        self.opt.which_epoch = -1
        self.opt.checkpoint_path = hand_regressor_checkpoint

        self.opt.serial_batches = True  # no shuffle
        self.opt.no_flip = True  # no flip
        self.opt.process_rank = -1

        # self.opt.which_epoch = str(epoch)
        self.hand_model_regressor = H3DWModel(self.opt)
        # if there is no specified checkpoint, then skip
        assert self.hand_model_regressor.success_load, "Specified checkpoint does not exist: {}".format(self.opt.checkpoint_path)
        self.hand_model_regressor.eval()

        self.hand_bbox_detector = HandBboxDetector('third_view', self.device)

        #subscriber and publisher initialization
        #input subscriber
        self.br = CvBridge()
        self.subscription_img = self.create_subscription(Image, '/side_img', self.callback_side,10)
        self.subscription_img = self.create_subscription(Image, '/front_img', self.callback_front,10)
		
        #output publisher
        self.publisher_pose = self.create_publisher(Image,'/pose',10)	#images with keypoints annotation
        #self.publisher_keypoints = self.create_publisher(Float32MultiArray,'/keypoints',10)	#keypoints coordinates
        self.publisher_risk = self.create_publisher(Int64,'/risk',10)	#risk level
        self.publisher_angles = self.create_publisher(Float32MultiArray,'/angles',10)
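
This constructor belongs to an rclpy node (note super().__init__('BodyMocap') and the create_subscription/create_publisher calls). A minimal entry point for running it might look like the sketch below; BodyMocapNode and all paths are placeholders, not names from the original code.

import rclpy

def main(args=None):
    rclpy.init(args=args)
    # Placeholder class name and paths; substitute the real node class,
    # checkpoints, and SMPL directory.
    node = BodyMocapNode(body_regressor_checkpoint='body_ckpt.pt',
                         hand_regressor_checkpoint='hand_ckpt.pth',
                         smpl_dir='./extra_data/smpl')
    try:
        rclpy.spin(node)
    finally:
        node.destroy_node()
        rclpy.shutdown()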
Code example #6
File: eval.py  Project: zhly0/eft
def eval_main(params):
    args = parser.parse_args(params)

    model = hmr(config.SMPL_MEAN_PARAMS)

    if os.path.isdir(args.checkpoint):
        fileCands = os.listdir(args.checkpoint)
        fileCandsBest = [n for n in fileCands if "-best-" in n]

        if len(fileCandsBest) > 0:
            bestCand = sorted(fileCandsBest)[-1]
        else:
            bestCand = sorted(fileCands)[-1]
        args.checkpoint = os.path.join(args.checkpoint, bestCand)
    assert os.path.isfile(args.checkpoint)

    checkpoint = torch.load(args.checkpoint)

    model.load_state_dict(checkpoint['model'], strict=False)
    model.cuda()
    model.eval()

    # Load the eval log if evaluation has already been done
    evalFolder = os.path.dirname(args.checkpoint) + "/../evallog"
    evalLogFileName = os.path.join(
        evalFolder,
        os.path.basename(args.checkpoint)[:-3] + '.json')
    if os.path.exists(evalLogFileName):  #Bug recompute all
        with open(evalLogFileName, 'r') as f:
            evalLogAll = json.load(f)
    else:
        evalLogAll = {}

    evalLogAll['checkpoint'] = args.checkpoint
    # Setup evaluation dataset
    if args.dataset == 'all':  #Process all quantitative evaluations
        # datasetList =['h36m-p1', '3dpw', 'mpi-inf-3dhp']
        # datasetList =['h36m-p2', 'h36m-p1', '3dpw', 'mpi-inf-3dhp']
        datasetList = [
            '3dpw-crop', 'h36m-p2', 'h36m-p1', '3dpw', 'mpi-inf-3dhp'
        ]
    else:
        datasetList = [args.dataset]
    datasetList = [n for n in datasetList
                   if n not in evalLogAll]  #Skip datasets that were already evaluated

    for dbname in datasetList:
        dataset = BaseDataset(None,
                              dbname,
                              is_train=False,
                              bMiniTest=False,
                              bEnforceUpperOnly=False)
        # Run evaluation
        evalLogAll[dbname] = run_evaluation(model,
                                            dbname,
                                            dataset,
                                            args.result_file,
                                            batch_size=args.batch_size,
                                            shuffle=args.shuffle,
                                            log_freq=args.log_freq,
                                            num_workers=args.num_workers)

    #Export log to json
    evalFolder = os.path.dirname(args.checkpoint) + "/../evallog"
    if not os.path.exists(evalFolder):
        os.mkdir(evalFolder)

    # evalLogFileName = os.path.join(evalFolder, os.path.basename(args.checkpoint)[:-3] + '.json' )
    with open(evalLogFileName, 'w') as f:
        json.dump(evalLogAll, f, indent=4)

    #Copy to the evallogs dir (Han, devfair only)
    if os.path.exists("/private/home/hjoo/codes/bodymocap/benchmarks_eval"):
        targetFile = args.checkpoint[:-3].replace('/', '__')
        targetFile = "h36p2_" + targetFile
        strcmd = f"cp -v {evalLogFileName} /private/home/hjoo/codes/bodymocap/benchmarks_eval/{targetFile}"
        os.system(strcmd)
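
eval_main takes an argv-style list, mirroring the params usage in code example #4. A hypothetical invocation, assuming the project's parser supplies defaults for the remaining options (the checkpoint path is a placeholder):

if __name__ == '__main__':
    # Evaluate a single checkpoint on 3DPW; the path is a placeholder.
    eval_main(['--checkpoint', 'data/model_checkpoint.pt', '--dataset', '3dpw'])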