Code Example #1
from Tkinter import *  # provides Tk, Frame, LEFT, RIGHT, BOTTOM used below
from PIL import Image, ImageTk
import sys, os
import math
import pickle
import numpy as np
from smpl.hello_smpl import get_mesh
from smpl.smpl_webuser.serialization import load_model
from hmr.democheck2 import rerenders

# Run CUDA kernels synchronously for easier debugging ("1" enables blocking)
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

root = Tk()

m = load_model('smpl/models/basicmodel_m_lbs_10_207_0_v1.0.0.pkl')
root.title('Mesh GUI')
root.geometry('1024x1024')
bottomframe = Frame(root)
leftframe = Frame(root)
leftframe.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack(side=RIGHT)
bottomframe.pack(side=BOTTOM)

Posex = dict()  #dict for shape and pose parameters
Posey = dict()
Posez = dict()
Beta = dict()

im_path = ""
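
# A minimal sketch of how these parameter dicts typically drive the loaded
# model (assumption: the chumpy-based SMPL API used in the later examples,
# where m.pose and m.betas are writable arrays; get_mesh is the helper
# imported above and its exact signature is not shown here):
#   m.pose[3] = Posex.get(1, 0.0)   # e.g. left hip rotation about x
#   m.betas[0] = Beta.get(0, 0.0)   # first shape coefficient
#   mesh = get_mesh(m)              # hypothetical call to rebuild the mesh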
Code Example #2
    def __init__(self, filepath_prefix, participant_directory):

        ## Load participant info
        participant_info = load_pickle(participant_directory + "/participant_info.p")
        print "participant directory: ", participant_directory
        for entry in participant_info:
            print entry, participant_info[entry]

        self.gender = participant_info['gender']
        self.height_in = participant_info['height_in']
        self.weight_lbs = participant_info['weight_lbs']
        self.calibration_optim_values = participant_info['cal_func']
        self.tf_corners = participant_info['corners']



        ## Load SMPL model
        self.filepath_prefix = filepath_prefix

        self.index_queue = []
        if self.gender == "m":
            model_path = filepath_prefix+'/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = filepath_prefix+'/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.reset_pose = False
        self.m = load_model(model_path)

        self.marker0, self.marker1, self.marker2, self.marker3 = None, None, None, None
        self.pressure = None
        self.markers = [self.marker0, self.marker1, self.marker2, self.marker3]


        self.point_cloud_array = np.array([[0., 0., 0.]])
        self.pc_isnew = False


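        # CTRL_PNL is a flat "control panel" dict of flags and hyperparameters
        # that gets threaded through the rest of the pipeline.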
        self.CTRL_PNL = {}
        self.CTRL_PNL['batch_size'] = 1
        self.CTRL_PNL['loss_vector_type'] = 'anglesDC'
        self.CTRL_PNL['verbose'] = False
        self.CTRL_PNL['num_epochs'] = 101
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['GPU'] = GPU
        self.CTRL_PNL['dtype'] = dtype
        self.CTRL_PNL['repeat_real_data_ct'] = 1
        self.CTRL_PNL['regr_angles'] = 1
        self.CTRL_PNL['dropout'] = DROPOUT
        self.CTRL_PNL['depth_map_labels'] = False
        self.CTRL_PNL['depth_map_output'] = True
        self.CTRL_PNL['depth_map_input_est'] = False  # True if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est']  # holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True  # False
        self.CTRL_PNL['normalize_input'] = True  # False
        self.CTRL_PNL['all_tanh_activ'] = True  # False
        self.CTRL_PNL['L2_contact'] = True  # False
        self.CTRL_PNL['pmat_mult'] = 5
        self.CTRL_PNL['cal_noise'] = False
        self.CTRL_PNL['output_only_prev_est'] = False
        self.CTRL_PNL['double_network_size'] = False



        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['incl_pmat_cntct_input'] = False #if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True: #for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1

        pmat_std_from_mult = ['N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369, 0.0, 25.50538629767412]
        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = ['N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501, 0.0, 34.86393494050921]
        else:
            sobel_std_from_mult = ['N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700, 0.0, 97.90075708182506]

        self.CTRL_PNL['norm_std_coeffs'] =  [1./41.80684362163343,  #contact
                                             1./16.69545796387731,  #pos est depth
                                             1./45.08513083167194,  #neg est depth
                                             1./43.55800622930469,  #cm est
                                             1./pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])], #pmat x5
                                             1./sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])], #pmat sobel
                                             1./1.0,                #bed height mat
                                             1./1.0,                #OUTPUT DO NOTHING
                                             1./1.0,                #OUTPUT DO NOTHING
                                             1. / 30.216647403350,  #weight
                                             1. / 14.629298141231]  #height


        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'

        if self.CTRL_PNL['depth_map_output'] == True:  # we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"

        self.TPL = TensorPrepLib()


        self.count = 0


        self.CTRL_PNL['aws'] = False
        self.CTRL_PNL['lock_root'] = False



        self.bridge = CvBridge()
        self.color, self.depth_r, self.pressure = 0, 0, 0

        self.kinect_im_size = (960, 540)
        self.pressure_im_size = (64, 27)
        self.pressure_im_size_required = (64, 27)

        # initialization of kinect and thermal cam calibrations from YAML files
        dist_model = 'rational_polynomial'
        self.kcam = Camera('kinect', self.kinect_im_size, dist_model)
        self.kcam.init_from_yaml(osp.expanduser('~/catkin_ws/src/multimodal_pose/calibrations/kinect.yaml'))

        # we are at qhd not hd so need to cut the focal lengths and centers in half
        self.kcam.K[0:2, 0:3] = self.kcam.K[0:2, 0:3] / 2

        print self.kcam.K

        self.new_K_kin, roi = cv2.getOptimalNewCameraMatrix(self.kcam.K, self.kcam.D, self.kinect_im_size, 1,
                                                            self.kinect_im_size)

        print self.new_K_kin

        self.drawing = False  # true if mouse is pressed
        self.mode = True  # if True, draw rectangle. Press 'm' to toggle to curve
        self.ix, self.iy = -1, -1
        self.label_index = 0
        self.coords_from_top_left = [0, 0]
        self.overall_image_scale_amount = 0.85
        self.depthcam_midpixel = [0, 0]
        self.select_new_calib_corners = {}
        self.select_new_calib_corners["lay"] = True
        self.select_new_calib_corners["sit"] = True
        self.calib_corners = {}
        self.calib_corners["lay"] = 8 * [[0, 0]]
        self.calib_corners["sit"] = 8 * [[0, 0]]

        self.final_dataset = {}

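        # Precompute a 28 x 65 grid of filler taxel coordinates (shifted by -1
        # on each axis); 20000 appears to serve as a far-depth sentinel value.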
        self.filler_taxels = []
        for i in range(28):
            for j in range(65):
                self.filler_taxels.append([i - 1, j - 1, 20000])
        self.filler_taxels = np.array(self.filler_taxels).astype(int)
Code Example #3
from Tkinter import *  # provides Tk, Frame, LEFT, RIGHT, BOTTOM used below
import sys, os
import math
import pickle
import numpy as np
from smpl.hello_smpl import get_mesh
from smpl.smpl_webuser.serialization import load_model
from hmr.democheck2 import rerenders

# Run CUDA kernels synchronously for easier debugging ("1" enables blocking)
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"

root = Tk()
#TODO - ADD radio button to switch between Male and Female models
m = load_model('smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl')  #IMPORTANT - Change model according to the person in frame
root.title('Mesh GUI')
root.geometry('1024x1024')
bottomframe = Frame(root)
leftframe = Frame(root)
leftframe.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack(side=RIGHT)
bottomframe.pack(side=BOTTOM)

Posex = dict()  #dict for shape and pose parameters
Posey = dict()
Posez = dict()
Beta = dict()

im_path = ""
Code Example #4
    def __init__(self, filepath_prefix='/home/henry'):
        ## Load SMPL model
        self.filepath_prefix = filepath_prefix

        self.index_queue = []
        if GENDER == "m":
            model_path = filepath_prefix + '/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = filepath_prefix + '/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.reset_pose = False
        self.m = load_model(model_path)

        self.marker0, self.marker1, self.marker2, self.marker3 = None, None, None, None
        self.pressure = None
        self.markers = [self.marker0, self.marker1, self.marker2, self.marker3]
        rospy.Subscriber("/multi_pose/ar_pose_marker", AlvarMarkers,
                         self.callback_bed_tags)
        rospy.Subscriber("/multi_pose/kinect2/qhd/points", PointCloud2,
                         self.callback_points)
        rospy.Subscriber("/multi_pose/fsascan", FloatArrayBare,
                         self.callback_pressure)

        rospy.Subscriber("/abdout0", FloatArrayBare, self.bed_config_callback)
        #rospy.Subscriber("/abdout0", FloatArrayBare, self.callback_bed_state)
        print "init subscriber"

        rospy.init_node('vol_3d_listener', anonymous=False)
        self.point_cloud_array = np.array([[0., 0., 0.]])
        self.pc_isnew = False

        self.CTRL_PNL = {}
        self.CTRL_PNL['batch_size'] = 1
        self.CTRL_PNL['loss_vector_type'] = 'anglesDC'  #'anglesEU'
        self.CTRL_PNL['verbose'] = False
        self.CTRL_PNL['num_epochs'] = 101
        self.CTRL_PNL['incl_inter'] = True
        self.CTRL_PNL['shuffle'] = False
        self.CTRL_PNL['incl_ht_wt_channels'] = True
        self.CTRL_PNL['incl_pmat_cntct_input'] = True
        self.CTRL_PNL['num_input_channels'] = 3
        self.CTRL_PNL['lock_root'] = False
        self.CTRL_PNL['GPU'] = True
        self.CTRL_PNL['dtype'] = torch.cuda.FloatTensor
        self.CTRL_PNL['repeat_real_data_ct'] = 1
        self.CTRL_PNL['regr_angles'] = 1
        self.CTRL_PNL['dropout'] = False
        self.CTRL_PNL['depth_map_labels'] = False
        self.CTRL_PNL['depth_map_output'] = True
        self.CTRL_PNL['depth_map_input_est'] = False  # True if we're working in a two-part regression
        self.CTRL_PNL['adjust_ang_from_est'] = self.CTRL_PNL['depth_map_input_est']  # holds betas and root same as prior estimate
        self.CTRL_PNL['clip_sobel'] = True
        self.CTRL_PNL['clip_betas'] = True
        self.CTRL_PNL['mesh_bottom_dist'] = True
        self.CTRL_PNL['full_body_rot'] = True  #False
        self.CTRL_PNL['normalize_input'] = True  #False
        self.CTRL_PNL['all_tanh_activ'] = True  #False
        self.CTRL_PNL['L2_contact'] = True  #False
        self.CTRL_PNL['pmat_mult'] = 5
        self.CTRL_PNL['cal_noise'] = False

        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['incl_pmat_cntct_input'] = False  # if there's calibration noise we need to recompute this every batch
            self.CTRL_PNL['clip_sobel'] = False

        if self.CTRL_PNL['incl_pmat_cntct_input'] == True:
            self.CTRL_PNL['num_input_channels'] += 1
        if self.CTRL_PNL['depth_map_input_est'] == True:  # for a two part regression
            self.CTRL_PNL['num_input_channels'] += 3
        self.CTRL_PNL['num_input_channels_batch0'] = np.copy(self.CTRL_PNL['num_input_channels'])
        if self.CTRL_PNL['incl_ht_wt_channels'] == True:
            self.CTRL_PNL['num_input_channels'] += 2
        if self.CTRL_PNL['cal_noise'] == True:
            self.CTRL_PNL['num_input_channels'] += 1

        pmat_std_from_mult = [
            'N/A', 11.70153502792190, 19.90905848383454, 23.07018866032369,
            0.0, 25.50538629767412
        ]
        if self.CTRL_PNL['cal_noise'] == False:
            sobel_std_from_mult = [
                'N/A', 29.80360490415032, 33.33532963163579, 34.14427844692501,
                0.0, 34.86393494050921
            ]
        else:
            sobel_std_from_mult = [
                'N/A', 45.61635847182483, 77.74920396659292, 88.89398421073700,
                0.0, 97.90075708182506
            ]

        self.CTRL_PNL['norm_std_coeffs'] = [
            1. / 41.80684362163343,  #contact
            1. / 16.69545796387731,  #pos est depth
            1. / 45.08513083167194,  #neg est depth
            1. / 43.55800622930469,  #cm est
            1. / pmat_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  #pmat x5
            1. / sobel_std_from_mult[int(self.CTRL_PNL['pmat_mult'])],  #pmat sobel
            1. / 1.0,  #bed height mat
            1. / 1.0,  #OUTPUT DO NOTHING
            1. / 1.0,  #OUTPUT DO NOTHING
            1. / 30.216647403350,  #weight
            1. / 14.629298141231]  #height

        self.CTRL_PNL['filepath_prefix'] = '/home/henry/'

        if self.CTRL_PNL['depth_map_output'] == True:  # we need all the vertices if we're going to regress the depth maps
            self.verts_list = "all"

        self.TPL = TensorPrepLib()
Code Example #5
    def __init__(self, sampling="NORMAL", sigma=0, one_side_range=0):
        ## Load SMPL model (here we load the male model)
        model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        self.m = load_model(model_path)

        ## Initialize pose and shape parameters to zero (the betas are sampled below)
        self.m.pose[:] = np.zeros(self.m.pose.size)
        self.m.betas[:] = np.zeros(self.m.betas.size)
        #self.m.betas[5] = 20.

        self.m.pose[0] = 0  #pitch rotation of the person in space. 0 means the person is upside down facing back. pi is standing up facing forward
        self.m.pose[1] = 0  #roll of the person in space. -pi/2 means they are tilted to their right side
        self.m.pose[2] = 0  #-np.pi/4 #yaw of the person in space, like turning around normal to the ground

        self.m.pose[3] = 0  #-np.pi/4 #left hip extension (i.e. leg bends back for np.pi/2)
        self.m.pose[4] = 0  #np.pi/8 #left leg yaw about hip, where np.pi/2 makes bowed leg
        self.m.pose[5] = 0  #-np.pi/8 #left leg abduction (POS) /adduction (NEG)

        self.m.pose[6] = 0  #-np.pi/4 #right hip extension (i.e. leg bends back for np.pi/2)
        self.m.pose[7] = 0  #-np.pi/2
        self.m.pose[8] = 0  #-np.pi/4 #right leg abduction (NEG) /adduction (POS)

        self.m.pose[9] = 0  #bending of spine at hips. np.pi/2 means person bends down to touch the ground
        self.m.pose[10] = 0  #twisting of spine at hips. body above spine yaws normal to the ground
        self.m.pose[11] = 0  #bending of spine at hips. np.pi/2 means person bends down sideways to touch the ground 3

        self.m.pose[12] = 0  #np.pi/3 #left knee extension. (i.e. knee bends back for np.pi/2)
        self.m.pose[13] = 0  #twisting of knee normal to ground. KEEP AT ZERO
        self.m.pose[14] = 0  #bending of knee sideways. KEEP AT ZERO

        self.m.pose[15] = 0  #np.pi/4 #right knee extension (i.e. knee bends back for np.pi/2)

        self.m.pose[18] = 0  #bending at mid spine. makes person into a hunchback for positive values
        self.m.pose[19] = 0  #twisting of midspine. body above midspine yaws normal to the ground
        self.m.pose[20] = 0  #bending of midspine, np.pi/2 means person bends down sideways to touch ground 6

        self.m.pose[21] = 0  #left ankle flexion/extension
        self.m.pose[22] = 0  #left ankle yaw about leg
        self.m.pose[23] = 0  #left ankle twist KEEP CLOSE TO ZERO

        self.m.pose[24] = 0  #right ankle flexion/extension
        self.m.pose[25] = 0  #right ankle yaw about leg
        self.m.pose[26] = 0  #np.pi/4 #right ankle twist KEEP CLOSE TO ZERO

        self.m.pose[27] = 0  #bending at upperspine. makes person into a hunchback for positive values
        self.m.pose[28] = 0  #twisting of upperspine. body above upperspine yaws normal to the ground
        self.m.pose[29] = 0  #bending of upperspine, np.pi/2 means person bends down sideways to touch ground 9

        self.m.pose[30] = 0  #flexion/extension of left ankle midpoint

        self.m.pose[33] = 0  #flexion/extension of right ankle midpoint

        self.m.pose[36] = 0  #np.pi/2 #flexion/extension of neck. i.e. whiplash 12
        self.m.pose[37] = 0  #-np.pi/2 #yaw of neck
        self.m.pose[38] = 0  #np.pi/4  #tilt head side to side

        self.m.pose[39] = 0  #left inner shoulder roll
        self.m.pose[40] = 0  #left inner shoulder yaw, negative moves forward
        self.m.pose[41] = 0  #left inner shoulder pitch, positive moves up

        self.m.pose[42] = 0  #np.pi/4
        self.m.pose[43] = 0  #right inner shoulder yaw, positive moves forward
        self.m.pose[44] = 0  #right inner shoulder pitch, positive moves down

        self.m.pose[45] = 0  #flexion/extension of head 15

        self.m.pose[48] = 0  #left outer shoulder roll
        self.m.pose[49] = 0  #-np.pi/4
        self.m.pose[50] = 0  #np.pi/4 #left outer shoulder pitch

        self.m.pose[51] = 0  #-np.pi/3 #right outer shoulder roll
        self.m.pose[52] = 0  #np.pi/4
        self.m.pose[53] = 0  #-np.pi/4

        self.m.pose[54] = 0  #left elbow roll KEEP AT ZERO
        self.m.pose[55] = 0  #np.pi/3 #left elbow flexion/extension. KEEP NEGATIVE
        self.m.pose[56] = 0  #left elbow KEEP AT ZERO

        self.m.pose[57] = 0
        self.m.pose[58] = 0  #np.pi/4 #right elbow flexion/extension KEEP POSITIVE

        self.m.pose[60] = 0  #left wrist roll

        self.m.pose[63] = 0  #right wrist roll
        #self.m.pose[65] = np.pi/5

        self.m.pose[66] = 0  #left hand roll

        self.m.pose[69] = 0  #right hand roll
        #self.m.pose[71] = np.pi/5 #right fist

        mu = 0

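        # Sample each of the 10 shape coefficients (betas) either from a
        # normal distribution N(mu, sigma) or uniformly from
        # [-one_side_range, one_side_range].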
        for i in range(10):
            if sampling == "NORMAL":
                self.m.betas[i] = random.normalvariate(mu, sigma)
            elif sampling == "UNIFORM":
                self.m.betas[i] = np.random.uniform(-one_side_range,
                                                    one_side_range)
Code Example #6
from smal.my_mesh.mesh import myMesh as mesh_loader
from smpl.smpl_webuser.serialization import load_model  # assumed loader, as in the SMPL examples above
import os
import pickle as pkl
import numpy as np
from tqdm import tqdm
import sys

"""
# Load the family clusters data (see paper for details)
# and save the mean per-family shape
# 0-felidae(cats); 1-canidae(dogs); 2-equidae(horses);
# 3-bovidae(cows); 4-hippopotamidae(hippos);
# The clusters are over the shape coefficients (betas);
# setting different betas changes the shape of the model
"""

SMAL_MODEL = load_model(os.path.join('smal', 'smal_CVPR2017.pkl'))
SMAL_DATA = pkl.load(open(os.path.join('smal', 'smal_CVPR2017_data.pkl'), "rb"))
OUTPUT_DIR = os.path.join('.', 'outputs')
NULL_POSE_DIR = os.path.join(OUTPUT_DIR, 'null_pose')
if not os.path.exists(NULL_POSE_DIR):
    os.makedirs(NULL_POSE_DIR)

NUM_TRAIN_PER_SUBJECT = 5000
NUM_VALD_PER_SUBJECT = 1000
NUM_TEST_PER_SUBJECT = 1000

for i, (betas, sub_name) in enumerate(zip(SMAL_DATA['cluster_means'], ['cats', 'dogs', 'horses', 'cows', 'hippos'])):

    SMAL_MODEL.betas[:] = betas
    SMAL_MODEL.pose[:] = 0.
    SMAL_MODEL.trans[:] = 0.
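
    # A hedged sketch of the save step the docstring describes (assumption:
    # myMesh takes v/f arrays and exposes save_ply, as in the SMAL demo code;
    # the filename is illustrative):
    mean_mesh = mesh_loader(v=SMAL_MODEL.r, f=SMAL_MODEL.f)
    mean_mesh.save_ply(os.path.join(NULL_POSE_DIR, sub_name + '_mean_shape.ply'))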
Code Example #7
    def __init__(self,
                 render,
                 STARTING_HEIGHT,
                 input_flex_radius,
                 input_flex_length,
                 input_flex_mass,
                 shiftSIDE=0.0,
                 shiftUD=0.0):

        model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        m = load_model(model_path)

        regs = np.load(
            '/home/henry/git/smplify_public/code/models/regressors_locked_normalized_male.npz'
        )
        length_regs = regs['betas2lens']
        rad_regs = regs['betas2rads']
        betas = m.betas

        capsules_median = get_capsules(m, betas * 0, length_regs, rad_regs)
        capsules = get_capsules(m, betas, length_regs, rad_regs)

        joint_names = joint2name
        initial_rots = rots0
        self.num_steps = 10000
        self.render_dart = render
        self.ct = 0
        self.num_dart_steps = 4

        self.has_reset_velocity1 = False
        self.has_reset_velocity2 = False

        joint_ref = list(m.kintree_table[1])  #joints
        parent_ref = list(m.kintree_table[0])  #parent of each joint
        parent_ref[0] = -1

        self.capsules = capsules

        pydart.init(verbose=True)
        print('pydart initialization OK')

        self.world = pydart.World(0.0103 / 4, "EMPTY")  #0.002 is what works well. 0.0103 is when the velocity aligns, so flex runs 0.0103/0.0020 = 5.15x faster than dart
        self.world.set_gravity([0, 0, GRAVITY])  #([0, 0, -9.81])
        self.world.set_collision_detector(detector_type=2)
        self.world.add_empty_skeleton(_skel_name="human")

        self.force_dir_list_prev = [[] for _ in range(20)]
        self.pmat_idx_list_prev = [[] for _ in range(20)]
        self.force_loc_list_prev = [[] for _ in range(20)]

        joint_root_loc = np.asarray(np.transpose(capsules[0].t)[0])

        self.step_num = 0

        joint_locs = []
        capsule_locs = []
        joint_locs_abs = []
        joint_locs_trans_abs = []
        capsule_locs_abs = []

        mJ = np.asarray(m.J)
        mJ_transformed = np.asarray(m.J_transformed)

        shift = [shiftSIDE, shiftUD, 0.0]

        red_joint_ref = joint_ref[0:20]  #joints
        red_parent_ref = parent_ref[0:20]  #parent of each joint
        red_parent_ref[10] = 9  #fix neck
        red_parent_ref[11] = 9  #fix l inner shoulder
        red_parent_ref[13] = 10  #fix head
        red_parent_ref[14] = 11  #fix l outer shoulder
        red_parent_ref[15] = 12  #fix r outer shoulder
        red_parent_ref[16] = 14  #fix l elbow
        red_parent_ref[17] = 15  #fix r elbow

        head_ref = [10, 13]
        leg_cap_ref = [1, 2, 4, 5]
        foot_ref = [7, 8]
        l_arm_ref = [11, 14, 16, 18]
        r_arm_ref = [12, 15, 17, 19]

        self.red_joint_ref = [joint_ref[0]]
        self.red_parent_ref = red_parent_ref

        #make lists of the locations of the joint locations and the smplify capsule initial ends
        for i in range(np.shape(mJ)[0]):
            if i == 0:
                joint_locs.append(list(mJ[0, :] - mJ[0, :] + shift))
                joint_locs_abs.append(list(mJ[0, :] - mJ[0, :]))
                joint_locs_trans_abs.append(
                    list(mJ_transformed[0, :] - mJ_transformed[0, :]))
                if i < 20:
                    capsule_locs.append(
                        list(
                            np.asarray(np.transpose(capsules[i].t)[0]) -
                            joint_root_loc))
                    capsule_locs_abs.append(
                        list(
                            np.asarray(np.transpose(capsules[i].t)[0]) -
                            joint_root_loc))
                    print(capsule_locs_abs, "caps locs abs")
            else:
                joint_locs.append(list(mJ[i, :] - mJ[parent_ref[i], :]))
                joint_locs_abs.append(list(mJ[i, :] - mJ[0, :]))
                joint_locs_trans_abs.append(
                    list(mJ_transformed[i, :] - mJ_transformed[0, :]))
                if i < 20:
                    capsule_locs.append(
                        list(
                            np.asarray(np.transpose(capsules[i].t)[0]) -
                            np.asarray(
                                np.transpose(capsules[red_parent_ref[i]].t)[0])
                        ))
                    capsule_locs_abs.append(
                        list(
                            np.asarray(np.transpose(capsules[i].t)[0]) -
                            joint_root_loc))
                    capsule_locs_abs[i][0] += np.abs(
                        float(capsules[0].length[0])) / 2
                    if i in [
                            1, 2
                    ]:  #shift over the legs relative to where the pelvis mid capsule is
                        capsule_locs[i][0] += np.abs(
                            float(capsules[0].length[0])) / 2
                    if i in [
                            3, 6, 9
                    ]:  #shift over the torso segments relative to their length and their parents length to match the mid capsule
                        capsule_locs[i][0] -= (
                            np.abs(float(capsules[i].length[0])) -
                            np.abs(float(
                                capsules[red_parent_ref[i]].length[0]))) / 2
                    if i in [
                            10, 11, 12
                    ]:  #shift over the inner shoulders and neck to match the middle of the top spine capsule
                        capsule_locs[i][0] += np.abs(
                            float(capsules[red_parent_ref[i]].length[0])) / 2
                    if i in [
                            3, 6, 9
                    ]:  #shift over everything in the abs list to match the root
                        capsule_locs_abs[i][0] -= np.abs(
                            float(capsules[i].length[0])) / 2

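        # Each `del` shifts the list left, so deleting index 10 twice removes
        # the entries at original indices 10 and 11.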
        del joint_locs[10]
        del joint_locs[10]
        del joint_locs_abs[10]
        del joint_locs_abs[10]

        self.joint_locs = joint_locs

        count = 0
        root_joint_type = "FREE"

        self.cap_offsets = []
        self.cap_init_rots = []
        lowest_points = []

        for capsule in capsules:
            print "************* Capsule No.", count, joint_names[
                count], " joint ref: ", red_joint_ref[
                    count], " parent_ref: ", red_parent_ref[
                        count], " ****************"

            cap_rad = input_flex_radius
            cap_len = input_flex_length
            cap_init_rot = list(np.asarray(initial_rots[count]))

            joint_loc = joint_locs[count]
            joint_loc_abs = joint_locs_abs[count]
            capsule_loc = capsule_locs[count]
            capsule_loc_abs = capsule_locs_abs[count]

            cap_offset = [0., 0., 0.]
            if count in leg_cap_ref:
                cap_offset[1] = -cap_len / 2
            if count in foot_ref: cap_offset[2] = cap_len / 2
            if count in l_arm_ref: cap_offset[0] = cap_len / 2
            if count in r_arm_ref: cap_offset[0] = -cap_len / 2
            #if count in head_ref: cap_offset[1] = cap_len/2

            cap_offset[0] += capsule_loc_abs[0] - joint_loc_abs[0]
            cap_offset[1] += capsule_loc_abs[1] - joint_loc_abs[1] - .04
            cap_offset[2] += capsule_loc_abs[2] - joint_loc_abs[2]
            self.cap_offsets.append(np.asarray(cap_offset))
            self.cap_init_rots.append(np.asarray(cap_init_rot))

            if count == 0:
                self.world.add_capsule(parent=int(red_parent_ref[count]),
                                       radius=cap_rad,
                                       length=cap_len,
                                       cap_rot=cap_init_rot,
                                       cap_offset=cap_offset,
                                       joint_loc=joint_loc,
                                       joint_type=root_joint_type,
                                       joint_name=joint_names[count])
            elif count == 4 or count == 5:
                self.world.add_capsule(parent=int(red_parent_ref[count]),
                                       radius=cap_rad,
                                       length=cap_len,
                                       cap_rot=cap_init_rot,
                                       cap_offset=cap_offset,
                                       joint_loc=joint_loc,
                                       joint_type="REVOLUTE_X",
                                       joint_name=joint_names[count])
            elif count == 16 or count == 17:
                self.world.add_capsule(parent=int(red_parent_ref[count]),
                                       radius=cap_rad,
                                       length=cap_len,
                                       cap_rot=cap_init_rot,
                                       cap_offset=cap_offset,
                                       joint_loc=joint_loc,
                                       joint_type="REVOLUTE_Y",
                                       joint_name=joint_names[count])
            else:
                self.world.add_capsule(parent=int(red_parent_ref[count]),
                                       radius=cap_rad,
                                       length=cap_len,
                                       cap_rot=cap_init_rot,
                                       cap_offset=cap_offset,
                                       joint_loc=joint_loc,
                                       joint_type="BALL",
                                       joint_name=joint_names[count])

            lowest_points.append(
                np.asarray(joint_locs_trans_abs)[count, 2] -
                np.abs(float(input_flex_radius)))

            count += 1
            break

        #print "pelvis cap",
        #print np.asarray(joint_locs_trans_abs)[:, 2]
        self.STARTING_HEIGHT = STARTING_HEIGHT - 0.0508

        #add a floor-STARTING_HEIGHT / DART_TO_FLEX_CONV
        self.world.add_weld_box(
            width=10.0,
            length=10.0,
            height=0.2,
            joint_loc=[
                0.0, 0.0, -self.STARTING_HEIGHT / DART_TO_FLEX_CONV / 2 - 0.05
            ],
            box_rot=[0.0, 0.0, 0.0],
            joint_name="floor")  #-0.05

        skel = self.world.add_built_skeleton(_skel_id=0, _skel_name="human")

        skel.set_self_collision_check(True)

        #weight the capsules appropriately
        volume = []
        volume_median = []
        for body_ct in range(NUM_CAPSULES):
            #give the capsules a weight proportional to their volume
            cap_rad = input_flex_radius
            cap_len = input_flex_length

            cap_rad_median = np.abs(float(capsules_median[body_ct].rad[0]))
            cap_len_median = np.abs(float(capsules_median[body_ct].length[0]))

            volume.append(np.pi * np.square(cap_rad) *
                          (cap_rad * 4 / 3 + cap_len))
            volume_median.append(np.pi * np.square(cap_rad_median) *
                                 (cap_rad_median * 4 / 3 + cap_len_median))

        skel.bodynodes[0].set_mass(input_flex_mass)

        body_mass = 0.0
        #set the mass moment of inertia matrices
        for body_ct in range(NUM_CAPSULES):
            radius = input_flex_radius
            length = input_flex_length
            radius2 = radius * radius
            length2 = length * length
            mass = skel.bodynodes[body_ct].m

            cap_init_rot = list(np.asarray(initial_rots[body_ct]))

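            # Treat the capsule as a cylinder plus two hemispherical end caps:
            # split its mass by volume, then combine the parts' moments of inertia.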
            volumeCylinder = np.pi * radius2 * length
            volumeSphere = np.pi * radius * radius * radius * 4 / 3

            density = mass / (volumeCylinder + volumeSphere)
            massCylinder = density * volumeCylinder
            massSphere = density * volumeSphere
            Ixx = massCylinder * (length2 / 12.0 +
                                  radius2 / 4.0) + massSphere * (
                                      length2 + (3.0 / 8.0) * length * radius +
                                      (2.0 / 5.0) * radius2)
            Izz = massCylinder * (radius2 / 2.0) + massSphere * (
                (2.0 / 5.0) * radius2)

            RotMatInit = LibDartSkel().eulerAnglesToRotationMatrix(
                [np.pi / 2, 0.0, 0.0])
            RotMat = LibDartSkel().eulerAnglesToRotationMatrix(cap_init_rot)
            I = np.matmul(np.matmul(RotMatInit, RotMat),
                          np.asarray([Ixx, Izz, Ixx]))
            Ixx = np.abs(I[0])
            Iyy = np.abs(I[1])
            Izz = np.abs(I[2])
            #print body_ct, I

            skel.bodynodes[body_ct].set_inertia_entries(Ixx, Iyy, Izz)

            body_mass += skel.bodynodes[body_ct].m
            break

        print "Body mass is: ", body_mass, "kg"
        self.body_node = 9  #need to solve for the body node that corresponds to a force using flex.
        self.force = np.asarray([0.0, 100.0, 100.0])
        self.offset_from_centroid = np.asarray([-0.15, 0.0, 0.0])

        self.pmat_red_all = np.load(
            "/home/henry/git/volumetric_pose_gen/data/pmat_red.npy")
        self.force_dir_red_dart_all = np.load(
            "/home/henry/git/volumetric_pose_gen/data/force_dir_red.npy")
        for element in range(len(self.force_dir_red_dart_all)):
            self.force_dir_red_dart_all[element] = (np.multiply(
                np.asarray(self.force_dir_red_dart_all[element]),
                np.expand_dims(np.asarray(self.pmat_red_all[element]),
                               axis=1)))
        self.force_loc_red_dart_all = np.load(
            "/home/henry/git/volumetric_pose_gen/data/force_loc_red.npy"
        ).tolist()
        self.nearest_capsule_list_all = np.load(
            "/home/henry/git/volumetric_pose_gen/data/nearest_capsule.npy"
        ).tolist()

        print('init pose = %s' % skel.q)
        skel.controller = DampingController(skel)

        #now setup the open GL window
        self.title = "GLUT Window"
        self.window_size = (1280, 720)
        self.scene = OpenGLScene(*self.window_size)

        self.mouseLastPos = None
        self.is_simulating = False
        self.is_animating = False
        self.frame_index = 0
        self.capture_index = 0

        self.force_application_count = 0
        self.count = 0

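        # First-order Butterworth low-pass filters (normalized cutoff 0.05),
        # one per streamed channel, with lfilter initial conditions kept in zi.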
        self.zi = []
        self.b = []
        self.a = []
        for i in range(60):
            b, a = signal.butter(1, 0.05, analog=False)
            self.b.append(b)
            self.a.append(a)
            self.zi.append(signal.lfilter_zi(self.b[-1], self.a[-1]))
        self.ziF = []
        self.bF = []
        self.aF = []
        for i in range(3):
            b, a = signal.butter(1, 0.05, analog=False)
            self.bF.append(b)
            self.aF.append(a)
            self.ziF.append(signal.lfilter_zi(self.bF[-1], self.aF[-1]))
Code Example #8
    def val_convnet_general(self, epoch):

        if GENDER == "m":
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.m = load_model(model_path)

        self.pyRender = libPyRender.pyRenderMesh(render=True)
        '''
        Validate the model for one epoch.
        '''
        # Some models use slightly different forward passes at train and test
        # time (e.g., any model with Dropout). This puts the model in train mode
        # (as opposed to eval mode) so it knows which one to use.

        RESULTS_DICT = {}
        RESULTS_DICT['j_err'] = []
        RESULTS_DICT['betas'] = []
        RESULTS_DICT['dir_v_err'] = []
        RESULTS_DICT['v2v_err'] = []
        RESULTS_DICT['dir_v_limb_err'] = []
        RESULTS_DICT['v_to_gt_err'] = []
        RESULTS_DICT['v_limb_to_gt_err'] = []
        RESULTS_DICT['gt_to_v_err'] = []
        RESULTS_DICT['precision'] = []
        RESULTS_DICT['recall'] = []
        RESULTS_DICT['overlap_d_err'] = []
        RESULTS_DICT['all_d_err'] = []
        init_time = time.time()

        with torch.autograd.set_detect_anomaly(True):

            # This will loop a total = training_images/batch_size times
            for batch_idx, batch in enumerate(self.test_loader):
                if batch_idx > BATCH_IDX_START and batch_idx < 500:  #57:

                    batch1 = batch[1].clone()

                    betas_gt = torch.mean(batch[1][:, 72:82], dim=0).numpy()
                    angles_gt = torch.mean(batch[1][:, 82:154], dim=0).numpy()
                    root_shift_est_gt = torch.mean(batch[1][:, 154:157],
                                                   dim=0).numpy()

                    NUMOFOUTPUTDIMS = 3
                    NUMOFOUTPUTNODES_TEST = 24
                    self.output_size_test = (NUMOFOUTPUTNODES_TEST,
                                             NUMOFOUTPUTDIMS)

                    self.CTRL_PNL['adjust_ang_from_est'] = False
                    self.CTRL_PNL['depth_map_labels'] = False
                    self.CTRL_PNL['align_procr'] = False

                    print self.CTRL_PNL['num_input_channels_batch0'], batch[0].size()

                    scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib().unpack_batch(
                        batch, False, self.model, self.CTRL_PNL)

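                    # Split the estimated bed-relative depth map into separate
                    # non-negative "above" and "below" channels so it can be
                    # fed back in as input to the second-stage network.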
                    mdm_est_pos = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  # / 16.69545796387731
                    mdm_est_neg = OUTPUT_DICT['batch_mdm_est'].clone().unsqueeze(1)  # / 45.08513083167194
                    mdm_est_pos[mdm_est_pos < 0] = 0
                    mdm_est_neg[mdm_est_neg > 0] = 0
                    mdm_est_neg *= -1
                    cm_est = OUTPUT_DICT['batch_cm_est'].clone().unsqueeze(1) * 100  # / 43.55800622930469

                    # 1. / 16.69545796387731,  # pos est depth
                    # 1. / 45.08513083167194,  # neg est depth
                    # 1. / 43.55800622930469,  # cm est

                    sc_sample1 = OUTPUT_DICT['batch_targets_est'].clone()
                    sc_sample1 = sc_sample1[0, :].squeeze() / 1000
                    sc_sample1 = sc_sample1.view(self.output_size_test)
                    # print sc_sample1

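                    # Second pass: feed the first-stage estimates (depth and
                    # contact maps plus betas, angles, and root position) back
                    # in as extra inputs to the second network.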
                    if self.model2 is not None:
                        print "Using model 2"
                        batch_cor = []

                        if self.CTRL_PNL['cal_noise'] == False:
                            batch_cor.append(
                                torch.cat((batch[0][:, 0:1, :, :],
                                           mdm_est_pos.type(torch.FloatTensor),
                                           mdm_est_neg.type(torch.FloatTensor),
                                           cm_est.type(torch.FloatTensor),
                                           batch[0][:, 1:, :, :]),
                                          dim=1))
                        else:
                            if self.opt.pmr == True:
                                batch_cor.append(
                                    torch.cat(
                                        (mdm_est_pos.type(torch.FloatTensor),
                                         mdm_est_neg.type(torch.FloatTensor),
                                         cm_est.type(torch.FloatTensor),
                                         batch[0][:, 0:, :, :]),
                                        dim=1))
                            else:
                                batch_cor.append(batch[0])

                        if self.CTRL_PNL['full_body_rot'] == False:
                            batch_cor.append(
                                torch.cat(
                                    (batch1,
                                     OUTPUT_DICT['batch_betas_est'].cpu(),
                                     OUTPUT_DICT['batch_angles_est'].cpu(),
                                     OUTPUT_DICT['batch_root_xyz_est'].cpu()),
                                    dim=1))
                        elif self.CTRL_PNL['full_body_rot'] == True:
                            batch_cor.append(
                                torch.cat((
                                    batch1,
                                    OUTPUT_DICT['batch_betas_est'].cpu(),
                                    OUTPUT_DICT['batch_angles_est'].cpu(),
                                    OUTPUT_DICT['batch_root_xyz_est'].cpu(),
                                    OUTPUT_DICT['batch_root_atan2_est'].cpu()),
                                          dim=1))

                        self.CTRL_PNL['adjust_ang_from_est'] = True

                        if self.opt.pmr == True:
                            self.CTRL_PNL['num_input_channels_batch0'] += 3

                        print self.CTRL_PNL['num_input_channels_batch0'], batch_cor[0].size()

                        self.CTRL_PNL['align_procr'] = self.opt.align_procr

                        scores, INPUT_DICT, OUTPUT_DICT = UnpackBatchLib(
                        ).unpack_batch(batch_cor,
                                       is_training=False,
                                       model=self.model2,
                                       CTRL_PNL=self.CTRL_PNL)
                        if self.opt.pmr == True:
                            self.CTRL_PNL['num_input_channels_batch0'] -= 3

                    self.CTRL_PNL['first_pass'] = False

                    # print betas_est, root_shift_est, angles_est
                    if self.CTRL_PNL['dropout'] == True:
                        #print OUTPUT_DICT['verts'].shape
                        smpl_verts = np.mean(OUTPUT_DICT['verts'], axis=0)
                        dropout_variance = np.std(OUTPUT_DICT['verts'], axis=0)
                        dropout_variance = np.linalg.norm(dropout_variance,
                                                          axis=1)
                    else:
                        smpl_verts = OUTPUT_DICT['verts'][0, :, :]
                        dropout_variance = None

                    smpl_verts = np.concatenate(
                        (smpl_verts[:, 1:2] - 0.286 + 0.0143,
                         smpl_verts[:, 0:1] - 0.286 + 0.0143,
                         -smpl_verts[:, 2:3]),
                        axis=1)

                    smpl_faces = np.array(self.m.f)

                    q = OUTPUT_DICT['batch_mdm_est'].data.numpy().reshape(
                        OUTPUT_DICT['batch_mdm_est'].size()[0], 64, 27) * -1
                    q = np.mean(q, axis=0)

                    camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                    bedangle = 0.0
                    # print smpl_verts

                    RESULTS_DICT['betas'].append(
                        OUTPUT_DICT['batch_betas_est_post_clip'].cpu().numpy()
                        [0])
                    print RESULTS_DICT['betas'][-1], "BETAS"

                    viz_dim = self.opt.viz_dim

                    if viz_dim == "2D":
                        from visualization_lib import VisualizationLib
                        if self.model2 is not None:
                            self.im_sample = INPUT_DICT['batch_images'][0, 4:, :].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                        else:
                            self.im_sample = INPUT_DICT['batch_images'][0, 1:, :].squeeze() * 20.  # normalizing_std_constants[4]*5.  #pmat
                        self.im_sample_ext = INPUT_DICT['batch_images'][0, 0:, :].squeeze() * 20.  # normalizing_std_constants[0]  #pmat contact
                        # self.im_sample_ext2 = INPUT_DICT['batch_images'][im_display_idx, 2:, :].squeeze()*20.#normalizing_std_constants[4]  #sobel
                        self.im_sample_ext3 = OUTPUT_DICT['batch_mdm_est'][0, :, :].squeeze().unsqueeze(0) * -1  # est depth output

                        # print scores[0, 10:16], 'scores of body rot'

                        # print self.im_sample.size(), self.im_sample_ext.size(), self.im_sample_ext2.size(), self.im_sample_ext3.size()

                        # self.publish_depth_marker_array(self.im_sample_ext3)

                        self.tar_sample = INPUT_DICT['batch_targets']
                        self.tar_sample = self.tar_sample[0, :].squeeze() / 1000
                        sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                        sc_sample = sc_sample[0, :].squeeze() / 1000

                        sc_sample = sc_sample.view(self.output_size_test)

                        VisualizationLib().visualize_pressure_map(
                            self.im_sample,
                            sc_sample1,
                            sc_sample,
                            # self.im_sample_ext, None, None,
                            self.im_sample_ext3,
                            None,
                            None,
                            # , self.tar_sample_val, self.sc_sample_val,
                            block=False)

                    elif viz_dim == "3D":
                        pmat = batch[0][0, 0, :, :].clone().numpy()
                        #print pmat.shape

                        for beta in range(betas_gt.shape[0]):
                            self.m.betas[beta] = betas_gt[beta]
                        for angle in range(angles_gt.shape[0]):
                            self.m.pose[angle] = angles_gt[angle]

                        smpl_verts_gt = np.array(self.m.r)
                        for s in range(root_shift_est_gt.shape[0]):
                            smpl_verts_gt[:, s] += (
                                root_shift_est_gt[s] -
                                float(self.m.J_transformed[0, s]))

                        smpl_verts_gt = np.concatenate(
                            (smpl_verts_gt[:, 1:2] - 0.286 + 0.0143,
                             smpl_verts_gt[:, 0:1] - 0.286 + 0.0143,
                             0.0 - smpl_verts_gt[:, 2:3]),
                            axis=1)

                        joint_cart_gt = np.array(self.m.J_transformed).reshape(
                            24, 3)
                        for s in range(root_shift_est_gt.shape[0]):
                            joint_cart_gt[:, s] += (
                                root_shift_est_gt[s] -
                                float(self.m.J_transformed[0, s]))

                        #print joint_cart_gt, 'gt'

                        sc_sample = OUTPUT_DICT['batch_targets_est'].clone()
                        sc_sample = (sc_sample[0, :].squeeze().numpy() /
                                     1000).reshape(24, 3)

                        #print sc_sample, 'estimate'
                        joint_error = np.linalg.norm(joint_cart_gt - sc_sample,
                                                     axis=1)
                        #print joint_error
                        RESULTS_DICT['j_err'].append(joint_error)

                        camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                        save_name = NETWORK_2 + '_' + TESTING_FILENAME + '_' + str(batch_idx)
                        if self.opt.align_procr == True: save_name += '_ap'
                        # render everything
                        RESULTS_DICT = self.pyRender.render_mesh_pc_bed_pyrender_everything_synth(
                            smpl_verts,
                            smpl_faces,
                            camera_point,
                            bedangle,
                            RESULTS_DICT,
                            smpl_verts_gt=smpl_verts_gt,
                            pmat=pmat,
                            markers=None,
                            dropout_variance=dropout_variance,
                            save_name=save_name)

                    #time.sleep(300)

                    #print RESULTS_DICT['j_err']
                    print np.mean(np.array(RESULTS_DICT['j_err']), axis=0)
                    #print RESULTS_DICT['precision']
                    print np.mean(RESULTS_DICT['precision'])
                    print time.time() - init_time
Code Example #9
    def __init__(self, sampling = "NORMAL", sigma = 0, one_side_range = 0, gender="m"):
        ## Load SMPL model (the gendered model is selected by the `gender` argument)
        model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_'+gender+'_lbs_10_207_0_v1.0.0.pkl'
        self.m = load_model(model_path)

        ## Initialize shape parameters to zero (the betas are sampled below)

        self.m.betas[:] = np.zeros(self.m.betas.size)
        #self.m.betas[5] = 20.

        self.m.pose[0] = 0 #pitch rotation of the person in space. 0 means the person is upside down facing back. pi is standing up facing forward
        self.m.pose[1] = 0 #roll of the person in space. -pi/2 means they are tilted to their right side
        self.m.pose[2] = 0#-np.pi/4 #yaw of the person in space, like turning around normal to the ground

        self.m.pose[3] = 0#-np.pi/4 #left hip extension (i.e. leg bends back for np.pi/2)
        self.m.pose[4] = 0#np.pi/8 #left leg yaw about hip, where np.pi/2 makes bowed leg
        self.m.pose[5] = 0.#np.pi/4 #left leg abduction (POS) /adduction (NEG)

        self.m.pose[6] = 0#-np.pi/4 #right hip extension (i.e. leg bends back for np.pi/2)
        self.m.pose[7] = 0#-np.pi/8
        self.m.pose[8] = 0.#-np.pi/4 #right leg abduction (NEG) /adduction (POS)

        self.m.pose[9] = 0 #bending of spine at hips. np.pi/2 means person bends down to touch the ground
        self.m.pose[10] = 0 #twisting of spine at hips. body above spine yaws normal to the ground
        self.m.pose[11] = 0 #bending of spine at hips. np.pi/2 means person bends down sideways to touch the ground 3

        self.m.pose[12] = 0#np.pi/4 #left knee extension. (i.e. knee bends back for np.pi/2)
        self.m.pose[13] = 0 #twisting of knee normal to ground. KEEP AT ZERO
        self.m.pose[14] = 0 #bending of knee sideways. KEEP AT ZERO

        self.m.pose[15] = 0#np.pi/4 #right knee extension (i.e. knee bends back for np.pi/2)

        self.m.pose[18] = 0 #bending at mid spine. makes person into a hunchback for positive values
        self.m.pose[19] = 0#twisting of midspine. body above midspine yaws normal to the ground
        self.m.pose[20] = 0 #bending of midspine, np.pi/2 means person bends down sideways to touch ground 6

        self.m.pose[21] = 0 #left ankle flexion/extension
        self.m.pose[22] = 0 #left ankle yaw about leg
        self.m.pose[23] = 0 #left ankle twist KEEP CLOSE TO ZERO

        self.m.pose[24] = 0 #right ankle flexion/extension
        self.m.pose[25] = 0 #right ankle yaw about leg
        self.m.pose[26] = 0#np.pi/4 #right ankle twist KEEP CLOSE TO ZERO

        self.m.pose[27] = 0 #bending at upperspine. makes person into a hunchback for positive values
        self.m.pose[28] = 0#twisting of upperspine. body above upperspine yaws normal to the ground
        self.m.pose[29] = 0 #bending of upperspine, np.pi/2 means person bends down sideways to touch ground 9

        self.m.pose[30] = 0 #flexion/extension of left ankle midpoint

        self.m.pose[33] = 0 #flexion/extension of right ankle midpoint

        self.m.pose[36] = 0#np.pi/2 #flexion/extension of neck. i.e. whiplash 12
        self.m.pose[37] = 0#-np.pi/2 #yaw of neck
        self.m.pose[38] = 0#np.pi/4  #tilt head side to side

        self.m.pose[39] = 0 #left inner shoulder roll
        self.m.pose[40] = 0 #left inner shoulder yaw, negative moves forward
        self.m.pose[41] = 0.#np.pi/4 #left inner shoulder pitch, positive moves up

        self.m.pose[42] = 0
        self.m.pose[43] = 0 #right inner shoulder yaw, positive moves forward
        self.m.pose[44] = 0.#-np.pi/4 #right inner shoulder pitch, positive moves down

        self.m.pose[45] = 0 #flexion/extension of head 15

        self.m.pose[48] = 0#-np.pi/4 #left outer shoulder roll
        self.m.pose[49] = 0#-np.pi/4
        self.m.pose[50] = 0#np.pi/4 #left outer shoulder pitch

        self.m.pose[51] = 0#-np.pi/4 #right outer shoulder roll
        self.m.pose[52] = 0#np.pi/4
        self.m.pose[53] = 0#-np.pi/4

        self.m.pose[54] = 0 #left elbow roll KEEP AT ZERO
        self.m.pose[55] = 0#np.pi/3 #left elbow flexion/extension. KEEP NEGATIVE
        self.m.pose[56] = 0 #left elbow KEEP AT ZERO

        self.m.pose[57] = 0
        self.m.pose[58] = 0#np.pi/4 #right elbow flexion/extension KEEP POSITIVE

        self.m.pose[60] = 0 #left wrist roll

        self.m.pose[63] = 0 #right wrist roll
        #self.m.pose[65] = np.pi/5

        self.m.pose[66] = 0 #left hand roll

        self.m.pose[69] = 0 #right hand roll
        #self.m.pose[71] = np.pi/5 #right fist


        mu = 0



        for i in range(10):
            if sampling == "NORMAL":
                self.m.betas[i] = random.normalvariate(mu, sigma)
            elif sampling == "UNIFORM":
                self.m.betas[i]  = np.random.uniform(-one_side_range, one_side_range)
        #self.m.betas[0] = random.normalvariate(mu, sigma) #overall body size. more positive number makes smaller, negative makes larger with bigger belly
        #self.m.betas[1] = random.normalvariate(mu, sigma) #positive number makes person very skinny, negative makes fat
        #self.m.betas[2] = random.normalvariate(mu, sigma) #muscle mass. higher makes person less physically fit
        #self.m.betas[3] = random.normalvariate(mu, sigma) #proportion for upper vs lower bone lengths. more negative number makes legs much bigger than arms
        #self.m.betas[4] = random.normalvariate(mu, sigma) #neck. more negative seems to make neck longer and body more skinny
        #self.m.betas[5] = random.normalvariate(mu, sigma) #size of hips. larger means bigger hips
        #self.m.betas[6] = random.normalvariate(mu, sigma) #proportion of belly with respect to rest of the body. higher number is larger belly
        #self.m.betas[7] = random.normalvariate(mu, sigma)
        #self.m.betas[8] = random.normalvariate(-3, 3)
        #self.m.betas[9] = random.normalvariate(-3, 3)

        #print self.m.pose.shape
        #print self.m.pose, 'pose'
        #print self.m.betas, 'betas'

        ## Create OpenDR renderer
        self.rn = ColoredRenderer()

        # terms = 'f', 'frustum', 'background_image', 'overdraw', 'num_channels'
        # dterms = 'vc', 'camera', 'bgcolor'
        self.first_pass = True
        self.scene = pyrender.Scene()
        self.human_mat = pyrender.MetallicRoughnessMaterial(baseColorFactor=[0.0, 0.0, 1.0, 0.0])
        # ColorBrewer "Paired" palette (RGB, 0-255): one material per mesh part
        part_colors_255 = [[166, 206, 227], [31, 120, 180], [251, 154, 153],
                           [227, 26, 28], [178, 223, 138], [51, 160, 44],
                           [253, 191, 111], [255, 127, 0], [202, 178, 214],
                           [106, 61, 154]]
        self.mesh_parts_mat_list = [
            pyrender.MetallicRoughnessMaterial(
                baseColorFactor=[c[0] / 255., c[1] / 255., c[2] / 255., 0.0])
            for c in part_colors_255]

        self.artag_mat = pyrender.MetallicRoughnessMaterial(baseColorFactor=[0.3, 1.0, 0.3, 0.5])
        self.artag_mat_other = pyrender.MetallicRoughnessMaterial(baseColorFactor=[0.1, 0.1, 0.1, 0.0])
        self.artag_r = np.array(
            [[-0.055, -0.055, 0.0], [-0.055, 0.055, 0.0], [0.055, -0.055, 0.0], [0.055, 0.055, 0.0]])
        self.artag_f = np.array([[0, 1, 3], [3, 1, 0], [0, 2, 3], [3, 2, 0], [1, 3, 2]])
        self.artag_facecolors_root = np.array(
            [[0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]])
        self.artag_facecolors = np.array(
            [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], ])
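
The AR-tag geometry above is kept as raw vertex, face, and face-color arrays. A minimal standalone sketch (the trimesh face-color path and variable setup are assumptions, not taken from this file) of one way such arrays become a pyrender mesh:

import numpy as np
import trimesh
import pyrender

# four corners of an 11 cm square marker, with duplicated back faces
artag_r = np.array([[-0.055, -0.055, 0.0], [-0.055, 0.055, 0.0],
                    [0.055, -0.055, 0.0], [0.055, 0.055, 0.0]])
artag_f = np.array([[0, 1, 3], [3, 1, 0], [0, 2, 3], [3, 2, 0]])
artag_facecolors = np.array([[0.0, 1.0, 0.0]] * 4)  # one RGB color per face

# process=False keeps the duplicated back faces; smooth=False keeps flat
# shading so the per-face colors stay visible
tm = trimesh.Trimesh(vertices=artag_r, faces=artag_f,
                     face_colors=artag_facecolors, process=False)
mesh = pyrender.Mesh.from_trimesh(tm, smooth=False)

scene = pyrender.Scene()
scene.add(mesh)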
Code Example #10
    def init_smpl(self, batch_size):

        from smpl.smpl_webuser.serialization import load_model

        model_path_f = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
        human_f = load_model(model_path_f)
        self.v_template_f = torch.Tensor(np.array(
            human_f.v_template)).type(dtype)
        self.shapedirs_f = torch.Tensor(np.array(human_f.shapedirs)).permute(
            0, 2, 1).type(dtype)
        # adding the sparse J_regressor to a zero array densifies it
        self.J_regressor_f = np.zeros(
            (human_f.J_regressor.shape)) + human_f.J_regressor
        self.J_regressor_f = torch.Tensor(
            np.array(self.J_regressor_f).astype(float)).permute(1, 0).type(dtype)
        self.posedirs_f = torch.Tensor(np.array(human_f.posedirs))
        self.weights_f = torch.Tensor(np.array(human_f.weights))

        model_path_m = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        human_m = load_model(model_path_m)
        self.v_template_m = torch.Tensor(np.array(
            human_m.v_template)).type(dtype)
        self.shapedirs_m = torch.Tensor(np.array(human_m.shapedirs)).permute(
            0, 2, 1).type(dtype)
        self.J_regressor_m = np.zeros(
            (human_m.J_regressor.shape)) + human_m.J_regressor
        self.J_regressor_m = torch.Tensor(
            np.array(self.J_regressor_m).astype(float)).permute(1, 0).type(dtype)
        self.posedirs_m = torch.Tensor(np.array(human_m.posedirs))
        self.weights_m = torch.Tensor(np.array(human_m.weights))

        print self.posedirs_m.size()

        # kinematic tree: parents[i] is the parent joint of joint i;
        # 4294967295 is -1 cast to uint32, marking joint 0 (the pelvis) as root
        self.parents = np.array([
            4294967295, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 12, 13, 14,
            16, 17, 18, 19, 20, 21
        ]).astype(np.int32)

        # print batch_size
        self.N = batch_size
        self.shapedirs_repeat_f = self.shapedirs_f.unsqueeze(0).repeat(
            self.N, 1, 1, 1).permute(0, 2, 1, 3).unsqueeze(0)
        self.shapedirs_repeat_m = self.shapedirs_m.unsqueeze(0).repeat(
            self.N, 1, 1, 1).permute(0, 2, 1, 3).unsqueeze(0)
        self.shapedirs_repeat = torch.cat(
            (self.shapedirs_repeat_f, self.shapedirs_repeat_m),
            0)  # this is 2 x N x B x R x D
        self.B = self.shapedirs_repeat.size()[2]  # this is 10, num shape params
        self.R = self.shapedirs_repeat.size()[3]  # this is 6890, or num of verts
        self.D = self.shapedirs_repeat.size()[4]  # this is 3, or num dimensions
        self.shapedirs_repeat = self.shapedirs_repeat.permute(
            1, 0, 2, 3, 4).view(self.N, 2, self.B * self.R * self.D)

        self.v_template_repeat_f = self.v_template_f.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.v_template_repeat_m = self.v_template_m.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.v_template_repeat = torch.cat(
            (self.v_template_repeat_f, self.v_template_repeat_m),
            0)  # this is 2 x N x R x D
        self.v_template_repeat = self.v_template_repeat.permute(
            1, 0, 2, 3).view(self.N, 2, self.R * self.D)

        self.J_regressor_repeat_f = self.J_regressor_f.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.J_regressor_repeat_m = self.J_regressor_m.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.J_regressor_repeat = torch.cat(
            (self.J_regressor_repeat_f, self.J_regressor_repeat_m),
            0)  # this is 2 x N x R x 24
        self.J_regressor_repeat = self.J_regressor_repeat.permute(
            1, 0, 2, 3).view(self.N, 2, self.R * 24)

        self.posedirs_repeat_f = self.posedirs_f.unsqueeze(0).repeat(
            self.N, 1, 1, 1).unsqueeze(0)
        self.posedirs_repeat_m = self.posedirs_m.unsqueeze(0).repeat(
            self.N, 1, 1, 1).unsqueeze(0)
        self.posedirs_repeat = torch.cat(
            (self.posedirs_repeat_f, self.posedirs_repeat_m), 0)
        #self.posedirs_repeat = self.posedirs_repeat.permute(1, 0, 2, 3, 4).view(self.N, 2, self.R*self.D*207)
        self.posedirs_repeat = self.posedirs_repeat.permute(
            1, 0, 2, 3, 4).view(self.N, 2, self.R, self.D * 207)
        # keep only these 10 sampled vertices' rows of the pose-blend tensor
        self.verts_list = [1325, 336, 1046, 4530, 3333, 6732, 1664, 5121,
                           2208, 5669]
        self.posedirs_repeat = torch.stack(
            [self.posedirs_repeat[:, :, v, :] for v in self.verts_list])
        self.posedirs_repeat = self.posedirs_repeat.permute(
            1, 2, 0, 3).contiguous().view(self.N, 2, 10 * self.D * 207)

        self.weights_repeat_f = self.weights_f.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.weights_repeat_m = self.weights_m.unsqueeze(0).repeat(
            self.N, 1, 1).unsqueeze(0)
        self.weights_repeat = torch.cat(
            (self.weights_repeat_f, self.weights_repeat_m), 0)
        #self.weights_repeat = self.weights_repeat.permute(1, 0, 2, 3).view(self.N, 2, self.R * 24)
        self.weights_repeat = self.weights_repeat.permute(1, 0, 2, 3).view(
            self.N, 2, self.R, 24)
        # same 10 sampled vertices for the skinning weights
        self.weights_repeat = torch.stack(
            [self.weights_repeat[:, :, v, :] for v in self.verts_list])
        self.weights_repeat = self.weights_repeat.permute(
            1, 2, 0, 3).contiguous().view(self.N, 2, 10 * 24)
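
init_smpl flattens every SMPL tensor into a 2 x gender stack so a mixed-gender batch can be handled without branching. A minimal sketch (not from this file; gender_switch is an assumed N x 1 x 2 one-hot selector) of how such a stack is consumed with a single batched matrix multiply:

import torch

N, B, R, D = 4, 10, 6890, 3
gender_switch = torch.zeros(N, 1, 2)
gender_switch[:2, 0, 0] = 1.  # first two samples female
gender_switch[2:, 0, 1] = 1.  # last two samples male

shapedirs_repeat = torch.rand(N, 2, B * R * D)  # stand-in for the real tensor

# one bmm picks each sample's gender slice
shapedirs = torch.bmm(gender_switch, shapedirs_repeat).view(N, B, R * D)

# blend shapes: betas (N x 1 x B) @ shapedirs (N x B x R*D) -> vertex offsets
betas = torch.rand(N, 1, B)
v_offsets = torch.bmm(betas, shapedirs).view(N, R, D)
print(v_offsets.shape)  # torch.Size([4, 6890, 3])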
Code Example #11
def get_depth_cont_maps_from_synth():
    #all_data_names = [#["f", "lay", 8549, "1to40", "train"]]
    #                  ["f", "lside", 8136, "1to40", "train"],
    #                  ["f", "rside", 7677, "1to40", "train"],
    #                  ["m", "lay", 8493, "1to40", "train"],
    #                  ["m", "lside", 7761, "1to40", "train"],
    #                  ["m", "rside", 7377, "1to40", "train"]]

    #all_data_names = [["f", "lay", 6608, "41to70", "train"],
    #                  ["f", "lside", 6158, "41to70", "train"],
    #                  ["f", "rside", 6006, "41to70", "train"]]

    #all_data_names = [["m", "lay", 6597, "41to70", "train"],
    #                  ["m", "lside", 5935, "41to70", "train"],
    #                  ["m", "rside", 5817, "41to70", "train"]]

    all_data_names = [["f", "lay", 2184, "71to80", "train"],
                      ["f", "lside", 2058, "71to80", "train"],
                      ["f", "rside", 2010, "71to80", "train"],
                      ["m", "lay", 2188, "71to80", "train"],
                      ["m", "lside", 2002, "71to80", "train"],
                      ["m", "rside", 1939, "71to80", "train"]]

    from visualization_lib import VisualizationLib

    # add one sentinel point per taxel of the 27 x 64 mat at a huge depth
    # (20000) so every (x, y) cell is populated when the minimum depth per
    # cell is extracted below
    filler_taxels = []
    for i in range(27):
        for j in range(64):
            filler_taxels.append([i, j, 20000])
    filler_taxels = np.array(filler_taxels)

    for gpsn in all_data_names:
        gender = gpsn[0]
        posture = gpsn[1]
        num_resting_poses = gpsn[2]
        subj_nums = gpsn[3]
        dattype = gpsn[4]

        bed_angle = np.deg2rad(1.0)

        if posture == "sit":
            bed_angle = np.deg2rad(60.0)
        elif posture == "lay":
            bed_angle = np.deg2rad(1.0)

        # training_data_dict['v_template'] = []
        # training_data_dict['shapedirs'] = []

        model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_' + gender + '_lbs_10_207_0_v1.0.0.pkl'
        m = load_model(model_path)

        filename =  '/home/henry/git/sim_camera_resting_scene/data_BR/synth/slp/' + dattype + '_slp_'\
            + posture + '_' + gender + '_'+subj_nums+'_' + str(num_resting_poses) + '.p'
        #filename =  '/home/henry/data/synth/home_poses/home_pose_'+gender+'.p'

        training_data_dict = load_pickle(filename)
        print "loaded ", filename

        betas = training_data_dict['body_shape']
        pose = training_data_dict['joint_angles']
        images = training_data_dict['images']
        training_data_dict['mesh_depth'] = []
        training_data_dict['mesh_contact'] = []
        root_xyz_shift = training_data_dict['root_xyz_shift']

        ct = 0
        for index in range(len(betas)):
            #index += 4
            for beta_idx in range(10):
                m.betas[beta_idx] = betas[index][beta_idx]
            for pose_idx in range(72):
                m.pose[pose_idx] = pose[index][pose_idx]

            #print images[index]
            images[index][images[index] > 0] += 1
            #print images[index]
            training_data_dict['images'][index] = images[index].astype(
                np.int8)  # convert the original pmat to an int to save space
            #print training_data_dict['images'][index]
            curr_root_shift = np.array(root_xyz_shift[index])

            #print curr_root_shift,'currroot'
            #print m.J_transformed, 'Jest'

            joints = np.array(m.J_transformed) + curr_root_shift + np.array(
                [0.0, 0.0, -0.075]) - np.array(m.J_transformed)[0:1, :]
            vertices = np.array(m.r) + curr_root_shift + np.array(
                [0.0, 0.0, -0.075]) - np.array(m.J_transformed)[0:1, :]
            vertices_rot = np.copy(vertices)

            #print vertices.shape
            #print vertices[0:10, :], 'verts'

            #print curr_root_shift, 'curr shift' #[0.59753822 1.36742909 0.09295963]

            #vertices[0, :] = np.array([0.0, 1.173, -5.0])

            bend_loc = 48 * 0.0286  # bed hinge location: taxel row 48, in meters

            #import matplotlib.pyplot as plt
            #plt.plot(-vertices[:, 1], vertices[:, 2], 'r.')
            #print vertices.dtype
            #vertices = vertices.astype(float32)

            # rotate vertices past the hinge line y = bend_loc by bed_angle:
            #   y' = bend_loc - (bend_loc - y) * cos(a) + z * sin(a)
            #   z' = (bend_loc - y) * sin(a) + z * cos(a)
            vertices_rot[:, 1] = vertices[:, 2] * np.sin(bed_angle) - (
                bend_loc - vertices[:, 1]) * np.cos(bed_angle) + bend_loc
            vertices_rot[:, 2] = vertices[:, 2] * np.cos(bed_angle) + (
                bend_loc - vertices[:, 1]) * np.sin(bed_angle)

            #vertices =
            vertices_rot = vertices_rot[vertices_rot[:, 1] >= bend_loc]
            vertices = np.concatenate(
                (vertices[vertices[:, 1] < bend_loc], vertices_rot), axis=0)
            #print vertices.shape

            #plt.plot(-vertices[:, 1], vertices[:, 2], 'k.')

            #plt.axis([-1.8, -0.2, -0.3, 1.0])
            #plt.show()

            #print vertices.shape

            joints_taxel = joints / 0.0286  # convert meters to taxel units
            vertices_taxel = vertices / 0.0286
            vertices_taxel[:, 2] *= 1000  # scale depth so the int cast below keeps precision
            vertices_taxel[:, 0] *= 1.04
            vertices_taxel[:, 0] -= 10
            vertices_taxel[:, 1] -= 10

            time_orig = time.time()

            #joints_taxel_int = (joints_taxel).astype(int)
            vertices_taxel_int = (vertices_taxel).astype(int)

            vertices_taxel_int = np.concatenate(
                (filler_taxels, vertices_taxel_int), axis=0)

            # pack (x, y, z) into one sortable key so that, within each (x, y)
            # taxel, the vertex with the smallest depth comes first after sorting
            vertice_sorting_method = vertices_taxel_int[:, 0] * 10000000 \
                + vertices_taxel_int[:, 1] * 100000 + vertices_taxel_int[:, 2]
            vertices_taxel_int = vertices_taxel_int[vertice_sorting_method.argsort()]

            # key on (x, y) only; np.unique(return_index=True) returns the first
            # occurrence per taxel, i.e. the minimum-depth vertex
            vertice_sorting_method_2 = vertices_taxel_int[:, 0] * 100 + vertices_taxel_int[:, 1]
            unique_keys, indices = np.unique(vertice_sorting_method_2,
                                             return_index=True)

            vertices_taxel_int_unique = vertices_taxel_int[indices]

            #print vertices_taxel_int_unique.shape

            vertices_taxel_int_unique = vertices_taxel_int_unique[
                vertices_taxel_int_unique[:, 0] < 27, :]
            vertices_taxel_int_unique = vertices_taxel_int_unique[
                vertices_taxel_int_unique[:, 0] >= 0, :]
            vertices_taxel_int_unique = vertices_taxel_int_unique[
                vertices_taxel_int_unique[:, 1] < 64, :]
            vertices_taxel_int_unique = vertices_taxel_int_unique[
                vertices_taxel_int_unique[:, 1] >= 0, :]
            #print vertices_taxel_int_unique

            #print vertices_taxel_int_unique

            mesh_matrix = np.flipud(vertices_taxel_int_unique[:, 2].reshape(
                27, 64).T).astype(float)

            mesh_matrix[mesh_matrix == 20000] = 0
            mesh_matrix *= 0.0286

            # fix holes: fill each empty taxel (value >= 0) with the average of
            # its eight neighbors' (negative) depth values
            abc = np.zeros((66, 29, 4))
            abc[1:65, 1:28, 0] = np.copy(mesh_matrix)
            abc[1:65, 1:28, 0][abc[1:65, 1:28, 0] > 0] = 0
            abc[1:65, 1:28, 0] = abc[0:64, 0:27, 0] + abc[1:65, 0:27, 0] + abc[2:66, 0:27, 0] + \
                                 abc[0:64, 1:28, 0] + abc[2:66, 1:28, 0] + \
                                 abc[0:64, 2:29, 0] + abc[1:65, 2:29, 0] + abc[2:66, 2:29, 0]
            abc = abc[1:65, 1:28, :]
            abc[:, :, 0] /= 8
            abc[:, :, 1] = np.copy(mesh_matrix)
            abc[:, :, 1][abc[:, :, 1] < 0] = 0
            abc[:, :, 1][abc[:, :, 1] >= 0] = 1
            abc[:, :, 2] = abc[:, :, 0] * abc[:, :, 1]
            abc[:, :, 3] = np.copy(abc[:, :, 2])
            abc[:, :, 3][abc[:, :, 3] != 0] = 1.
            abc[:, :, 3] = 1 - abc[:, :, 3]
            mesh_matrix = mesh_matrix * abc[:, :, 3]
            mesh_matrix += abc[:, :, 2]
            #print np.min(mesh_matrix), np.max(mesh_matrix)
            mesh_matrix = mesh_matrix.astype(np.int32)
            #print np.min(mesh_matrix), np.max(mesh_matrix)

            # make a contact matrix: occupied taxels carry negative depth values
            contact_matrix = np.copy(mesh_matrix)
            contact_matrix[contact_matrix >= 0] = 0
            contact_matrix[contact_matrix < 0] = 1
            contact_matrix = contact_matrix.astype(bool)

            ct += 1
            print time.time() - time_orig, ct

            training_data_dict['mesh_depth'].append(mesh_matrix)
            training_data_dict['mesh_contact'].append(contact_matrix)

            #print training_data_dict['images'][index].dtype
            #print training_data_dict['mesh_depth'][index].dtype
            #print training_data_dict['mesh_contact'][index].dtype

            #print m.J_transformed

            #print np.min(mesh_matrix), np.max(mesh_matrix)

            #VisualizationLib().visualize_pressure_map(pmat, joints, None, mesh_matrix+50, joints)
            #time.sleep(5)

            #break


        filename =  '/home/henry/git/sim_camera_resting_scene/data_BR/synth/slp/' + dattype + '_slp_'\
            + posture + '_' + gender + '_'+subj_nums+'_' + str(num_resting_poses) + '.p'

        #filename =  '/home/henry/data/synth/home_poses/home_pose_'+gender+'.p'

        pickle.dump(training_data_dict, open(filename, 'wb'))
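
The sort-then-unique pair above is the core rasterization trick: for every taxel it keeps only the vertex nearest the mat. A self-contained toy version (made-up points, not from the source):

import numpy as np

# (x, y, z) points; cells (0, 0) and (1, 3) each contain two points
pts = np.array([[0, 0, 5], [0, 0, 2], [1, 3, 7], [1, 3, 4], [2, 1, 9]])

# stage 1: pack (x, y, z) into one key so sorting orders points by cell and
# by ascending z within each cell
key = pts[:, 0] * 10000000 + pts[:, 1] * 100000 + pts[:, 2]
pts_sorted = pts[key.argsort()]

# stage 2: key on (x, y) only; np.unique(return_index=True) keeps the first
# (= minimum-depth) point per cell
cell_key = pts_sorted[:, 0] * 100 + pts_sorted[:, 1]
_, first_idx = np.unique(cell_key, return_index=True)
print(pts_sorted[first_idx])  # [[0 0 2], [1 3 4], [2 1 9]]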
Code Example #12
def reprocess_synth_data():
    # fix_angles_in_dataset()

    #all_data_names = [#["f", "lay", 1944, "1to10", "train"],
    #                  ["f", "lay", 2210, "11to20", "train"],
    #                  ["f", "lay", 2201, "21to30", "train"],
    #                  ["f", "lay", 2194, "31to40", "train"]]
    #all_data_names = [#["f", "lside", 1857, "1to10", "train"],
    #                  ["f", "lside", 2087, "11to20", "train"],
    #                  ["f", "lside", 2086, "21to30", "train"],
    #                  ["f", "lside", 2106, "31to40", "train"]]
    #all_data_names = [#["f", "rside", 1805, "1to10", "train"],
    #                  ["f", "rside", 2001, "11to20", "train"],
    #                  ["f", "rside", 1922, "21to30", "train"],
    #                  ["f", "rside", 1949, "31to40", "train"]]
    #all_data_names = [#["m", "lay", 1946, "1to10", "train"],
    #                  ["m", "lay", 2192, "11to20", "train"]]
    #                  ["m", "lay", 2178, "21to30", "train"],
    #                  ["m", "lay", 2177, "31to40", "train"]]
    #all_data_names = [#["m", "lside", 1731, "1to10", "train"],
    #                  ["m", "lside", 2007, "11to20", "train"]]
    #                  ["m", "lside", 2002, "21to30", "train"],
    #                  ["m", "lside", 2021, "31to40", "train"]]
    all_data_names = [  #["m", "rside", 1704, "1to10", "train"],
        #                  ["m", "rside", 1927, "11to20", "train"]]
        #                  ["m", "rside", 1844, "21to30", "train"],
        ["m", "rside", 1902, "31to40", "train"]
    ]

    #all_data_names = [#["f", "lay", 2198, "41to50", "train"],
    #["f", "lay", 2197, "51to60", "train"],
    #                  ["f", "lay", 2213, "61to70", "train"],
    #                  ["f", "lay", 2184, "71to80", "train"]]
    #all_data_names = [#["f", "lside", 2091, "41to50", "train"],
    #["f", "lside", 2053, "51to60", "train"],
    #["f", "lside", 2014, "61to70", "train"]]#,
    #                  ["f", "lside", 2058, "71to80", "train"]]
    #all_data_names = [#["f", "rside", 1976, "41to50", "train"],
    #["f", "rside", 2043, "51to60", "train"],
    #                  ["f", "rside", 1987, "61to70", "train"]]#,
    #                  ["f", "rside", 2010, "71to80", "train"]]
    #all_data_names = [#["m", "lay", 2195, "41to50", "train"],
    #                  ["m", "lay", 2199, "51to60", "train"],
    #                  ["m", "lay", 2203, "61to70", "train"]]#,
    #                  ["m", "lay", 2188, "71to80", "train"]]
    #all_data_names = [#["m", "lside", 2049, "41to50", "train"],
    #["m", "lside", 1952, "51to60", "train"],
    #                  ["m", "lside", 1934, "61to70", "train"]]#,
    #                  ["m", "lside", 2002, "71to80", "train"]]
    #all_data_names = [#["m", "rside", 1904, "41to50", "train"],
    #                  ["m", "rside", 1973, "51to60", "train"],
    #                  ["m", "rside", 1940, "61to70", "train"]]#,
    #                  ["m", "rside", 1939, "71to80", "train"]] #messed up

    amount_to_add_ct = 1895

    num_data_points = 0

    training_data_dict = {}
    training_data_dict['markers_xyz_m'] = []
    training_data_dict['root_xyz_shift'] = []
    training_data_dict['joint_angles'] = []
    training_data_dict['body_shape'] = []
    training_data_dict['body_mass'] = []
    training_data_dict['body_height'] = []
    training_data_dict['bed_angle_deg'] = []
    training_data_dict['images'] = []

    for gpsn in all_data_names:
        gender = gpsn[0]
        posture = gpsn[1]
        num_resting_poses = gpsn[2]
        subj_nums = gpsn[3]
        dattype = gpsn[4]

        model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_' + gender + '_lbs_10_207_0_v1.0.0.pkl'
        m = load_model(model_path)

        resting_pose_data_list = np.load(
            '/home/henry/data/02_resting_poses/slp_filtered/resting_pose_slp_'
            + subj_nums + '_' + posture + '_' + gender + '_' +
            str(num_resting_poses) + '_filtered.npy',
            allow_pickle=True)

        training_database_pmat_height_list = np.load(
            '/home/henry/data/03a_pmat_height/slp/pmat_height_slp_' +
            subj_nums + '_' + posture + '_' + gender + '_' +
            str(num_resting_poses) + '_filtered.npy',
            allow_pickle=True)

        print len(resting_pose_data_list), np.shape(
            training_database_pmat_height_list)[0]
        print np.shape(training_database_pmat_height_list[0])

        for resting_pose_data_ct in range(len(resting_pose_data_list)):

            # offset the index into the tail of the pose list; indices past the
            # end of the list will raise IndexError
            resting_pose_data_ct += amount_to_add_ct

            num_data_points += 1
            resting_pose_data = resting_pose_data_list[resting_pose_data_ct]
            pmat = training_database_pmat_height_list[resting_pose_data_ct]
            capsule_angles = resting_pose_data[0].tolist()
            root_joint_pos_list = resting_pose_data[1]
            body_shape_list = resting_pose_data[2]
            body_mass = resting_pose_data[3]

            #print(np.max(pmat))

            if math.isnan(np.max(pmat)): continue

            # print "shape", body_shape_list

            #print np.shape(resting_pose_data), np.shape(pmat), np.shape(capsule_angles), np.shape(
            #    root_joint_pos_list), np.shape(body_shape_list)

            for shape_param in range(10):
                m.betas[shape_param] = float(body_shape_list[shape_param])

            m.pose[:] = np.random.rand(m.pose.size) * 0.  # reset all pose angles to zero

            training_data_dict['body_mass'].append(body_mass)
            training_data_dict['body_height'].append(
                np.abs(np.min(m.r[:, 1]) - np.max(m.r[:, 1])))

            #print training_data_dict['body_mass'][-1] * 2.20462, 'MASS, lbs'
            #print training_data_dict['body_height'][-1] * 3.28084, 'HEIGHT, ft'

            # capsule angles fill 60 of SMPL's 72 pose DOFs; capsule_angles[3:6]
            # is skipped, and pose[30:36] (feet) and pose[66:72] (hands) stay zero
            m.pose[0:3] = capsule_angles[0:3]
            m.pose[3:6] = capsule_angles[6:9]
            m.pose[6:9] = capsule_angles[9:12]
            m.pose[9:12] = capsule_angles[12:15]
            m.pose[12:15] = capsule_angles[15:18]
            m.pose[15:18] = capsule_angles[18:21]
            m.pose[18:21] = capsule_angles[21:24]
            m.pose[21:24] = capsule_angles[24:27]
            m.pose[24:27] = capsule_angles[27:30]
            m.pose[27:30] = capsule_angles[30:33]
            m.pose[36:39] = capsule_angles[33:36]  # neck
            m.pose[39:42] = capsule_angles[36:39]
            m.pose[42:45] = capsule_angles[39:42]
            m.pose[45:48] = capsule_angles[42:45]  # head
            m.pose[48:51] = capsule_angles[45:48]
            m.pose[51:54] = capsule_angles[48:51]
            m.pose[54:57] = capsule_angles[51:54]
            m.pose[57:60] = capsule_angles[54:57]
            m.pose[60:63] = capsule_angles[57:60]
            m.pose[63:66] = capsule_angles[60:63]

            training_data_dict['joint_angles'].append(
                np.array(m.pose).astype(float))
            training_data_dict['body_shape'].append(
                np.array(m.betas).astype(float))
            # print "dict", training_data_dict['body_shape'][-1]

            # training_data_dict['v_template'].append(np.asarray(m.v_template))
            # training_data_dict['shapedirs'].append(np.asarray(m.shapedirs))

            # print np.sum(np.array(m.v_template))
            # print np.sum(np.array(m.shapedirs))
            # print np.sum(np.zeros((np.shape(np.array(m.J_regressor)))) + np.array(m.J_regressor))

            root_shift_x = root_joint_pos_list[0] + 0.374648 + 10 * INTER_SENSOR_DISTANCE
            root_shift_y = root_joint_pos_list[1] + 0.927099 + 10 * INTER_SENSOR_DISTANCE
            # root_shift_z = height
            root_shift_z = root_joint_pos_list[2] - 0.15
            #print root_shift_z
            #print root_shift_z

            x_positions = np.asarray(m.J_transformed)[:, 0] - np.asarray(
                m.J_transformed)[0, 0] + root_shift_x
            y_positions = np.asarray(m.J_transformed)[:, 1] - np.asarray(
                m.J_transformed)[0, 1] + root_shift_y
            z_positions = np.asarray(m.J_transformed)[:, 2] - np.asarray(
                m.J_transformed)[0, 2] + root_shift_z

            if resting_pose_data_ct == 0:
                print m.betas
                print m.pose
                print "J x trans", m.J_transformed[:, 0]

            xyz_positions = np.transpose(
                [x_positions, y_positions, z_positions])
            xyz_positions_shape = np.shape(xyz_positions)
            xyz_positions = xyz_positions.reshape(xyz_positions_shape[0] *
                                                  xyz_positions_shape[1])
            training_data_dict['markers_xyz_m'].append(xyz_positions)
            training_data_dict['root_xyz_shift'].append(
                [root_shift_x, root_shift_y, root_shift_z])
            training_data_dict['images'].append(pmat.reshape(64 * 27))

            training_data_dict['bed_angle_deg'].append(0.)

            print resting_pose_data_ct, len(training_data_dict['images'])
            #if resting_pose_data_ct == 249: break
            #if len(training_data_dict['images']) == 1500: break

            visualize_pressure_map(
                training_data_dict['images'][-1].reshape(64, 27),
                training_data_dict['markers_xyz_m'][-1], None, None, None)

        print training_data_dict['markers_xyz_m'][0]

        #print "RECHECKING!"
        #for entry in range(len(training_data_dict['markers_xyz_m'])):
        #print entry, training_data_dict['markers_xyz_m'][entry][0:2], training_data_dict['body_shape'][entry][0:2], \
        #training_data_dict['joint_angles'][entry][0:2]

    #pickle.dump(training_data_dict, open(os.path.join(
    #    '/home/henry/data/synth/random/train_' + gender + '_' + posture + '_' + str(num_data_points) + '_' + stiffness + '_stiff.p'), 'wb'))
    # note: the subject range in the output filename is hard-coded to '41to50'
    # rather than built from subj_nums
    pickle.dump(
        training_data_dict,
        open(
            '/home/henry/git/sim_camera_resting_scene/data_BR/synth/slp/' +
            dattype + '_slp_' + posture + '_' + gender + '_41to50_' +
            str(len(training_data_dict['images'])) + '.p', 'wb'))
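
The function above writes several parallel lists into a single pickle per gender/posture/subject-range combination. A hypothetical consumer sketch (the filename is an assumption built from the naming scheme above):

import pickle

path = ('/home/henry/git/sim_camera_resting_scene/data_BR/synth/slp/'
        'train_slp_rside_m_41to50_1902.p')  # hypothetical output of the run above
d = pickle.load(open(path, 'rb'))

print(len(d['images']))        # one flattened 64 x 27 pressure map per pose
print(d['images'][0].shape)    # (1728,)
print(len(d['joint_angles']))  # 72 SMPL pose angles per pose
print(len(d['body_shape']))    # 10 SMPL betas per pose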
Code Example #13
File: demo.py  Project: antoalli/shape_completion
'''
    This is a short demo to see how to load and use the SMAL model.
    Please read the README.txt file for requirements.

'''

from smpl.smpl_webuser.serialization import load_model
from my_mesh.mesh import myMesh
import pickle as pkl

# Load the smal model
model_path = 'smal_CVPR2017.pkl'
model = load_model(model_path)

# Save the mean model
# model.r are the model vertexes, and model.f are the mesh faces.
m = myMesh(v=model.r, f=model.f)
m.save_ply('smal_mean_shape.ply')
print 'saved mean shape'

# Load the family clusters data (see paper for details)
# and save the mean per-family shape
# 0-felidae(cats); 1-canidae(dogs); 2-equidae(horses);
# 3-bovidae(cows); 4-hippopotamidae(hippos);
# The clusters are over the shape coefficients (betas);
# setting different betas changes the shape of the model
model_data_path = 'smal_CVPR2017_data.pkl'
data = pkl.load(open(model_data_path))
for i, betas in enumerate(data['cluster_means']):
    model.betas[:] = betas
    model.pose[:] = 0.
    # save this family's mean shape (output filename assumed for illustration)
    m = myMesh(v=model.r, f=model.f)
    m.save_ply('family_' + str(i) + '.ply')
    print 'saved mean shape for family', i
Code Example #14
    def val_convnet_general(self, epoch):

        if GENDER == "m":
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_m_lbs_10_207_0_v1.0.0.pkl'
        else:
            model_path = '/home/henry/git/SMPL_python_v.1.0.0/smpl/models/basicModel_f_lbs_10_207_0_v1.0.0.pkl'

        self.m = load_model(model_path)

        self.m.pose[41] = -np.pi / 6 * 0.9
        self.m.pose[44] = np.pi / 6 * 0.9
        self.m.pose[50] = -np.pi / 3 * 0.9
        self.m.pose[53] = np.pi / 3 * 0.9
        ALL_VERTS = np.array(self.m.r)

        self.pyRender = libPyRender.pyRenderMesh(render=False)
        '''
        Validate the model for one epoch.
        '''
        # Some models use slightly different forward passes at train and test
        # time (e.g., any model with Dropout). Putting the model in eval mode
        # (as opposed to train mode) tells it which forward pass to use.
        self.criterion = nn.L1Loss()
        self.criterion2 = nn.MSELoss()

        RESULTS_DICT = {}
        RESULTS_DICT['j_err'] = []
        RESULTS_DICT['betas'] = []
        RESULTS_DICT['dir_v_err'] = []
        RESULTS_DICT['v2v_err'] = []
        RESULTS_DICT['dir_v_limb_err'] = []
        RESULTS_DICT['v_to_gt_err'] = []
        RESULTS_DICT['v_limb_to_gt_err'] = []
        RESULTS_DICT['gt_to_v_err'] = []
        RESULTS_DICT['precision'] = []
        RESULTS_DICT['recall'] = []
        RESULTS_DICT['overlap_d_err'] = []
        RESULTS_DICT['all_d_err'] = []
        RESULTS_DICT['overlapping_pix'] = []
        init_time = time.time()

        with torch.autograd.set_detect_anomaly(True):

            # This will loop a total of training_images / batch_size times
            for batch_idx, batch in enumerate(self.train_loader):

                batch1 = batch[1].clone()

                betas_gt = torch.mean(batch[1][:, 72:82], dim=0).numpy()
                angles_gt = torch.mean(batch[1][:, 82:154], dim=0).numpy()
                root_shift_est_gt = torch.mean(batch[1][:, 154:157],
                                               dim=0).numpy()

                NUMOFOUTPUTDIMS = 3
                NUMOFOUTPUTNODES_TRAIN = 24
                self.output_size_train = (NUMOFOUTPUTNODES_TRAIN,
                                          NUMOFOUTPUTDIMS)

                dropout_variance = None

                smpl_verts = np.concatenate(
                    (ALL_VERTS[:, 1:2] + 0.0143 + 32 * 0.0286 + .286,
                     ALL_VERTS[:, 0:1] + 0.0143 + 13.5 * 0.0286,
                     -ALL_VERTS[:, 2:3]),
                    axis=1)

                smpl_faces = np.array(self.m.f)

                camera_point = [1.09898028, 0.46441343, -CAM_BED_DIST]

                bedangle = 0.0
                # print smpl_verts

                pmat = batch[0][0, 1, :, :].clone().numpy() * 25.50538629767412
                #print pmat.shape

                for beta in range(betas_gt.shape[0]):
                    self.m.betas[beta] = betas_gt[beta]
                for angle in range(angles_gt.shape[0]):
                    self.m.pose[angle] = angles_gt[angle]

                smpl_verts_gt = np.array(self.m.r)
                for s in range(root_shift_est_gt.shape[0]):
                    smpl_verts_gt[:, s] += (root_shift_est_gt[s] -
                                            float(self.m.J_transformed[0, s]))

                smpl_verts_gt = np.concatenate(
                    (smpl_verts_gt[:, 1:2] - 0.286 + 0.0143,
                     smpl_verts_gt[:, 0:1] - 0.286 + 0.0143,
                     -smpl_verts_gt[:, 2:3]),
                    axis=1)

                joint_cart_gt = np.array(self.m.J_transformed).reshape(24, 3)
                for s in range(root_shift_est_gt.shape[0]):
                    joint_cart_gt[:, s] += (root_shift_est_gt[s] -
                                            float(self.m.J_transformed[0, s]))

                #print joint_cart_gt, 'gt'

                # render everything
                RESULTS_DICT = self.pyRender.render_mesh_pc_bed_pyrender_everything_synth(
                    smpl_verts,
                    smpl_faces,
                    camera_point,
                    bedangle,
                    RESULTS_DICT,
                    smpl_verts_gt=smpl_verts_gt,
                    pmat=pmat,
                    markers=None,
                    dropout_variance=dropout_variance)

                #time.sleep(300)

                print np.mean(RESULTS_DICT['precision'])
                print time.time() - init_time, "  Batch idx:", batch_idx
                #break

        #save here

        pkl.dump(
            RESULTS_DICT,
            open(
                '/home/henry/git/bodies-at-rest/data_BR/final_results/results_synth_'
                + TESTING_FILENAME + '_' + NETWORK_2 + '.p', 'wb'))
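
RESULTS_DICT collects per-batch error metrics filled in by render_mesh_pc_bed_pyrender_everything_synth. For reference, a hedged sketch (my own helper, not the project's API) of the kind of vertex-to-vertex error behind an entry like 'v2v_err':

import numpy as np

def v2v_error(verts_est, verts_gt):
    # mean Euclidean distance (meters) between corresponding vertices
    return np.mean(np.linalg.norm(verts_est - verts_gt, axis=1))

est = np.zeros((6890, 3))
gt = np.full((6890, 3), 0.01)
print(v2v_error(est, gt))  # ~0.01732 (= 0.01 * sqrt(3))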