def gen_multi_seqs():
    global start_time
    start_time = time.time()
    log_message("Remove out/, and tmp/")
    if os.path.exists("../out/"):
        shutil.rmtree("../out/")
    if os.path.exists("../tmp/"):
        shutil.rmtree("../tmp/")

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')
    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']

    print("Resource information: ")
    bg_splits, cloth_splits, nseqs, nshapes = print_resource_info(
        bg_path, smpl_data_folder, smpl_data_filename)
    for gender in ["female", "male"]:
        for split in ["train", "test"]:
            for irun in range(100):
                nbg = len(bg_splits[split])
                ncloth = len(cloth_splits[gender + "_" + split])
                # setup params
                setting = edict()
                setting.split = split  ## train/test split
                setting.gender = gender  ## gender, female/male
                setting.iseq = random.randint(0, nseqs - 1)  ## seq id, rand
                setting.ibg = random.randint(0, nbg - 1)  ## bg image, rand
                setting.ishape = random.randint(0, nshapes - 1)  ## shape idx, rand
                setting.icloth = random.randint(0, ncloth - 1)  ## cloth idx, rand
                setting.irun = irun
                print("Generate a sequence with settings: ", setting)
                nframes = gen_one_seqs(setting)
                print("#%d frames are generated." % (nframes))
Example #2
def main():
    # time logging
    global start_time
    start_time = time.time()

    import argparse
    
    # parse commandline arguments
    log_message(sys.argv)
    parser = argparse.ArgumentParser(description='Generate synth dataset images.')
    parser.add_argument('--idx', type=int,
                        help='idx of the requested sequence')
    parser.add_argument('--ishape', type=int,
                        help='requested cut, according to the stride')
    parser.add_argument('--stride', type=int,
                        help='stride amount, default 50')

    args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])
    
    idx = args.idx
    ishape = args.ishape
    stride = args.stride
    
    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)
    
    if idx == None:
        exit(1)
    if ishape == None:
        exit(1)
    if stride == None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50
    
    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))
    idx_info = [x for x in idx_info if x['name'][:4] != 'h36m']

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))
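    # e.g. with 1500 entries in idx_info (illustrative count), a global --idx of 3101
    # maps to runpass 2 and local sequence idx 101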
    
    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)
    idx_info = idx_info[idx]
    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')
    
    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    resy = params['resy']
    resx = params['resx']
    clothing_option = params['clothing_option'] # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    # compute number of cuts
    nb_ishape = max(1, int(np.ceil((idx_info['nb_frames'] - (clipsize - stride))/stride)))
    log_message("Max ishape: %d" % (nb_ishape - 1))
    
    if ishape is None:
        exit(1)
    
    assert(ishape < nb_ishape)
    
    # name is set given idx
    name = idx_info['name']
    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    params['output_path'] = output_path
    tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
    params['tmp_path'] = tmp_path
    
    # check if already computed
    #  + clean up existing tmp folders if any
    if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
        os.system('rm -rf %s' % tmp_path)
    rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace(' ', '')), (ishape + 1))
    #if os.path.isfile(rgb_vid_filename):
    #    log_message("ALREADY COMPUTED - existing: %s" % rgb_vid_filename)
    #    return 0
    
    # create tmp directory
    if not exists(tmp_path):
        mkdir_safe(tmp_path)
    
    # >> don't use random generator before this point <<

    # initialize RNG with seeds from sequence id
    import hashlib
    s = "synth_data:%d:%d:%d" % (idx, runpass,ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)
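    # note: seeding both RNGs from (idx, runpass, ishape) makes each clip deterministic,
    # so re-running with the same arguments redraws the same clothing, background and shape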
    
    if(output_types['vblur']):
        vblur_factor = np.random.normal(0.5, 0.5)
        params['vblur_factor'] = vblur_factor
    
    log_message("Setup Blender")

    # create copy-spher.harm. directory if not exists
    sh_dir = join(tmp_path, 'spher_harm')
    if not exists(sh_dir):
        mkdir_safe(sh_dir)
    sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
    os.system('cp spher_harm/sh.osl %s' % sh_dst)

    genders = {0: 'female', 1: 'male'}
    # pick random gender
    gender = choice(genders)

    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    log_message("Listing background images")
    bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
    nh_txt_paths = []
    with open(bg_names) as f:
        for line in f:
            nh_txt_paths.append(join(bg_path, line))

    # grab clothing names
    log_message("clothing: %s" % clothing_option)
    with open( join(smpl_data_folder, 'textures', '%s_%s.txt' % ( gender, idx_info['use_split'] ) ) ) as f:
        txt_paths = f.read().splitlines()

    # if using only one source of clothing
    if clothing_option == 'nongrey':
        txt_paths = [k for k in txt_paths if 'nongrey' in k]
    elif clothing_option == 'grey':
        txt_paths = [k for k in txt_paths if 'nongrey' not in k]
    
    # random clothing texture
    cloth_img_name = choice(txt_paths)
    cloth_img_name = join(smpl_data_folder, cloth_img_name)
    cloth_img = bpy.data.images.load(cloth_img_name)

    # random background
    bg_img_name = choice(nh_txt_paths)[:-1]
    bg_img = bpy.data.images.load(bg_img_name)

    log_message("Loading parts segmentation")
    beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))
    
    log_message("Building materials tree")
    mat_tree = bpy.data.materials['Material'].node_tree
    create_sh_material(mat_tree, sh_dst, cloth_img)
    res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx)

    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
    
    log_message("Initializing scene")
    camera_distance = np.random.normal(8.0, 1)
    params['camera_distance'] = camera_distance
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob
    segmented_materials = True #True: 0-24, False: expected to have 0-1 bg/fg
    
    log_message("Creating materials segmentation")
    # create material segmentation
    if segmented_materials:
        materials = create_segmentation(ob, params)
        prob_dressed = {'leftLeg':.5, 'leftArm':.9, 'leftHandIndex1':.01,
                        'rightShoulder':.8, 'rightHand':.01, 'neck':.01,
                        'rightToeBase':.9, 'leftShoulder':.8, 'leftToeBase':.9,
                        'rightForeArm':.5, 'leftHand':.01, 'spine':.9,
                        'leftFoot':.9, 'leftUpLeg':.9, 'rightUpLeg':.9,
                        'rightFoot':.9, 'head':.01, 'leftForeArm':.5,
                        'rightArm':.5, 'spine1':.9, 'hips':.9,
                        'rightHandIndex1':.01, 'spine2':.9, 'rightLeg':.5}
    else:
        materials = {'FullBody': bpy.data.materials['Material']}
        prob_dressed = {'FullBody': .6}

    orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()) - Vector((-1., 1., 1.))
    orig_cam_loc = cam_ob.location.copy()

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data, ob, obname, idx=idx, gender=gender)
    
    log_message("Loaded body data for %s" % name)
    
    nb_fshapes = len(fshapes)
    if idx_info['use_split'] == 'train':
        fshapes = fshapes[:int(nb_fshapes*0.8)]
    elif idx_info['use_split'] == 'test':
        fshapes = fshapes[int(nb_fshapes*0.8):]
    
    # pick random real body shape
    shape = choice(fshapes) #+random_shape(.5) can add noise
    #shape = random_shape(3.) # random body shape
    
    # example shapes
    #shape = np.zeros(10) #average
    #shape = np.array([ 2.25176191, -3.7883464 ,  0.46747496,  3.89178988,  2.20098416,  0.26102114, -3.07428093,  0.55708514, -3.94442258, -2.88552087]) #fat
    #shape = np.array([-2.26781107,  0.88158132, -0.93788176, -0.23480508,  1.17088298,  1.55550789,  0.44383225,  0.37688275, -0.27983086,  1.77102953]) #thin
    #shape = np.array([ 0.00404852,  0.8084637 ,  0.32332591, -1.33163664,  1.05008727,  1.60955275,  0.22372946, -0.10738459,  0.89456312, -1.22231216]) #short
    #shape = np.array([ 3.63453289,  1.20836171,  3.15674431, -0.78646793, -1.93847355, -0.32129994, -0.97771656,  0.94531640,  0.52825811, -0.99324327]) #tall

    ndofs = 10

    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname+'_Pelvis'].location).copy()

    # create output directory
    if not exists(output_path):
        mkdir_safe(output_path)

    # spherical harmonics material needs a script to be loaded and compiled
    scs = []
    for mname, material in materials.items():
        scs.append(material.node_tree.nodes['Script'])
        scs[-1].filepath = sh_dst
        scs[-1].update()

    rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1)
    rgb_path = join(tmp_path, rgb_dirname)

    data = cmu_parms[name]
    
    fbegin = ishape*stepsize*stride
    fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
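    # e.g. with ishape=2, stride=50, stepsize=4, clipsize=100 (illustrative values):
    # fbegin = 2*4*50 = 400 and fend = min(400 + 4*100, len(data['poses'])) = min(800, len(data['poses']))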
    
    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    # force recomputation of joint angles unless shape is all zeros
    curr_shape = np.zeros_like(shape)
    nframes = len(data['poses'][::stepsize])

    matfile_info = join(output_path, name.replace(" ", "") + "_c%04d_info.mat" % (ishape+1))
    log_message('Working on %s' % matfile_info)

    # allocate
    dict_info = {}
    dict_info['bg'] = np.zeros((N,), dtype=np.object) # background image path
    dict_info['camLoc'] = np.empty(3) # (1, 3)
    dict_info['clipNo'] = ishape +1
    dict_info['cloth'] = np.zeros((N,), dtype=np.object) # clothing texture image path
    dict_info['gender'] = np.empty(N, dtype='uint8') # 0 for male, 1 for female
    dict_info['joints2D'] = np.empty((2, 24, N), dtype='float32') # 2D joint positions in pixel space
    dict_info['joints3D'] = np.empty((3, 24, N), dtype='float32') # 3D joint positions in world coordinates
    dict_info['light'] = np.empty((9, N), dtype='float32')
    dict_info['pose'] = np.empty((data['poses'][0].size, N), dtype='float32') # joint angles from SMPL (CMU)
    dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
    dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
    dict_info['zrot'] = np.empty(N, dtype='float32')
    dict_info['camDist'] = camera_distance
    dict_info['stride'] = stride

    if name.replace(" ", "").startswith('h36m'):
        dict_info['source'] = 'h36m'
    else:
        dict_info['source'] = 'cmu'

    if(output_types['vblur']):
        dict_info['vblur_factor'] = np.empty(N, dtype='float32')

    # for each stepsize'th frame in the sequence
    get_real_frame = lambda ifr: ifr
    random_zrot = 0
    reset_loc = False
    batch_it = 0
    curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene,
                                       cam_ob, smpl_data['regression_verts'], smpl_data['joint_regressor'])
    random_zrot = 2*np.pi*np.random.rand()
    
    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()
    arm_ob.rotation_euler.x -= math.pi / 2

    # create a keyframe animation with pose, translation, blendshapes and camera motion
    # LOOP TO CREATE 3D ANIMATION
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame
        scene.frame_set(get_real_frame(seq_frame))

        # apply the translation, pose and shape to the character
        apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname, scene, cam_ob, get_real_frame(seq_frame))
        dict_info['shape'][:, iframe] = shape[:ndofs]
        dict_info['pose'][:, iframe] = pose
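        # the line below maps the gender string back to its numeric key in the `genders` dict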
        dict_info['gender'][iframe] = list(genders)[list(genders.values()).index(gender)]
        if(output_types['vblur']):
            dict_info['vblur_factor'][iframe] = vblur_factor

        arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))
        arm_ob.pose.bones[obname+'_root'].keyframe_insert('rotation_quaternion', frame=get_real_frame(seq_frame))
        dict_info['zrot'][iframe] = random_zrot

        scene.update()

        # Bodies centered only in each minibatch of clipsize frames
        if seq_frame == 0 or reset_loc: 
            reset_loc = False
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()
            rotated_orig = Vector([orig_pelvis_loc.copy()[0], orig_pelvis_loc.copy()[2], -orig_pelvis_loc.copy()[1]])
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() - rotated_orig.copy())
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
            dict_info['camLoc'] = np.array(cam_ob.location)

    scene.node_tree.nodes['Image'].image = bg_img

    for part, material in materials.items():
        material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (0, 0)

    # random light
    sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
    sh_coeffs[0] = .5 + .9 * np.random.rand()  # first coeff is ambient light and needs a minimum; the rest are uniformly distributed, higher means brighter
    sh_coeffs[1] = -.7 * np.random.rand()
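    # the 9 coefficients drive the sh.osl 'Script' node inputs below; 9 values correspond to
    # a second-order (bands l = 0..2) real spherical-harmonic lighting model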

    for ish, coeff in enumerate(sh_coeffs):
        for sc in scs:
            sc.inputs[ish+1].default_value = coeff

    # iterate over the keyframes and render
    # LOOP TO RENDER
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        scene.frame_set(get_real_frame(seq_frame))
        iframe = seq_frame

        dict_info['bg'][iframe] = bg_img_name
        dict_info['cloth'][iframe] = cloth_img_name
        dict_info['light'][:, iframe] = sh_coeffs

        scene.render.use_antialiasing = False
        scene.render.filepath = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))

        log_message("Rendering frame %d" % seq_frame)
        
        # disable render output
        logfile = '/dev/null'
        open(logfile, 'a').close()
        old = os.dup(1)
        sys.stdout.flush()
        os.close(1)
        os.open(logfile, os.O_WRONLY)

        # Render
        bpy.ops.render.render(write_still=True)

        # disable output redirection
        os.close(1)
        os.dup(old)
        os.close(old)

        # NOTE:
        # ideally, pixels should be readable from a viewer node, but I get only zeros
        # --> https://ammous88.wordpress.com/2015/01/16/blender-access-render-results-pixels-directly-from-python-2/
        # len(np.asarray(bpy.data.images['Render Result'].pixels)) is 0
        # Therefore we write them to temporary files and read them with the OpenEXR library (available for python2) in main_part2.py.
        # Alternatively, if you don't want to use the OpenEXR library, the commented code below does the loading with Blender functions, but it can cause a memory leak.
        # If you want to use it, copy the necessary lines from main_part2.py, such as the definitions of dict_normal, matfile_normal, ...

        #for k, folder in res_paths.items():
        #   if not k== 'vblur' and not k=='fg':
        #       path = join(folder, 'Image%04d.exr' % get_real_frame(seq_frame))
        #       render_img = bpy.data.images.load(path)
        #       # render_img.pixels size is width * height * 4 (rgba)
        #       arr = np.array(render_img.pixels[:]).reshape(resx, resy, 4)[::-1,:, :] # images are vertically flipped 
        #       if k == 'normal':# 3 channels, original order
        #           mat = arr[:,:, :3]
        #           dict_normal['normal_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'gtflow':
        #           mat = arr[:,:, 1:3]
        #           dict_gtflow['gtflow_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'depth':
        #           mat = arr[:,:, 0]
        #           dict_depth['depth_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'segm':
        #           mat = arr[:,:,0]
        #           dict_segm['segm_%d' % (iframe + 1)] = mat.astype(np.uint8, copy=False)
        #
        #       # remove the image to release memory, object handles, etc.
        #       render_img.user_clear()
        #       bpy.data.images.remove(render_img)
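        # Illustrative sketch only (not the actual main_part2.py code): reading one of the
        # temporary EXR outputs back with the OpenEXR/Imath Python bindings. The 'depth' key,
        # the 'R' channel name and the (resy, resx) layout are assumptions for illustration.
        #
        # import OpenEXR, Imath
        # exr_file = OpenEXR.InputFile(join(res_paths['depth'], 'Image%04d.exr' % get_real_frame(seq_frame)))
        # pixel_type = Imath.PixelType(Imath.PixelType.FLOAT)
        # depth = np.frombuffer(exr_file.channel('R', pixel_type), dtype=np.float32).reshape(resy, resx)
        # exr_file.close()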

        # bone locations should be saved after rendering so that the bones are updated
        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene, cam_ob)
        dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
        dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)

        reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any()
        arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion((1, 0, 0, 0))

    # save a .blend file for debugging:
    # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))
    
    # save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3)
    cmd_ffmpeg = 'ffmpeg -y -r 30 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 %s_c%04d.mp4' % (join(rgb_path, 'Image%04d.png'), join(output_path, name.replace(' ', '')), (ishape + 1))
    log_message("Generating RGB video (%s)" % cmd_ffmpeg)
    os.system(cmd_ffmpeg)
    
    if(output_types['vblur']):
        cmd_ffmpeg_vblur = 'ffmpeg -y -r 30 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" %s_c%04d.mp4' % (join(res_paths['vblur'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_vblur'), (ishape + 1))
        log_message("Generating vblur video (%s)" % cmd_ffmpeg_vblur)
        os.system(cmd_ffmpeg_vblur)
   
    if(output_types['fg']):
        cmd_ffmpeg_fg = 'ffmpeg -y -r 30 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 %s_c%04d.mp4' % (join(res_paths['fg'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_fg'), (ishape + 1))
        log_message("Generating fg video (%s)" % cmd_ffmpeg_fg)
        os.system(cmd_ffmpeg_fg)
   
    cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname, tmp_path, rgb_dirname)
    log_message("Tarballing the images (%s)" % cmd_tar)
    os.system(cmd_tar)
    
    # save annotation excluding png/exr data to _info.mat file
    import scipy.io
    scipy.io.savemat(matfile_info, dict_info, do_compression=True)
Example #3
def main():
    # time logging
    global start_time
    start_time = time.time()

    import argparse

    # parse commandline arguments
    log_message(sys.argv)
    parser = argparse.ArgumentParser(
        description='Generate synth dataset images.')
    parser.add_argument('--idx',
                        type=int,
                        help='idx of the requested sequence')
    parser.add_argument('--name',
                        type=str,
                        help='name of the requested sequence')
    parser.add_argument('--ishape',
                        type=int,
                        help='requested cut, according to the stride')
    parser.add_argument('--stride', type=int, help='stride amount, default 50')
    parser.add_argument('--direction',
                        type=str,
                        help='subject direction, default forward')
    parser.add_argument('--subject_id',
                        type=int,
                        help='local subject id, default 0')

    args = parser.parse_args(sys.argv[sys.argv.index("---") + 1:])

    idx = args.idx
    name = args.name
    ishape = args.ishape
    stride = args.stride
    direction = args.direction
    subject_id = args.subject_id

    log_message("input idx: %d" % idx)
    log_message("input name: %s" % name)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)
    log_message("Subject direction: %s" % direction)
    log_message("Local subject id: %d" % subject_id)

    if idx == None:
        exit(1)
    if ishape == None:
        exit(1)
    if stride == None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50

    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))
    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))

    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)

    for dic in idx_info:
        if dic['name'] == name:
            idx_info = dic
            break
    else:
        idx_info = idx_info[idx]

    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    #log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    resy = params['resy']
    resx = params['resx']
    clothing_option = params['clothing_option']  # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    # compute number of cuts
    nb_ishape = max(
        1, int(np.ceil(
            (idx_info['nb_frames'] - (clipsize - stride)) / stride)))
    log_message("Max ishape: %d" % (nb_ishape - 1))

    if ishape is None:
        exit(1)

    assert (ishape < nb_ishape)

    # name is set given idx
    name = idx_info['name']
    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    params['output_path'] = output_path
    tmp_path = join(
        tmp_path,
        'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
    params['tmp_path'] = tmp_path

    # check if already computed
    #  + clean up existing tmp folders if any
    if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
        os.system('rm -rf %s' % tmp_path)
    rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace(
        ' ', '')), (ishape + 1))

    # create tmp directory
    if not exists(tmp_path):
        mkdir_safe(tmp_path)

    # >> don't use random generator before this point <<

    # initialize RNG with seeds from sequence id
    import hashlib
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    if (output_types['vblur']):
        vblur_factor = np.random.normal(0.5, 0.5)
        params['vblur_factor'] = vblur_factor

    log_message("Setup Blender")

    # create copy-spher.harm. directory if not exists
    sh_dir = join(tmp_path, 'spher_harm')
    if not exists(sh_dir):
        mkdir_safe(sh_dir)
    sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
    os.system('cp spher_harm/sh.osl %s' % sh_dst)

    genders = {0: 'male', 1: 'female'}
    # pick random gender
    gender = genders[sum(divmod(subject_id, 2)) %
                     2]  #genders[subject_id % 2]#choice(genders)
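    # this deterministic rule alternates gender in pairs of subject ids:
    # subject_id 0, 3, 4, 7, ... -> male and 1, 2, 5, 6, ... -> female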

    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    log_message("Listing background images")
    #bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
    bg_names = join(bg_path, 'bg.txt')
    nh_txt_paths = []
    with open(bg_names) as f:
        for line in f:
            nh_txt_paths.append(join(bg_path, line))

    # grab clothing names
    log_message("clothing: %s" % clothing_option)
    with open(join(smpl_data_folder, 'textures',
                   '%s_train.txt' % gender)) as f:
        txt_paths = f.read().splitlines()

    # if using only one source of clothing
    if clothing_option == 'nongrey':
        txt_paths = [k for k in txt_paths if 'nongrey' in k]
    elif clothing_option == 'grey':
        txt_paths = [k for k in txt_paths if 'nongrey' not in k]

    # random clothing texture
    cloth_img_name = choice(
        txt_paths)  #txt_paths[subject_id]#choice(txt_paths)
    cloth_img_name = join(smpl_data_folder, cloth_img_name)
    cloth_img = bpy.data.images.load(cloth_img_name)

    # random background
    bg_img_name = choice(nh_txt_paths)[:-1]
    bg_img = bpy.data.images.load(bg_img_name)

    log_message("Loading parts segmentation")
    beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))

    log_message("Building materials tree")
    mat_tree = bpy.data.materials['Material'].node_tree
    create_sh_material(mat_tree, sh_dst, cloth_img)
    res_paths = create_composite_nodes(scene.node_tree,
                                       params,
                                       img=bg_img,
                                       idx=idx)

    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))

    log_message("Initializing scene")
    camera_distance = 11.0  #np.random.normal(8.0, 1)
    params['camera_distance'] = camera_distance
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob
    segmented_materials = True  #True: 0-24, False: expected to have 0-1 bg/fg

    log_message("Creating materials segmentation")
    # create material segmentation
    if segmented_materials:
        materials = create_segmentation(ob, params)
        prob_dressed = {
            'leftLeg': .5,
            'leftArm': .9,
            'leftHandIndex1': .01,
            'rightShoulder': .8,
            'rightHand': .01,
            'neck': .01,
            'rightToeBase': .9,
            'leftShoulder': .8,
            'leftToeBase': .9,
            'rightForeArm': .5,
            'leftHand': .01,
            'spine': .9,
            'leftFoot': .9,
            'leftUpLeg': .9,
            'rightUpLeg': .9,
            'rightFoot': .9,
            'head': .01,
            'leftForeArm': .5,
            'rightArm': .5,
            'spine1': .9,
            'hips': .9,
            'rightHandIndex1': .01,
            'spine2': .9,
            'rightLeg': .5
        }
    else:
        materials = {'FullBody': bpy.data.materials['Material']}
        prob_dressed = {'FullBody': .6}

    orig_pelvis_loc = None
    random_zrot = get_zrot(name, direction)
    if direction == 'forward':
        orig_pelvis_loc = (
            arm_ob.matrix_world.copy() *
            arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector(
                (-1., 0.75, -1.3))
    elif direction == 'backward':
        orig_pelvis_loc = (
            arm_ob.matrix_world.copy() *
            arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector(
                (-1., 0.75, 3.1))

    orig_cam_loc = cam_ob.location.copy()
    print("CAM LOC:", orig_cam_loc, type(orig_cam_loc))

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data,
                                              ob,
                                              obname,
                                              name,
                                              gender=gender)

    log_message("Loaded body data for %s" % name)

    nb_fshapes = len(fshapes)
    #if idx_info['use_split'] == 'train':
    #    fshapes = fshapes[:int(nb_fshapes*0.8)]
    #elif idx_info['use_split'] == 'test':
    #    fshapes = fshapes[int(nb_fshapes*0.8):]

    # pick a body shape deterministically from the local subject id
    shape = fshapes[subject_id % nb_fshapes]  # choice(fshapes) or adding random_shape(.5) would add noise
    #shape = random_shape(3.) # random body shape

    ndofs = 10

    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname +
                                              '_Pelvis'].location).copy()

    # create output directory
    if not exists(output_path):
        mkdir_safe(output_path)

    # spherical harmonics material needs a script to be loaded and compiled
    scs = []
    for mname, material in materials.items():
        scs.append(material.node_tree.nodes['Script'])
        scs[-1].filepath = sh_dst
        scs[-1].update()

    rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1)
    rgb_path = join(tmp_path, rgb_dirname)

    data = cmu_parms[name]
    data = cut_sequence(name, data)

    fbegin = ishape * stepsize * stride
    fend = min(ishape * stepsize * stride + stepsize * clipsize,
               len(data['poses']))

    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    # force recomputation of joint angles unless shape is all zeros
    curr_shape = np.zeros_like(shape)
    nframes = len(data['poses'][::stepsize])

    matfile_info = join(
        output_path,
        name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1))
    log_message('Working on %s' % matfile_info)

    # allocate
    dict_info = {}
    dict_info['bg'] = np.zeros((N, ), dtype=np.object)  # background image path
    dict_info['camLoc'] = np.empty(3)  # (1, 3)
    dict_info['clipNo'] = ishape + 1
    dict_info['cloth'] = np.zeros(
        (N, ), dtype=np.object)  # clothing texture image path
    dict_info['gender'] = np.empty(N,
                                   dtype='uint8')  # 0 for male, 1 for female
    dict_info['joints2D'] = np.empty(
        (2, 24, N), dtype='float32')  # 2D joint positions in pixel space
    dict_info['joints3D'] = np.empty(
        (3, 24, N), dtype='float32')  # 3D joint positions in world coordinates
    dict_info['light'] = np.empty((9, N), dtype='float32')
    dict_info['pose'] = np.empty(
        (data['poses'][0].size, N),
        dtype='float32')  # joint angles from SMPL (CMU)
    dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
    dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
    dict_info['zrot'] = np.empty(N, dtype='float32')
    dict_info['camDist'] = camera_distance
    dict_info['stride'] = stride

    if name.replace(" ", "").startswith('h36m'):
        dict_info['source'] = 'h36m'
    else:
        dict_info['source'] = 'cmu'

    if (output_types['vblur']):
        dict_info['vblur_factor'] = np.empty(N, dtype='float32')

    # for each stepsize'th frame in the sequence
    get_real_frame = lambda ifr: ifr
    reset_loc = False
    batch_it = 0
    curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname,
                                       scene, cam_ob,
                                       smpl_data['regression_verts'],
                                       smpl_data['joint_regressor'])
    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()

    # create a keyframe animation with pose, translation, blendshapes and camera motion
    # LOOP TO CREATE 3D ANIMATION
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame
        scene.frame_set(get_real_frame(seq_frame))

        # Change shape
        if is_arbitrary_shape and iframe % 2 == 0:
            shape = choice(fshapes)
            shape += np.random.normal(0, .1, shape.shape)

        # apply the translation, pose and shape to the character
        apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname,
                               scene, cam_ob, get_real_frame(seq_frame))
        dict_info['shape'][:, iframe] = shape[:ndofs]
        dict_info['pose'][:, iframe] = pose
        dict_info['gender'][iframe] = list(genders)[list(
            genders.values()).index(gender)]
        if (output_types['vblur']):
            dict_info['vblur_factor'][iframe] = vblur_factor

        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            Euler((0, 0, random_zrot), 'XYZ'))
        arm_ob.pose.bones[obname + '_root'].keyframe_insert(
            'rotation_quaternion', frame=get_real_frame(seq_frame))
        dict_info['zrot'][iframe] = random_zrot

        scene.update()

        # Bodies centered only in each minibatch of clipsize frames
        if seq_frame == 0 or reset_loc:
            reset_loc = False
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() -
                                                     orig_pelvis_loc.copy())
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
            dict_info['camLoc'] = np.array(cam_ob.location)

    scene.node_tree.nodes['Image'].image = bg_img

    for part, material in materials.items():
        material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (
            0, 0)

    # random light
    sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
    sh_coeffs[0] = .5 + .9 * np.random.rand()  # first coeff is ambient light and needs a minimum; the rest are uniformly distributed, higher means brighter
    sh_coeffs[1] = -.7 * np.random.rand()

    for ish, coeff in enumerate(sh_coeffs):
        for sc in scs:
            sc.inputs[ish + 1].default_value = coeff

    # iterate over the keyframes and render
    # LOOP TO RENDER
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        scene.frame_set(get_real_frame(seq_frame))
        iframe = seq_frame

        dict_info['bg'][iframe] = bg_img_name
        dict_info['cloth'][iframe] = cloth_img_name
        dict_info['light'][:, iframe] = sh_coeffs

        img_path = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))
        scene.render.use_antialiasing = False
        scene.render.filepath = img_path

        log_message("Rendering frame %d" % seq_frame)

        # disable render output
        logfile = '/dev/null'
        open(logfile, 'a').close()
        old = os.dup(1)
        sys.stdout.flush()
        os.close(1)
        os.open(logfile, os.O_WRONLY)

        # Render
        bpy.ops.render.render(write_still=True)

        # disable output redirection
        os.close(1)
        os.dup(old)
        os.close(old)

        # bone locations should be saved after rendering so that the bones are updated
        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene,
                                                   cam_ob)
        dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
        dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)

        #Draw skeleton
        if is_visualization:
            draw_skeleton(img_path, dict_info['joints2D'][:, :, iframe])

        reset_loc = (bone_locs_2D.max(axis=-1) >
                     256).any() or (bone_locs_2D.min(axis=0) < 0).any()
        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            (1, 0, 0, 0))

    # save a .blend file for debugging:
    # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))

    # save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3)
    cmd_ffmpeg = 'ffmpeg -y -r 25 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 %s_c%04d.mp4' % (
        join(rgb_path, 'Image%04d.png'),
        join(output_path, name.replace(' ', '')), (ishape + 1))
    log_message("Generating RGB video (%s)" % cmd_ffmpeg)
    os.system(cmd_ffmpeg)

    if (output_types['vblur']):
        cmd_ffmpeg_vblur = 'ffmpeg -y -r 25 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" %s_c%04d.mp4' % (
            join(res_paths['vblur'], 'Image%04d.png'),
            join(output_path,
                 name.replace(' ', '') + '_vblur'), (ishape + 1))
        log_message("Generating vblur video (%s)" % cmd_ffmpeg_vblur)
        os.system(cmd_ffmpeg_vblur)

    if (output_types['fg']):
        cmd_ffmpeg_fg = 'ffmpeg -y -r 25 -i %s -c:v h264 -pix_fmt yuv420p -crf 23 %s_c%04d.mp4' % (
            join(res_paths['fg'], 'Image%04d.png'),
            join(output_path,
                 name.replace(' ', '') + '_fg'), (ishape + 1))
        log_message("Generating fg video (%s)" % cmd_ffmpeg_fg)
        os.system(cmd_ffmpeg_fg)

    cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname,
                                                   tmp_path, rgb_dirname)
    log_message("Tarballing the images (%s)" % cmd_tar)
    os.system(cmd_tar)

    # save annotation excluding png/exr data to _info.mat file
    import scipy.io
    scipy.io.savemat(matfile_info, dict_info, do_compression=True)
Example #4
    log_message("start part 2")

    import hashlib
    import random
    # initialize random seeds with sequence id
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    # import configuration
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    resy = params['resy']
    resx = params['resx']
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    # check whether openexr_py2_path is loaded from configuration file
    if 'openexr_py2_path' in locals() or 'openexr_py2_path' in globals():
        for exr_path in openexr_py2_path.split(':'):
Example #5
def main():
    # time logging
    global start_time
    start_time = time.time()

    import argparse

    # parse commandline arguments
    #log_message(sys.argv)
    parser = argparse.ArgumentParser(
        description='Generate synth dataset images.')
    parser.add_argument('--idx',
                        type=int,
                        help='idx of the requested sequence')
    parser.add_argument('--ishape',
                        type=int,
                        help='requested cut, according to the stride')
    parser.add_argument('--stride', type=int, help='stride amount, default 50')
    parser.add_argument('--gender', type=str, help='gender {male, female}')
    parser.add_argument(
        '--body_shape_idx',
        type=int,
        help='body shape idx (height, weight etc.); < 1682 for female, < 1360 for male')
    parser.add_argument('--outdir', type=str, help='out directory')

    args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])

    idx = args.idx
    ishape = args.ishape
    stride = args.stride
    gender = args.gender
    body_shape_idx = args.body_shape_idx
    outdir = args.outdir

    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)
    log_message("gender: %s" % gender)
    log_message("body_shape_idx: %d" % body_shape_idx)

    if idx == None:
        exit(1)
    if ishape == None:
        exit(1)
    if stride == None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50

    # Custom checks for specially added fields (gender & body_shape_idx)
    if gender == 'male':
        assert body_shape_idx < 1360
    elif gender == 'female':
        assert body_shape_idx < 1682
    else:
        assert gender in ['male', 'female']

    idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))

    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)
    idx_info = idx_info[idx]
    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    clothing_option = params['clothing_option']  # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']

    # compute number of cuts
    nb_ishape = max(
        1, int(np.ceil(
            (idx_info['nb_frames'] - (clipsize - stride)) / stride)))
    log_message("Max ishape: %d" % (nb_ishape - 1))

    if ishape is None:
        exit(1)

    assert (ishape < nb_ishape)

    # name is set given idx
    name = idx_info['name']
    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    params['output_path'] = output_path
    tmp_path = join(
        tmp_path,
        'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
    params['tmp_path'] = tmp_path

    # check if already computed
    #  + clean up existing tmp folders if any
    if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
        os.system('rm -rf %s' % tmp_path)

    # >> don't use random generator before this point <<

    # initialize RNG with seeds from sequence id
    import hashlib
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    if (output_types['vblur']):
        vblur_factor = np.random.normal(0.5, 0.5)
        params['vblur_factor'] = vblur_factor

    log_message("Setup Blender")

    genders = {0: 'female', 1: 'male'}

    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    # grab clothing names
    log_message("clothing: %s" % clothing_option)
    with open(
            join(smpl_data_folder, 'textures',
                 '%s_%s.txt' % (gender, idx_info['use_split']))) as f:
        txt_paths = f.read().splitlines()

    # if using only one source of clothing
    if clothing_option == 'nongrey':
        txt_paths = [k for k in txt_paths if 'nongrey' in k]
    elif clothing_option == 'grey':
        txt_paths = [k for k in txt_paths if 'nongrey' not in k]

    # random clothing texture
    cloth_img_name = choice(txt_paths)
    cloth_img_name = join(smpl_data_folder, cloth_img_name)

    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))

    log_message("Initializing scene")
    camera_distance = 4.0  # not rendering images, so the camera distance can be arbitrary
    params['camera_distance'] = camera_distance
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob

    orig_pelvis_loc = (
        arm_ob.matrix_world.copy() *
        arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector(
            (-1., 1., 1.))
    orig_cam_loc = cam_ob.location.copy()

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data,
                                              ob,
                                              obname,
                                              idx=idx,
                                              gender=gender)

    log_message("Loaded body data for %s" % name)

    nb_fshapes = len(fshapes)

    # Force the train split
    fshapes = fshapes[:int(nb_fshapes * 0.8)]

    shape = fshapes[body_shape_idx]

    # example shapes
    #shape = np.zeros(10) #average
    #shape = np.array([ 2.25176191, -3.7883464 ,  0.46747496,  3.89178988,  2.20098416,  0.26102114, -3.07428093,  0.55708514, -3.94442258, -2.88552087]) #fat
    #shape = np.array([-2.26781107,  0.88158132, -0.93788176, -0.23480508,  1.17088298,  1.55550789,  0.44383225,  0.37688275, -0.27983086,  1.77102953]) #thin
    #shape = np.array([ 0.00404852,  0.8084637 ,  0.32332591, -1.33163664,  1.05008727,  1.60955275,  0.22372946, -0.10738459,  0.89456312, -1.22231216]) #short
    #shape = np.array([ 3.63453289,  1.20836171,  3.15674431, -0.78646793, -1.93847355, -0.32129994, -0.97771656,  0.94531640,  0.52825811, -0.99324327]) #tall

    ndofs = 10

    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname +
                                              '_Pelvis'].location).copy()

    data = cmu_parms[name]

    fbegin = ishape * stepsize * stride
    fend = min(ishape * stepsize * stride + stepsize * clipsize,
               len(data['poses']))

    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    # force recomputation of joint angles unless shape is all zeros
    curr_shape = np.zeros_like(shape)

    matfile_info = join(
        output_path,
        name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1))
    log_message('Working on %s' % matfile_info)

    # allocate
    dict_info = {}
    dict_info['bg'] = np.zeros((N, ), dtype=np.object)  # background image path
    dict_info['camLoc'] = np.empty(3)  # (1, 3)
    dict_info['clipNo'] = ishape + 1
    dict_info['cloth'] = np.zeros(
        (N, ), dtype=np.object)  # clothing texture image path
    dict_info['gender'] = np.empty(N,
                                   dtype='uint8')  # 0 for male, 1 for female
    dict_info['joints2D'] = np.empty(
        (2, 24, N), dtype='float32')  # 2D joint positions in pixel space
    dict_info['joints3D'] = np.empty(
        (3, 24, N), dtype='float32')  # 3D joint positions in world coordinates
    dict_info['light'] = np.empty((9, N), dtype='float32')
    dict_info['pose'] = np.empty(
        (data['poses'][0].size, N),
        dtype='float32')  # joint angles from SMPL (CMU)
    dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
    dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
    dict_info['zrot'] = np.empty(N, dtype='float32')
    dict_info['camDist'] = camera_distance
    dict_info['stride'] = stride

    # Note: Necessary for the HumANav dataset to canonically center the human.
    dict_info['rightFootPos'] = np.empty((3, N), dtype='float32')
    dict_info['rightToePos'] = np.empty((3, N), dtype='float32')
    dict_info['leftFootPos'] = np.empty((3, N), dtype='float32')
    dict_info['leftToePos'] = np.empty((3, N), dtype='float32')

    if name.replace(" ", "").startswith('h36m'):
        dict_info['source'] = 'h36m'
    else:
        dict_info['source'] = 'cmu'

    if (output_types['vblur']):
        dict_info['vblur_factor'] = np.empty(N, dtype='float32')

    # for each stepsize'th frame in the sequence
    get_real_frame = lambda ifr: ifr
    random_zrot = 0
    reset_loc = False
    curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname,
                                       scene, cam_ob,
                                       smpl_data['regression_verts'],
                                       smpl_data['joint_regressor'])
    random_zrot = 2 * np.pi * np.random.rand()

    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()

    # Needed for extracting toe and foot position & direction
    with open('pkl/segm_per_v_overlap.pkl', 'rb') as f:
        vsegm = load(f)

    # Where the meshes and centering information are stored
    base_dir = outdir
    vs = np.arange(0, 1.85, .2)

    # VS corresponds to the approximate velocity of the human (computed heuristically from mocap data), but after examining
    # the actual data we decided to rebin the estimated human velocity for more realistic visual cues.
    rebinned_vs = np.array([0., .2, .5, .6])
    velocity_folders = make_velocity_dirs(base_dir, rebinned_vs)
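    # vs spans 0.0 to 1.8 in 0.2 steps (assumed to be m/s); estimated speeds are mapped onto
    # the coarser rebinned_vs bins by compute_velocity_folder further below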
    pose_ishape_stride_str = 'pose_{:d}_ishape_{:d}_stride_{:d}'.format(
        idx, ishape, stride)
    body_shape_str = 'body_shape_{:d}'.format(body_shape_idx)
    gender_str = gender

    # create a keyframe animation with pose, translation, blendshapes and camera motion
    # LOOP TO CREATE 3D ANIMATION
    dt = 1. / 30.
    prev_human_pos_3 = None
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame
        scene.frame_set(get_real_frame(seq_frame))

        # apply the translation, pose and shape to the character
        apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname,
                               scene, cam_ob, get_real_frame(seq_frame))

        dict_info['shape'][:, iframe] = shape[:ndofs]
        dict_info['pose'][:, iframe] = pose
        dict_info['gender'][iframe] = list(genders)[list(
            genders.values()).index(gender)]
        if (output_types['vblur']):
            dict_info['vblur_factor'][iframe] = vblur_factor

        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            Euler((0, 0, random_zrot), 'XYZ'))
        arm_ob.pose.bones[obname + '_root'].keyframe_insert(
            'rotation_quaternion', frame=get_real_frame(seq_frame))
        dict_info['zrot'][iframe] = random_zrot

        scene.update()

        # Bodies centered only in each minibatch of clipsize frames
        if seq_frame == 0 or reset_loc:
            reset_loc = False
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() -
                                                     orig_pelvis_loc.copy())
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
            dict_info['camLoc'] = np.array(cam_ob.location)

        # Compute the human (x, y, theta) position based on the location and orientation
        # of its feet
        human_pos_3 = compute_human_pos_3(scene, vsegm, ob)
        human_speed = compute_human_speed(trans, prev_human_pos_3, dt=dt)
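        # human_speed is presumably a finite-difference estimate, roughly
        # ||trans - prev_human_pos_3|| / dt with dt = 1/30 s (hence the None check below)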
        prev_human_pos_3 = trans
        centering_data = {'human_pos_3': human_pos_3}

        # If we can't estimate the speed from finite differencing (i.e. at the first timestep)
        # do not save the mesh
        if human_speed is not None:
            print(human_speed)
            # Compute the closest velocity bin and the corresponding folder name
            # it will be None if the human_speed is not in the range [vmin, vmax]
            velocity_folder = compute_velocity_folder(human_speed,
                                                      vs,
                                                      rebinned_vs,
                                                      velocity_folders,
                                                      vmax=1.85,
                                                      vmin=0.0)

            if velocity_folder is not None:
                human_data_output_folder = os.path.join(
                    velocity_folder, pose_ishape_stride_str, body_shape_str,
                    gender_str)
                if not os.path.exists(human_data_output_folder):
                    os.makedirs(human_data_output_folder)

                centeringFile = os.path.join(
                    human_data_output_folder,
                    'human_centering_info_{:d}.pkl'.format(seq_frame))
                with open(centeringFile, 'wb') as f:
                    pickle.dump(centering_data, f)

                # Exporting Human Mesh Here for Use in HumANav
                meshFile = os.path.join(
                    human_data_output_folder,
                    'human_mesh_{:d}.obj'.format(seq_frame))
                bpy.ops.export_scene.obj(filepath=meshFile,
                                         keep_vertex_order=True,
                                         group_by_object=True)
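    # os._exit(0) terminates the interpreter immediately without running cleanup handlers,
    # presumably to make the embedding Blender process quit as soon as the export is done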
    os._exit(0)
Example #6
def main():
    # time logging
    global start_time
    start_time = time.time()

    import argparse

    # parse commandline arguments
    log_message(sys.argv)
    parser = argparse.ArgumentParser(
        description='Generate synth dataset images.')
    parser.add_argument('--idx',
                        type=int,
                        help='idx of the requested sequence')
    parser.add_argument('--ishape',
                        type=int,
                        help='requested cut, according to the stride')
    parser.add_argument('--stride', type=int, help='stride amount, default 50')
    parser.add_argument('--index',
                        type=int,
                        help='index of the texture in all.txt file')
    parser.add_argument('--img_number',
                        type=int,
                        help='index of the image that is stored')

    args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])

    idx = args.idx
    ishape = args.ishape
    stride = args.stride
    index = args.index
    img_number = args.img_number

    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)
    log_message("input index: %d" % index)
    log_message("input img_number: %d" % img_number)

    if idx is None:
        exit(1)
    if ishape is None:
        exit(1)
    if stride is None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50

    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))

    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)
    idx_info = idx_info[idx]
    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    resy = params['resy']
    resx = params['resx']
    clothing_option = params['clothing_option']  # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    # compute number of cuts
    nb_ishape = max(
        1, int(np.ceil(
            (idx_info['nb_frames'] - (clipsize - stride)) / stride)))
    log_message("Max ishape: %d" % (nb_ishape - 1))

    if ishape is None:
        exit(1)

    assert (ishape < nb_ishape)

    # name is set given idx
    output_path = '/tmp'
    tmp_path = '/mnt/hdd1/tanya/surreal_out/new_less_bright/Images'

    # initialize RNG with seeds from sequence id
    import hashlib
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10**8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    if (output_types['vblur']):
        vblur_factor = np.random.normal(0.5, 0.5)
        params['vblur_factor'] = vblur_factor

    log_message("Setup Blender")

    # create copy-spher.harm. directory if not exists
    sh_dir = join(tmp_path, 'spher_harm')
    if not exists(sh_dir):
        mkdir_safe(sh_dir)
    sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
    os.system('cp spher_harm/sh.osl %s' % sh_dst)

    genders = {0: 'female', 1: 'male'}
    # pick random gender
    gender = choice(genders)

    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    log_message("Listing background images")
    bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
    nh_txt_paths = []
    with open(bg_names) as f:
        for line in f:
            nh_txt_paths.append(join(bg_path, line))

    # grab clothing names
    log_message("clothing: %s" % clothing_option)
    with open(join(smpl_data_folder, 'textures', 'all.txt')) as f:
        txt_paths = f.read().splitlines()

    txt_paths = sorted(txt_paths)
    # if using only one source of clothing
    # if clothing_option == 'nongrey':
    #     txt_paths = [k for k in txt_paths if 'nongrey' in k]
    # elif clothing_option == 'grey':
    #     txt_paths = [k for k in txt_paths if 'nongrey' not in k]

    # random clothing texture
    cloth_img_name = txt_paths[index]
    gender = cloth_img_name.split('/')[1]
    cloth_img_name = join(smpl_data_folder, cloth_img_name)
    cloth_img = bpy.data.images.load(cloth_img_name)

    # random background
    bg_img_name = choice(nh_txt_paths)[:-1]
    bg_img = bpy.data.images.load(bg_img_name)

    log_message("Loading parts segmentation")
    beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))

    log_message("Building materials tree")
    mat_tree = bpy.data.materials['Material'].node_tree
    create_sh_material(mat_tree, sh_dst, cloth_img)
    res_paths = create_composite_nodes(scene.node_tree,
                                       params,
                                       img=bg_img,
                                       idx=idx)

    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))

    log_message("Initializing scene")
    camera_distance = np.random.normal(8.0, 0.01)
    params['camera_distance'] = camera_distance
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob
    segmented_materials = True  # True: 0-24, False: expected to have 0-1 bg/fg

    log_message("Creating materials segmentation")
    # create material segmentation
    if segmented_materials:
        materials = create_segmentation(ob, params)
        prob_dressed = {
            'leftLeg': .5,
            'leftArm': .9,
            'leftHandIndex1': .01,
            'rightShoulder': .8,
            'rightHand': .01,
            'neck': .01,
            'rightToeBase': .9,
            'leftShoulder': .8,
            'leftToeBase': .9,
            'rightForeArm': .5,
            'leftHand': .01,
            'spine': .9,
            'leftFoot': .9,
            'leftUpLeg': .9,
            'rightUpLeg': .9,
            'rightFoot': .9,
            'head': .01,
            'leftForeArm': .5,
            'rightArm': .5,
            'spine1': .9,
            'hips': .9,
            'rightHandIndex1': .01,
            'spine2': .9,
            'rightLeg': .5
        }
    else:
        materials = {'FullBody': bpy.data.materials['Material']}
        prob_dressed = {'FullBody': .6}

    orig_pelvis_loc = (
        arm_ob.matrix_world.copy() *
        arm_ob.pose.bones[obname + '_Pelvis'].head.copy()) - Vector(
            (-1., 1., 1.))
    orig_cam_loc = cam_ob.location.copy()

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data,
                                              ob,
                                              obname,
                                              idx=idx,
                                              gender=gender)

    log_message("Loaded body data for %s" % name)

    nb_fshapes = len(fshapes)
    if idx_info['use_split'] == 'train':
        fshapes = fshapes[:int(nb_fshapes * 0.8)]
    elif idx_info['use_split'] == 'test':
        fshapes = fshapes[int(nb_fshapes * 0.8):]

    # pick random real body shape
    shape = choice(fshapes)  # +random_shape(.5) can add noise
    # shape = random_shape(3.) # random body shape

    ndofs = 10

    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname +
                                              '_Pelvis'].location).copy()

    # create output directory
    if not exists(output_path):
        mkdir_safe(output_path)

    # spherical harmonics material needs a script to be loaded and compiled
    scs = []
    for mname, material in materials.items():
        scs.append(material.node_tree.nodes['Script'])
        scs[-1].filepath = sh_dst
        scs[-1].update()

    rgb_path = tmp_path

    data = cmu_parms[name]

    fbegin = ishape * stepsize * stride
    fend = min(ishape * stepsize * stride + stepsize * clipsize,
               len(data['poses']))

    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    # force recomputation of joint angles unless shape is all zeros
    curr_shape = np.zeros_like(shape)
    nframes = len(data['poses'][::stepsize])

    matfile_info = join(
        output_path,
        name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1))
    log_message('Working on %s' % matfile_info)

    # allocate
    dict_info = {}
    dict_info['bg'] = np.zeros((N, ), dtype=np.object)  # background image path
    dict_info['camLoc'] = np.empty(3)  # (1, 3)
    dict_info['clipNo'] = ishape + 1
    # clothing texture image path
    dict_info['cloth'] = np.zeros((N, ), dtype=np.object)
    dict_info['gender'] = np.empty(N,
                                   dtype='uint8')  # 0 for female, 1 for male
    # 2D joint positions in pixel space
    dict_info['joints2D'] = np.empty((2, 24, N), dtype='float32')
    # 3D joint positions in world coordinates
    dict_info['joints3D'] = np.empty((3, 24, N), dtype='float32')
    dict_info['light'] = np.empty((9, N), dtype='float32')
    # joint angles from SMPL (CMU)
    dict_info['pose'] = np.empty((data['poses'][0].size, N), dtype='float32')
    dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
    dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
    dict_info['zrot'] = np.empty(N, dtype='float32')
    dict_info['camDist'] = camera_distance
    dict_info['stride'] = stride

    if name.replace(" ", "").startswith('h36m'):
        dict_info['source'] = 'h36m'
    else:
        dict_info['source'] = 'cmu'

    if (output_types['vblur']):
        dict_info['vblur_factor'] = np.empty(N, dtype='float32')

    # for each clipsize'th frame in the sequence
    def get_real_frame(ifr):
        return ifr

    random_zrot = 0
    reset_loc = False
    batch_it = 0
    curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname,
                                       scene, cam_ob,
                                       smpl_data['regression_verts'],
                                       smpl_data['joint_regressor'])
    random_zrot = 2 * np.pi * np.random.rand()

    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()

    # create a keyframe animation with pose, translation, blendshapes and
    # camera motion
    # LOOP TO CREATE 3D ANIMATION
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame
        scene.frame_set(get_real_frame(seq_frame))

        # apply the translation, pose and shape to the character
        apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname,
                               scene, cam_ob, get_real_frame(seq_frame))
        dict_info['shape'][:, iframe] = shape[:ndofs]
        dict_info['pose'][:, iframe] = pose
        dict_info['gender'][iframe] = list(genders)[list(
            genders.values()).index(gender)]
        if (output_types['vblur']):
            dict_info['vblur_factor'][iframe] = vblur_factor

        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            Euler((0, 0, random_zrot), 'XYZ'))
        arm_ob.pose.bones[obname + '_root'].keyframe_insert(
            'rotation_quaternion', frame=get_real_frame(seq_frame))
        dict_info['zrot'][iframe] = random_zrot

        scene.update()

        # Bodies centered only in each minibatch of clipsize frames
        if seq_frame == 0 or reset_loc:
            reset_loc = False
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() -
                                                     orig_pelvis_loc.copy())
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
            dict_info['camLoc'] = np.array(cam_ob.location)

    scene.node_tree.nodes['Image'].image = bg_img

    for part, material in materials.items():
        material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (
            0, 0)
    '''
    # random light
    sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
    # Ambient light (the first coeff) is the ambient term and needs a minimum
    # value.  The rest are uniformly distributed; higher means brighter.
    sh_coeffs[0] = .5 + .9 * np.random.rand()
    sh_coeffs[1] = -.7 * np.random.rand()
    '''
    sh_coeffs = .3 * np.ones(9)
    # sh_coeffs[1:] = 0.01

    for ish, coeff in enumerate(sh_coeffs):
        for sc in scs:
            sc.inputs[ish + 1].default_value = coeff

    # iterate over the keyframes and render
    # LOOP TO RENDER
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        scene.frame_set(get_real_frame(seq_frame))
        iframe = seq_frame

        dict_info['bg'][iframe] = bg_img_name
        dict_info['cloth'][iframe] = cloth_img_name
        dict_info['light'][:, iframe] = sh_coeffs

        scene.render.use_antialiasing = False
        scene.render.filepath = join(
            rgb_path, 'Image_{}.png'.format(img_number + seq_frame))

        log_message("Rendering frame %d" % seq_frame)
        log_message("Rendering image {}".format(rgb_path))

        # disable render output
        logfile = '/dev/null'
        open(logfile, 'a').close()
        old = os.dup(1)
        sys.stdout.flush()
        os.close(1)
        os.open(logfile, os.O_WRONLY)

        # Render
        bpy.ops.render.render(write_still=True)

        # disable output redirection
        os.close(1)
        os.dup(old)
        os.close(old)

        # bone locations should be saved after rendering so that the bones are
        # updated
        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene,
                                                   cam_ob)
        dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
        dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)

        reset_loc = (bone_locs_2D.max(axis=-1) >
                     256).any() or (bone_locs_2D.min(axis=0) < 0).any()
        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = \
                Quaternion((1, 0, 0, 0))
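
# --- Illustrative sketch (not part of main() above) ---
# The render loop above silences Blender's console output by closing file
# descriptor 1 and pointing it at /dev/null around bpy.ops.render.render().
# A variant of that pattern can be wrapped in a context manager so the
# redirection is always undone, even if the render call raises.  This is only
# a sketch; the original code uses the inline dup/close calls shown above.
import os
import sys
from contextlib import contextmanager

@contextmanager
def suppress_stdout(logfile='/dev/null'):
    """Temporarily redirect the process-level stdout (fd 1) to `logfile`."""
    sys.stdout.flush()
    old_fd = os.dup(1)
    new_fd = os.open(logfile, os.O_WRONLY | os.O_CREAT)
    os.dup2(new_fd, 1)
    os.close(new_fd)
    try:
        yield
    finally:
        sys.stdout.flush()
        os.dup2(old_fd, 1)
        os.close(old_fd)

# Hypothetical usage:
#     with suppress_stdout():
#         bpy.ops.render.render(write_still=True)
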
def gen_one_seqs(setting, rand_noise=True):
    """
    Note that the human shape only moves slightly within each sequence,
    so try to use a larger stepsize.
    TODO:
    1. no scaling, since orig_trans and the per-step translation were removed
    2. shape and pose are randomly perturbed with noise
    """
    ## setup basic variables
    split = setting.split
    gender = setting.gender
    idx_shape = setting.ishape
    idx_seq = setting.iseq
    idx_run = setting.irun
    idx_cloth = setting.icloth
    idx_bg = setting.ibg

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')
    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    clothing_option = params['clothing_option']  # grey, nongrey or all
    tmp_root = params['tmp_path']
    out_path = params['output_path']
    stepsize = params['stepsize']
    # motion blur
    params["vblur"] = np.random.normal(0.5, 0.5)
    for k in ["image", "depth", "mat", "mesh"]:
        if not os.path.exists(out_path + "/" + k + "/"):
            mkdir_safe(out_path + "/" + k + "/")

    # check if already computed
    #  + clean up existing tmp folders if any
    # identifier (run_$split_$gender_$idxrun_$idxseq_$idxshape_$idxframe)
    identifier = "_".join([
        "image", split, gender,
        "%03d" % idx_run,
        "%04d" % idx_seq,
        "%04d" % idx_shape
    ])
    tmp_path = join(tmp_root, identifier)
    if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
        os.system('rm -rf %s' % tmp_path)
    if not exists(tmp_path):
        mkdir_safe(tmp_path)
    res_paths = {
        k: join(tmp_root, identifier.replace("image", k))
        for k in params['output_types'] if params['output_types'][k]
    }

    # create copy-spher.harm. directory if not exists
    sh_dir = join(tmp_path, 'spher_harm')
    if not exists(sh_dir):
        mkdir_safe(sh_dir)
    sh_dst = join(sh_dir, 'sh.osl')
    os.system('cp spher_harm/sh.osl %s' % sh_dst)

    ## reset
    bpy.ops.wm.read_factory_settings()
    ### setup blender
    log_message("Setup blender")
    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    ## load all bg images
    log_message("Listing background images")
    bg_names = join(bg_path, '%s_img.txt' % split)
    nh_txt_paths = []
    with open(bg_names) as f:
        for line in f:
            nh_txt_paths.append(join(bg_path, line))

    ## grab clothing names
    clothing_option = "nongrey"
    log_message("clothing: %s" % clothing_option)
    with open(join(smpl_data_folder, 'textures',
                   '%s_%s.txt' % (gender, split))) as f:
        txt_paths = f.read().splitlines()
        txt_paths = [k for k in txt_paths if clothing_option in k]

    # random clothing texture
    idx_cloth = idx_cloth % len(txt_paths)
    cloth_img_name = txt_paths[idx_cloth]
    cloth_img_name = join(smpl_data_folder, cloth_img_name)
    cloth_img = bpy.data.images.load(cloth_img_name)

    # random background
    idx_bg = idx_bg % len(nh_txt_paths)
    bg_img_name = nh_txt_paths[idx_bg][:-1]
    bg_img = bpy.data.images.load(bg_img_name)

    ## load smpl motion data
    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))

    ## start to setup the scene
    log_message("Building materials tree")
    mat_tree = bpy.data.materials['Material'].node_tree
    create_sh_material(mat_tree, sh_dst, cloth_img)
    create_composite_nodes(scene.node_tree,
                           params,
                           res_paths,
                           img=bg_img,
                           idx=idx_seq)

    log_message("Initializing scene")
    set_salient(1)
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)
    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob
    segmented_materials = True  #True: 0-24, False: expected to have 0-1 bg/fg
    set_salient(0)

    # create material segmentation
    log_message("Creating materials segmentation")
    if segmented_materials:
        materials = create_segmentation(ob, params)
    else:
        materials = {'FullBody': bpy.data.materials['Material']}

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    # load shape, pose for the given sequence index.
    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data,
                                              ob,
                                              obname,
                                              idx=idx_seq,
                                              gender=gender)

    # pick random real body shape
    log_message("Loaded body data for %s" % name)
    if split == 'train':
        fshapes = fshapes[:int(len(fshapes) * 0.6)]
    elif split == 'val':
        fshapes = fshapes[int(len(fshapes) * 0.6):int(len(fshapes) * 0.8)]
    elif split == 'test':
        fshapes = fshapes[int(len(fshapes) * 0.8):]
    shape = fshapes[idx_shape %
                    (len(fshapes))]  #+random_shape(.5) can add noise

    ## setup the pelvis to origin for ease
    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname +
                                              '_Pelvis'].location).copy()

    # create output directory
    if not exists(out_path):
        mkdir_safe(out_path)

    # spherical harmonics material needs a script to be loaded and compiled
    scs = []
    for mname, material in materials.items():
        scs.append(material.node_tree.nodes['Script'])
        scs[-1].filepath = sh_dst
        scs[-1].update()

    data = cmu_parms[name]
    fbegin = 0
    fend = len(data['poses']) // 50 * 50
    stepsize = len(data['poses']) // 50
    #  stepsize*min(stepsize, 150)

    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    ## clearup, and put the origin at zeros
    orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy())\
            - Vector((-1., 1., 1.))
    orig_cam_loc = cam_ob.location.copy()
    reset_joint_positions(Vector((0, 0, 0)), shape, ob, arm_ob, obname, scene,
                          cam_ob, smpl_data['regression_verts'],
                          smpl_data['joint_regressor'])
    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()

    ### lighting and rendering parameters
    scene.node_tree.nodes['Image'].image = bg_img
    ## lighting random:
    sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
    # Ambient light (the first coeff) is the ambient term and needs a minimum value.
    # The rest are uniformly distributed; higher means brighter.
    sh_coeffs[0] = .5 + .9 * np.random.rand()
    sh_coeffs[1] = -.7 * np.random.rand()
    # TODO: what's the function??
    for part, material in materials.items():
        material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (
            0, 0)
    for ish, coeff in enumerate(sh_coeffs):
        for sc in scs:
            sc.inputs[ish + 1].default_value = coeff
    get_real_frame = lambda ifr: ifr

    shape_orig = shape
    # iterate over the keyframes and render
    # LOOP TO RENDER
    render_path = res_paths["image"]
    for seq_frame, (pose, trans) in enumerate(
            zip(data['poses'][fbegin:fend:stepsize],
                data['trans'][fbegin:fend:stepsize])):
        ## random set pose or shape
        pose = pose + (np.random.rand(len(pose)) * 2 - 1) * 0.1 * rand_noise
        shape = shape_orig + (np.random.rand(10) * 2 - 1) * 0.05 * rand_noise

        # disable render output
        set_salient(1)
        scene.frame_set(get_real_frame(seq_frame))

        # apply the translation, pose and shape to the character
        # shape at the origin, and translate the camera inversly
        apply_trans_pose_shape(Vector((0, 0, 0)), pose, shape, ob,
                               arm_ob, obname, scene, cam_ob,
                               get_real_frame(seq_frame))
        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            Euler((0, 0, 0), 'XYZ'))
        arm_ob.pose.bones[obname + '_root'].keyframe_insert(
            'rotation_quaternion', frame=get_real_frame(seq_frame))
        scene.update()

        if seq_frame == 0:
            # Bodies centered only in each minibatch of clipsize frames
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() -
                                                     orig_pelvis_loc.copy())
            #  cam_ob.location = orig_cam_loc.copy() - Vector(trans)
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))

        # Render the view from all given cameras
        log_message("Rendering frame %d" % seq_frame)
        scene.render.use_antialiasing = False
        res_paths["image"] = render_path + "_%04d.png" % get_real_frame(seq_frame)
        scene.render.filepath = res_paths["image"]
        bpy.ops.render.render(write_still=True)

        # disable output redirection
        set_salient(0)

        # each frame data as a mat file
        ## save key information (bg, cloth, light, shape, pose, vblur, gender, split)
        dict_info = {}
        dict_info['bg'] = bg_img_name
        dict_info['cloth'] = cloth_img_name
        dict_info['light'] = sh_coeffs
        dict_info['cam_loc'] = np.array(cam_ob.location)
        dict_info['pose'] = pose
        dict_info['shape'] = shape
        dict_info['split'] = split
        dict_info['gender'] = gender
        dict_info['seqs'] = np.array([idx_seq, idx_shape, stepsize], np.int32)
        dict_info['vblur'] = params["vblur"]
        dict_info['camera_K'] = np.array(
            get_calibration_matrix_K_from_blender(cam_ob.data))
        dict_info['camera_matrix_world'] = np.array(cam_ob.matrix_world)
        R, T = get_RT_matrix_from_blender(cam_ob)
        dict_info['camera_R'] = np.array(R)
        dict_info['camera_T'] = np.array(T)
        dict_info['pelvis_loc'] = np.array(
            arm_ob.matrix_world.copy() *
            arm_ob.pose.bones[obname + '_Pelvis'].head.copy())
        # NOTE:
        # ideally, pixels should be readable from a viewer node, but I get only zeros
        # --> https://ammous88.wordpress.com/2015/01/16/blender-access-render-results-pixels-directly-from-python-2/
        # len(np.asarray(bpy.data.images['Render Result'].pixels) is 0
        # Therefore we write them to temporary files and read with OpenEXR library (available for python2) in main_part2.py
        # Alternatively, if you don't want to use OpenEXR library, the following commented code does loading with Blender functions, but it can cause memory leak.
        # If you want to use it, copy necessary lines from main_part2.py such as definitions of dict_normal, matfile_normal...
        ## TODO: where to store???
        ## list(image, depth, seg, normal, gt_flow, vblur, fg)
        imagefile_path = join(
            out_path,
            "image/" + identifier + "_%04d.png" % get_real_frame(seq_frame))
        for k, folder in res_paths.items():
            if not k == 'vblur' and not k == 'fg':
                filename = "Image%04d.exr" % get_real_frame(seq_frame)
                if k == "image":
                    render_img = bpy.data.images.load(folder)
                else:
                    render_img = bpy.data.images.load(folder + "/" + filename)
                # render_img.pixels size is width * height * 4 (rgba)
                arr = np.array(render_img.pixels[:]).reshape(
                    (params["resx"], params["resy"],
                     -1))[::-1, :, :]  # images are vertically flipped
                if k == 'normal':  # 3 channels, original order
                    mat = arr[:, :, :3]
                elif k == 'gtflow':
                    mat = arr[:, :, 1:3]
                elif k == 'depth':
                    mat = arr[:, :, 0]
                    mat_c = mat.copy()
                    depth_m = sorted(mat_c.flatten())[-2]
                    mat_c[mat_c > depth_m] = depth_m + 1.0
                    cv2.imwrite(imagefile_path.replace("image", "depth"),
                                255.0 / (mat_c + 1e-5))
                elif k == 'segm':
                    mat = arr[:, :, 0]
                elif k == "image":
                    mat = arr[:, :, :3]
                    cv2.imwrite(imagefile_path, mat * 255)
                if (k in params['output_types']):
                    dict_info[k] = mat
                # remove the image to release memory, object handles, etc.
                render_img.user_clear()
                bpy.data.images.remove(render_img)
        dict_info['image'] = cv2.imread(scene.render.filepath)
        # bone locations should be saved after rendering so that the bones are updated
        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene,
                                                   cam_ob)
        dict_info['joints2D'] = np.transpose(bone_locs_2D)
        dict_info['joints3D'] = np.transpose(bone_locs_3D)
        ## export rendering, parameters, and meshes to the output directory
        log_message("Export parameters, images, and meshes for " +
                    imagefile_path)
        dd.io.save(imagefile_path.replace("image", "mat"), dict_info)
        # export human body
        bpy.ops.object.select_all(action='DESELECT')
        ob.select = True
        bpy.ops.export_scene.obj(filepath=imagefile_path.replace(
            "image", "mesh").replace(".png", ".obj"),
                                 use_selection=True,
                                 use_uvs=False,
                                 use_materials=False)
        bpy.ops.object.select_all(action='DESELECT')

        # TODO: ????
        arm_ob.pose.bones[obname + '_root'].rotation_quaternion = Quaternion(
            (1, 0, 0, 0))

    ## clear up the imported objects, as we import them each loading.
    bpy.ops.object.select_all(action='DESELECT')
    ob.select = True
    arm_ob.select = True
    bpy.ops.object.delete(use_global=False)
    return fend // stepsize + 1
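
# --- Illustrative sketch (not part of gen_one_seqs() above) ---
# As noted in the loop above, render passes are written to temporary EXR/PNG
# files and read back as numpy arrays.  The depth branch clamps the (near
# infinite) background depth to just beyond the farthest foreground value and
# stores an inverse-depth image, so closer pixels come out brighter.  The
# standalone function below reproduces only that normalization step, assuming
# `depth` is a float array of per-pixel depths in metres.
import numpy as np

def depth_to_preview(depth):
    """Clamp the background depth and return an inverse-depth uint8 preview."""
    d = np.asarray(depth, dtype=np.float64).copy()
    farthest_fg = np.sort(d.flatten())[-2]   # second-largest value, as in the code above
    d[d > farthest_fg] = farthest_fg + 1.0   # push the background just past the foreground
    preview = 255.0 / (d + 1e-5)             # nearer -> brighter
    return np.clip(preview, 0, 255).astype(np.uint8)
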
Example #8
def main(runpass=None, idx=None, idx_info=None, stride=None, cmu_idx=0):
    # time logging
    global start_time
    start_time = time.time()
    import argparse

    #idx = 3
    #ishape = 0
    #stride = 50

    # # parse commandline arguments
    # log_message(sys.argv)
    # parser = argparse.ArgumentParser(description='Generate synth dataset images.')
    # parser.add_argument('--idx', type=int,
    #                     help='idx of the requested sequence')
    # parser.add_argument('--ishape', type=int,
    #                     help='requested cut, according to the stride')
    # parser.add_argument('--stride', type=int,
    #                     help='stride amount, default 50')
    #
    # args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])
    #
    # idx = args.idx
    # ishape = args.ishape
    # stride = args.stride

    #if runpass == None:
    #    log_message("WARNING: runpass not specified, using default value 0")
    #    runpass = 0

    #if idx == None:
    #    log_message("WARNING: idx not specified, using default value 0")
    #    idx = 0

    #if stride == None:
    #    log_message("WARNING: stride not specified, using default value 50")
    #    stride = 50

    log_message("input idx: %d" % idx)
    log_message("input stride: %d" % stride)

    # import idx info (name, split)
    #idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    #(runpass, idx) = divmod(idx, len(idx_info))

    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)
    idx_info = idx_info[idx]
    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    resy = params['resy']
    resx = params['resx']
    clothing_option = params['clothing_option']  # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    #openexr_py2_path = params['openexr_py2_path']

    # compute number of cuts
    #nb_ishape = max(1, int(np.ceil((idx_info['nb_frames'] - (clipsize - stride))/stride)))
    nb_ishape = 1
    log_message("Max ishape: %d" % (nb_ishape - 1))

    #if ishape == None:
    #    exit(1)

    #assert(ishape < nb_ishape)

    # name is set given idx
    name = idx_info['name']
    #output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    #params['output_path'] = output_path

    #tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
    #params['tmp_path'] = tmp_path

    # check if already computed
    #  + clean up existing tmp folders if any
    #if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
    #    os.system('rm -rf %s' % tmp_path)

    #rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace(' ', '')), (ishape + 1))
    #if os.path.isfile(rgb_vid_filename):
    #    log_message("ALREADY COMPUTED - existing: %s" % rgb_vid_filename)
    #    return 0

    # create tmp directory
    #if not exists(tmp_path):
    #    mkdir_safe(tmp_path)

    # >> don't use random generator before this point <<

    # initialize RNG with seeds from sequence id
    #import hashlib
    #s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    #seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    #log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    #random.seed(seed_number)
    #np.random.seed(seed_number)

    #if(output_types['vblur']):
    #    vblur_factor = np.random.normal(0.5, 0.5)
    #    params['vblur_factor'] = vblur_factor

    #log_message("Setup Blender")

    # create copy-spher.harm. directory if not exists
    #sh_dir = join(tmp_path, 'spher_harm')
    #if not exists(sh_dir):
    #    mkdir_safe(sh_dir)

    #sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
    #os.system('cp spher_harm/sh.osl %s' % sh_dst)

    genders = {0: 'female', 1: 'male'}
    # pick random gender
    #gender = choice(genders)

    import hashlib  # needed for the per-ishape seed below

    for ishape in range(nb_ishape):
        #for ishape in range(1):

        s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
        seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(),
                          16) % (10**8)
        log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))

        random.seed(seed_number)
        gender = choice(genders)
        #gender = 'male'

        output_path = join(output_path, 'run%d' % runpass,
                           name.replace(" ", "") + "_%s" % (gender))
        params['output_path'] = output_path

        log_message("current ishape: %d" % ishape)
        scene = bpy.data.scenes['Scene']
        scene.render.engine = 'CYCLES'
        bpy.data.materials['Material'].use_nodes = True
        scene.cycles.shading_system = True
        scene.use_nodes = True

        #log_message("Listing background images")
        #bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
        #nh_txt_paths = []
        #with open(bg_names) as f:
        #    for line in f:
        #        nh_txt_paths.append(join(bg_path, line))

        # grab clothing names
        #log_message("clothing: %s" % clothing_option)
        #with open( join(smpl_data_folder, 'textures', '%s_%s.txt' % ( gender, idx_info['use_split'] ) ) ) as f:
        #    txt_paths = f.read().splitlines()

        # if using only one source of clothing
        #if clothing_option == 'nongrey':
        #    txt_paths = [k for k in txt_paths if 'nongrey' in k]
        #elif clothing_option == 'grey':
        #    txt_paths = [k for k in txt_paths if 'nongrey' not in k]

        # random clothing texture
        #cloth_img_name = choice(txt_paths)
        #cloth_img_name = join(smpl_data_folder, cloth_img_name)
        #cloth_img = bpy.data.images.load(cloth_img_name)

        # random background
        #bg_img_name = choice(nh_txt_paths)[:-1]
        #bg_img = bpy.data.images.load(bg_img_name)

        log_message("Loading parts segmentation")
        beta_stds = np.load(
            join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))

        log_message("Building materials tree")
        # mat_tree = bpy.data.materials['Material'].node_tree
        # create_sh_material(mat_tree, sh_dst, cloth_img)
        # res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx)	# Only human model is saved in this step, no background

        log_message("Loading smpl data")
        smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))

        log_message("Initializing scene")
        #camera_distance = np.random.normal(8.0, 1)
        camera_distance = CAMERA_DISTANCE
        params['camera_distance'] = camera_distance
        ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

        setState0()
        ob.select = True
        bpy.context.scene.objects.active = ob
        segmented_materials = False  #True: 0-24, False: expected to have 0-1 bg/fg

        log_message("Creating materials segmentation")

        # create material segmentation
        if segmented_materials:
            materials = create_segmentation(ob, params)
            prob_dressed = {
                'leftLeg': .5,
                'leftArm': .9,
                'leftHandIndex1': .01,
                'rightShoulder': .8,
                'rightHand': .01,
                'neck': .01,
                'rightToeBase': .9,
                'leftShoulder': .8,
                'leftToeBase': .9,
                'rightForeArm': .5,
                'leftHand': .01,
                'spine': .9,
                'leftFoot': .9,
                'leftUpLeg': .9,
                'rightUpLeg': .9,
                'rightFoot': .9,
                'head': .01,
                'leftForeArm': .5,
                'rightArm': .5,
                'spine1': .9,
                'hips': .9,
                'rightHandIndex1': .01,
                'spine2': .9,
                'rightLeg': .5
            }
        else:
            materials = {'FullBody': bpy.data.materials['Material']}
            prob_dressed = {'FullBody': .6}

        # orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()) - Vector((-1., 1., 1.))
        orig_cam_loc = cam_ob.location.copy()

        # unblocking both the pose and the blendshape limits
        for k in ob.data.shape_keys.key_blocks.keys():
            bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
            bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

        log_message("Loading body data")
        cmu_parms, fshapes, name = load_body_data(smpl_data,
                                                  ob,
                                                  obname,
                                                  idx=cmu_idx,
                                                  gender=gender)

        log_message("Loaded body data for %s" % name)

        nb_fshapes = len(fshapes)
        if idx_info['use_split'] == 'train':
            fshapes = fshapes[:int(nb_fshapes * 0.8)]
        elif idx_info['use_split'] == 'test':
            fshapes = fshapes[int(nb_fshapes * 0.8):]

        # pick random real body shape
        random.seed(seed_number)
        shape = choice(fshapes)  #+random_shape(.5) can add noise

        # example shapes
        #shape = np.zeros(10) #average
        #shape = np.array([ 2.25176191, -3.7883464 ,  0.46747496,  3.89178988,  2.20098416,  0.26102114, -3.07428093,  0.55708514, -3.94442258, -2.88552087]) #fat

        ndofs = 10

        scene.objects.active = arm_ob
        orig_trans = np.asarray(arm_ob.pose.bones[obname +
                                                  '_Pelvis'].location).copy()

        # create output directory
        if not exists(output_path):
            mkdir_safe(output_path)

        # spherical harmonics material needs a script to be loaded and compiled
        #  scs = []
        #  for mname, material in materials.items():
        #      scs.append(material.node_tree.nodes['Script'])
        #      scs[-1].filepath = sh_dst
        #      scs[-1].update()

        #rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1)
        #rgb_path = join(tmp_path, rgb_dirname)

        data = cmu_parms[name]

        #fbegin = ishape*stepsize*stride
        #fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
        fbegin = 0
        fend = len(data['poses'])

        log_message("Computing how many frames to allocate")
        #N = len(data['poses'][fbegin:fend:stepsize])
        N = len(data['poses'][fbegin:fend:stepsize]) + PREAMBLE_FRAME_NUM
        log_message("Allocating %d frames in mat file" % N)

        # force recomputation of joint angles unless shape is all zeros
        curr_shape = np.zeros_like(shape)
        nframes = len(data['poses'][::stepsize])

        matfile_info = join(
            output_path,
            name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1))
        log_message('Working on %s' % matfile_info)

        # allocate
        dict_info = {}
        # dict_info['bg'] = np.zeros((N,), dtype=np.object) # background image path
        dict_info['camLoc'] = np.empty((3, N), dtype='float32')  # (1, 3)
        dict_info['clipNo'] = ishape + 1
        # dict_info['cloth'] = np.zeros((N,), dtype=np.object) # clothing texture image path
        dict_info['gender'] = np.empty(
            N, dtype='uint8')  # 0 for female, 1 for male
        dict_info['joints2D'] = np.empty(
            (2, 24, N), dtype='float32')  # 2D joint positions in pixel space
        dict_info['joints3D'] = np.empty(
            (3, 24, N),
            dtype='float32')  # 3D joint positions in world coordinates
        # dict_info['light'] = np.empty((9, N), dtype='float32')
        dict_info['pose'] = np.empty(
            (data['poses'][0].size, N),
            dtype='float32')  # joint angles from SMPL (CMU)
        dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
        dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
        # dict_info['zrot'] = np.empty(N, dtype='float32')
        dict_info['camDist'] = camera_distance
        dict_info['stride'] = stride
        dict_info['seednumber'] = seed_number

        if name.replace(" ", "").startswith('h36m'):
            dict_info['source'] = 'h36m'
        else:
            dict_info['source'] = 'cmu'

        #if(output_types['vblur']):
        #    dict_info['vblur_factor'] = np.empty(N, dtype='float32') #############################################

        # for each clipsize'th frame in the sequence
        get_real_frame = lambda ifr: ifr
        #random_zrot = 0
        reset_loc = False
        batch_it = 0
        curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob,
                                           obname, scene, cam_ob,
                                           smpl_data['regression_verts'],
                                           smpl_data['joint_regressor'])
        #random_zrot = 2*np.pi*np.random.rand()

        arm_ob.animation_data_clear()
        cam_ob.animation_data_clear()

        # create a keyframe animation with pose, translation, blendshapes and camera motion
        # LOOP TO CREATE 3D ANIMATION

        # rotate model to front up-right
        #arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((math.radians(-90), 0, 0), 'XYZ'))
        #JIANL: Weird! Weird! Weird!
        #JIANL: The value differs between the system terminal and Blender's python console (if the rotation was applied to "root")
        #JIANL: To solve this issue, modify arm_ob.matrix_world instead!
        # This is to make local coordinate the same as global coordinate
        arm_ob.matrix_world = Matrix(
            ((1.0000, 0.0000, 0.0000, 0.0000), (0.0000, 1.0000, 0.0000,
                                                0.0000),
             (0.0000, 0.0000, 1.0000, 0.0000), (0.0000, 0.0000, 0.0000,
                                                1.0000)))

        curr_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
            obname + '_Pelvis'].head.copy()

        # After resetting the local coordinate, curr_pelvis_loc[1] is y-axis value,
        # curr_pelvis_loc[2] is the z-axis value,
        # [math.radians(90), 0, 0] rotation to get T-pose
        # so, move curr_pelvis_loc[1] along y-axis (negative)
        # and move (curr_pelvis_loc[1]-curr_pelvis_loc[2]) along z-axis (positive)
        # this will result in a right pelvis position, which makes foot touching the ground
        TRANS_OFFSET = [
            0., curr_pelvis_loc[1], -(curr_pelvis_loc[1] - curr_pelvis_loc[2])
        ]  # OFFSET to make model stand on ground at origin
        #print(TRANS_OFFSET)

        # interpolate frames to add T-pose transition
        for seq_frame in range(PREAMBLE_FRAME_NUM):
            iframe = seq_frame
            scene.frame_set(get_real_frame(seq_frame))
            #trans = data['trans'][fbegin:fend:stepsize][0].copy()
            #trans = np.zeros(3) - [0., 0.91, 0.]
            trans = np.zeros(
                3) - TRANS_OFFSET  # make model stand on ground at origin
            pose = data['poses'][fbegin:fend:stepsize][0].copy()
            pose[0] = math.radians(90) + seq_frame * (
                (pose[0] - math.radians(90)) / PREAMBLE_FRAME_NUM)
            pose[1] = seq_frame * (pose[1] / PREAMBLE_FRAME_NUM
                                   )  # interpolate rotation
            pose[2] = seq_frame * (pose[2] / PREAMBLE_FRAME_NUM
                                   )  # interpolate rotation
            pose[3:] = seq_frame * (pose[3:] / PREAMBLE_FRAME_NUM
                                    )  # interpolate rotation
            apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob,
                                   obname, scene, cam_ob,
                                   get_real_frame(seq_frame))
            dict_info['shape'][:, iframe] = shape[:ndofs]
            dict_info['pose'][:, iframe] = pose
            dict_info['gender'][iframe] = list(genders)[list(
                genders.values()).index(gender)]
            #if(output_types['vblur']):
            #    dict_info['vblur_factor'][iframe] = vblur_factor
            #arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))
            arm_ob.pose.bones[obname + '_root'].keyframe_insert(
                'rotation_quaternion', frame=get_real_frame(seq_frame))
            #dict_info['zrot'][iframe] = random_zrot
            scene.update()
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            # keep camera tracking the model on a plane, modify if axis locking is required
            # camera location is recorded for rendering
            dict_info['camLoc'][:, iframe] = (orig_cam_loc.copy() +
                                              new_pelvis_loc - Vector([0, 0, 1]))

        for seq_frame_, (pose_, trans_) in enumerate(
                zip(data['poses'][fbegin:fend:stepsize],
                    data['trans'][fbegin:fend:stepsize])):
            seq_frame = seq_frame_ + PREAMBLE_FRAME_NUM
            iframe = seq_frame
            scene.frame_set(get_real_frame(seq_frame))
            # apply the translation, pose and shape to the character
            pose = pose_.copy()
            #trans = trans_.copy() - data['trans'][fbegin:fend:stepsize][0] - [0., 0.91, 0.]
            trans = trans_.copy() - data['trans'][fbegin:fend:stepsize][
                0] - TRANS_OFFSET  # make model stand on ground at origin
            #pose[0:3] = 0
            #trans[:] = trans[[0,2,1]]
            #print(trans)
            apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob,
                                   obname, scene, cam_ob,
                                   get_real_frame(seq_frame))
            dict_info['shape'][:, iframe] = shape[:ndofs]
            dict_info['pose'][:, iframe] = pose
            dict_info['gender'][iframe] = list(genders)[list(
                genders.values()).index(gender)]
            #if(output_types['vblur']):
            #    dict_info['vblur_factor'][iframe] = vblur_factor
            #arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))
            arm_ob.pose.bones[obname + '_root'].keyframe_insert(
                'rotation_quaternion', frame=get_real_frame(seq_frame))
            #dict_info['zrot'][iframe] = random_zrot
            scene.update()
            #print(seq_frame)
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[
                obname + '_Pelvis'].head.copy()
            dict_info['camLoc'][:, iframe] = (orig_cam_loc.copy() +
                                              new_pelvis_loc - Vector([0, 0, 1]))

        matfile_info = join(
            output_path,
            name.replace(" ", "") + "_c%04d_info.mat" % (ishape + 1))
        # SAVE DATASET
        # save .obj for human model
        scene.frame_set(get_real_frame(0))
        obname = '%s_avg' % gender[0]
        ob = bpy.data.objects[obname]
        scn = bpy.context.scene
        scn.objects.active = ob
        #obj_filename = name + '_' + gender + '.obj'
        obj_body_saved = join(
            output_path,
            name.replace(" ", "") + "_c%04d_%s.obj" % (ishape + 1, gender))
        bpy.ops.export_scene.obj(filepath=obj_body_saved,
                                 check_existing=True,
                                 axis_forward='-Z',
                                 axis_up='Y',
                                 filter_glob="*.obj;*.mtl",
                                 use_selection=True,
                                 use_animation=False,
                                 use_mesh_modifiers=True,
                                 use_edges=True,
                                 use_smooth_groups=False,
                                 use_smooth_groups_bitflags=False,
                                 use_normals=True,
                                 use_uvs=True,
                                 use_materials=False,
                                 use_triangles=False,
                                 use_nurbs=False,
                                 use_vertex_groups=False,
                                 use_blen_objects=True,
                                 group_by_object=False,
                                 group_by_material=False,
                                 keep_vertex_order=False,
                                 global_scale=1,
                                 path_mode='AUTO')

        # save .mdd for animation
        #mdd_filename = name + '_' + gender + '.mdd'
        mdd_body_saved = join(
            output_path,
            name.replace(" ", "") + "_c%04d_%s.mdd" % (ishape + 1, gender))
        # to align .mdd to .fbx, frame_start is set to 0, and use_rest_frame is true
        bpy.ops.export_shape.mdd(filepath=mdd_body_saved,
                                 check_existing=True,
                                 filter_glob="*.mdd",
                                 fps=25,
                                 frame_start=0,
                                 frame_end=N,
                                 use_rest_frame=True)

        # save .fbx for animation
        #fbx_filename = name + '_' + gender + '.fbx'
        fbx_body_saved = join(
            output_path,
            name.replace(" ", "") + "_c%04d_%s.fbx" % (ishape + 1, gender))
        # export "ARMATURE" only to save space
        bpy.ops.export_scene.fbx(filepath=fbx_body_saved,
                                 check_existing=True,
                                 axis_forward='-Z',
                                 axis_up='Y',
                                 filter_glob="*.fbx",
                                 version='BIN7400',
                                 ui_tab='MAIN',
                                 use_selection=False,
                                 global_scale=1,
                                 apply_unit_scale=True,
                                 bake_space_transform=False,
                                 object_types={'ARMATURE'},
                                 use_mesh_modifiers=False,
                                 mesh_smooth_type='OFF',
                                 use_mesh_edges=False,
                                 use_tspace=False,
                                 use_custom_props=False,
                                 add_leaf_bones=True,
                                 primary_bone_axis='Y',
                                 secondary_bone_axis='X',
                                 use_armature_deform_only=False,
                                 armature_nodetype='NULL',
                                 bake_anim=True,
                                 bake_anim_use_all_bones=True,
                                 bake_anim_use_nla_strips=True,
                                 bake_anim_use_all_actions=True,
                                 bake_anim_force_startend_keying=True,
                                 bake_anim_step=1,
                                 bake_anim_simplify_factor=1,
                                 use_anim=True,
                                 use_anim_action_all=True,
                                 use_default_take=True,
                                 use_anim_optimize=True,
                                 anim_optimize_precision=6,
                                 path_mode='AUTO',
                                 embed_textures=False,
                                 batch_mode='OFF',
                                 use_batch_own_dir=True,
                                 use_metadata=True)

        # iterate over the keyframes and record annotations
        # LOOP TO ANNOTATE
        #for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        for seq_frame in range(N):
            scene.frame_set(get_real_frame(seq_frame))
            iframe = seq_frame
            # dict_info['bg'][iframe] = bg_img_name
            # dict_info['cloth'][iframe] = cloth_img_name
            # dict_info['light'][:, iframe] = sh_coeffs
            # scene.render.use_antialiasing = False
            # scene.render.filepath = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))
            log_message("Annotating frame %d" % seq_frame)
            # # disable render output
            # logfile = '/dev/null'
            # open(logfile, 'a').close()
            # old = os.dup(1)
            # sys.stdout.flush()
            # os.close(1)
            # os.open(logfile, os.O_WRONLY)
            # # Render
            # bpy.ops.render.render(write_still=True)
            # # disable output redirection
            # os.close(1)
            # os.dup(old)
            # os.close(old)
            # bone locations should be saved after rendering so that the bones are updated
            bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene,
                                                       cam_ob)
            dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
            dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)
            #reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any()
            #arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion((1, 0, 0, 0))

        # save a .blend file for debugging:
        # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))

        # save annotation excluding png/exr data to _info.mat file
        import scipy.io
        scipy.io.savemat(matfile_info, dict_info, do_compression=True)

        # Initialize Blender for the next clip generation
        bpy.ops.wm.read_homefile()
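
# --- Illustrative sketch (not part of main() above) ---
# The preamble loop above eases the body from an upright T-pose into the first
# CMU frame by linearly interpolating the axis-angle pose over
# PREAMBLE_FRAME_NUM frames (the root X rotation starts at 90 degrees so the
# model stands up straight).  The helper below is a standalone version of that
# interpolation; `first_pose` is assumed to be the 72-dim SMPL pose of the
# first motion frame.
import math
import numpy as np

def preamble_poses(first_pose, n_preamble):
    """Return n_preamble poses blending from a standing T-pose into first_pose."""
    poses = []
    for f in range(n_preamble):
        t = f / float(n_preamble)          # 0 .. (n-1)/n, matching the loop above
        pose = np.asarray(first_pose, dtype=np.float64).copy()
        pose[0] = math.radians(90) + t * (pose[0] - math.radians(90))
        pose[1:] = t * pose[1:]            # all remaining joints blend in from zero
        poses.append(pose)
    return poses
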
import os
import shutil
import sys

sys.path.insert(0, ".")

female_train_filename = 'txt/female_train_textures.txt'
male_train_filename = 'txt/male_train_textures.txt'
female_test_filename = 'txt/female_test_textures.txt'
male_test_filename = 'txt/male_test_textures.txt'

base_outdir = 'human_textures'

import config
params = config.load_file('config_local', 'SYNTH_DATA')
smpl_data_dir = params['smpl_data_folder']


def mkdir_if_missing(dirname):
    if not os.path.exists(dirname):
        os.makedirs(dirname)


def copy_human_textures(mode, gender, filename):
    outdir = os.path.join(base_outdir, mode, gender)
    mkdir_if_missing(outdir)

    with open(filename, 'r') as f:
        textures = f.read().split('\n')

    # Append the base directory to each texture and copy it into outdir
    # (assumed completion; the original listing is truncated at this point).
    for texture in textures:
        if texture:
            shutil.copyfile(os.path.join(smpl_data_dir, texture),
                            os.path.join(outdir, os.path.basename(texture)))
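
# --- Illustrative usage sketch ---
# With the helper above, the four texture lists defined at the top of this
# script could be copied out as below.  Whether these exact calls appear in
# the original (truncated) script is an assumption.
# copy_human_textures('train', 'female', female_train_filename)
# copy_human_textures('train', 'male', male_train_filename)
# copy_human_textures('test', 'female', female_test_filename)
# copy_human_textures('test', 'male', male_test_filename)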
Example #10
def main_part2(file_cfg):
    # time logging
    global start_time
    start_time = time.time()

    # param list:
    with open(file_cfg, "rb") as f:
        print("load configs from: " + file_cfg)
        config = pickle.load(f)

    seed_number = config.seed_number
    idx_gender = config.idx_gender
    idx_bg = config.idx_bg
    idx_fshape = config.idx_fshape
    idx_cloth = config.idx_cloth
    disp_bg = config.disp_bg

    # human data source:
    idx = config.idx_seq
    ishape = config.idx_ishape
    stride = config.stride


    from pickle import load
    import argparse
    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)

    if idx is None:
        exit(1)
    if ishape is None:
        exit(1)
    if stride is None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50

    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))

    log_message("start part 2")

    import random
    # seed the RNGs with the per-sequence seed (its value comes in via the
    # config pickle; the string below records how it was derived)
    s = "synth_data:%d:%d:%d" % (idx, runpass, ishape)
    log_message("USING SEED %d (seed string '%s')" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)

    # import configuration
    import config
    params = config.load_file('config', 'SYNTH_DATA')

    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    resy = params['resy']
    resx = params['resx']
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    name = idx_info[idx]["name"]

    # compute number of cuts
    nb_ishape = max(1, int(np.ceil((idx_info[idx]['nb_frames'] - (clipsize - stride))/stride)))
    ishape = ishape%nb_ishape
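    # Worked example with illustrative values (not taken from the config):
    # nb_frames = 500, clipsize = 100, stride = 50 gives
    # nb_ishape = max(1, ceil((500 - 50) / 50)) = 9, so the modulo above wraps
    # any requested ishape into [0, 8].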


    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    log_message("output path: " + output_path)
    tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))

    # check whether openexr_py2_path is loaded from configuration file
    if 'openexr_py2_path' in locals() or 'openexr_py2_path' in globals():
        for exr_path in openexr_py2_path.split(':'):
            sys.path.insert(1, exr_path)

    # to read exr imgs
    import OpenEXR
    import array
    import Imath

    log_message("Loading SMPL data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
    cmu_parms, name = load_body_data(smpl_data, idx)

    res_paths = {k:join(tmp_path, '%05d_%s'%(idx, k)) for k in output_types if output_types[k]}
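    # res_paths maps each enabled output type (e.g. depth, normal, gtflow,
    # segm) to its own temporary folder of per-frame EXR files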

    data = cmu_parms[name]
    nframes = len(data['poses'][::stepsize])

    # .mat files
    matfile_normal = join(output_path, name.replace(" ", "") + "_c%04d_normal.mat" % (ishape + 1))
    matfile_gtflow = join(output_path, name.replace(" ", "") + "_c%04d_gtflow.mat" % (ishape + 1))
    matfile_depth = join(output_path, name.replace(" ", "") + "_c%04d_depth.mat" % (ishape + 1))
    matfile_segm = join(output_path, name.replace(" ", "") + "_c%04d_segm.mat" % (ishape + 1))
    dict_normal = {}
    dict_gtflow = {}
    dict_depth = {}
    dict_segm = {}
    get_real_frame = lambda ifr: ifr
    FLOAT = Imath.PixelType(Imath.PixelType.FLOAT)

    # overlap determined by stride (# subsampled frames to skip)
    fbegin = ishape*stepsize*stride
    fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
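    # Illustrative example: with stepsize = 4, stride = 50, clipsize = 100 and
    # ishape = 1, the slice below is data['poses'][200:600:4], i.e. 100
    # subsampled frames whose first clipsize - stride = 50 overlap the
    # previous clip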
    # LOOP OVER FRAMES
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame

        log_message("Processing frame %d" % iframe)

        for k, folder in res_paths.items():
            if not k== 'vblur' and not k=='fg':
                for ii in range(2):
                    path = join(folder, 'Image%04d_%d.exr' % (get_real_frame(seq_frame), ii))
                    exr_file = OpenEXR.InputFile(path)
                    if k == 'normal':
                        mat = np.transpose(np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R", "G", "B")], (3, resx, resy)), (1, 2, 0))
                        dict_normal['normal_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False) # +1 for the 1-indexing
                    elif k == 'gtflow':
                        mat = np.transpose(np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R", "G")], (2, resx, resy)), (1, 2, 0))
                        dict_gtflow['gtflow_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False)
                    elif k == 'depth':
                        mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R")], (resx, resy))
                        dict_depth['depth_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.float32, copy=False)
                    elif k == 'segm':
                        mat = np.reshape([array.array('f', exr_file.channel(Chan, FLOAT)).tolist() for Chan in ("R")], (resx, resy))
                        dict_segm['segm_%04d_%01d' % (iframe + 1, ii+1)] = mat.astype(np.uint8, copy=False)
                    # remove(path)

    print("render infos: ")
    print(res_paths)
    print("#normal: %d"%(len(dict_normal.keys())))
    print("#depth: %d"%(len(dict_depth.keys())))
    print("#segm: %d"%(len(dict_segm.keys())))
    import scipy.io
    scipy.io.savemat(matfile_normal, dict_normal, do_compression=True)
    scipy.io.savemat(matfile_gtflow, dict_gtflow, do_compression=True)
    scipy.io.savemat(matfile_depth, dict_depth, do_compression=True)
    scipy.io.savemat(matfile_segm, dict_segm, do_compression=True)

    # cleaning up tmp
    if tmp_path != "" and tmp_path != "/":
        log_message("Cleaning up tmp")
        os.system('rm -rf %s' % tmp_path)

    log_message("Completed batch")
Example #11
0
def main():
    # time logging
    global start_time
    start_time = time.time()

    import argparse
    
    # parse commandline arguments
    log_message(sys.argv)
    parser = argparse.ArgumentParser(description='Generate synth dataset images.')
    parser.add_argument('--idx', type=int,
                        help='idx of the requested sequence')
    parser.add_argument('--ishape', type=int,
                        help='requested cut, according to the stride')
    parser.add_argument('--stride', type=int,
                        help='stride amount, default 50')

    args = parser.parse_args(sys.argv[sys.argv.index("--") + 1:])
    
    idx = args.idx
    ishape = args.ishape
    stride = args.stride
    
    log_message("input idx: %d" % idx)
    log_message("input ishape: %d" % ishape)
    log_message("input stride: %d" % stride)
    
    if idx is None:
        exit(1)
    if ishape is None:
        exit(1)
    if stride is None:
        log_message("WARNING: stride not specified, using default value 50")
        stride = 50
    
    # import idx info (name, split)
    idx_info = load(open("pkl/idx_info.pickle", 'rb'))

    # get runpass
    (runpass, idx) = divmod(idx, len(idx_info))
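    # Illustrative example: if idx_info lists 1500 sequences and the requested
    # idx is 1600, divmod gives runpass = 1 and idx = 100, so the same
    # sequences are reused on later passes (with a different seed string)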
    
    log_message("runpass: %d" % runpass)
    log_message("output idx: %d" % idx)
    idx_info = idx_info[idx]
    log_message("sequence: %s" % idx_info['name'])
    log_message("nb_frames: %f" % idx_info['nb_frames'])
    log_message("use_split: %s" % idx_info['use_split'])

    # import configuration
    log_message("Importing configuration")
    import config
    params = config.load_file('config', 'SYNTH_DATA')
    
    smpl_data_folder = params['smpl_data_folder']
    smpl_data_filename = params['smpl_data_filename']
    bg_path = params['bg_path']
    resy = params['resy']
    resx = params['resx']
    clothing_option = params['clothing_option'] # grey, nongrey or all
    tmp_path = params['tmp_path']
    output_path = params['output_path']
    output_types = params['output_types']
    stepsize = params['stepsize']
    clipsize = params['clipsize']
    openexr_py2_path = params['openexr_py2_path']

    # compute number of cuts
    nb_ishape = max(1, int(np.ceil((idx_info['nb_frames'] - (clipsize - stride))/stride)))
    log_message("Max ishape: %d" % (nb_ishape - 1))
    
    if ishape is None:
        exit(1)
    
    assert(ishape < nb_ishape)
    
    # name is set given idx
    name = idx_info['name']
    output_path = join(output_path, 'run%d' % runpass, name.replace(" ", ""))
    params['output_path'] = output_path
    tmp_path = join(tmp_path, 'run%d_%s_c%04d' % (runpass, name.replace(" ", ""), (ishape + 1)))
    params['tmp_path'] = tmp_path
    
    # check if already computed
    #  + clean up existing tmp folders if any
    if exists(tmp_path) and tmp_path != "" and tmp_path != "/":
        os.system('rm -rf %s' % tmp_path)
    rgb_vid_filename = "%s_c%04d.mp4" % (join(output_path, name.replace(' ', '')), (ishape + 1))
    #if os.path.isfile(rgb_vid_filename):
    #    log_message("ALREADY COMPUTED - existing: %s" % rgb_vid_filename)
    #    return 0
    
    # create tmp directory
    if not exists(tmp_path):
        mkdir_safe(tmp_path)
    
    # >> don't use random generator before this point <<

    # initialize RNG with seeds from sequence id
    import hashlib
    s = "synth_data:%d:%d:%d" % (idx, runpass,ishape)
    seed_number = int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16) % (10 ** 8)
    log_message("GENERATED SEED %d from string '%s'" % (seed_number, s))
    random.seed(seed_number)
    np.random.seed(seed_number)
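    # the SHA1 hexdigest of the seed string is reduced modulo 10**8, so the
    # same (idx, runpass, ishape) triple always yields the same seed and the
    # clip can be regenerated deterministically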
    
    if(output_types['vblur']):
        vblur_factor = np.random.normal(0.5, 0.5)
        params['vblur_factor'] = vblur_factor
    
    log_message("Setup Blender")

    # create copy-spher.harm. directory if not exists
    sh_dir = join(tmp_path, 'spher_harm')
    if not exists(sh_dir):
        mkdir_safe(sh_dir)
    sh_dst = join(sh_dir, 'sh_%02d_%05d.osl' % (runpass, idx))
    os.system('cp spher_harm/sh.osl %s' % sh_dst)

    genders = {0: 'female', 1: 'male'}
    # pick random gender
    gender = choice(genders)

    scene = bpy.data.scenes['Scene']
    scene.render.engine = 'CYCLES'
    bpy.data.materials['Material'].use_nodes = True
    scene.cycles.shading_system = True
    scene.use_nodes = True

    log_message("Listing background images")
    bg_names = join(bg_path, '%s_img.txt' % idx_info['use_split'])
    nh_txt_paths = []
    with open(bg_names) as f:
        for line in f:
            nh_txt_paths.append(join(bg_path, line))

    # grab clothing names
    log_message("clothing: %s" % clothing_option)
    with open( join(smpl_data_folder, 'textures', '%s_%s.txt' % ( gender, idx_info['use_split'] ) ) ) as f:
        txt_paths = f.read().splitlines()

    # if using only one source of clothing
    if clothing_option == 'nongrey':
        txt_paths = [k for k in txt_paths if 'nongrey' in k]
    elif clothing_option == 'grey':
        txt_paths = [k for k in txt_paths if 'nongrey' not in k]
    
    # random clothing texture
    cloth_img_name = choice(txt_paths)
    cloth_img_name = join(smpl_data_folder, cloth_img_name)
    cloth_img = bpy.data.images.load(cloth_img_name)

    # random background
    bg_img_name = choice(nh_txt_paths)[:-1]
    bg_img = bpy.data.images.load(bg_img_name)

    log_message("Loading parts segmentation")
    beta_stds = np.load(join(smpl_data_folder, ('%s_beta_stds.npy' % gender)))
    
    log_message("Building materials tree")
    mat_tree = bpy.data.materials['Material'].node_tree
    create_sh_material(mat_tree, sh_dst, cloth_img)
    res_paths = create_composite_nodes(scene.node_tree, params, img=bg_img, idx=idx)

    log_message("Loading smpl data")
    smpl_data = np.load(join(smpl_data_folder, smpl_data_filename))
    
    log_message("Initializing scene")
    camera_distance = np.random.normal(8.0, 1)
    params['camera_distance'] = camera_distance
    ob, obname, arm_ob, cam_ob = init_scene(scene, params, gender)

    setState0()
    ob.select = True
    bpy.context.scene.objects.active = ob
    segmented_materials = True #True: 0-24, False: expected to have 0-1 bg/fg
    
    log_message("Creating materials segmentation")
    # create material segmentation
    if segmented_materials:
        materials = create_segmentation(ob, params)
        prob_dressed = {'leftLeg':.5, 'leftArm':.9, 'leftHandIndex1':.01,
                        'rightShoulder':.8, 'rightHand':.01, 'neck':.01,
                        'rightToeBase':.9, 'leftShoulder':.8, 'leftToeBase':.9,
                        'rightForeArm':.5, 'leftHand':.01, 'spine':.9,
                        'leftFoot':.9, 'leftUpLeg':.9, 'rightUpLeg':.9,
                        'rightFoot':.9, 'head':.01, 'leftForeArm':.5,
                        'rightArm':.5, 'spine1':.9, 'hips':.9,
                        'rightHandIndex1':.01, 'spine2':.9, 'rightLeg':.5}
    else:
        materials = {'FullBody': bpy.data.materials['Material']}
        prob_dressed = {'FullBody': .6}

    orig_pelvis_loc = (arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()) - Vector((-1., 1., 1.))
    orig_cam_loc = cam_ob.location.copy()

    # unblocking both the pose and the blendshape limits
    for k in ob.data.shape_keys.key_blocks.keys():
        bpy.data.shape_keys["Key"].key_blocks[k].slider_min = -10
        bpy.data.shape_keys["Key"].key_blocks[k].slider_max = 10

    log_message("Loading body data")
    cmu_parms, fshapes, name = load_body_data(smpl_data, ob, obname, idx=idx, gender=gender)
    
    log_message("Loaded body data for %s" % name)
    
    nb_fshapes = len(fshapes)
    if idx_info['use_split'] == 'train':
        fshapes = fshapes[:int(nb_fshapes*0.8)]
    elif idx_info['use_split'] == 'test':
        fshapes = fshapes[int(nb_fshapes*0.8):]
    
    # pick random real body shape
    shape = choice(fshapes) #+random_shape(.5) can add noise
    #shape = random_shape(3.) # random body shape
    
    # example shapes
    #shape = np.zeros(10) #average
    #shape = np.array([ 2.25176191, -3.7883464 ,  0.46747496,  3.89178988,  2.20098416,  0.26102114, -3.07428093,  0.55708514, -3.94442258, -2.88552087]) #fat
    #shape = np.array([-2.26781107,  0.88158132, -0.93788176, -0.23480508,  1.17088298,  1.55550789,  0.44383225,  0.37688275, -0.27983086,  1.77102953]) #thin
    #shape = np.array([ 0.00404852,  0.8084637 ,  0.32332591, -1.33163664,  1.05008727,  1.60955275,  0.22372946, -0.10738459,  0.89456312, -1.22231216]) #short
    #shape = np.array([ 3.63453289,  1.20836171,  3.15674431, -0.78646793, -1.93847355, -0.32129994, -0.97771656,  0.94531640,  0.52825811, -0.99324327]) #tall

    ndofs = 10

    scene.objects.active = arm_ob
    orig_trans = np.asarray(arm_ob.pose.bones[obname+'_Pelvis'].location).copy()

    # create output directory
    if not exists(output_path):
        mkdir_safe(output_path)

    # spherical harmonics material needs a script to be loaded and compiled
    scs = []
    for mname, material in materials.items():
        scs.append(material.node_tree.nodes['Script'])
        scs[-1].filepath = sh_dst
        scs[-1].update()

    rgb_dirname = name.replace(" ", "") + '_c%04d.mp4' % (ishape + 1)
    rgb_path = join(tmp_path, rgb_dirname)

    data = cmu_parms[name]
    
    fbegin = ishape*stepsize*stride
    fend = min(ishape*stepsize*stride + stepsize*clipsize, len(data['poses']))
    
    log_message("Computing how many frames to allocate")
    N = len(data['poses'][fbegin:fend:stepsize])
    log_message("Allocating %d frames in mat file" % N)

    # force recomputation of joint angles unless shape is all zeros
    curr_shape = np.zeros_like(shape)
    nframes = len(data['poses'][::stepsize])

    matfile_info = join(output_path, name.replace(" ", "") + "_c%04d_info.mat" % (ishape+1))
    log_message('Working on %s' % matfile_info)

    # allocate
    dict_info = {}
    dict_info['bg'] = np.zeros((N,), dtype=object) # background image path
    dict_info['camLoc'] = np.empty(3) # (1, 3)
    dict_info['clipNo'] = ishape +1
    dict_info['cloth'] = np.zeros((N,), dtype=object) # clothing texture image path
    dict_info['gender'] = np.empty(N, dtype='uint8') # 0 for female, 1 for male (keys of the genders dict above)
    dict_info['joints2D'] = np.empty((2, 24, N), dtype='float32') # 2D joint positions in pixel space
    dict_info['joints3D'] = np.empty((3, 24, N), dtype='float32') # 3D joint positions in world coordinates
    dict_info['light'] = np.empty((9, N), dtype='float32')
    dict_info['pose'] = np.empty((data['poses'][0].size, N), dtype='float32') # joint angles from SMPL (CMU)
    dict_info['sequence'] = name.replace(" ", "") + "_c%04d" % (ishape + 1)
    dict_info['shape'] = np.empty((ndofs, N), dtype='float32')
    dict_info['zrot'] = np.empty(N, dtype='float32')
    dict_info['camDist'] = camera_distance
    dict_info['stride'] = stride

    if name.replace(" ", "").startswith('h36m'):
        dict_info['source'] = 'h36m'
    else:
        dict_info['source'] = 'cmu'

    if(output_types['vblur']):
        dict_info['vblur_factor'] = np.empty(N, dtype='float32')

    # for each clipsize'th frame in the sequence
    get_real_frame = lambda ifr: ifr
    random_zrot = 0
    reset_loc = False
    batch_it = 0
    curr_shape = reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene,
                                       cam_ob, smpl_data['regression_verts'], smpl_data['joint_regressor'])
    random_zrot = 2*np.pi*np.random.rand()
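    # a single random global z-rotation is drawn per clip and applied to the
    # root bone at every keyframe in the loop below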
    
    arm_ob.animation_data_clear()
    cam_ob.animation_data_clear()

    # create a keyframe animation with pose, translation, blendshapes and camera motion
    # LOOP TO CREATE 3D ANIMATION
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        iframe = seq_frame
        scene.frame_set(get_real_frame(seq_frame))

        # apply the translation, pose and shape to the character
        apply_trans_pose_shape(Vector(trans), pose, shape, ob, arm_ob, obname, scene, cam_ob, get_real_frame(seq_frame))
        dict_info['shape'][:, iframe] = shape[:ndofs]
        dict_info['pose'][:, iframe] = pose
        dict_info['gender'][iframe] = list(genders)[list(genders.values()).index(gender)]
        if(output_types['vblur']):
            dict_info['vblur_factor'][iframe] = vblur_factor

        arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion(Euler((0, 0, random_zrot), 'XYZ'))
        arm_ob.pose.bones[obname+'_root'].keyframe_insert('rotation_quaternion', frame=get_real_frame(seq_frame))
        dict_info['zrot'][iframe] = random_zrot

        scene.update()

        # Bodies centered only in each minibatch of clipsize frames
        if seq_frame == 0 or reset_loc: 
            reset_loc = False
            new_pelvis_loc = arm_ob.matrix_world.copy() * arm_ob.pose.bones[obname+'_Pelvis'].head.copy()
            cam_ob.location = orig_cam_loc.copy() + (new_pelvis_loc.copy() - orig_pelvis_loc.copy())
            cam_ob.keyframe_insert('location', frame=get_real_frame(seq_frame))
            dict_info['camLoc'] = np.array(cam_ob.location)

    scene.node_tree.nodes['Image'].image = bg_img

    for part, material in materials.items():
        material.node_tree.nodes['Vector Math'].inputs[1].default_value[:2] = (0, 0)

    # random light
    sh_coeffs = .7 * (2 * np.random.rand(9) - 1)
    sh_coeffs[0] = .5 + .9 * np.random.rand() # first (ambient) coefficient needs a minimum; the rest are uniformly distributed, higher means brighter
    sh_coeffs[1] = -.7 * np.random.rand()

    for ish, coeff in enumerate(sh_coeffs):
        for sc in scs:
            sc.inputs[ish+1].default_value = coeff

    # iterate over the keyframes and render
    # LOOP TO RENDER
    for seq_frame, (pose, trans) in enumerate(zip(data['poses'][fbegin:fend:stepsize], data['trans'][fbegin:fend:stepsize])):
        scene.frame_set(get_real_frame(seq_frame))
        iframe = seq_frame

        dict_info['bg'][iframe] = bg_img_name
        dict_info['cloth'][iframe] = cloth_img_name
        dict_info['light'][:, iframe] = sh_coeffs

        scene.render.use_antialiasing = False
        scene.render.filepath = join(rgb_path, 'Image%04d.png' % get_real_frame(seq_frame))

        log_message("Rendering frame %d" % seq_frame)
        
        # disable render output
        logfile = '/dev/null'
        open(logfile, 'a').close()
        old = os.dup(1)
        sys.stdout.flush()
        os.close(1)
        os.open(logfile, os.O_WRONLY)

        # Render
        bpy.ops.render.render(write_still=True)

        # disable output redirection
        os.close(1)
        os.dup(old)
        os.close(old)

        # NOTE:
        # ideally, pixels should be readable from a viewer node, but I get only zeros
        # --> https://ammous88.wordpress.com/2015/01/16/blender-access-render-results-pixels-directly-from-python-2/
        # len(np.asarray(bpy.data.images['Render Result'].pixels) is 0
        # Therefore we write them to temporary files and read with OpenEXR library (available for python2) in main_part2.py
        # Alternatively, if you don't want to use OpenEXR library, the following commented code does loading with Blender functions, but it can cause memory leak.
        # If you want to use it, copy necessary lines from main_part2.py such as definitions of dict_normal, matfile_normal...

        #for k, folder in res_paths.items():
        #   if not k== 'vblur' and not k=='fg':
        #       path = join(folder, 'Image%04d.exr' % get_real_frame(seq_frame))
        #       render_img = bpy.data.images.load(path)
        #       # render_img.pixels size is width * height * 4 (rgba)
        #       arr = np.array(render_img.pixels[:]).reshape(resx, resy, 4)[::-1,:, :] # images are vertically flipped 
        #       if k == 'normal':# 3 channels, original order
        #           mat = arr[:,:, :3]
        #           dict_normal['normal_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'gtflow':
        #           mat = arr[:,:, 1:3]
        #           dict_gtflow['gtflow_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'depth':
        #           mat = arr[:,:, 0]
        #           dict_depth['depth_%d' % (iframe + 1)] = mat.astype(np.float32, copy=False)
        #       elif k == 'segm':
        #           mat = arr[:,:,0]
        #           dict_segm['segm_%d' % (iframe + 1)] = mat.astype(np.uint8, copy=False)
        #
        #       # remove the image to release memory, object handles, etc.
        #       render_img.user_clear()
        #       bpy.data.images.remove(render_img)

        # bone locations should be saved after rendering so that the bones are updated
        bone_locs_2D, bone_locs_3D = get_bone_locs(obname, arm_ob, scene, cam_ob)
        dict_info['joints2D'][:, :, iframe] = np.transpose(bone_locs_2D)
        dict_info['joints3D'][:, :, iframe] = np.transpose(bone_locs_3D)

        reset_loc = (bone_locs_2D.max(axis=-1) > 256).any() or (bone_locs_2D.min(axis=0) < 0).any()
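        # reset_loc flags frames where a projected joint leaves the image
        # (note: the 256-pixel bound above is hard-coded rather than derived
        # from resx/resy)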
        arm_ob.pose.bones[obname+'_root'].rotation_quaternion = Quaternion((1, 0, 0, 0))

    # save a .blend file for debugging:
    # bpy.ops.wm.save_as_mainfile(filepath=join(tmp_path, 'pre.blend'))
    
    # save RGB data with ffmpeg (if you don't have h264 codec, you can replace with another one and control the quality with something like -q:v 3)
    cmd_ffmpeg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (join(rgb_path, 'Image%04d.png'), join(output_path, name.replace(' ', '')), (ishape + 1))
    log_message("Generating RGB video (%s)" % cmd_ffmpeg)
    os.system(cmd_ffmpeg)
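    # If the h264 encoder is unavailable, a fallback along the lines of the
    # comment above could be, for example:
    #   ffmpeg -y -r 30 -i <rgb_path>/Image%04d.png -c:v mpeg4 -q:v 3 out_c0001.mp4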
    
    if(output_types['vblur']):
        cmd_ffmpeg_vblur = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 -vf "scale=trunc(iw/2)*2:trunc(ih/2)*2" ''%s_c%04d.mp4''' % (join(res_paths['vblur'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_vblur'), (ishape + 1))
        log_message("Generating vblur video (%s)" % cmd_ffmpeg_vblur)
        os.system(cmd_ffmpeg_vblur)
   
    if(output_types['fg']):
        cmd_ffmpeg_fg = 'ffmpeg -y -r 30 -i ''%s'' -c:v h264 -pix_fmt yuv420p -crf 23 ''%s_c%04d.mp4''' % (join(res_paths['fg'], 'Image%04d.png'), join(output_path, name.replace(' ', '')+'_fg'), (ishape + 1))
        log_message("Generating fg video (%s)" % cmd_ffmpeg_fg)
        os.system(cmd_ffmpeg_fg)
   
    cmd_tar = 'tar -czvf %s/%s.tar.gz -C %s %s' % (output_path, rgb_dirname, tmp_path, rgb_dirname)
    log_message("Tarballing the images (%s)" % cmd_tar)
    os.system(cmd_tar)
    
    # save annotation excluding png/exr data to _info.mat file
    import scipy.io
    scipy.io.savemat(matfile_info, dict_info, do_compression=True)