def main():
    """Capture RGB frames of an animated tiger in the AnimalDataCapture level.

    Connects to a local udb/UnrealCV server, spawns a tiger with a fixed
    mask color and mesh, then iterates over render parameters, posing the
    animation and tracking camera and writing one PNG per parameter set.
    """
    udb.connect('localhost', 9000)
    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(map_name=map_name))
    output_dir = os.path.abspath('./generated_data')
    mask_dir = os.path.abspath('./masked')
    # Ensure both output locations exist before writing.
    for directory in (output_dir, mask_dir):
        if not os.path.isdir(directory):
            os.mkdir(directory)
    render_params = load_render_params()
    animal = udb.CvAnimal('tiger')
    animal.spawn()
    # Mask color used by the server-side segmentation for this object.
    animal.set_mask_color(155, 168, 157)
    # Use the mesh from the first parameter set for the whole run.
    animal.set_mesh(render_params[0][0])
    env = udb.CvEnv()
    for idx, (mesh, anim, ratio, dist, az, el) in enumerate(tqdm(render_params)):
        filename = '%08d' % idx
        animal.set_animation(anim, ratio)
        animal.set_tracking_camera(dist, az, el)
        img = animal.get_img()
        imageio.imwrite(os.path.join(output_dir, filename + '_img.png'), img)
def main(): udb.connect('localhost', 9000) # reset the program map_name = 'EmptyPlane' udb.client.request('vset /action/game/level {map_name}'.format(**locals())) udb.client.request('vset /camera/0/location 400 0 300') udb.client.request('vset /camera/0/rotation 0 180 0') human_mask_color = [0, 128, 0] human = CvCharacter('human') human.spawn() human.set_mesh(MESH.MAN) print(udb.client.request('vget /objects')) car = CvCar('car') car.spawn() car_type = 'suv' car.set_mesh(car_type) dist = 350 az = 0 el = -5 human.set_tracking_camera(dist, az, el) img = human.get_img() seg = human.get_seg() act_opt = anim_paths[0] frame_count = 64 frames = make_frames(act_opt, frame_count, car_type) for frame_id, frame in enumerate(tqdm(frames)): act_label = frame['act_label'] door_angles = frame['car']['door_angles'] animation_path = frame['human']['animation_path'] ratio = frame['human']['ratio'] human_pose = frame['human']['pose'] # location, rotation mesh_path = frame['human']['mesh_path'] x, y, z, pitch, yaw, roll = human_pose fl, fr, bl, br, hood, trunk = door_angles car.set_part_angles(fl, fr, bl, br, hood, trunk) # Set car and human location using asset data human.set_animation(animation_path, ratio) human.set_loc(x, y, z) human.set_rot(pitch, yaw, roll) # Get 3d bounding box of an object. res = udb.client.request('vget /object/human/bounds') print(res) res = udb.client.request('vget /object/car/bounds') print(res) cv2.imwrite('bb.png', img[:, :, 2::-1])
def main(args): udb.connect('localhost', 9000) # reset the program map_name = 'EmptyPlane' udb.client.request('vset /action/game/level {map_name}'.format(**locals())) out_path = args.out_path if not os.path.exists(out_path): os.makedirs(out_path) obj_id = 'tiger' animal = udb.CvCharacter(obj_id) animal.spawn() env = udb.CvEnv() SKEL_GIRL = "Skeleton'/Game/Girl_01/meshes/girl_01_Skeleton.girl_01_Skeleton'" SKEL_RP = "Skeleton'/Game/RP_Character/00_rp_master/UE4_Mannequin_Skeleton.UE4_Mannequin_Skeleton'" skel_Girl = skeleton_mapping[SKEL_GIRL] skel_RP = skeleton_mapping[SKEL_RP] rp_mesh = skel_RP rp_anim = [ACT_SKEL_RP[0]] rp_pack = dict(mesh=rp_mesh, anim=rp_anim) girl_mesh = [skel_Girl[3], skel_Girl[4], skel_Girl[5], skel_Girl[7]] girl_anim = [ACT_SKEL_GIRL[0]] girl_pack = dict(mesh=girl_mesh, anim=girl_anim) if args.char == 'Girl': character_pack = girl_pack elif args.char == 'RP': character_pack = rp_pack var = args.var assert var in ['az', 'el', 'dist'] if var == 'az': render_params = azimuth_render_params(character_pack) azimuth_load_data(out_path, animal, render_params) elif var == 'el': render_params = elevation_render_params(character_pack) elevation_load_data(out_path, animal, render_params) elif var == 'dist': render_params = distance_render_params(character_pack) distance_load_data(out_path, animal, render_params)
def main(args):
    """Render synthetic animal images with 2D keypoint annotations.

    Connects to a udb/UnrealCV server, loads the mesh for ``args.animal``,
    and for each render parameter (mesh, animation, ratio, camera
    distance/azimuth/elevation) randomizes lighting, sky/floor backgrounds
    and (optionally) the animal texture, then saves image, segmentation
    mask, depth map and keypoints for frames where enough of the 18 tracked
    keypoints are visible. Stops after ``args.num_imgs`` saved images.
    """
    udb.connect('localhost', 9900)
    global_animal = args.animal
    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')
    # Background and texture pools are both drawn from the same image folder.
    val2017_dir = os.path.abspath(args.random_texture_path)
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)
    render_params = load_render_params(global_animal)
    random.shuffle(render_params)
    # Server-side object id is always 'tiger' regardless of the chosen mesh.
    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()
    # acquire offset (z-coordinate of the spawned object, used to shift keypoints)
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]
    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    # Select the skeletal mesh matching the requested animal.
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)
    env = udb.CvEnv()
    output_dir = args.output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    img_idx = 0
    for i, param in enumerate(tqdm(render_params)):
        mesh, anim, ratio, dist, az, el = param
        filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)
        sky_texture = random.choice(bg_path_list)
        floor_texture = random.choice(bg_path_list)
        animal_texture = random.choice(texture_path_list)
        # Update the scene
        env.set_random_light()
        env.set_floor(floor_texture)
        env.set_sky(sky_texture)
        if args.use_random_texture:
            animal.set_texture(animal_texture)
        animal.set_animation(anim, ratio)
        # if global_animal=='horse':
        #     # set different original textures
        #     _, animal_texture = random.choice(list(udb.asset.animal.horse_material.items()))
        #     _, animal_texture_fur = random.choice(list(udb.asset.animal.horse_material.items()))
        #     animal.set_material(0, animal_texture)
        #     animal.set_material(1, animal_texture_fur)
        # Capture data
        animal.set_tracking_camera(dist, az, el)
        img = animal.get_img()
        seg = animal.get_seg()
        depth = animal.get_depth()
        mask = udb.get_mask(seg, [r, g, b])
        # get kpts
        ## get cam_loc and cam_rot
        cam_loc, cam_rot = get_camera_params()
        cam_loc = [float(item) for item in cam_loc.split(' ')]
        cam_rot = [float(item) for item in cam_rot.split(' ')]
        ## transform keypoints
        kp_3d_array = parse_kpts(filename, offset)
        kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array, depth)
        ## transform images and kpts
        #TODO get rid of PIL
        img = Image.fromarray(img[:, :, :3])
        # Binary segmentation mask: 255 where the animal's mask color matched.
        seg_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
        seg_mask[mask == False] = 0  # tiger/horse
        seg_mask[mask == True] = 255  # tiger/horse
        # # save imgs
        # Per-animal vertex ids of the 18 tracked keypoints.
        if global_animal == 'tiger':
            kp_18_id = [
                2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441, 1280,
                2201, 1662, 266, 158, 270, 152, 219, 129
            ]
        elif global_animal == 'horse':
            kp_18_id = [
                1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392, 564,
                993, 726, 1585, 1556, 427, 1548, 967, 877
            ]
        elif global_animal == 'domestic_sheep':
            kp_18_id = [
                2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173, 1829,
                1422, 821, 624, 580, 622, 575, 1370, 716
            ]
        elif global_animal == 'hellenic_hound':
            kp_18_id = [
                2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575,
                1446, 602, 755, 673, 780, 1580, 466, 631
            ]
        elif global_animal == 'elephant':
            kp_18_id = [
                1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923, 1442,
                1041, 1528, 78, 599, 25, 595, 171, 570
            ]
        # Keep the sample only if at least 6 of the 18 keypoints are flagged
        # (column 2 appears to be a visibility indicator — see transform_kpts).
        if sum(kpts[kp_18_id, 2]) >= 6:
            imageio.imwrite(os.path.join(output_dir, filename + '_img.png'), img)
            imageio.imwrite(os.path.join(output_dir, filename + '_seg.png'), seg_mask)
            np.save(os.path.join(output_dir, filename + '_depth.npy'), depth)
            np.save(os.path.join(output_dir, filename + '_kpts.npy'), kpts)
            np.save(os.path.join(output_dir, filename + '_kpts_z.npy'), kpts_z)
            img_idx += 1
            if img_idx > args.num_imgs - 1:
                break
def retrieve(animal, num_images, use_random_texture):
    """Generate (pose, appearance) image triplets for one animal.

    For each accepted sample the loop tries to produce four renders:
      - p0a0: original pose, original (randomly drawn) texture,
      - p0a1: same pose, a different texture,
      - p1a1: a new pose, keeping the p0a1 texture,
      - p1a0: the new pose with the original texture restored.
    Images, binary masks and white-background versions are written to disk;
    per-category file lists and 2D keypoints are accumulated and returned.

    Returns a 16-tuple: for each of p0a0/p0a1/p1a1/p1a0 the whitened-image
    paths, mask paths, frame paths, and an ndarray of extracted keypoints.

    NOTE(review): this body was reconstructed from whitespace-mangled source;
    the nesting level of ``img_idx += 1`` (per while-iteration vs. per-render)
    is ambiguous in the original — verify against the upstream script.
    """
    udb.connect('localhost', 9900)
    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')
    random_texture_path = "val2017"
    # this path needs to be on the server!!
    val2017_dir = "/export/home/ffeldman/git/Learning-from-Synthetic-Animals/data_generation/" + random_texture_path  # os.path.abspath(random_texture_path)
    beautiful_textures = "/export/home/ffeldman/git/Learning-from-Synthetic-Animals/data_generation/texture_images/"
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)
    beautiful_textures_path_list = glob_images(beautiful_textures)
    output_path = f"synthetic_animals_triplet/{animal}/"
    global_animal = animal
    render_params = load_render_params(global_animal)
    random.shuffle(render_params)
    # Server-side object id is always 'tiger'; `animal` is rebound to the
    # CvAnimal handle below (the animal *name* lives on in global_animal).
    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()
    # acquire offset (spawn z-coordinate, used to shift parsed keypoints)
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]
    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':  # Dog
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)
    # from here todo!
    elif global_animal == 'cat':
        animal.set_mesh(udb.asset.MESH_CAT)
    # elif global_animal=='zebra':
    #     animal.set_mesh(udb.asset.MESH_CAT)
    # elif global_animal=='celtic_wolfhound': # Dog
    #     animal.set_mesh(udb.asset.MESH_CAT)
    # elif global_animal=='pug': # mops -> dog
    #     animal.set_mesh(udb.asset.MESH_CAT)
    # elif global_animal=='cane_corso': # a dog
    #     animal.set_mesh(udb.asset.MESH_CAT)
    elif global_animal == 'scotland_cattle':  # a scottish cow
        animal.set_mesh(udb.asset.MESH_SCOTTLAND_CATTLE)
    # elif global_animal=='longhorn_cattle': # a cow
    #     animal.set_mesh(udb.asset.MESH_CAT)
    # elif global_animal=='longhorn_cattle_v2': # a cow
    #     animal.set_mesh(udb.asset.MESH_CAT)
    env = udb.CvEnv()
    output_dir = output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    # masked_frames = []
    # whitened_frames = []
    # frame_names = []
    # extracted_kpts = []
    # Per-triplet-member accumulators (frame paths, keypoints, file lists).
    p0a0_frame_names = []
    p0a1_frame_names = []
    p1a1_frame_names = []
    p1a0_frame_names = []
    p0a0_extracted_kpts = []
    p0a1_extracted_kpts = []
    p1a1_extracted_kpts = []
    p1a0_extracted_kpts = []
    p0a0_list_whitened = []
    p0a1_list_whitened = []
    p1a1_list_whitened = []
    p1a0_list_whitened = []
    p0a0_list_masked = []
    p0a1_list_masked = []
    p1a1_list_masked = []
    p1a0_list_masked = []
    img_idx = 0
    # Plain white backdrop so the animal is isolated in every render.
    sky_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"  # random.choice(bg_path_list)
    floor_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"  # random.choice(bg_path_list)
    # random.choice(texture_path_list)
    # process_params = random.choices(render_params, k=num_images)
    random.shuffle(render_params)
    for i, param in enumerate(tqdm(render_params)):
        # Draw the 'appearance zero' texture for this triplet.
        random_animal_texture = random.randint(
            0, len(beautiful_textures_path_list) - 1)
        animal_texture = beautiful_textures_path_list[random_animal_texture]
        animal.set_texture(animal_texture)
        mesh, anim, ratio, dist, az, el = param
        filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)
        # Flags mark which triplet members have been captured successfully.
        p0a0, p0a1, p1a1, p1a0 = False, False, False, False
        p0a0_tried = False
        goto_p1a1 = False

        def check_triplet():
            # Closure over the flags above: True once all four are captured.
            return p0a0 and p0a1 and p1a1 and p1a0

        # Update the scene
        env.set_random_light()
        break_while = False
        # print("Here before while.")
        while not check_triplet():
            print("Image idx:", img_idx)
            appearance_zero = beautiful_textures_path_list[
                random_animal_texture]
            for triplet in ["p0a0", "p0a1", "p1a1", "p1a0"]:
                print(triplet, p0a0, p0a1, p1a1, p1a0)
                if triplet == "p0a0":
                    if p0a0_tried and p0a0:
                        # Already captured on an earlier pass; skip ahead.
                        goto_p1a1 = True
                        continue
                    p0a0_tried = True
                elif triplet == "p0a1":
                    if (p0a0_tried and not p0a0):
                        # p0a0 was false so p0a1 will be false as well
                        # we set all of them true to break the while loop
                        p0a0, p0a1, p1a1, p1a0 = True, True, True, True
                        break_while = True
                        # print("Breaking the loop.")
                        break
                    if goto_p1a1:
                        continue
                    # update the appearance but leave the pose as is
                    random_texture = random_animal_texture
                    while random_animal_texture == random_texture:
                        random_texture = random.randint(
                            0, len(beautiful_textures_path_list) - 1)
                    animal_texture = beautiful_textures_path_list[
                        random_texture]
                    animal.set_texture(animal_texture)
                elif triplet == "p1a1":
                    if p1a1:
                        continue
                    if break_while:
                        break
                    # update the pose but leave the appearance as is
                    # print("Setting new pose.")
                    param = random.choice(render_params)
                    mesh, anim, ratio, dist, az, el = param
                elif triplet == "p1a0":
                    # Restore the original appearance for the new pose.
                    animal.set_texture(appearance_zero)
                    if break_while:
                        break
                    if not p1a1:
                        continue
                env.set_floor(floor_texture)
                env.set_sky(sky_texture)
                animal.set_animation(anim, ratio)
                # Capture data
                animal.set_tracking_camera(dist, az, el)
                shift_camera_animal(global_animal)
                img = animal.get_img()
                seg = animal.get_seg()
                depth = animal.get_depth()
                mask = udb.get_mask(seg, [r, g, b])
                # get kpts
                ## get cam_loc and cam_rot
                cam_loc, cam_rot = get_camera_params()
                cam_loc = [float(item) for item in cam_loc.split(' ')]
                cam_rot = [float(item) for item in cam_rot.split(' ')]
                ## transform keypoints
                kp_3d_array = parse_kpts(filename, offset)
                kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array, depth)
                ## transform images and kpts
                img = Image.fromarray(img[:, :, :3])
                seg_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
                seg_mask[mask == False] = 0  # tiger/horse
                seg_mask[mask == True] = 255  # tiger/horse
                # # save imgs
                # Per-animal vertex ids of the 18 tracked keypoints.
                if global_animal == 'tiger':
                    kp_18_id = [
                        2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441,
                        1280, 2201, 1662, 266, 158, 270, 152, 219, 129
                    ]
                elif global_animal == 'horse':
                    kp_18_id = [
                        1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392,
                        564, 993, 726, 1585, 1556, 427, 1548, 967, 877
                    ]
                elif global_animal == 'domestic_sheep':
                    kp_18_id = [
                        2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173,
                        1829, 1422, 821, 624, 580, 622, 575, 1370, 716
                    ]
                elif global_animal == 'hellenic_hound':
                    kp_18_id = [
                        2028, 2580, 912, 878, 977, 1541, 1734, 480, 799,
                        1575, 1446, 602, 755, 673, 780, 1580, 466, 631
                    ]
                elif global_animal == 'elephant':
                    kp_18_id = [
                        1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923,
                        1442, 1041, 1528, 78, 599, 25, 595, 171, 570
                    ]
                else:
                    print(
                        "WARNING THIS ANIMAL HAS NO CORRECT KEYPOINTS YET - DO NOT USE!!"
                    )
                    # Fallback: hellenic_hound ids (known to be wrong here).
                    kp_18_id = [
                        2028, 2580, 912, 878, 977, 1541, 1734, 480, 799,
                        1575, 1446, 602, 755, 673, 780, 1580, 466, 631
                    ]
                if not sum(kpts[kp_18_id, 2]) >= 4:
                    print(triplet, "Not enough keypoints.")
                # Accept the render only if at least 4 keypoints are visible.
                if sum(kpts[kp_18_id, 2]) >= 4:
                    arr = kpts[kp_18_id]
                    # set non visible points to zero
                    arr[arr[:, 2] == 0] = [0, 0, 0]
                    arr = arr[:, :2]
                    # create output folder for images e.g. synthetic_animals/{animal}/{video}
                    sequence_output_dir = output_dir
                    sequence_dir_filename = os.path.join(
                        sequence_output_dir,
                        filename.replace(".png", f"_{triplet}.png"))
                    filename_mask = filename.replace(".png", f"_mask_{triplet}.png")
                    filename_mask_whitened = filename.replace(
                        ".png", f"_mask_white_{triplet}.png")
                    sequence_dir_filename_mask = os.path.join(
                        sequence_output_dir, filename_mask)
                    sequence_dir_filename_mask_whitened = os.path.join(
                        sequence_output_dir, filename_mask_whitened)
                    if not os.path.isdir(sequence_output_dir):
                        os.makedirs(sequence_output_dir)
                    # White-background version: everything outside the mask -> 255.
                    whitened_img = np.array(copy.deepcopy(img))
                    whitened_img[~mask] = 255
                    imageio.imwrite(sequence_dir_filename_mask, seg_mask)
                    imageio.imwrite(sequence_dir_filename_mask_whitened, whitened_img)
                    imageio.imwrite(sequence_dir_filename, img)
                    # Record the capture under the matching triplet member.
                    if triplet == "p0a0":
                        p0a0 = True
                        p0a0_list_whitened.append(
                            os.path.join(sequence_output_dir, filename_mask_whitened))
                        p0a0_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p0a0_frame_names.append(sequence_dir_filename)
                        p0a0_extracted_kpts.append(arr)
                    if triplet == "p0a1":
                        p0a1 = True
                        p0a1_list_whitened.append(
                            os.path.join(sequence_output_dir, filename_mask_whitened))
                        p0a1_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p0a1_frame_names.append(sequence_dir_filename)
                        p0a1_extracted_kpts.append(arr)
                    if triplet == "p1a1":
                        p1a1 = True
                        p1a1_list_whitened.append(
                            os.path.join(sequence_output_dir, filename_mask_whitened))
                        p1a1_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p1a1_frame_names.append(sequence_dir_filename)
                        p1a1_extracted_kpts.append(arr)
                    if triplet == "p1a0":
                        p1a0 = True
                        p1a0_list_whitened.append(
                            os.path.join(sequence_output_dir, filename_mask_whitened))
                        p1a0_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p1a0_frame_names.append(sequence_dir_filename)
                        p1a0_extracted_kpts.append(arr)
            img_idx += 1
            if img_idx == num_images:
                # assert len(p0a0_list_whitened) == len(p0a0_list_masked) == len(p0a0_frame_names) == len(p0a0_extracted_kpts)
                return p0a0_list_whitened, p0a0_list_masked, p0a0_frame_names, \
                    np.array(p0a0_extracted_kpts), p0a1_list_whitened, p0a1_list_masked, \
                    p0a1_frame_names, np.array(p0a1_extracted_kpts), p1a1_list_whitened, p1a1_list_masked, \
                    p1a1_frame_names, np.array(p1a1_extracted_kpts), p1a0_list_whitened, p1a0_list_masked, \
                    p1a0_frame_names, np.array(p1a0_extracted_kpts)
def main(): udb.connect('localhost', 9000) # reset the program map_name = 'AnimalDataCapture' udb.client.request('vset /action/game/level {map_name}'.format(**locals())) udb.client.request('vset /camera/0/location 400 0 300') udb.client.request('vset /camera/0/rotation 0 180 0') val2017_dir = '/data/qiuwch/val2017' bg_path_list = glob_images(val2017_dir) texture_path_list = glob_images(val2017_dir) render_params = load_render_params() # random.shuffle(render_params) num_img = 0 obj_id = 'tiger' animal = udb.CvAnimal(obj_id) animal.spawn() r, g, b = 155, 168, 157 animal.set_mask_color(r, g, b) animal.set_mesh(udb.asset.MESH_TIGER) env = udb.CvEnv() # for delay in range(10): for delay in [0]: output_dir = os.path.join(str(delay), 'generated_data') mask_dir = os.path.join(str(delay), 'masked') if not os.path.isdir(output_dir): os.makedirs(output_dir) if not os.path.isdir(mask_dir): os.makedirs(mask_dir) for i, param in enumerate(tqdm(render_params)): mesh, anim, ratio, dist, az, el = param filename = make_filename(i, mesh, anim, ratio, dist, az, el) # sky_texture = random.choice(bg_path_list) # floor_texture = random.choice(bg_path_list) # animal_texture = random.choice(texture_path_list) # Update the scene # env.set_floor(floor_texture) # env.set_sky(sky_texture) # animal.set_texture(animal_texture) # this will crash animal.set_animation(anim, ratio) # Capture data animal.set_tracking_camera(dist, az, el) img = animal.get_img() seg = animal.get_seg() depth = animal.get_depth() mask = udb.get_mask(seg, [r, g, b]) obj_img = udb.mask_img(img, mask) imageio.imwrite(os.path.join(output_dir, filename + '_img.png'), img) imageio.imwrite(os.path.join(output_dir, filename + '_seg.png'), seg) np.save(os.path.join(output_dir, filename + '_depth.npy'), depth) imageio.imwrite(os.path.join(mask_dir, filename + '_mask.png'), obj_img)
], [ "AnimSequence'/Game/Animal_pack_ultra_2/Animations/tiger_death_anim.tiger_death_anim'", 0.4, 'death' ], [ "AnimSequence'/Game/Animal_pack_ultra_2/Animations/tiger_roar_anim.tiger_roar_anim'", 0.4, 'roar' ], [ "AnimSequence'/Game/Animal_pack_ultra_2/Animations/tiger_run_anim.tiger_run_anim'", 0.5, 'run' ], ] udb.connect('localhost', 9090) map_name = 'AnimalDataCapture' udb.client.request('vset /action/game/level {map_name}'.format(**locals())) udb.client.request('vset /camera/0/location 400 0 300') udb.client.request('vset /camera/0/rotation 0 180 0') obj_id = 'tiger' animal = udb.CvAnimal(obj_id) animal.spawn() r, g, b = 155, 168, 157 animal.set_mask_color(r, g, b) animal.set_mesh(udb.asset.MESH_TIGER) animal.set_tracking_camera(350, 0, -5)
def main():
    """Record cropped human/car action clips with occluders.

    Spawns a human, a car, two occluder cars and a camera; for each
    animation option and camera pose it plays the clip frame by frame,
    crops each frame to the union bounding box of the human/car masks,
    and saves the crops via ``save_info`` under an action label derived
    from the clip's progress (Opening/Closing/Entering/Exiting/...).
    """
    udb.connect('localhost', 9000)
    opt = ObjectView(dict())
    opt.DEBUG = False
    opt.capture = True
    env = CvEnv()
    # human_mask_color = [0, 128, 0]
    # NOTE(review): human_mask_color is read but not assigned here — it is
    # presumably a module-level constant; confirm it exists at import time.
    human = CvCharacter('human')
    human.DEBUG = opt.DEBUG
    human.spawn()
    human.set_mesh(MESH.MAN)
    r, g, b = human_mask_color
    human.set_mask_color(r, g, b)
    car_mask_color = [128, 0, 0]
    car = CvCar('car')
    car.DEBUG = opt.DEBUG
    car.spawn()
    car_type = 'suv'
    car.set_mesh(car_type)
    car.set_loc(-200, -200, 30)
    r, g, b = car_mask_color
    car.set_mask_color(r, g, b)
    cam = CvCamera('cam')
    # traj = pickle.load(open('cam_traj_1004.p','rb'))
    # Single hard-coded camera pose: (x, y, z, pitch, yaw, roll).
    traj = {0: (183.022, -521.394, 896.025, -60, 500.0, 0.0)}
    # Two additional cars placed at random offsets to act as occluders.
    occluder1 = CvCar('occ1')
    occluder1.spawn()
    car_type = 'hatchback'
    occluder1.set_mesh(car_type)
    occluder1.set_loc(np.random.randint(75, 125), np.random.randint(-250, -150), 30)
    occluder2 = CvCar('occ2')
    occluder2.spawn()
    car_type = 'sedan2door'
    occluder2.set_mesh(car_type)
    occluder2.set_loc(np.random.randint(-525, -475), np.random.randint(-250, -150), 30)
    root_dir = '.'
    for seq_id, act_opt in enumerate(anim_paths):
        for cam_id in range(len(traj)):
            # Spawn once in DEBUG to make it visible, then respawn with the
            # configured DEBUG flag at the trajectory pose.
            cam = CvCamera('1')
            cam.DEBUG = True
            cam.spawn()
            cam.set_loc(traj[cam_id][0], traj[cam_id][1], traj[cam_id][2])
            cam.set_rot(traj[cam_id][3], traj[cam_id][4], traj[cam_id][5])
            cam.DEBUG = opt.DEBUG
            cam.spawn()
            frame_count = random.randint(32, 64)
            frames = make_frames(act_opt, frame_count, car_type)
            frame = frames[0]
            mesh_path = frame['human']['mesh_path']
            human.set_mesh(mesh_path)
            if act_opt[0] == MESH.GIRL1:
                # meshes = ["SkeletalMesh'/Game/Girl_01/meshes/girl_01_a.girl_01_a'", "SkeletalMesh'/Game/Girl_01/meshes/girl_01_b.girl_01_b'", \
                #     "SkeletalMesh'/Game/Girl_01/meshes/girl_01_c.girl_01_c'", "SkeletalMesh'/Game/Girl_01/meshes/girl_01_e.girl_01_e'", \
                #     "SkeletalMesh'/Game/Girl_01/meshes/girl_01_f.girl_01_f'", "SkeletalMesh'/Game/Girl_01/meshes/girl_01_h.girl_01_h'", \
                #     "SkeletalMesh'/Game/Girl_01/meshes/girl_01_i.girl_01_i'"]
                # mesh = random.choice(meshes)
                # actor_name = self.id
                # self.request('vset /human/{actor_name}/mesh {mesh}'.format(**locals()))
                # human.set_girl_random_texture()
                pass
            # material_filename = np.random.choice(texture_images)
            # car.set_texture(material_filename)
            # material_filename = np.random.choice(texture_images)
            # occluder1.set_texture(material_filename)
            # material_filename = np.random.choice(texture_images)
            # occluder2.set_texture(material_filename)
            # material_filename = np.random.choice(texture_images)
            # env.set_floor_texture(material_filename)
            # flip the action
            if act_opt[5]:
                if act_opt[1] in [
                        ANIM.OpenEnterClose3, ANIM.ManOpenExitClose3,
                        ANIM.ManOpenEnterClose3
                ]:
                    # these actions flip by y axis
                    human.set_scale(1, -1, 1)
                else:
                    # these actions flip by x axis
                    human.set_scale(-1, 1, 1)
            else:
                human.set_scale(1, 1, 1)
            # Running union bounding box over all frames of the clip.
            Xmin, Ymin, Xmax, Ymax = None, None, None, None
            crop_imgs = []
            crop_masks = []
            for frame_id, frame in enumerate(tqdm(frames)):
                act_label = frame['act_label']
                door_angles = frame['car']['door_angles']
                animation_path = frame['human']['animation_path']
                ratio = frame['human']['ratio']
                human_pose = frame['human']['pose']  # location, rotation
                mesh_path = frame['human']['mesh_path']
                x, y, z, pitch, yaw, roll = human_pose
                fl, fr, bl, br, hood, trunk = door_angles
                car.set_part_angles(fl, fr, bl, br, hood, trunk)
                human.set_animation(animation_path, ratio)
                human.set_loc(x, y, z)
                human.set_rot(pitch, yaw, roll)
                # Get images
                img = cam.get_rgb()
                mask = cam.get_mask()
                # Crop to the human+car masks with 15% padding.
                bb = get_crop(mask, [human_mask_color, car_mask_color], 0.15)
                if bb:
                    xmin, ymin, xmax, ymax = bb
                    crop_imgs.append(img[ymin:ymax, xmin:xmax])
                    crop_masks.append(mask[ymin:ymax, xmin:xmax])
                    # Grow the union box to cover every per-frame crop.
                    if Xmin == None or Xmin > xmin:
                        Xmin = xmin
                    if Ymin == None or Ymin > ymin:
                        Ymin = ymin
                    if Xmax == None or Xmax < xmax:
                        Xmax = xmax
                    if Ymax == None or Ymax < ymax:
                        Ymax = ymax
            # Resize all crops to the union box size and label by clip progress.
            for frame_id in range(len(crop_imgs)):
                # img = crop_imgs[frame_id]
                # mask = crop_masks[frame_id]
                img = cv2.resize(crop_imgs[frame_id], (Xmax - Xmin, Ymax - Ymin),
                                 interpolation=cv2.INTER_CUBIC)
                mask = cv2.resize(crop_masks[frame_id], (Xmax - Xmin, Ymax - Ymin),
                                  interpolation=cv2.INTER_CUBIC)
                if frame_id / len(frames) < 0.3 and act_label in [
                        'OpenExitClose', 'OpenEnterClose'
                ]:
                    save_info(img, mask, root_dir, 'Opening', cam_id, seq_id,
                              frame_id, human_mask_color, car_mask_color)
                if frame_id / len(frames) > 0.6 and act_label in [
                        'OpenExitClose', 'OpenEnterClose'
                ]:
                    save_info(img, mask, root_dir, 'Closing', cam_id, seq_id,
                              frame_id, human_mask_color, car_mask_color)
                if frame_id / len(frames) < 0.5 and act_label == 'Exiting':
                    save_info(img, mask, root_dir, 'Opening', cam_id, seq_id,
                              frame_id, human_mask_color, car_mask_color)
                if frame_id / len(frames) > 0.5 and act_label == 'Entering':
                    save_info(img, mask, root_dir, 'Closing', cam_id, seq_id,
                              frame_id, human_mask_color, car_mask_color)
                if act_label == 'OpenExitClose':
                    if frame_id / len(frames) >= 0.3 and frame_id / len(frames) <= 0.6:
                        save_info(img, mask, root_dir, 'Exiting', cam_id, seq_id,
                                  frame_id, human_mask_color, car_mask_color)
                elif act_label == 'OpenEnterClose':
                    if frame_id / len(frames) >= 0.3 and frame_id / len(frames) <= 0.6:
                        save_info(img, mask, root_dir, 'Entering', cam_id, seq_id,
                                  frame_id, human_mask_color, car_mask_color)
                else:
                    save_info(img, mask, root_dir, act_label, cam_id, seq_id,
                              frame_id, human_mask_color, car_mask_color)
def retrieve(animal, num_videos, num_images, use_random_texture):
    """Generate per-video sequences of synthetic animal images with keypoints.

    For each of ``num_videos`` sequences, optionally fixes one random texture,
    then renders up to ``num_images`` frames over shuffled render parameters
    on a white sky/floor backdrop. Frames with at least 4 visible keypoints
    are written (image, binary mask, white-background image) to
    ``synthetic_animals/{animal}/{video}``.

    Returns:
        (frame_names, masked_frames, whitened_frames, extracted_kpts,
         fid, vids) — parallel lists/arrays of file paths, 2D keypoints,
        per-sequence frame indices and video ids.
    """
    udb.connect('localhost', 9900)
    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')
    random_texture_path = "val2017"
    # this path needs to be on the server!!
    val2017_dir = "/export/home/ffeldman/git/Learning-from-Synthetic-Animals/data_generation/" + random_texture_path  #os.path.abspath(random_texture_path)
    beautiful_textures = "/export/home/ffeldman/git/Learning-from-Synthetic-Animals/data_generation/texture_images/"
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)
    beautiful_textures_path_list = glob_images(beautiful_textures)
    output_path = f"synthetic_animals/{animal}/"
    global_animal = animal
    render_params = load_render_params(global_animal)
    random.shuffle(render_params)
    # Server-side object id is always 'tiger'; `animal` is rebound to the
    # CvAnimal handle below (the animal *name* lives on in global_animal).
    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()
    # acquire offset (spawn z-coordinate, used to shift parsed keypoints)
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]
    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':  #Dog
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)
    # from here todo!
    elif global_animal == 'cat':
        animal.set_mesh(udb.asset.MESH_CAT)
    #elif global_animal=='zebra':
    #    animal.set_mesh(udb.asset.MESH_CAT)
    #elif global_animal=='celtic_wolfhound': # Dog
    #    animal.set_mesh(udb.asset.MESH_CAT)
    #elif global_animal=='pug': # mops -> dog
    #    animal.set_mesh(udb.asset.MESH_CAT)
    #elif global_animal=='cane_corso': # a dog
    #    animal.set_mesh(udb.asset.MESH_CAT)
    elif global_animal == 'scotland_cattle':  # a scottish cow
        animal.set_mesh(udb.asset.MESH_SCOTTLAND_CATTLE)
    #elif global_animal=='longhorn_cattle': # a cow
    #    animal.set_mesh(udb.asset.MESH_CAT)
    #elif global_animal=='longhorn_cattle_v2': # a cow
    #    animal.set_mesh(udb.asset.MESH_CAT)
    env = udb.CvEnv()
    output_dir = output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    masked_frames = []
    whitened_frames = []
    frame_names = []
    extracted_kpts = []
    fid = []  # fram id for sequence dataset
    vids = []  # list of video ids
    vid = -1  # video id
    for video in range(num_videos):
        vid += 1
        img_idx = 0
        # Plain white backdrop so the animal is isolated in every render.
        sky_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"  #random.choice(bg_path_list)
        floor_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"  #random.choice(bg_path_list)
        animal_texture = beautiful_textures_path_list[
            video]  # random.choice(texture_path_list)
        if use_random_texture:
            # Randomly sets the texture for a sequence of images
            animal.set_texture(animal_texture)
        #process_params = random.choices(render_params, k=num_images)
        random.shuffle(render_params)
        for i, param in enumerate(tqdm(render_params)):
            mesh, anim, ratio, dist, az, el = param
            filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)
            # Update the scene
            env.set_random_light()
            env.set_floor(floor_texture)
            env.set_sky(sky_texture)
            animal.set_animation(anim, ratio)
            # if global_animal=='horse':
            #     # set different original textures
            #     _, animal_texture = random.choice(list(udb.asset.animal.horse_material.items()))
            #     _, animal_texture_fur = random.choice(list(udb.asset.animal.horse_material.items()))
            #     animal.set_material(0, animal_texture)
            #     animal.set_material(1, animal_texture_fur)
            # Capture data
            animal.set_tracking_camera(dist, az, el)
            shift_camera_animal(global_animal)
            img = animal.get_img()
            seg = animal.get_seg()
            depth = animal.get_depth()
            mask = udb.get_mask(seg, [r, g, b])
            # get kpts
            ## get cam_loc and cam_rot
            cam_loc, cam_rot = get_camera_params()
            cam_loc = [float(item) for item in cam_loc.split(' ')]
            cam_rot = [float(item) for item in cam_rot.split(' ')]
            ## transform keypoints
            kp_3d_array = parse_kpts(filename, offset)
            kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array, depth)
            ## transform images and kpts
            img = Image.fromarray(img[:, :, :3])
            # Binary segmentation mask: 255 where the animal's mask color matched.
            seg_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
            seg_mask[mask == False] = 0  # tiger/horse
            seg_mask[mask == True] = 255  # tiger/horse
            # # save imgs
            # Per-animal vertex ids of the 18 tracked keypoints.
            if global_animal == 'tiger':
                kp_18_id = [
                    2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441,
                    1280, 2201, 1662, 266, 158, 270, 152, 219, 129
                ]
            elif global_animal == 'horse':
                kp_18_id = [
                    1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392,
                    564, 993, 726, 1585, 1556, 427, 1548, 967, 877
                ]
            elif global_animal == 'domestic_sheep':
                kp_18_id = [
                    2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173,
                    1829, 1422, 821, 624, 580, 622, 575, 1370, 716
                ]
            elif global_animal == 'hellenic_hound':
                kp_18_id = [
                    2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575,
                    1446, 602, 755, 673, 780, 1580, 466, 631
                ]
            elif global_animal == 'elephant':
                kp_18_id = [
                    1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923,
                    1442, 1041, 1528, 78, 599, 25, 595, 171, 570
                ]
            else:
                print(
                    "WARNING THIS ANIMAL HAS NO CORRECT KEYPOINTS YET - DO NOT USE!!"
                )
                # Fallback: hellenic_hound ids (known to be wrong here).
                kp_18_id = [
                    2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575,
                    1446, 602, 755, 673, 780, 1580, 466, 631
                ]
            #if sum(kpts[kp_18_id,2]) < 6:
            #    print(kpts[kpts[:,2]>0].shape)
            #    #import pdb
            #    #pdb.set_trace()
            # Accept the frame only if at least 4 keypoints are visible.
            if sum(kpts[kp_18_id, 2]) >= 4:
                arr = kpts[kp_18_id]
                # set non visible points to zero
                arr[arr[:, 2] == 0] = [0, 0, 0]
                arr = arr[:, :2]
                # create output folder for images e.g. synthetic_animals/{animal}/{video}
                sequence_output_dir = output_dir + str(video)
                sequence_dir_filename = os.path.join(sequence_output_dir, filename)
                filename_mask = filename.replace(".png", "_mask.png")
                filename_mask_whitened = filename.replace(
                    ".png", "_mask_white.png")
                sequence_dir_filename_mask = os.path.join(
                    sequence_output_dir, filename_mask)
                sequence_dir_filename_mask_whitened = os.path.join(
                    sequence_output_dir, filename_mask_whitened)
                if not os.path.isdir(sequence_output_dir):
                    os.makedirs(sequence_output_dir)
                # White-background version: everything outside the mask -> 255.
                whitened_img = np.array(copy.deepcopy(img))
                whitened_img[~mask] = 255
                imageio.imwrite(sequence_dir_filename_mask, seg_mask)
                imageio.imwrite(sequence_dir_filename_mask_whitened, whitened_img)
                imageio.imwrite(sequence_dir_filename, img)
                masked_frames.append(
                    os.path.join(sequence_output_dir, filename_mask))
                whitened_frames.append(
                    os.path.join(sequence_output_dir, filename_mask_whitened))
                frame_names.append(sequence_dir_filename)
                extracted_kpts.append(arr)
                fid.append(img_idx)
                vids.append(vid)
                #imageio.imwrite(os.path.join(sequence_output_dir, filename), seg_mask)
                #np.save(os.path.join(output_dir, filename + '_depth.npy'), depth)
                #np.save(os.path.join(sequence_output_dir, filename + '_kpts.npy'), arr)
                #np.save(os.path.join(output_dir, filename + '_kpts_z.npy'), kpts_z)
                img_idx += 1
                if img_idx > num_images - 1:
                    break
    assert len(frame_names) == len(extracted_kpts) == len(fid) == len(vids)
    return frame_names, masked_frames, whitened_frames, np.array(
        extracted_kpts), np.array(fid), np.array(vids)