def render_from_file(meta_data, max_indx=5):
    """Render the car arrangement described by *meta_data* and dump images.

    Loads camera/object poses from the metadata file, places up to
    ``max_indx`` cars, then writes the lit image, the object mask, an
    overview shot, and per-car segmentation/bbox crops into a
    ``render_res`` directory next to the metadata file.

    Args:
        meta_data: path to a metadata file understood by ``load_meta_data``.
        max_indx: maximum number of cars to place; values <= 0 are treated
            as an offset from the total car count.

    Returns:
        True on completion.
    """
    clear_env()
    (car_shape, car_rot, car_trans, cam_location, cam_rotation,
     img_fov, img_size, kpt_dict) = load_meta_data(meta_data)
    request('vset /camera/1/location {:.6f} {:.6f} {:.6f}'.format(
        cam_location[0], cam_location[1], cam_location[2]))
    request('vset /camera/1/rotation {:.6f} {:.6f} {:.6f}'.format(
        cam_rotation[0], cam_rotation[1], cam_rotation[2]))
    request('vset /camera/1/fov {:.6f}'.format(img_fov))
    car_group = Car_Manager()
    if max_indx <= 0:
        max_indx = max_indx + len(car_shape)
    # One unique annotation color (red channel) per car; used below to
    # locate each car again in the rendered object mask.
    car_color = [[i, 0, 0] for i in range(len(car_trans))]
    print(len(car_shape))
    for i, shape in enumerate(car_shape):
        if i >= max_indx:
            break
        car_group.add_car(shape)
        car_group.trans_car(car_trans[i], "car{}".format(i))
        car_group.rot_car(car_rot[i], "car{}".format(i))
        car_group.annotate_car(car_color[i], "car{}".format(i))
    car_group.flush()
    cwd_root, _ = os.path.split(meta_data)
    base_dir = join(cwd_root, "render_res")
    if DEBUG:
        print("Finished write object pose data ")
    time.sleep(0.1)  # give the engine a moment to settle before capturing
    if DEBUG:
        print("Acquiring image ...")
    # Renders come back RGBA; [:, :, 2::-1] reverses RGB -> BGR for cv2.
    img = read_png(request('vget /camera/1/lit png'))
    cv2.imwrite(join(base_dir, "car_arrangment.png"), img[:, :, 2::-1])
    mask = read_png(request('vget /camera/1/object_mask png'))
    cv2.imwrite(join(base_dir, "car_mask.png"), mask[:, :, 2::-1])
    png = read_png(request('vget /camera/0/lit png'))
    cv2.imwrite(join(base_dir, "overview.png"), png[:, :, 2::-1])
    for i in range(len(car_color)):
        obj_mask = udb.get_mask(mask, car_color[i])
        ys, xs = np.where(obj_mask)
        if len(xs) == 0:
            # Car i is fully occluded or out of view; min()/max() on an
            # empty selection would raise ValueError, so skip it.
            print("car%d not visible in mask, skipping" % i)
            continue
        bbox = [min(xs), max(xs), min(ys), max(ys)]
        print(bbox)
        obj_img = udb.mask_img(img, obj_mask)
        # +1 so the crop includes the bottom-most / right-most mask pixels
        # (slice ends are exclusive).
        bbox_img = img[min(ys):max(ys) + 1, min(xs):max(xs) + 1, :]
        cv2.imwrite(join(base_dir, "car%d_seg.png" % i), obj_img[:, :, 2::-1])
        cv2.imwrite(join(base_dir, "car%d_bbox.png" % i),
                    bbox_img[:, :, 2::-1])
    if DEBUG:
        print("Finished write image to files")
    return True
# Per-animal indices of the 18 tracked keypoints in the mesh vertex array.
# Kept in one table so unsupported animals fail fast with a clear error
# instead of an unbound-variable NameError deep inside the render loop.
_KP_18_IDS = {
    'tiger': [
        2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441, 1280, 2201,
        1662, 266, 158, 270, 152, 219, 129
    ],
    'horse': [
        1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392, 564, 993, 726,
        1585, 1556, 427, 1548, 967, 877
    ],
    'domestic_sheep': [
        2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173, 1829, 1422, 821,
        624, 580, 622, 575, 1370, 716
    ],
    'hellenic_hound': [
        2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575, 1446, 602,
        755, 673, 780, 1580, 466, 631
    ],
    'elephant': [
        1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923, 1442, 1041,
        1528, 78, 599, 25, 595, 171, 570
    ],
}


def main(args):
    """Generate synthetic images, masks, depth, and keypoints for one animal.

    Connects to the UnrealCV server, spawns the requested animal mesh, and
    for each render parameter set captures image/seg/depth plus projected
    keypoints, saving samples that have at least 6 visible keypoints until
    ``args.num_imgs`` images have been written to ``args.output_path``.

    Raises:
        ValueError: if ``args.animal`` has no keypoint table (and thus no
            supported mesh).
    """
    udb.connect('localhost', 9900)
    global_animal = args.animal
    # Fail early on unsupported animals; kp_18_id is loop-invariant.
    if global_animal not in _KP_18_IDS:
        raise ValueError("unsupported animal: {!r}".format(global_animal))
    kp_18_id = _KP_18_IDS[global_animal]

    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')

    val2017_dir = os.path.abspath(args.random_texture_path)
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)

    render_params = load_render_params(global_animal)
    random.shuffle(render_params)

    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()

    # Acquire the vertical offset of the spawned object; parse_kpts uses it
    # to place the keypoints in world space.
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]

    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)

    env = udb.CvEnv()

    output_dir = args.output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    img_idx = 0
    for i, param in enumerate(tqdm(render_params)):
        mesh, anim, ratio, dist, az, el = param
        filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)

        sky_texture = random.choice(bg_path_list)
        floor_texture = random.choice(bg_path_list)
        animal_texture = random.choice(texture_path_list)

        # Update the scene
        env.set_random_light()
        env.set_floor(floor_texture)
        env.set_sky(sky_texture)
        if args.use_random_texture:
            animal.set_texture(animal_texture)
        animal.set_animation(anim, ratio)

        # Capture data
        animal.set_tracking_camera(dist, az, el)
        img = animal.get_img()
        seg = animal.get_seg()
        depth = animal.get_depth()
        mask = udb.get_mask(seg, [r, g, b])

        # Project the 3D keypoints into the current camera frame.
        cam_loc, cam_rot = get_camera_params()
        cam_loc = [float(item) for item in cam_loc.split(' ')]
        cam_rot = [float(item) for item in cam_rot.split(' ')]
        kp_3d_array = parse_kpts(filename, offset)
        kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array, depth)

        # TODO get rid of PIL
        img = Image.fromarray(img[:, :, :3])
        seg_mask = np.zeros((mask.shape[0], mask.shape[1]), dtype=np.uint8)
        seg_mask[mask == False] = 0   # background
        seg_mask[mask == True] = 255  # animal

        # Keep only samples with enough visible keypoints (column 2 is the
        # visibility flag).
        if sum(kpts[kp_18_id, 2]) >= 6:
            imageio.imwrite(os.path.join(output_dir, filename + '_img.png'),
                            img)
            imageio.imwrite(os.path.join(output_dir, filename + '_seg.png'),
                            seg_mask)
            np.save(os.path.join(output_dir, filename + '_depth.npy'), depth)
            np.save(os.path.join(output_dir, filename + '_kpts.npy'), kpts)
            np.save(os.path.join(output_dir, filename + '_kpts_z.npy'),
                    kpts_z)
            img_idx += 1
            if img_idx > args.num_imgs - 1:
                break
def retrieve(animal, num_images, use_random_texture):
    """Capture (p0a0, p0a1, p1a1, p1a0) pose/appearance image triplets.

    For each base sample (pose 0, appearance 0) this also renders the same
    pose with a new appearance (p0a1), a new pose with that appearance
    (p1a1), and the new pose with the original appearance (p1a0).  Frames
    with fewer than 4 visible keypoints abort the current quadruple.

    Returns a 16-tuple of four (whitened, masked, frame-name, kpts-array)
    groups, one per triplet member.
    """
    udb.connect('localhost', 9900)

    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')

    random_texture_path = "val2017"
    # this path needs to be on the server!!
    val2017_dir = ("/export/home/ffeldman/git/Learning-from-Synthetic-Animals/"
                   "data_generation/" + random_texture_path)
    beautiful_textures = ("/export/home/ffeldman/git/"
                          "Learning-from-Synthetic-Animals/data_generation/"
                          "texture_images/")
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)
    beautiful_textures_path_list = glob_images(beautiful_textures)

    output_path = f"synthetic_animals_triplet/{animal}/"
    global_animal = animal
    render_params = load_render_params(global_animal)
    random.shuffle(render_params)

    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()

    # acquire offset (height of the spawned object, used by parse_kpts)
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]

    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':  # Dog
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)
    # from here todo!
    elif global_animal == 'cat':
        animal.set_mesh(udb.asset.MESH_CAT)
    elif global_animal == 'scotland_cattle':  # a scottish cow
        animal.set_mesh(udb.asset.MESH_SCOTTLAND_CATTLE)

    env = udb.CvEnv()

    output_dir = output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # One (whitened, masked, frame-name, kpts) group per triplet member.
    p0a0_frame_names = []
    p0a1_frame_names = []
    p1a1_frame_names = []
    p1a0_frame_names = []
    p0a0_extracted_kpts = []
    p0a1_extracted_kpts = []
    p1a1_extracted_kpts = []
    p1a0_extracted_kpts = []
    p0a0_list_whitened = []
    p0a1_list_whitened = []
    p1a1_list_whitened = []
    p1a0_list_whitened = []
    p0a0_list_masked = []
    p0a1_list_masked = []
    p1a1_list_masked = []
    p1a0_list_masked = []

    img_idx = 0
    # Plain white background so the animal is easy to segment.
    sky_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"
    floor_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"

    random.shuffle(render_params)
    for i, param in enumerate(tqdm(render_params)):
        random_animal_texture = random.randint(
            0, len(beautiful_textures_path_list) - 1)
        animal_texture = beautiful_textures_path_list[random_animal_texture]
        animal.set_texture(animal_texture)

        mesh, anim, ratio, dist, az, el = param
        filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)

        # Flags per triplet member; all four must succeed for one sample.
        p0a0, p0a1, p1a1, p1a0 = False, False, False, False
        p0a0_tried = False
        goto_p1a1 = False

        def check_triplet():
            # True once every member of the quadruple has been captured.
            return p0a0 and p0a1 and p1a1 and p1a0

        # Update the scene
        env.set_random_light()
        break_while = False
        while not check_triplet():
            print("Image idx:", img_idx)
            appearance_zero = beautiful_textures_path_list[
                random_animal_texture]
            for triplet in ["p0a0", "p0a1", "p1a1", "p1a0"]:
                print(triplet, p0a0, p0a1, p1a1, p1a0)
                if triplet == "p0a0":
                    if p0a0_tried and p0a0:
                        # Base frame already captured; skip ahead.
                        goto_p1a1 = True
                        continue
                    p0a0_tried = True
                elif triplet == "p0a1":
                    if (p0a0_tried and not p0a0):
                        # p0a0 was false so p0a1 will be false as well;
                        # set all flags true to break the while loop.
                        p0a0, p0a1, p1a1, p1a0 = True, True, True, True
                        break_while = True
                        break
                    if goto_p1a1:
                        continue
                    # Update the appearance but leave the pose as is:
                    # draw a texture index different from appearance zero.
                    random_texture = random_animal_texture
                    while random_animal_texture == random_texture:
                        random_texture = random.randint(
                            0, len(beautiful_textures_path_list) - 1)
                    animal_texture = beautiful_textures_path_list[
                        random_texture]
                    animal.set_texture(animal_texture)
                elif triplet == "p1a1":
                    if p1a1:
                        continue
                    if break_while:
                        break
                    # Update the pose but leave the appearance as is.
                    param = random.choice(render_params)
                    mesh, anim, ratio, dist, az, el = param
                elif triplet == "p1a0":
                    # Restore the original appearance for the new pose.
                    animal.set_texture(appearance_zero)
                    if break_while:
                        break
                    if not p1a1:
                        continue

                env.set_floor(floor_texture)
                env.set_sky(sky_texture)
                animal.set_animation(anim, ratio)

                # Capture data
                animal.set_tracking_camera(dist, az, el)
                shift_camera_animal(global_animal)
                img = animal.get_img()
                seg = animal.get_seg()
                depth = animal.get_depth()
                mask = udb.get_mask(seg, [r, g, b])

                # Project the 3D keypoints into the current camera frame.
                cam_loc, cam_rot = get_camera_params()
                cam_loc = [float(item) for item in cam_loc.split(' ')]
                cam_rot = [float(item) for item in cam_rot.split(' ')]
                kp_3d_array = parse_kpts(filename, offset)
                kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array,
                                              depth)

                img = Image.fromarray(img[:, :, :3])
                seg_mask = np.zeros((mask.shape[0], mask.shape[1]),
                                    dtype=np.uint8)
                seg_mask[mask == False] = 0   # background
                seg_mask[mask == True] = 255  # animal

                if global_animal == 'tiger':
                    kp_18_id = [
                        2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441,
                        1280, 2201, 1662, 266, 158, 270, 152, 219, 129
                    ]
                elif global_animal == 'horse':
                    kp_18_id = [
                        1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392,
                        564, 993, 726, 1585, 1556, 427, 1548, 967, 877
                    ]
                elif global_animal == 'domestic_sheep':
                    kp_18_id = [
                        2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173,
                        1829, 1422, 821, 624, 580, 622, 575, 1370, 716
                    ]
                elif global_animal == 'hellenic_hound':
                    kp_18_id = [
                        2028, 2580, 912, 878, 977, 1541, 1734, 480, 799,
                        1575, 1446, 602, 755, 673, 780, 1580, 466, 631
                    ]
                elif global_animal == 'elephant':
                    kp_18_id = [
                        1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923,
                        1442, 1041, 1528, 78, 599, 25, 595, 171, 570
                    ]
                else:
                    print(
                        "WARNING THIS ANIMAL HAS NO CORRECT KEYPOINTS YET - DO NOT USE!!"
                    )
                    kp_18_id = [
                        2028, 2580, 912, 878, 977, 1541, 1734, 480, 799,
                        1575, 1446, 602, 755, 673, 780, 1580, 466, 631
                    ]

                if not sum(kpts[kp_18_id, 2]) >= 4:
                    print(triplet, "Not enough keypoints.")
                if sum(kpts[kp_18_id, 2]) >= 4:
                    arr = kpts[kp_18_id]
                    # set non visible points to zero
                    arr[arr[:, 2] == 0] = [0, 0, 0]
                    arr = arr[:, :2]

                    sequence_output_dir = output_dir
                    sequence_dir_filename = os.path.join(
                        sequence_output_dir,
                        filename.replace(".png", f"_{triplet}.png"))
                    filename_mask = filename.replace(".png",
                                                     f"_mask_{triplet}.png")
                    filename_mask_whitened = filename.replace(
                        ".png", f"_mask_white_{triplet}.png")
                    sequence_dir_filename_mask = os.path.join(
                        sequence_output_dir, filename_mask)
                    sequence_dir_filename_mask_whitened = os.path.join(
                        sequence_output_dir, filename_mask_whitened)
                    if not os.path.isdir(sequence_output_dir):
                        os.makedirs(sequence_output_dir)

                    # White out everything that is not the animal.
                    whitened_img = np.array(copy.deepcopy(img))
                    whitened_img[~mask] = 255
                    imageio.imwrite(sequence_dir_filename_mask, seg_mask)
                    imageio.imwrite(sequence_dir_filename_mask_whitened,
                                    whitened_img)
                    imageio.imwrite(sequence_dir_filename, img)

                    if triplet == "p0a0":
                        p0a0 = True
                        p0a0_list_whitened.append(
                            os.path.join(sequence_output_dir,
                                         filename_mask_whitened))
                        p0a0_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p0a0_frame_names.append(sequence_dir_filename)
                        p0a0_extracted_kpts.append(arr)
                    if triplet == "p0a1":
                        p0a1 = True
                        p0a1_list_whitened.append(
                            os.path.join(sequence_output_dir,
                                         filename_mask_whitened))
                        p0a1_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p0a1_frame_names.append(sequence_dir_filename)
                        p0a1_extracted_kpts.append(arr)
                    if triplet == "p1a1":
                        p1a1 = True
                        p1a1_list_whitened.append(
                            os.path.join(sequence_output_dir,
                                         filename_mask_whitened))
                        p1a1_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p1a1_frame_names.append(sequence_dir_filename)
                        p1a1_extracted_kpts.append(arr)
                    if triplet == "p1a0":
                        p1a0 = True
                        p1a0_list_whitened.append(
                            os.path.join(sequence_output_dir,
                                         filename_mask_whitened))
                        p1a0_list_masked.append(
                            os.path.join(sequence_output_dir, filename_mask))
                        p1a0_frame_names.append(sequence_dir_filename)
                        p1a0_extracted_kpts.append(arr)

        img_idx += 1
        if img_idx == num_images:
            return p0a0_list_whitened, p0a0_list_masked, p0a0_frame_names, \
                np.array(p0a0_extracted_kpts), p0a1_list_whitened, p0a1_list_masked, \
                p0a1_frame_names, np.array(p0a1_extracted_kpts), p1a1_list_whitened, p1a1_list_masked, \
                p1a1_frame_names, np.array(p1a1_extracted_kpts), p1a0_list_whitened, p1a0_list_masked, \
                p1a0_frame_names, np.array(p1a0_extracted_kpts)
def main():
    """Render tiger images/segmentations/depth for every render parameter.

    Minimal capture loop: no background or texture randomization, a single
    delay setting, output written under ``0/generated_data`` and
    ``0/masked``.
    """
    udb.connect('localhost', 9000)

    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 400 0 300')
    udb.client.request('vset /camera/0/rotation 0 180 0')

    val2017_dir = '/data/qiuwch/val2017'
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)

    render_params = load_render_params()
    num_img = 0

    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()
    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    animal.set_mesh(udb.asset.MESH_TIGER)
    env = udb.CvEnv()

    for delay in [0]:
        output_dir = os.path.join(str(delay), 'generated_data')
        mask_dir = os.path.join(str(delay), 'masked')
        if not os.path.isdir(output_dir):
            os.makedirs(output_dir)
        if not os.path.isdir(mask_dir):
            os.makedirs(mask_dir)

        for i, param in enumerate(tqdm(render_params)):
            mesh, anim, ratio, dist, az, el = param
            filename = make_filename(i, mesh, anim, ratio, dist, az, el)

            # NOTE: scene randomization (floor/sky/texture) deliberately
            # disabled here -- animal.set_texture() crashes this setup.
            animal.set_animation(anim, ratio)

            # Capture data
            animal.set_tracking_camera(dist, az, el)
            img = animal.get_img()
            seg = animal.get_seg()
            depth = animal.get_depth()
            mask = udb.get_mask(seg, [r, g, b])
            obj_img = udb.mask_img(img, mask)

            imageio.imwrite(os.path.join(output_dir, filename + '_img.png'),
                            img)
            imageio.imwrite(os.path.join(output_dir, filename + '_seg.png'),
                            seg)
            np.save(os.path.join(output_dir, filename + '_depth.npy'), depth)
            imageio.imwrite(os.path.join(mask_dir, filename + '_mask.png'),
                            obj_img)
def render_from_file(meta_data, max_indx=5):
    """Render a fixed two-car arrangement and dump images and crops.

    Unlike the generic variant, this places exactly two instances of the
    first shape from the metadata: one mirrored (scale -1 on x) at the
    JSON-specified model translation, and one keypoint-library copy at the
    origin.  Outputs go to a ``render_res`` directory next to *meta_data*.

    Args:
        meta_data: path to a metadata file understood by ``load_meta_data``.
        max_indx: unused in this variant; kept for signature compatibility.

    Returns:
        True on completion.
    """
    clear_env()
    (car_shape, car_rot, car_trans, cam_location, cam_rotation,
     img_fov, img_size, kpt_dict) = load_meta_data(meta_data)
    request('vset /camera/1/location {:.6f} {:.6f} {:.6f}'.format(
        cam_location[0], cam_location[1], cam_location[2]))
    request('vset /camera/1/rotation {:.6f} {:.6f} {:.6f}'.format(
        cam_rotation[0], cam_rotation[1], cam_rotation[2]))
    request('vset /camera/1/fov {:.6f}'.format(img_fov))
    car_group = Car_Manager()

    # This variant always renders exactly two cars.
    num_obj = 2
    # One unique annotation color (red channel) per car, used to find each
    # car again in the object mask below.
    car_color = [[i, 0, 0] for i in range(num_obj)]
    shape = car_shape[0]

    with open("example/car_activity/render_shape.json", "r") as f:
        data_render = json.load(f)
    trans_model = data_render[shape]["trans"]

    # Car 0: mirrored copy placed at the (scaled) model translation.
    i = 0
    car_group.add_car(shape, scale=(-1, 1, 1))
    print(trans_model)
    car_group.trans_car([
        trans_model[0] * 500, trans_model[2] * 500,
        200 + trans_model[1] * 500
    ], "car{}".format(i))
    car_group.rot_car([0, 90, 0], "car{}".format(i))
    car_group.annotate_car(car_color[i], "car{}".format(i))

    # Car 1: keypoint-library copy at the origin (z = 200).
    i = 1
    car_group.add_car(shape, shape_lib="ShapenetKeypoint")
    car_group.trans_car([0, 0, 200], "car{}".format(i))
    car_group.rot_car([0, 0, 0], "car{}".format(i))
    car_group.annotate_car(car_color[i], "car{}".format(i))
    car_group.flush()

    cwd_root, _ = os.path.split(meta_data)
    base_dir = join(cwd_root, "render_res")
    if DEBUG:
        print("Finished write object pose data ")
    time.sleep(0.1)  # give the engine a moment to settle before capturing
    if DEBUG:
        print("Acquiring image ...")
    # Renders come back RGBA; [:, :, 2::-1] reverses RGB -> BGR for cv2.
    img = read_png(request('vget /camera/1/lit png'))
    cv2.imwrite(join(base_dir, "car_arrangment.png"), img[:, :, 2::-1])
    mask = read_png(request('vget /camera/1/object_mask png'))
    cv2.imwrite(join(base_dir, "car_mask.png"), mask[:, :, 2::-1])
    png = read_png(request('vget /camera/0/lit png'))
    cv2.imwrite(join(base_dir, "overview.png"), png[:, :, 2::-1])
    for i in range(len(car_color)):
        obj_mask = udb.get_mask(mask, car_color[i])
        ys, xs = np.where(obj_mask)
        if len(xs) == 0:
            # Car i is fully occluded or out of view; min()/max() on an
            # empty selection would raise ValueError, so skip it.
            print("car%d not visible in mask, skipping" % i)
            continue
        bbox = [min(xs), max(xs), min(ys), max(ys)]
        print(bbox)
        obj_img = udb.mask_img(img, obj_mask)
        # +1 so the crop includes the bottom-most / right-most mask pixels
        # (slice ends are exclusive).
        bbox_img = img[min(ys):max(ys) + 1, min(xs):max(xs) + 1, :]
        cv2.imwrite(join(base_dir, "car%d_seg.png" % i), obj_img[:, :, 2::-1])
        cv2.imwrite(join(base_dir, "car%d_bbox.png" % i),
                    bbox_img[:, :, 2::-1])
    if DEBUG:
        print("Finished write image to files")
    return True
# Script fragment: reload the capture level, spawn a tiger, and save one
# image/seg/depth/mask set per animation parameter.  Relies on `map_name`
# and `params` being defined earlier in the script.
udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
udb.client.request('vset /camera/0/location 400 0 300')
udb.client.request('vset /camera/0/rotation 0 180 0')

obj_id = 'tiger'
animal = udb.CvAnimal(obj_id)
animal.spawn()
r, g, b = 155, 168, 157
animal.set_mask_color(r, g, b)
animal.set_mesh(udb.asset.MESH_TIGER)
animal.set_tracking_camera(350, 0, -5)

for i, param in enumerate(tqdm(params)):
    anim, ratio, key = param
    animal.set_animation(anim, ratio)
    print(animal.get_animation_frames(anim))

    # Capture one full set of modalities for this animation frame.
    img = animal.get_img()
    seg = animal.get_seg()
    depth = animal.get_depth()
    mask = udb.get_mask(seg, [r, g, b])
    obj_img = udb.mask_img(img, mask)

    imageio.imwrite('%s_im.png' % key, img)
    imageio.imwrite('%s_seg.png' % key, seg)
    np.save('%s_depth.npy' % key, depth)
    imageio.imwrite('%s_mask.png' % key, obj_img)
def retrieve(animal, num_videos, num_images, use_random_texture):
    """Capture sequences of synthetic animal frames, one texture per video.

    For each of ``num_videos`` sequences, renders up to ``num_images``
    frames (pose/camera sampled from the render parameters) against a
    plain white background, keeping only frames with at least 4 visible
    keypoints.  Masks, whitened images, and raw frames are written under
    ``synthetic_animals/{animal}/{video}``.

    Returns:
        (frame_names, masked_frames, whitened_frames, kpts_array,
        frame_id_array, video_id_array).
    """
    udb.connect('localhost', 9900)

    # reset the program
    map_name = 'AnimalDataCapture'
    udb.client.request('vset /action/game/level {map_name}'.format(**locals()))
    udb.client.request('vset /camera/0/location 500 0 300')
    udb.client.request('vset /camera/0/rotation -20 180 0')

    random_texture_path = "val2017"
    # this path needs to be on the server!!
    val2017_dir = ("/export/home/ffeldman/git/Learning-from-Synthetic-Animals/"
                   "data_generation/" + random_texture_path)
    beautiful_textures = ("/export/home/ffeldman/git/"
                          "Learning-from-Synthetic-Animals/data_generation/"
                          "texture_images/")
    bg_path_list = glob_images(val2017_dir)
    texture_path_list = glob_images(val2017_dir)
    beautiful_textures_path_list = glob_images(beautiful_textures)

    output_path = f"synthetic_animals/{animal}/"
    global_animal = animal
    render_params = load_render_params(global_animal)
    random.shuffle(render_params)

    obj_id = 'tiger'
    animal = udb.CvAnimal(obj_id)
    animal.spawn()

    # acquire offset (height of the spawned object, used by parse_kpts)
    obj_loc = udb.client.request('vget /object/tiger/location')
    obj_loc = [float(v) for v in obj_loc.split(' ')]
    offset = obj_loc[2]

    r, g, b = 155, 168, 157
    animal.set_mask_color(r, g, b)
    if global_animal == 'tiger':
        animal.set_mesh(udb.asset.MESH_TIGER)
    elif global_animal == 'horse':
        animal.set_mesh(udb.asset.MESH_HORSE)
    elif global_animal == 'domestic_sheep':
        animal.set_mesh(udb.asset.MESH_DOMESTIC_SHEEP)
    elif global_animal == 'hellenic_hound':  # Dog
        animal.set_mesh(udb.asset.MESH_HELLENIC_HOUND)
    elif global_animal == 'elephant':
        animal.set_mesh(udb.asset.MESH_ELEPHANT)
    # from here todo!
    elif global_animal == 'cat':
        animal.set_mesh(udb.asset.MESH_CAT)
    elif global_animal == 'scotland_cattle':  # a scottish cow
        animal.set_mesh(udb.asset.MESH_SCOTTLAND_CATTLE)

    env = udb.CvEnv()

    output_dir = output_path
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    masked_frames = []
    whitened_frames = []
    frame_names = []
    extracted_kpts = []
    fid = []   # fram id for sequence dataset
    vids = []  # list of video ids
    vid = -1   # video id

    for video in range(num_videos):
        vid += 1
        img_idx = 0
        # Plain white background so the animal is easy to segment.
        sky_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"
        floor_texture = "/export/home/ffeldman/Masterarbeit/data/white.jpg"
        animal_texture = beautiful_textures_path_list[video]
        if use_random_texture:
            # Randomly sets the texture for a sequence of images
            animal.set_texture(animal_texture)

        random.shuffle(render_params)
        for i, param in enumerate(tqdm(render_params)):
            mesh, anim, ratio, dist, az, el = param
            filename = make_filename(img_idx, mesh, anim, ratio, dist, az, el)

            # Update the scene
            env.set_random_light()
            env.set_floor(floor_texture)
            env.set_sky(sky_texture)
            animal.set_animation(anim, ratio)

            # Capture data
            animal.set_tracking_camera(dist, az, el)
            shift_camera_animal(global_animal)
            img = animal.get_img()
            seg = animal.get_seg()
            depth = animal.get_depth()
            mask = udb.get_mask(seg, [r, g, b])

            # Project the 3D keypoints into the current camera frame.
            cam_loc, cam_rot = get_camera_params()
            cam_loc = [float(item) for item in cam_loc.split(' ')]
            cam_rot = [float(item) for item in cam_rot.split(' ')]
            kp_3d_array = parse_kpts(filename, offset)
            kpts, kpts_z = transform_kpts(cam_loc, cam_rot, kp_3d_array,
                                          depth)

            img = Image.fromarray(img[:, :, :3])
            seg_mask = np.zeros((mask.shape[0], mask.shape[1]),
                                dtype=np.uint8)
            seg_mask[mask == False] = 0   # background
            seg_mask[mask == True] = 255  # animal

            if global_animal == 'tiger':
                kp_18_id = [
                    2679, 2753, 2032, 1451, 1287, 3085, 1632, 229, 1441,
                    1280, 2201, 1662, 266, 158, 270, 152, 219, 129
                ]
            elif global_animal == 'horse':
                kp_18_id = [
                    1718, 1684, 1271, 1634, 1650, 1643, 1659, 925, 392, 564,
                    993, 726, 1585, 1556, 427, 1548, 967, 877
                ]
            elif global_animal == 'domestic_sheep':
                kp_18_id = [
                    2046, 1944, 1267, 1875, 1900, 1868, 1894, 687, 173,
                    1829, 1422, 821, 624, 580, 622, 575, 1370, 716
                ]
            elif global_animal == 'hellenic_hound':
                kp_18_id = [
                    2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575,
                    1446, 602, 755, 673, 780, 1580, 466, 631
                ]
            elif global_animal == 'elephant':
                kp_18_id = [
                    1980, 2051, 1734, 2122, 2155, 2070, 2166, 681, 923,
                    1442, 1041, 1528, 78, 599, 25, 595, 171, 570
                ]
            else:
                print(
                    "WARNING THIS ANIMAL HAS NO CORRECT KEYPOINTS YET - DO NOT USE!!"
                )
                kp_18_id = [
                    2028, 2580, 912, 878, 977, 1541, 1734, 480, 799, 1575,
                    1446, 602, 755, 673, 780, 1580, 466, 631
                ]

            # Keep only frames with enough visible keypoints (column 2 is
            # the visibility flag).
            if sum(kpts[kp_18_id, 2]) >= 4:
                arr = kpts[kp_18_id]
                # set non visible points to zero
                arr[arr[:, 2] == 0] = [0, 0, 0]
                arr = arr[:, :2]

                # create output folder for images e.g. synthetic_animals/{animal}/{video}
                sequence_output_dir = output_dir + str(video)
                sequence_dir_filename = os.path.join(sequence_output_dir,
                                                     filename)
                filename_mask = filename.replace(".png", "_mask.png")
                filename_mask_whitened = filename.replace(
                    ".png", "_mask_white.png")
                sequence_dir_filename_mask = os.path.join(
                    sequence_output_dir, filename_mask)
                sequence_dir_filename_mask_whitened = os.path.join(
                    sequence_output_dir, filename_mask_whitened)
                if not os.path.isdir(sequence_output_dir):
                    os.makedirs(sequence_output_dir)

                # White out everything that is not the animal.
                whitened_img = np.array(copy.deepcopy(img))
                whitened_img[~mask] = 255
                imageio.imwrite(sequence_dir_filename_mask, seg_mask)
                imageio.imwrite(sequence_dir_filename_mask_whitened,
                                whitened_img)
                imageio.imwrite(sequence_dir_filename, img)

                masked_frames.append(
                    os.path.join(sequence_output_dir, filename_mask))
                whitened_frames.append(
                    os.path.join(sequence_output_dir,
                                 filename_mask_whitened))
                frame_names.append(sequence_dir_filename)
                extracted_kpts.append(arr)
                fid.append(img_idx)
                vids.append(vid)

                img_idx += 1
                if img_idx > num_images - 1:
                    break

    assert len(frame_names) == len(extracted_kpts) == len(fid) == len(vids)
    return frame_names, masked_frames, whitened_frames, np.array(
        extracted_kpts), np.array(fid), np.array(vids)