def detect_img_folder(img_folder, out_folder, yolo):
    """
    Run YOLO detection on every image in a folder and save annotated copies.

    Already-processed images (a "<name>.d.jpg" exists in out_folder) are
    skipped, so the function can be re-run to resume an interrupted pass.

    :param img_folder: folder of input images
    :param out_folder: folder receiving the "<name>.d.jpg" outputs
    :param yolo: detector wrapper exposing detect_image() and close_session()
    :return: None
    """
    mkdir_if_not_exist(out_folder)
    path_list, name_list = traverse_dir_files(img_folder)
    print_info('图片数: %s' % len(path_list))
    # Names already present in out_folder mark images as done (resume support).
    _, imgs_names = traverse_dir_files(out_folder)
    count = 0
    for path, name in zip(path_list, name_list):
        if path.endswith('.gif'):
            # GIFs are skipped entirely.
            continue
        out_name = name + '.d.jpg'
        if out_name in imgs_names:
            print_info('已检测: %s' % name)
            continue
        print_info('检测图片: %s' % name)
        try:
            image = Image.open(path)
            out_file = os.path.join(
                ROOT_DIR, 'face', 'yolov3', 'output_data', 'logAll_res.txt')
            r_image = yolo.detect_image(image, ('logAll/' + name), out_file)
            r_image.save(os.path.join(out_folder, name + '.d.jpg'))
        except Exception as e:
            # Best-effort: log the failure and keep processing the rest.
            print(e)
        count += 1
        if count % 100 == 0:
            print_info('已检测: %s' % count)
    yolo.close_session()
def format_img_and_anno(img_folder):
    """
    Pair image files with their annotation files in the same folder.

    :param img_folder: folder containing '.jpg' images and '.xml' annotations
    :return: dict mapping base name -> (img_path, anno_path); either slot is
             None when the counterpart file is missing
    """
    file_paths, file_names = traverse_dir_files(img_folder)
    img_dict = dict()  # base name -> (image path, annotation path)
    for file_path, file_name in zip(file_paths, file_names):
        if file_name.endswith('.jpg'):
            # Strip only the suffix; str.replace would also hit inner
            # occurrences (e.g. "a.jpg.b.jpg").
            name = file_name[:-len('.jpg')]
            _, anno_p = img_dict.get(name, (None, None))
            img_dict[name] = (file_path, anno_p)
        elif file_name.endswith('.xml'):
            name = file_name[:-len('.xml')]
            img_p, _ = img_dict.get(name, (None, None))
            img_dict[name] = (img_p, file_path)
    print_info('图片数: {}'.format(len(img_dict.keys())))
    return img_dict
def data_processor_testV3():
    """
    Build trainA/testA splits (256x256 JPEGs) from the SCUT-FBP5500 images.

    Shuffles the source paths, writes the first `train_size` processed images
    into trainA and the next `test_size` into testA, then stops.
    """
    dataset_dir = os.path.join(DATASET_DIR, 's2a4zsV4')
    # person_path = "/Users/wangchenlong/Downloads/seeprettyface_asian_stars"
    person_path = "/Users/wangchenlong/Downloads/SCUT-FBP5500_v2/Images"
    paths_list, names_list = traverse_dir_files(person_path)
    trainA_dir = os.path.join(dataset_dir, 'trainA')
    testA_dir = os.path.join(dataset_dir, 'testA')
    mkdir_if_not_exist(trainA_dir)
    mkdir_if_not_exist(testA_dir)
    train_size = 5000  # training-set size
    test_size = 100  # test-set size
    print_size = 100  # progress-log interval
    count = 0
    random.shuffle(paths_list)
    for path in paths_list:
        img = cv2.imread(path)
        if img is None:
            # Unreadable / non-image file: skip instead of crashing in resize.
            continue
        img = cv2.resize(img, (256, 256))
        # First train_size images go to trainA, the remainder to testA.
        out_dir = trainA_dir if count < train_size else testA_dir
        file_name = os.path.join(out_dir, u"c_{:04d}.jpg".format(count + 1))
        cv2.imwrite(file_name, img)
        count += 1
        if count % print_size == 0:
            print(u'[Info] run count: {}'.format(count))
        if count == train_size + test_size:
            break
    print('[Info] 数据处理完成')
def write_frames_to_vid(self, frames_folder, out_vid_path):
    """Assemble all frame images found in a folder into one video file."""
    frame_paths, _ = traverse_dir_files(frames_folder)
    frames = [cv2.imread(p) for p in frame_paths]
    # fps: 29, h: 1280, w: 720
    write_video(out_vid_path, frames, 29, 1280, 720)
def get_livevqc_index(feature_dir):
    """Return the sorted, de-duplicated list of name prefixes (text before
    the first '_') of all '.npy' feature files under feature_dir."""
    from utils.project_utils import traverse_dir_files
    _, names_list = traverse_dir_files(feature_dir, ext='.npy')
    return sorted({name.split('_')[0] for name in names_list})
def save_video():
    """Re-encode the frame images under DATA_DIR/frames into "norm.out.mp4",
    copying fps and frame size from "normal_video.mp4"."""
    img_dir = os.path.join(DATA_DIR, 'frames')
    paths_list, _ = traverse_dir_files(img_dir)
    frame_list = [cv.imread(p) for p in paths_list]
    from_video = "normal_video.mp4"
    # Source video is opened only to read its fps / frame-size metadata.
    # NOTE(review): cap is never released here — confirm init_vid's contract.
    cap, n_frame, fps, h, w = init_vid(from_video)
    video_path = "norm.out.mp4"
    write_video(video_path, frame_list, fps, h, w)
def folder_test():
    """
    Run gaze prediction on every image under IMGS_DIR/tests and write the
    annotated result images to IMGS_DIR/tests-out.
    """
    img_dir = os.path.join(IMGS_DIR, 'tests')
    out_dir = os.path.join(IMGS_DIR, 'tests-out')
    # Ensure the output folder exists; cv2.imwrite fails silently otherwise.
    mkdir_if_not_exist(out_dir)
    paths_list, names_list = traverse_dir_files(img_dir)
    gp = GazePredicter()
    for img_path, name in zip(paths_list, names_list):
        print('[Info] 处理图像: {}'.format(name))
        face_dict = gp.predict_path(img_path)
        img_op = face_dict['img_draw']
        out_path = os.path.join(out_dir, name + ".out.jpg")
        cv2.imwrite(out_path, img_op)
def _resize_and_split(self, paths_list, train_dir, test_dir, prefix,
                      train_size, test_size, print_size):
    """
    Resize images to 256x256 and split them into train/test folders.

    The first `train_size` successfully processed images are written to
    `train_dir`, the next `test_size` to `test_dir`, named
    "<prefix>_0001.jpg", "<prefix>_0002.jpg", ... Unreadable images are
    logged and skipped without advancing the counter.
    """
    count = 0
    for path in paths_list:
        try:
            img = cv2.imread(path)
            img = cv2.resize(img, (256, 256))
            if count < train_size:
                file_name = os.path.join(
                    train_dir, u"{}_{:04d}.jpg".format(prefix, count + 1))
            else:
                file_name = os.path.join(
                    test_dir, u"{}_{:04d}.jpg".format(prefix, count + 1))
            cv2.imwrite(file_name, img)
            count += 1
        except Exception as e:
            print('[Error] error {}'.format(e))
            continue
        if count % print_size == 0:
            print(u'[Info] run count: {}'.format(count))
        if count == train_size + test_size:
            break

def process_dataset(self):
    """
    Build the s2a4zsV1 dataset splits.

    Shuffles the person and cartoon image lists with a fixed seed (so the
    split is reproducible), then writes 256x256 copies into trainA/testA
    (persons) and trainB/testB (cartoons).
    """
    c_paths_list, c_names_list = traverse_dir_files(self.cartoons_path)
    p_paths_list, p_names_list = traverse_dir_files(self.persons_path)
    random.seed(47)  # deterministic shuffle -> reproducible split
    random.shuffle(c_paths_list)
    random.shuffle(p_paths_list)
    train_size = 1500  # training-set size
    test_size = 100  # test-set size
    print_size = 100  # progress-log interval

    train_person_dir = os.path.join(ROOT_DIR, 'dataset', 's2a4zsV1', 'trainA')
    test_person_dir = os.path.join(ROOT_DIR, 'dataset', 's2a4zsV1', 'testA')
    mkdir_if_not_exist(train_person_dir)
    mkdir_if_not_exist(test_person_dir)
    print('[Info] 真人样本总数: {}'.format(len(p_paths_list)))
    self._resize_and_split(p_paths_list, train_person_dir, test_person_dir,
                           'p', train_size, test_size, print_size)

    train_cartoon_dir = os.path.join(ROOT_DIR, 'dataset', 's2a4zsV1', 'trainB')
    test_cartoon_dir = os.path.join(ROOT_DIR, 'dataset', 's2a4zsV1', 'testB')
    mkdir_if_not_exist(train_cartoon_dir)
    mkdir_if_not_exist(test_cartoon_dir)
    print('[Info] 卡通样本总数: {}'.format(len(c_paths_list)))
    self._resize_and_split(c_paths_list, train_cartoon_dir, test_cartoon_dir,
                           'c', train_size, test_size, print_size)
    print('[Info] 数据处理完成')