def recover_original_aligned_filename(input_path):
    io.log_info("Recovering original aligned filename...")

    files = []
    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_err(f"{filepath.name} is not a dfl image file")
            continue

        files += [[filepath, None, dflimg.get_source_filename(), False]]

    files_len = len(files)
    for i in io.progress_bar_generator(range(files_len), "Sorting"):
        fp, _, sf, converted = files[i]

        if converted:
            continue

        sf_stem = Path(sf).stem

        files[i][1] = fp.parent / (sf_stem + '_0' + fp.suffix)
        files[i][3] = True
        c = 1

        for j in range(i + 1, files_len):
            fp_j, _, sf_j, converted_j = files[j]
            if converted_j:
                continue

            if sf_j == sf:
                files[j][1] = fp_j.parent / (sf_stem + ('_%d' % c) + fp_j.suffix)
                files[j][3] = True
                c += 1

    # First pass: move every file to a temporary name to avoid collisions,
    # then rename the temporary files to their final destinations.
    for file in io.progress_bar_generator(files, "Renaming", leave=False):
        fs, _, _, _ = file
        dst = fs.parent / (fs.stem + '_tmp' + fs.suffix)
        try:
            fs.rename(dst)
        except:
            io.log_err('failed to rename %s' % fs.name)

    for file in io.progress_bar_generator(files, "Renaming"):
        fs, fd, _, _ = file
        fs = fs.parent / (fs.stem + '_tmp' + fs.suffix)
        try:
            fs.rename(fd)
        except:
            io.log_err('failed to rename %s' % fs.name)
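# Usage sketch (an assumption, following how the other helpers in this collection are
# called): point it at an aligned-faces folder and every face is renamed back to
# "<source frame stem>_<n><ext>", where n counts faces extracted from the same frame.
#
#   recover_original_aligned_filename(Path("workspace/data_src/aligned"))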
def save_faceset_metadata_folder(input_path):
    input_path = Path(input_path)

    metadata_filepath = input_path / 'meta.dat'

    io.log_info(f"Saving metadata to {str(metadata_filepath)}\r\n")

    d = {}
    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)
        dflimg = DFLIMG.load(filepath)
        if dflimg is None:
            io.log_err(f"{filepath.name} is not a dfl image file")
            continue

        dfl_dict = dflimg.getDFLDictData()
        d[filepath.name] = (dflimg.get_shape(), dfl_dict)

    try:
        with open(metadata_filepath, "wb") as f:
            f.write(pickle.dumps(d))
    except:
        raise Exception('cannot save %s' % metadata_filepath)

    io.log_info("Now you can edit the images.")
    io.log_info("!!! Keep the same filenames in the folder.")
    io.log_info("You can change the size of the images; the restore process will downscale them back to the original size.")
    io.log_info("After that, use restore metadata.")
def remove_xseg_labels(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    io.log_info(f'Processing folder {input_path}')
    io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
    io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
    io.log_info('!!! WARNING : LABELED XSEG POLYGONS WILL BE REMOVED FROM THE FRAMES !!!')
    io.input_str('Press enter to continue.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    files_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        if dflimg.has_seg_ie_polys():
            dflimg.set_seg_ie_polys(None)
            dflimg.save()
            files_processed += 1

    io.log_info(f'Files processed: {files_processed}')
def fetch_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    output_path = input_path.parent / (input_path.name + '_xseg')
    output_path.mkdir(exist_ok=True, parents=True)

    io.log_info(f'Copying faces containing XSeg polygons to {output_path.name}/ folder.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    files_copied = []
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        ie_polys = dflimg.get_seg_ie_polys()

        if ie_polys.has_polys():
            files_copied.append(filepath)
            shutil.copy(str(filepath), str(output_path / filepath.name))

    io.log_info(f'Files copied: {len(files_copied)}')

    is_delete = io.input_bool("\r\nDelete original files?", True)
    if is_delete:
        for filepath in files_copied:
            Path(filepath).unlink()
def auto_extract_to_img():
    workspace = os.path.join(get_root_path(), "workspace")

    data_dst = None
    for f in os.listdir(workspace):
        if f.startswith("data_dst_"):
            data_dst = f
            break
    io.log_info(data_dst)

    video_name = None
    if data_dst is not None:
        name = "_".join(data_dst.split("_")[8:])
        print(name)
        for f in os.listdir(os.path.join(workspace, "../trash_workspace")):
            if f.startswith(name):
                video_name = f
                break
    io.log_info(video_name)

    if video_name is not None:
        video_path = os.path.join(workspace, "../trash_workspace", video_name)
        data_dst_path = os.path.join(workspace, data_dst)
        io.log_info(video_path)
        io.log_info(data_dst_path)

        # Remove previously extracted frames before re-extracting the video
        for f in io.progress_bar_generator(os.listdir(data_dst_path), "Remove"):
            if f.endswith(".jpg") or f.endswith(".png"):
                os.remove(os.path.join(data_dst_path, f))

        dfl.dfl_extract_video(video_path, data_dst_path)
def generator():
    for sample in io.progress_bar_generator(packed_samples, "Collecting alignments"):
        filepath = Path(sample.filename)
        yield filepath, DFLIMG.load(filepath, loader_func=lambda x: sample.read_raw_file())
def sort_by_hist_dissim(input_path):
    io.log_info("Sorting by histogram dissimilarity...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)

        image = cv2_imread(str(filepath))

        if dflimg is not None:
            face_mask = LandmarksProcessor.get_image_hull_mask(image.shape, dflimg.get_landmarks())
            image = (image * face_mask).astype(np.uint8)

        img_list.append([str(filepath),
                         cv2.calcHist([cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)], [0], None, [256], [0, 256]),
                         0])

    img_list = HistDissimSubprocessor(img_list).run()

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

    return img_list, trash_img_list
def extract_vggface2_dataset(input_dir, device_args={}):
    multi_gpu = device_args.get('multi_gpu', False)
    cpu_only = device_args.get('cpu_only', False)

    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('Input directory not found. Please ensure it exists.')

    bb_csv = input_path / 'loose_bb_train.csv'
    if not bb_csv.exists():
        raise ValueError('loose_bb_train.csv not found. Please ensure it exists.')

    bb_lines = bb_csv.read_text().split('\n')
    bb_lines.pop(0)

    bb_dict = {}
    for line in bb_lines:
        name, l, t, w, h = line.split(',')
        name = name[1:-1]
        l, t, w, h = [int(x) for x in (l, t, w, h)]
        bb_dict[name] = (l, t, w, h)

    output_path = input_path.parent / (input_path.name + '_out')

    dir_names = pathex.get_all_dir_names(input_path)

    if not output_path.exists():
        output_path.mkdir(parents=True, exist_ok=True)

    data = []
    for dir_name in io.progress_bar_generator(dir_names, "Collecting"):
        cur_input_path = input_path / dir_name
        cur_output_path = output_path / dir_name

        if not cur_output_path.exists():
            cur_output_path.mkdir(parents=True, exist_ok=True)

        input_path_image_paths = pathex.get_image_paths(cur_input_path)

        for filename in input_path_image_paths:
            filename_path = Path(filename)

            name = filename_path.parent.name + '/' + filename_path.stem
            if name not in bb_dict:
                continue

            l, t, w, h = bb_dict[name]
            if min(w, h) < 128:
                continue

            data += [ExtractSubprocessor.Data(filename=filename,
                                              rects=[(l, t, l + w, t + h)],
                                              landmarks_accurate=False,
                                              force_output_path=cur_output_path)]

    face_type = FaceType.fromString('full_face')

    io.log_info('Performing 2nd pass...')
    data = ExtractSubprocessor(data, 'landmarks', 256, face_type,
                               debug_dir=None, multi_gpu=multi_gpu, cpu_only=cpu_only,
                               manual=False).run()

    io.log_info('Performing 3rd pass...')
    ExtractSubprocessor(data, 'final', 256, face_type,
                        debug_dir=None, multi_gpu=multi_gpu, cpu_only=cpu_only,
                        manual=False, final_output_path=None).run()
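# For reference, rows of loose_bb_train.csv are expected to look roughly like the line
# below (an inference from the parsing above, which strips one character from each end
# of the name and reads left, top, width, height as integers):
#
#   "n000002/0001_01",60,60,79,109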
def add_landmarks_debug_images(input_path):
    io.log_info("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_err(f"{filepath.name} is not a dfl image file")
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            face_type = FaceType.fromString(dflimg.get_face_type())

            if face_type == FaceType.MARK_ONLY:
                rect = dflimg.get_source_rect()
                LandmarksProcessor.draw_rect_landmarks(img, rect, face_landmarks, FaceType.FULL)
            else:
                LandmarksProcessor.draw_landmarks(img, face_landmarks, transparent_mask=True)

            output_file = '{}{}'.format(str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
def save_faceset_metadata_folder(input_path):
    input_path = Path(input_path)

    metadata_filepath = input_path / 'meta.dat'

    io.log_info(f"Saving metadata to {str(metadata_filepath)}\r\n")

    d = {}
    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f"{filepath} is not a dfl image file")
            continue

        dfl_dict = dflimg.get_dict()
        d[filepath.name] = (dflimg.get_shape(), dfl_dict)

    try:
        with open(metadata_filepath, "wb") as f:
            f.write(pickle.dumps(d))
    except:
        raise Exception('cannot save %s' % metadata_filepath)

    io.log_info("Now you can edit the images.")
    io.log_info("!!! Keep the same filenames in the folder.")
    io.log_info("You can change the size of the images; the restore process will downscale them back to the original size.")
    io.log_info("After that, use the restore metadata script.")
def sort_by_face_source_rect_size(input_path):
    io.log_info("Sorting by face rect size...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_err(f"{filepath.name} is not a dfl image file")
            trash_img_list.append([str(filepath)])
            continue

        source_rect = dflimg.get_source_rect()
        rect_area = mathlib.polygon_area(np.array(source_rect[[0, 2, 2, 0]]).astype(np.float32),
                                         np.array(source_rect[[1, 1, 3, 3]]).astype(np.float32))

        img_list.append([str(filepath), rect_area])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, trash_img_list
def add_landmarks_debug_images(input_path):
    io.log_info("Adding landmarks debug images...")

    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Processing"):
        filepath = Path(filepath)

        img = cv2_imread(str(filepath))

        dflimg = DFLIMG.load(filepath)
        if dflimg is None:
            io.log_err("%s is not a dfl image file" % filepath.name)
            continue

        if img is not None:
            face_landmarks = dflimg.get_landmarks()
            LandmarksProcessor.draw_landmarks(img, face_landmarks,
                                              transparent_mask=True,
                                              ie_polys=IEPolys.load(dflimg.get_ie_polys()))

            output_file = '{}{}'.format(str(Path(str(input_path)) / filepath.stem), '_debug.jpg')
            cv2_imwrite(output_file, img, [int(cv2.IMWRITE_JPEG_QUALITY), 50])
def load(sample_type, samples_path, subdirs=False):
    """
    Return MPSharedList of samples
    """
    samples_cache = SampleLoader.samples_cache
    if str(samples_path) not in samples_cache.keys():
        samples_cache[str(samples_path)] = [None] * SampleType.QTY

    samples = samples_cache[str(samples_path)]

    if sample_type == SampleType.IMAGE:
        if samples[sample_type] is None:
            samples[sample_type] = [Sample(filename=filename)
                                    for filename in io.progress_bar_generator(
                                        pathex.get_image_paths(samples_path, subdirs=subdirs), "Loading")]

    elif sample_type == SampleType.FACE:
        if samples[sample_type] is None:
            result = None
            try:
                result = samplelib.PackedFaceset.load(samples_path)
            except:
                io.log_err(f"Error occurred while loading samplelib.PackedFaceset.load {str(samples_path)}, {traceback.format_exc()}")

            if result is not None:
                io.log_info(f"Loaded {len(result)} packed faces from {samples_path}")

            if result is None:
                result = SampleLoader.load_face_samples(pathex.get_image_paths(samples_path, subdirs=subdirs))

            samples[sample_type] = MPSharedList(result)

    elif sample_type == SampleType.FACE_TEMPORAL_SORTED:
        result = SampleLoader.load(SampleType.FACE, samples_path)
        result = SampleLoader.upgradeToFaceTemporalSortedSamples(result)
        samples[sample_type] = MPSharedList(result)

    return samples[sample_type]
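# Usage sketch (hedged; assumes SampleLoader and SampleType are imported as in the rest
# of samplelib). Results are cached per path, so a second call with the same folder
# returns the same shared list instead of reloading it:
#
#   face_samples = SampleLoader.load(SampleType.FACE, Path("workspace/data_src/aligned"))
#   io.log_info(f"{len(face_samples)} face samples available")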
def apply_celebamaskhq(input_dir):
    input_path = Path(input_dir)

    img_path = input_path / 'aligned'
    mask_path = input_path / 'mask'

    if not img_path.exists():
        raise ValueError(f'{str(img_path)} directory not found. Please ensure it exists.')

    CelebAMASKHQSubprocessor(pathex.get_image_paths(img_path),
                             pathex.get_image_paths(mask_path, subdirs=True)).run()

    return

    # The code below is unreachable legacy code kept from before the logic was moved
    # into CelebAMASKHQSubprocessor; note that `masks_files` is never defined here.
    paths_to_extract = []
    for filename in io.progress_bar_generator(pathex.get_image_paths(img_path), desc="Processing"):
        filepath = Path(filename)
        dflimg = DFLIMG.load(filepath)

        if dflimg is not None:
            paths_to_extract.append(filepath)

            image_to_face_mat = dflimg.get_image_to_face_mat()
            src_filename = dflimg.get_source_filename()

            #img = cv2_imread(filename)
            h, w, c = dflimg.get_shape()

            fanseg_mask = LandmarksProcessor.get_image_hull_mask((h, w, c), dflimg.get_landmarks())

            idx_name = '%.5d' % int(src_filename.split('.')[0])
            idx_files = [x for x in masks_files if idx_name in x]

            skin_files = [x for x in idx_files if 'skin' in x]
            eye_glass_files = [x for x in idx_files if 'eye_g' in x]

            for files, is_invert in [(skin_files, False), (eye_glass_files, True)]:
                if len(files) > 0:
                    mask = cv2_imread(files[0])
                    mask = mask[..., 0]
                    mask[mask == 255] = 1
                    mask = mask.astype(np.float32)
                    mask = cv2.resize(mask, (1024, 1024))
                    mask = cv2.warpAffine(mask, image_to_face_mat, (w, h), cv2.INTER_LANCZOS4)

                    if not is_invert:
                        fanseg_mask *= mask[..., None]
                    else:
                        fanseg_mask *= (1 - mask[..., None])

            #cv2.imshow("", (fanseg_mask*255).astype(np.uint8))
            #cv2.waitKey(0)
            dflimg.embed_and_set(filename, fanseg_mask=fanseg_mask)
def convert_png_to_jpg_folder(input_path):
    input_path = Path(input_path)

    io.log_info("Converting PNG to JPG...\r\n")

    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Converting"):
        filepath = Path(filepath)
        convert_png_to_jpg_file(filepath)
def remove_ie_polys_folder(input_path):
    input_path = Path(input_path)

    io.log_info("Removing ie_polys...\r\n")

    for filepath in io.progress_bar_generator(pathex.get_image_paths(input_path), "Removing"):
        filepath = Path(filepath)
        remove_ie_polys_file(filepath)
def dev_resave_pngs(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, image_extensions=['.png'],
                                          subdirs=True, return_Path_class=True)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        cv2_imwrite(filepath, cv2_imread(filepath))
def final_process(input_path, img_list, trash_img_list):
    if len(trash_img_list) != 0:
        parent_input_path = input_path.parent
        trash_path = parent_input_path / (input_path.stem + '_trash')
        trash_path.mkdir(exist_ok=True)

        io.log_info("Trashing %d items to %s" % (len(trash_img_list), str(trash_path)))

        for filename in pathex.get_image_paths(trash_path):
            Path(filename).unlink()

        for i in io.progress_bar_generator(range(len(trash_img_list)), "Moving trash", leave=False):
            src = Path(trash_img_list[i][0])
            dst = trash_path / src.name
            try:
                src.rename(dst)
            except:
                io.log_info('failed to trash %s' % src.name)

        io.log_info("")

    if len(img_list) != 0:
        # First pass: prefix files with their sorted index to avoid name collisions,
        # second pass: rename them to the final "<index><ext>" form.
        for i in io.progress_bar_generator([*range(len(img_list))], "Renaming", leave=False):
            src = Path(img_list[i][0])
            dst = input_path / ('%.5d_%s' % (i, src.name))
            try:
                src.rename(dst)
            except:
                io.log_info('failed to rename %s' % src.name)

        for i in io.progress_bar_generator([*range(len(img_list))], "Renaming"):
            src = Path(img_list[i][0])
            src = input_path / ('%.5d_%s' % (i, src.name))
            dst = input_path / ('%.5d%s' % (i, src.suffix))
            try:
                src.rename(dst)
            except:
                io.log_info('failed to rename %s' % src.name)
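# Sketch of how a sorter and final_process() are chained (the actual dispatcher lives
# elsewhere in the project, so this is only an illustration):
#
#   img_list, trash_img_list = sort_by_hist_dissim(input_path)
#   final_process(input_path, img_list, trash_img_list)
#
# Kept images end up renamed to 00000.<ext>, 00001.<ext>, ... in sorted order, while
# rejected ones are moved into a sibling "<input>_trash" folder.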
def merge(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem + '.json')
        if json_filepath.exists():
            dflimg = DFLIMG.load(filepath)
            if dflimg is not None and dflimg.has_data():
                try:
                    json_dict = json.loads(json_filepath.read_text())

                    seg_ie_polys = IEPolys()
                    total_points = 0

                    # include polys first
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and shape['label'] != '0':
                            seg_ie_poly = seg_ie_polys.add(1)
                            for x, y in shape['points']:
                                seg_ie_poly.add(int(x), int(y))
                                total_points += 1

                    # exclude polys
                    for shape in json_dict['shapes']:
                        if shape['shape_type'] == 'polygon' and shape['label'] == '0':
                            seg_ie_poly = seg_ie_polys.add(0)
                            for x, y in shape['points']:
                                seg_ie_poly.add(int(x), int(y))
                                total_points += 1

                    if total_points == 0:
                        io.log_info(f"No points found in {json_filepath}, skipping.")
                        continue

                    dflimg.set_seg_ie_polys(seg_ie_polys.dump())
                    dflimg.save()

                    json_filepath.unlink()

                    images_processed += 1
                except:
                    io.log_err(f"err {filepath}, {traceback.format_exc()}")
                    return

    io.log_info(f"Images processed: {images_processed}")
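# A minimal labelme-style JSON that this merge() can consume (an illustration based on
# the fields read above; the label "0" marks exclusion polygons, anything else inclusion):
#
# {
#     "shapes": [
#         {"label": "1", "shape_type": "polygon", "points": [[10, 10], [120, 15], [80, 90]]},
#         {"label": "0", "shape_type": "polygon", "points": [[40, 40], [60, 40], [50, 60]]}
#     ]
# }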
def sort_by_black(input_path):
    io.log_info("Sorting by amount of black pixels...")

    img_list = []
    for x in io.progress_bar_generator(pathex.get_image_paths(input_path), "Loading"):
        img = cv2_imread(x)
        img_list.append([x, img[(img == 0)].size])

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=False)

    return img_list, []
def sort_by_hue(input_path):
    io.log_info("Sorting by hue...")

    img_list = [[x, np.mean(cv2.cvtColor(cv2_imread(x), cv2.COLOR_BGR2HSV)[..., 0].flatten())]
                for x in io.progress_bar_generator(pathex.get_image_paths(input_path), "Loading")]

    io.log_info("Sorting...")
    img_list = sorted(img_list, key=operator.itemgetter(1), reverse=True)

    return img_list, []
def sync_trash(trash_path, pool_path):
    import shutil

    count = 0
    for f in io.progress_bar_generator(os.listdir(trash_path), "Trash Files"):
        if f.endswith(".jpg") or f.endswith(".png"):
            img_name = f.split("_")[-1]
            img_path = os.path.join(pool_path, img_name)
            dst_path = os.path.join(trash_path, "_origin")
            if os.path.exists(img_path):
                shutil.move(img_path, dst_path)
                count += 1

    io.log_info("Trash %d" % count)
def get_pitch_yaw_roll(input_path, r=0.05):
    import os
    import numpy as np
    import cv2
    from shutil import copyfile
    from pathlib import Path
    from utils import Path_utils
    from utils.DFLPNG import DFLPNG
    from utils.DFLJPG import DFLJPG
    from facelib import LandmarksProcessor
    from joblib import Subprocessor
    import multiprocessing
    from imagelib import estimate_sharpness

    io.log_info("Sorting by face pitch...")

    img_list = []
    trash_img_list = []
    for filepath in io.progress_bar_generator(Path_utils.get_image_paths(input_path), "Loading"):
        filepath = Path(filepath)

        if filepath.suffix == '.png':
            dflimg = DFLPNG.load(str(filepath))
        elif filepath.suffix == '.jpg':
            dflimg = DFLJPG.load(str(filepath))
        else:
            dflimg = None

        if dflimg is None:
            io.log_err("%s is not a dfl image file" % filepath.name)
            trash_img_list.append([str(filepath)])
            continue

        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(dflimg.get_landmarks())
        img_list.append([str(filepath), pitch, yaw, roll])

    img_list.sort(key=lambda item: item[1])

    with open(os.path.join(input_path, "_pitch_yaw_roll.csv"), "w") as f:
        for i in img_list:
            f.write("%s,%f,%f,%f\n" % (os.path.basename(i[0]), i[1], i[2], i[3]))

    # Plot the pitch/yaw distribution as a scatter image
    import cv
    width = 800
    img = cv.cv_new((width, width))
    xs = [i[1] for i in img_list]
    ys = [i[2] for i in img_list]
    cs = [(128, 128, 128)] * len(xs)
    rs = [int(r * width / 2)] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cs = [(0xcc, 0x66, 0x33)] * len(xs)
    rs = [2] * len(xs)
    cv.cv_scatter(img, xs, ys, [-1, 1], [-1, 1], cs, rs)
    cv.cv_save(img, os.path.join(input_path, "_pitch_yaw_roll.bmp"))

    return img_list
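# The CSV written above can be inspected with plain Python; columns are filename, pitch,
# yaw, roll, in the order the loop writes them:
#
#   import csv
#   with open(os.path.join(input_path, "_pitch_yaw_roll.csv")) as f:
#       for name, pitch, yaw, roll in csv.reader(f):
#           print(name, float(pitch), float(yaw), float(roll))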
def select(exists_path, pool_path, div=200):
    # First, plot the (pitch, yaw) of images already present in exists_path
    import cv
    import dfl
    import random

    width = 800
    trans = cv.trans_fn(-1, 1, 0, width)
    img = cv.cv_new((width, width))
    for f in io.progress_bar_generator(os.listdir(exists_path), "Existing Imgs"):
        if f.endswith(".png") or f.endswith(".jpg"):
            img_path = os.path.join(exists_path, f)
            dfl_img = dfl.dfl_img_load(img_path)
            pitch, yaw, _ = dfl.dfl_estimate_pitch_yaw_roll(dfl_img)
            pitch = trans(pitch)
            yaw = trans(yaw)
            cv.cv_circle(img, (pitch, yaw), (128, 128, 128), width / div, -1)

    time_str = get_time_str()

    import shutil
    pool_files = list(os.listdir(pool_path))
    # random.shuffle(pool_files)
    count = 0
    for f in io.progress_bar_generator(pool_files, os.path.basename(pool_path)):
        if f.endswith(".png") or f.endswith(".jpg"):
            img_path = os.path.join(pool_path, f)
            dfl_img = dfl.dfl_img_load(img_path)
            pitch, yaw, _ = dfl.dfl_estimate_pitch_yaw_roll(dfl_img)
            pitch = trans(pitch)
            yaw = trans(yaw)
            # Copy the candidate only if its (pitch, yaw) cell is still empty (white)
            if sum(img[yaw][pitch]) == 255 * 3:
                dst = os.path.join(exists_path, "%s_%s" % (time_str, f))
                shutil.copy(img_path, dst)
                count += 1
                cv.cv_circle(img, (pitch, yaw), (0xcc, 0x66, 0x33), width / div, -1)

    cv.cv_save(img, os.path.join(exists_path, "_select.bmp"))
    io.log_info("Copy %d, Total %d" % (count, len(pool_files)))
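# Usage sketch (hedged): exists_path holds the curated faces, pool_path the candidates.
# Only candidates whose (pitch, yaw) cell on the coverage map is still empty get copied
# in, so each run fills gaps in pose coverage rather than duplicating existing poses.
#
#   select("workspace/data_src/aligned", "workspace/pool/aligned", div=200)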
def dev_test1(input_dir):
    input_path = Path(input_dir)

    dir_names = pathex.get_all_dir_names(input_path)

    for dir_name in io.progress_bar_generator(dir_names, desc="Processing"):
        img_paths = pathex.get_image_paths(input_path / dir_name)
        for filename in img_paths:
            filepath = Path(filename)

            dflimg = DFLIMG.load(filepath)
            if dflimg is None:
                raise ValueError(f'{filepath} is not a dfl image file')
def remove_xseg(input_path):
    if not input_path.exists():
        raise ValueError(f'{input_path} not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        dflimg = DFLIMG.load(filepath)
        if dflimg is None or not dflimg.has_data():
            io.log_info(f'{filepath} is not a DFLIMG')
            continue

        dflimg.set_xseg_mask(None)
        dflimg.save()
def merge(input_path, target_path):
    import os
    import shutil

    for f in os.listdir(input_path):
        sub_path = os.path.join(input_path, f)
        if os.path.abspath(sub_path) == os.path.abspath(target_path):
            continue
        if os.path.isdir(sub_path):
            time_str = get_time_str()
            for img in io.progress_bar_generator(os.listdir(sub_path), f):
                if img.endswith(".png") or img.endswith(".jpg"):
                    img_path = os.path.join(sub_path, img)
                    dst_path = os.path.join(target_path, "%s_%s" % (time_str, img))
                    shutil.move(img_path, dst_path)
def reload_src():
    nonlocal src_img_list
    nonlocal src_cur_list

    src_img_list = []
    if src_path:
        for f in io.progress_bar_generator(os.listdir(src_path), "Loading"):
            if f.endswith(".jpg") or f.endswith(".png"):
                fpath = os.path.join(src_path, f)
                dfl_img = dfl.dfl_img_load(fpath)
                p, y, _ = dfl.dfl_estimate_pitch_yaw_roll(dfl_img)
                # NOTE: `fno` is not defined in this snippet; it is presumably the frame
                # number taken from the enclosing scope or parsed from the filename.
                src_img_list.append([fno, p, y])
                src_img_list.append([fno, p, -y])
        src_img_list = np.array(src_img_list, "float")
        src_cur_list = src_img_list
def split(input_path, target_path, batch=3000):
    import os
    import shutil

    count = 0
    if not os.path.exists(target_path):
        os.mkdir(target_path)

    dst_dir = os.path.join(target_path, "split_%03d" % int(count / batch))
    for f in io.progress_bar_generator(os.listdir(input_path), "Process"):
        if not f.endswith(".jpg") and not f.endswith(".png"):
            continue
        if count % batch == 0:
            dst_dir = os.path.join(target_path, "split_%03d" % int(count / batch))
            os.mkdir(dst_dir)
        src = os.path.join(input_path, f)
        shutil.move(src, dst_dir)
        count += 1
def split(input_dir):
    input_path = Path(input_dir)
    if not input_path.exists():
        raise ValueError('input_dir not found. Please ensure it exists.')

    images_paths = pathex.get_image_paths(input_path, return_Path_class=True)

    images_processed = 0
    for filepath in io.progress_bar_generator(images_paths, "Processing"):
        json_filepath = filepath.parent / (filepath.stem + '.json')

        dflimg = DFLIMG.load(filepath)
        if dflimg is not None:
            try:
                seg_ie_polys = dflimg.get_seg_ie_polys()
                if seg_ie_polys is not None:
                    json_dict = {}
                    json_dict['version'] = "4.2.9"
                    json_dict['flags'] = {}
                    json_dict['shapes'] = []
                    json_dict['imagePath'] = filepath.name
                    json_dict['imageData'] = None

                    for poly_type, points_list in seg_ie_polys:
                        shape_dict = {}
                        shape_dict['label'] = str(poly_type)
                        shape_dict['points'] = points_list
                        shape_dict['group_id'] = None
                        shape_dict['shape_type'] = 'polygon'
                        shape_dict['flags'] = {}
                        json_dict['shapes'].append(shape_dict)

                    json_filepath.write_text(json.dumps(json_dict, indent=4))

                    dflimg.remove_seg_ie_polys()
                    dflimg.embed_and_set(filepath)
                    images_processed += 1
            except:
                io.log_err(f"err {filepath}, {traceback.format_exc()}")
                return

    io.log_info(f"Images processed: {images_processed}")
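# Round-trip sketch (hedged): this split() writes one labelme-style .json per labeled
# face and strips the embedded polygons; after the .json files are edited, the merge()
# variant above embeds them back into the faces and deletes the .json files.
#
#   split("workspace/data_dst/aligned")
#   # ... edit the generated .json files in an external labeling tool ...
#   merge("workspace/data_dst/aligned")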