def resize_one_img_xml(save_dir, resize_ratio, img_xml):
    """Resize one training image and its VOC xml annotation by resize_ratio.

    img_xml is an (image_path, xml_path) pair; the results are written to
    save_dir/JPEGImages and save_dir/Annotations under the xml's basename.
    """
    image_path, xml_path = img_xml
    dete_res = DeteRes(xml_path)

    # skip pairs with a missing file or an annotation without any object
    if not (os.path.exists(image_path) and os.path.exists(xml_path)):
        return
    if len(dete_res) < 1:
        return

    # np.fromfile + imdecode reads paths that plain cv2.imread can't (non-ascii)
    image = cv2.imdecode(np.fromfile(image_path, dtype=np.uint8), 1)
    height, width = image.shape[:2]
    new_height = int(height * resize_ratio)
    new_width = int(width * resize_ratio)
    image_resized = cv2.resize(image, (new_width, new_height))

    # scale every box, clamping one pixel inside the new image border
    for obj in dete_res:
        obj.x1 = max(1, int(obj.x1 * resize_ratio))
        obj.x2 = min(new_width - 1, int(obj.x2 * resize_ratio))
        obj.y1 = max(1, int(obj.y1 * resize_ratio))
        obj.y2 = min(new_height - 1, int(obj.y2 * resize_ratio))

    stem = FileOperationUtil.bang_path(xml_path)[1]

    # save resized image
    save_img_path = os.path.join(save_dir, 'JPEGImages', stem + '.jpg')
    cv2.imwrite(save_img_path, image_resized)

    # save rescaled xml, pointing at the new image location
    dete_res.img_path = save_img_path
    save_xml_path = os.path.join(save_dir, 'Annotations', stem + '.xml')
    dete_res.save_to_xml(save_xml_path)
def resize_train_data(img_dir, xml_dir, save_dir, resize_ratio=0.5):
    """Resize a whole VOC-style training set (images and xml annotations).

    Iteration is driven by the xml files; each xml is paired with the
    same-named .jpg in img_dir and handed to resize_one_img_xml.
    """
    os.makedirs(os.path.join(save_dir, 'Annotations'), exist_ok=True)
    os.makedirs(os.path.join(save_dir, 'JPEGImages'), exist_ok=True)

    for index, each_xml_path in enumerate(
            FileOperationUtil.re_all_file(xml_dir, endswitch=['.xml'])):
        print(index, each_xml_path)
        each_img_path = os.path.join(
            img_dir, FileOperationUtil.bang_path(each_xml_path)[1] + '.jpg')
        resize_one_img_xml(save_dir, resize_ratio, (each_img_path, each_xml_path))
# Landmark detection / face alignment example (commented out in the original):
# predictor models can be downloaded from http://dlib.net/files/
# predictor = dlib.shape_predictor(r'C:\Users\14271\Desktop\del\shape_predictor_68_face_landmarks.dat')
# shape = predictor(img, det); my_img = dlib.get_face_chip(img, shape, size=150)

# Detect faces in every image and save the boxes as VOC xml annotations.
img_dir = r"/home/ldq/20220112_img_from_iphone/img"
save_dir = r"/home/ldq/20220112_img_from_iphone/xml"

detector = dlib.get_frontal_face_detector()

for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    print(each_img_path)
    each_dete_res = DeteRes(assign_img_path=each_img_path)
    # dlib expects RGB, OpenCV loads BGR
    rgb_img = cv2.cvtColor(cv2.imread(each_img_path), cv2.COLOR_BGR2RGB)
    # upsample once (second arg = 1) to catch smaller faces
    for face_rect in detector(rgb_img, 1):
        each_dete_res.add_obj(
            x1=int(face_rect.left()), y1=int(face_rect.top()),
            x2=int(face_rect.right()), y2=int(face_rect.bottom()), tag='face')
    each_dete_res.save_to_xml(
        os.path.join(save_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml'))
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os

from JoTools.utils.FileOperationUtil import FileOperationUtil

# Delete every jpg in img_dir that has no same-named labelme .json beside it.
img_dir = r"C:\Users\14271\Desktop\for_unet_data"

index = 0
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG']):
    # avoid shadowing the builtins `dir` / unused suffix by indexing directly
    stem = FileOperationUtil.bang_path(each_img_path)[1]
    each_json_path = os.path.join(img_dir, stem + ".json")
    if not os.path.exists(each_json_path):
        index += 1
        print(index, each_img_path)
        print(index, each_json_path)
        os.remove(each_img_path)
from PIL import Image
import os

# FIX: FileOperationUtil was used below but never imported in this file,
# which raises NameError on the first loop line (import path matches the
# project's other scripts).
from JoTools.utils.FileOperationUtil import FileOperationUtil
# NOTE(review): SegmentRes is also used below but never imported here; its
# module path is not visible from this file — add the correct import or this
# script still raises NameError at `a = SegmentRes()`.

"""
* the ideal mask encodes each object instance with a distinct int value
* the next-best mask encodes every object with the same int value
"""

img_dir = r"C:\Users\14271\Desktop\mask_test_res_019\img"
mask_dir = r"C:\Users\14271\Desktop\mask_test_res_019\mask"
save_dir = r"C:\Users\14271\Desktop\mask_test_res_019\json"

# For every jpg, find its "<stem>_mask.png" mask and convert it to a
# labelme-style json next to save_dir.
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg']):
    stem = FileOperationUtil.bang_path(each_img_path)[1]
    each_mask_path = os.path.join(mask_dir, stem + '_mask.png')
    each_save_path = os.path.join(save_dir, stem + '.json')

    if not os.path.exists(each_mask_path):
        print("* mask 文件不存在")
        continue
    print(each_mask_path)

    a = SegmentRes()
    a.img_path = each_img_path
    # each_mask_point_numb caps the polygon point count per object
    a.get_segment_obj_from_mask(each_mask_path, each_mask_point_numb=60)
    # NOTE(review): "save_to_josn" looks like a typo but presumably matches the
    # project's actual API name — confirm against SegmentRes before renaming.
    a.save_to_josn(each_save_path)
if isinstance(img_mat, str): img_mat = cv2.imdecode(np.fromfile(img_mat, dtype=np.uint8), 1) # rect = four_point_transform(img_mat, np.array(four_points)) # if save_path: # cv2.imwrite(save_path, rect) cv2.imencode('.jpg', rect)[1].tofile(save_path) return rect if __name__ == "__main__": xml_point_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" img_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" save_dir = r"C:\Users\14271\Desktop\jizhuangxiang\crop" for each_json_path in FileOperationUtil.re_all_file(xml_point_dir, endswitch=['.json']): img_path = os.path.join( img_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.png') four_points = JsonUtil.load_data_from_json_file( each_json_path)["shapes"][0]['points'] each_save_path = os.path.join( save_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.jpg') # transform transform_img_with_4_point(img_path, four_points, each_save_path)
import numpy as np
from labelme import utils
import labelme
import cv2
from PIL import Image
import os

# Rebuild labelme jsons, keeping only the objects whose crop image survived a
# manual review in crop_dir.
# NOTE(review): FileOperationUtil and SegmentRes are used below but not
# imported in the visible imports — presumably imported elsewhere; verify.
json_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\json"
crop_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\crop_box_fix\crop_box"
save_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\json_fix"

index = 1
for each_json_path in FileOperationUtil.re_all_file(json_dir, endswitch=['.json']):
    img_name = FileOperationUtil.bang_path(each_json_path)[1]
    save_json_path = os.path.join(save_dir, img_name + '.json')
    include_labels = []
    a = SegmentRes()
    # only the shape info is needed here, so skip mask and image parsing
    a.parse_json_info(json_path=each_json_path, parse_mask=False, parse_img=False)
    # find the crops that were NOT deleted during review; crops are named
    # "<img_name>_test<i>.jpg" with i counting from 1
    # NOTE(review): include_labels is never consumed in this chunk — the
    # script presumably continues past the visible lines.
    for i in range(len(a)):
        each_label = "test{0}".format(i + 1)
        each_crop_path = os.path.join(
            crop_dir, "{0}_{1}.jpg".format(img_name, each_label))
        if os.path.exists(each_crop_path):
            include_labels.append(each_label)
# Collect new training images/xmls that are not already present (by filename)
# in the existing region dataset.
region_img_dir = r"C:\data\fzc_优化相关资料\dataset_fzc\000_train_data_step_1\JPEGImages"
region_xml_dir = r"C:\data\fzc_优化相关资料\dataset_fzc\000_train_data_step_1\Annotations"
new_img_dir = r"C:\Users\14271\Desktop\wuhan_006_fzc\JPEGImages"
new_xml_dir = r"C:\Users\14271\Desktop\wuhan_006_fzc\Annotations"

# NOTE(review): new_xml_path_list is filled nowhere in this chunk and
# new_img_path_list is never consumed here — the script presumably continues
# past the visible lines.
new_xml_path_list = []
new_img_path_list = []
index = 0
for each_img_path in FileOperationUtil.re_all_file(
        new_img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    # print(index, each_img_path)
    index += 1
    each_xml_path = os.path.join(
        new_xml_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml')
    #
    # target paths the pair would occupy inside the region dataset
    each_new_img_path = os.path.join(region_img_dir, os.path.split(each_img_path)[1])
    each_new_xml_path = os.path.join(region_xml_dir, os.path.split(each_xml_path)[1])
    # skip anything whose name already exists in the region dataset
    if os.path.exists(each_new_img_path):
        print("* img path exists : {0}".format(each_new_img_path))
        continue
    if os.path.exists(each_new_xml_path):
        print("* xml path exists : {0}".format(each_new_xml_path))
        continue
    new_img_path_list.append(each_img_path)
# Rename every (img, xml) pair to the image's md5 and move it under save_dir.
# save_dir / img_dir / xml_dir are defined above this chunk.
save_img_dir = os.path.join(save_dir, 'JPEGImages')
os.makedirs(save_img_dir, exist_ok=True)
save_xml_dir = os.path.join(save_dir, 'Annotations')
os.makedirs(save_xml_dir, exist_ok=True)

for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    print(each_img_path)
    # md5 of the file content becomes the new (deduplicating) basename
    each_md5 = HashLibUtil.get_file_md5(each_img_path)
    img_new_path = os.path.join(save_img_dir, "{0}.jpg".format(each_md5))
    xml_new_path = os.path.join(save_xml_dir, "{0}.xml".format(each_md5))
    each_xml_path = os.path.join(xml_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml')
    # FIX: guard against a missing xml — without this, shutil.move (or the
    # DeteRes parse) raises on the first image lacking an annotation and the
    # whole batch aborts; now such images are simply skipped.
    if not os.path.exists(each_xml_path):
        continue
    # skip annotations that contain no objects
    a = DeteRes(each_xml_path)
    if len(a) < 1:
        continue
    shutil.move(each_img_path, img_new_path)
    shutil.move(each_xml_path, xml_new_path)