def resize_one_img_xml(save_dir, resize_ratio, img_xml):
    """Resize one training image together with its VOC annotation.

    :param save_dir: dataset root; outputs go to save_dir/JPEGImages and save_dir/Annotations
    :param resize_ratio: scale factor applied to both image sides and to every box
    :param img_xml: tuple of (img_path, xml_path)
    """
    img_path, xml_path = img_xml
    a = DeteRes(xml_path)
    # silently skip when either half of the pair is missing or the xml has no objects
    if (not os.path.exists(img_path)) or (not os.path.exists(xml_path)):
        return
    if len(a) < 1:
        return
    # imdecode + np.fromfile instead of cv2.imread so non-ASCII (Chinese) paths work on Windows
    im = cv2.imdecode(np.fromfile(img_path, dtype=np.uint8), 1)
    im_height, im_width = im.shape[:2]
    im_height_new, im_width_new = int(im_height * resize_ratio), int(im_width * resize_ratio)
    im_new = cv2.resize(im, (im_width_new, im_height_new))
    # FIX: keep the xml header size in sync with the resized image, otherwise the
    # saved annotation reports the old width/height while the boxes are scaled
    a.height = im_height_new
    a.width = im_width_new
    # scale every box, clamping 1 px inside the new image borders
    for each_obj in a:
        each_obj.x1 = max(1, int(each_obj.x1 * resize_ratio))
        each_obj.x2 = min(im_width_new - 1, int(each_obj.x2 * resize_ratio))
        each_obj.y1 = max(1, int(each_obj.y1 * resize_ratio))
        each_obj.y2 = min(im_height_new - 1, int(each_obj.y2 * resize_ratio))
    # save img — FIX: imencode + tofile (not cv2.imwrite) so non-ASCII save
    # paths work on Windows, matching how the image was read above
    save_img_path = os.path.join(save_dir, 'JPEGImages', FileOperationUtil.bang_path(xml_path)[1] + '.jpg')
    cv2.imencode('.jpg', im_new)[1].tofile(save_img_path)
    # save xml, pointing it at the resized image
    a.img_path = save_img_path
    save_xml_path = os.path.join(save_dir, 'Annotations', FileOperationUtil.bang_path(xml_path)[1] + '.xml')
    a.save_to_xml(save_xml_path)
def resize_train_data(img_dir, xml_dir, save_dir, resize_ratio=0.5):
    """Resize a whole VOC training set: every xml plus its same-named image.

    :param img_dir: folder holding the .jpg images
    :param xml_dir: folder holding the .xml annotations
    :param save_dir: output root; JPEGImages/ and Annotations/ are created inside it
    :param resize_ratio: scale factor forwarded to resize_one_img_xml
    """
    # make sure both output sub-folders exist before anything is written
    for sub_name in ('Annotations', 'JPEGImages'):
        os.makedirs(os.path.join(save_dir, sub_name), exist_ok=True)
    # walk the annotations and pair each xml with the jpg sharing its stem
    for index, each_xml_path in enumerate(FileOperationUtil.re_all_file(xml_dir, endswitch=['.xml'])):
        print(index, each_xml_path)
        each_img_path = os.path.join(img_dir, FileOperationUtil.bang_path(each_xml_path)[1] + '.jpg')
        resize_one_img_xml(save_dir, resize_ratio, (each_img_path, each_xml_path))
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

from JoTools.txkjRes.segmentRes import SegmentRes
from JoTools.txkjRes.deteRes import DeteRes
from JoTools.utils.FileOperationUtil import FileOperationUtil
import base64
import numpy as np
from labelme import utils
import labelme
import cv2
from PIL import Image
import os

json_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\json"
save_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\crop_box"

# parse every labelme json (mask + embedded image) and crop its objects into save_dir
for index, each_json_path in enumerate(FileOperationUtil.re_all_file(json_dir, endswitch=['.json']), start=1):
    seg_res = SegmentRes()
    seg_res.parse_json_info(json_path=each_json_path, parse_mask=True, parse_img=True)
    seg_res.crop_and_save(save_dir)
    print(index, each_json_path)
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
import random
from JoTools.utils.FileOperationUtil import FileOperationUtil

img_dir = r"D:\data\001_fzc_优化相关资料\dataset_fzc\001_train_data_step_1.5\jieya\zd"
save_dir = r"C:\Users\14271\Desktop\train_vit\3"

# collect every jpg under img_dir and move it into save_dir (is_clicp=True)
jpg_path_list = list(FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG']))
FileOperationUtil.move_file_to_folder(jpg_path_list, save_dir, is_clicp=True)
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
from JoTools.utils.FileOperationUtil import FileOperationUtil

img_dir = r"C:\Users\14271\Desktop\for_unet_data"

# delete every image that has no same-named labelme .json beside it
index = 0
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG']):
    # FIX: the original unpacked the first slot into `dir`, shadowing the
    # builtin; only the stem is needed anyway
    _, name, _ = FileOperationUtil.bang_path(each_img_path)
    each_json_path = os.path.join(img_dir, name + ".json")
    if not os.path.exists(each_json_path):
        index += 1
        print(index, each_img_path)
        print(index, each_json_path)
        os.remove(each_img_path)
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
from JoTools.utils.FileOperationUtil import FileOperationUtil, FilterFun

img_dir = r"C:\Users\14271\Desktop\del"

# print the byte size of every file matched by the size filter (threshold 1, mode 'bt')
size_filter = FilterFun.get_filter_about_file_size(1, mode='bt')
for each_img_path in FileOperationUtil.re_all_file(img_dir, func=size_filter):
    print(os.path.getsize(each_img_path))
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
from JoTools.utils.LivpUtil import LivpUtil
from JoTools.utils.FileOperationUtil import FileOperationUtil

livp_dir = r"/home/ldq/livp2jpg/img/heic"
temp_folder = r"C:\Users\14271\Desktop\del\livp\tmp"
save_folder = r"/home/ldq/livp2jpg/res"

# convert every .heic under livp_dir into a .jpg inside save_folder
for each_heic_path in FileOperationUtil.re_all_file(livp_dir, endswitch=['.heic']):
    # BUG FIX: the original joined save_folder with the *absolute* source path;
    # os.path.join discards everything before an absolute component, so the jpg
    # was written next to the source heic instead of into save_folder.
    # Join with the bare file name, swapping the 'heic' extension for 'jpg'.
    heic_name = os.path.basename(each_heic_path)
    save_path = os.path.join(save_folder, heic_name[:-4] + 'jpg')
    LivpUtil.heic_to_jpg(each_heic_path, save_path)
from JoTools.utils.FileOperationUtil import FileOperationUtil
from JoTools.operateDeteRes import OperateDeteRes
from JoTools.utils.JsonUtil import JsonUtil
import prettytable

standard_dir = r"C:\data\fzc_优化相关资料\防振锤优化\000_标准分类测试集\crop_add_broken"
customer_dir = r"C:\Users\14271\Desktop\fzc分类验证结果\fzc_test_res_006"

# OperateDeteRes.cal_acc_classify(standard_dir, customer_dir)

label_list = ["yt", "sm", "gt", "zd_yt", "fzc_broken"]

# todo parse the json files and compare per-label accuracy / recall across models
model_dir = r"C:\Users\14271\Desktop\003_test_res"
model_list = FileOperationUtil.re_all_file(model_dir, lambda x: str(x).endswith('.json'))

# all_res maps epoch number -> {'rec': {label: value}, 'acc': {label: value}}
all_res = {}
for each_json_path in model_list:
    # the epoch number is the second-to-last '_'-separated token of the file path
    epoch_num = int(each_json_path.split('_')[-2])
    metrics = {'rec': {}, 'acc': {}}
    for record in JsonUtil.load_data_from_json_file(each_json_path):
        # record layout observed here: [metric_kind, label, <unused>, value]
        metrics[record[0]][record[1]] = record[3]
    all_res[epoch_num] = metrics

epoch_num_list = list(all_res.keys())
if isinstance(img_mat, str): img_mat = cv2.imdecode(np.fromfile(img_mat, dtype=np.uint8), 1) # rect = four_point_transform(img_mat, np.array(four_points)) # if save_path: # cv2.imwrite(save_path, rect) cv2.imencode('.jpg', rect)[1].tofile(save_path) return rect if __name__ == "__main__": xml_point_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" img_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" save_dir = r"C:\Users\14271\Desktop\jizhuangxiang\crop" for each_json_path in FileOperationUtil.re_all_file(xml_point_dir, endswitch=['.json']): img_path = os.path.join( img_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.png') four_points = JsonUtil.load_data_from_json_file( each_json_path)["shapes"][0]['points'] each_save_path = os.path.join( save_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.jpg') # transform transform_img_with_4_point(img_path, four_points, each_save_path)
# assemble a VOC-style training folder from detection images + merged xmls
img_dir = r"/home/suanfa-5/ldq/002_test_data/69G塔基"
xml_dir = r"/home/ldq/tj_dete/merge_new"
save_dir = r"/home/ldq/tj_dete/train_data"

img_save_dir = os.path.join(save_dir, "JPEGImages")
xml_save_dir = os.path.join(save_dir, "Annotations")
os.makedirs(img_save_dir, exist_ok=True)
os.makedirs(xml_save_dir, exist_ok=True)

img_path_list = []
xml_path_list = []

index = 0
for each_xml_path in FileOperationUtil.re_all_file(xml_dir, endswitch=['.xml']):
    index += 1
    print(index, each_xml_path)
    # the image shares the xml's stem: foo.xml -> foo.jpg
    xml_name = os.path.split(each_xml_path)[1]
    img_name = xml_name[:-3] + 'jpg'
    each_img_path = os.path.join(img_dir, img_name)
    # skip pairs where either file is missing
    if not (os.path.exists(each_xml_path) and os.path.exists(each_img_path)):
        print("* 没找到对应的 img 数据: {0}".format(each_img_path))
        continue
    a = DeteRes(each_xml_path)
    # a.filter_by_area(50*100)
    # NOTE(review): the loop body continues beyond this chunk — img_path_list /
    # xml_path_list are presumably filled further down; not visible here
from JoTools.txkjRes.deteRes import DeteRes
from JoTools.utils.FileOperationUtil import FileOperationUtil
import base64
import numpy as np
from labelme import utils
import labelme
import cv2
from PIL import Image

json_dir = r"C:\data\004_绝缘子污秽\val\json"

# NOTE(review): SegmentJson is not imported in the visible chunk — this raises
# NameError unless the import exists outside this view; confirm and add it
# (the sibling scripts import segment classes from JoTools.txkjRes).
a = SegmentJson()
dete_res = DeteRes()

# walk the labelme jsons (skipping the first 20) and rebuild a DeteRes whose
# boxes come from each shape's bounding box
for each_json_path in list(FileOperationUtil.re_all_file(json_dir, endswitch=['.json']))[20:]:
    print(each_json_path)
    a.parse_json_info(each_json_path, parse_img=True, parse_mask=True)
    dete_res.img = Image.fromarray(a.image_data)
    for each_obj in a.shapes:
        print(each_obj.box)
        box = each_obj.box
        dete_res.add_obj(box[0], box[1], box[2], box[3], tag=each_obj.label)

# save the last parsed mask (values scaled by 100 for visibility) as a png
b = Image.fromarray(a.mask.astype(np.uint8) * 100)
b.save(r"C:\Users\14271\Desktop\del\112233.png")
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
import shutil
from JoTools.utils.FileOperationUtil import FileOperationUtil
from JoTools.txkjRes.deteRes import DeteRes
from JoTools.utils.PrintUtil import PrintUtil
from JoTools.utils.HashlibUtil import HashLibUtil

img_dir = r"C:\Users\14271\Desktop\data"
save_dir = r"C:\Users\14271\Desktop\data"

# rename every image to <md5>.jpg so duplicated content collapses onto one name
# NOTE(review): .png files are renamed to a .jpg extension without re-encoding
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    print(each_img_path)
    file_md5 = HashLibUtil.get_file_md5(each_img_path)
    renamed_path = os.path.join(save_dir, "{0}.jpg".format(file_md5))
    shutil.move(each_img_path, renamed_path)
from JoTools.txkjRes.deteRes import DeteRes
from JoTools.utils.FileOperationUtil import FileOperationUtil
import base64
import numpy as np
from labelme import utils
import labelme
import cv2
from PIL import Image
import os

json_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\json"
crop_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\crop_box_fix\crop_box"
save_dir = r"C:\data\004_绝缘子污秽\002_测试标图流程\json_fix"

index = 1
for each_json_path in FileOperationUtil.re_all_file(json_dir, endswitch=['.json']):
    img_name = FileOperationUtil.bang_path(each_json_path)[1]
    save_json_path = os.path.join(save_dir, img_name + '.json')
    include_labels = []
    # NOTE(review): SegmentRes is not imported in the visible chunk — needs
    # `from JoTools.txkjRes.segmentRes import SegmentRes` (as the sibling crop
    # script does) or this raises NameError; confirm the import exists above.
    a = SegmentRes()
    a.parse_json_info(json_path=each_json_path, parse_mask=False, parse_img=False)
    # find the crops that were not deleted; crops are named "<img_name>_test<i>.jpg"
    for i in range(len(a)):
        each_label = "test{0}".format(i + 1)
        each_crop_path = os.path.join(crop_dir, "{0}_{1}.jpg".format(img_name, each_label))
        # NOTE(review): the loop body appears to continue past this chunk —
        # include_labels is never used within the visible lines
# import PDFMiner
import pdfkit
from JoTools.utils.FileOperationUtil import FileOperationUtil

# reading pdf content: http://www.ityouknow.com/python/2020/01/02/python-pdf-107.html

# url page -> pdf
# url = r'https://blog.csdn.net/qq_41185868/article/details/79907936#pdfkit%E4%BD%BF%E7%94%A8%E6%96%B9%E6%B3%95'
file_path = r'C:\Users\Administrator\Desktop\SnowDepth.pdf'
dir_path = r"C:\data\深度学习资料\001_要打印的论文\detection"
# pdfkit.from_url(url, file_path)

# text -> pdf
# pdfkit.from_string(u"jokker,呵呵,你说呢", file_path)

# FIX: re_all_file is consumed lazily elsewhere in this codebase (siblings wrap
# it in list()); materialize it so iterating and then indexing both work
pdf_path_list = list(FileOperationUtil.re_all_file(dir_path, lambda x: str(x).endswith('.pdf')))

for each_pdf_path in pdf_path_list:
    # print(each_pdf_path)
    pass

# FIX: guard against folders holding fewer than two pdfs (was an unconditional [1])
if len(pdf_path_list) > 1:
    print(pdf_path_list[1])

# # file -> pdf
# pdfkit.from_file(file, file_path)
#
# # an open file object also works
# with open('file.html') as f:
#     pdfkit.from_file(f, 'out.pdf')
#
# print('OK')
import os
from JoTools.utils.FileOperationUtil import FileOperationUtil
from JoTools.utils.PrintUtil import PrintUtil

# todo record every move so the data set can be restored later

# merge freshly-labelled (new_*) VOC data into the master (region_*) data set
region_img_dir = r"C:\data\fzc_优化相关资料\dataset_fzc\000_train_data_step_1\JPEGImages"
region_xml_dir = r"C:\data\fzc_优化相关资料\dataset_fzc\000_train_data_step_1\Annotations"

new_img_dir = r"C:\Users\14271\Desktop\wuhan_006_fzc\JPEGImages"
new_xml_dir = r"C:\Users\14271\Desktop\wuhan_006_fzc\Annotations"

new_xml_path_list = []
new_img_path_list = []

index = 0
for each_img_path in FileOperationUtil.re_all_file(new_img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    # print(index, each_img_path)
    index += 1
    each_xml_path = os.path.join(new_xml_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml')
    # destination paths inside the master data set (file names unchanged)
    each_new_img_path = os.path.join(region_img_dir, os.path.split(each_img_path)[1])
    each_new_xml_path = os.path.join(region_xml_dir, os.path.split(each_xml_path)[1])
    # never overwrite a file already present in the master set
    if os.path.exists(each_new_img_path):
        print("* img path exists : {0}".format(each_new_img_path))
        continue
    # NOTE(review): the loop body continues beyond this chunk — the actual move
    # and the *_path_list appends are not visible here
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

from JoTools.txkjRes.deteRes import DeteRes, DeteObj
from JoTools.utils.FileOperationUtil import FileOperationUtil

xml_dir = r"C:\data\fzc_优化相关资料\dataset_fzc\999_wait_for_train\武汉电科院_2021_04\xml_new_0.05"

# expand every box in every xml by 5% per side (relative augment), overwriting in place
for each_xml_path in FileOperationUtil.re_all_file(xml_dir, endswitch=['.xml']):
    dete_res = DeteRes(each_xml_path)
    dete_res.do_augment([0.05, 0.05, 0.05, 0.05], is_relative=True)
    dete_res.save_to_xml(each_xml_path)
start_time = time.time()

args = parse_args()
portNum = args.port

# ------------------------------------------------------------------------------------------
model_name = "kkxTC"
save_dir = r"./result"
img_dir = input("输入要测试的文件夹地址: ")
# ------------------------------------------------------------------------------------------

print("-" * 100)

# post every jpg under img_dir to the model-serving endpoint and print its alarms
url = 'http://192.168.3.109:' + str(portNum) + '//' + model_name
for each_img_path in FileOperationUtil.re_all_file(img_dir, lambda x: str(x).endswith((".jpg", ".JPG"))):
    each_img_name = os.path.split(each_img_path)[1]
    files = {'image': open(each_img_path, 'rb')}
    data = {'filename': each_img_name}
    res = requests.post(url=url, data=data, files=files)
    # if res.status_code == 200:
    res = json.loads(res.text)
    # print('-' * 50)
    print(each_img_path)
    # each alarm is one detection returned by the service
    for alarm_index, each in enumerate(res["alarms"]):
        print(" * {0}, {1}".format(alarm_index, each))
    a = DeteRes(assign_img_path=each_img_path)
    # NOTE(review): the loop body continues beyond this chunk
    # parse the face-detect response: each face gives left/top plus width/height
    face_num = res['result']['face_num']
    for i in range(face_num):
        loc = res['result']['face_list'][i]['location']
        x1, y1 = loc['left'], loc['top']
        width, height = loc['width'], loc['height']
        x2, y2 = x1 + width, y1 + height
        face_info.append([int(x1), int(y1), int(x2), int(y2)])
    return face_info


# OperateDeteRes.crop_imgs(img_dir, xml_dir=img_dir, save_dir=save_dir)

# todo test the frontal-face images
for img_path in FileOperationUtil.re_all_file(img_dir, lambda x: str(x).endswith(('.JPG', '.jpg'))):
    dete_res = DeteRes(assign_img_path=img_path)
    res = dete_face(img_path)
    print(res)
    # one box per detected face
    for index, each_res in enumerate(res):
        x1, y1, x2, y2 = each_res
        dete_res.add_obj(x1=x1, y1=y1, x2=x2, y2=y2, tag='face', assign_id=index)
    save_path = os.path.join(save_dir, os.path.split(img_path)[1])
    dete_res.draw_dete_res(save_path)
    # throttle calls to the face service
    time.sleep(3)
# assign_code_list = ['040500021','040500022','040500023','040501031','040501032','040501033'] assign_code_list = ['040303021', '040303022'] save_dir = r"C:\Users\14271\Desktop\del\新防振锤数据武汉电科院" img_dir_list = [ r"\\192.168.3.80\数据\9eagle数据库\peiyu_06.library\images", r"\\192.168.3.80\数据\9eagle数据库\peiyu_07.library\images", r"\\192.168.3.80\数据\9eagle数据库\peiyu_11.library\images" ] for dir_index, img_dir in enumerate(img_dir_list): for index, each_json_path in enumerate( FileOperationUtil.re_all_file(img_dir, lambda x: str(x).endswith('.json'))): try: print(dir_index, index, each_json_path) b = DeteRes() a = EagleMetaData() a.load_atts_from_json(each_json_path) b.img_path = os.path.join(os.path.dirname(each_json_path), a.name + '.jpg') if not os.path.exists(b.img_path): continue if a.comments is None: continue
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
import shutil
from JoTools.utils.FileOperationUtil import FileOperationUtil

img_dir = r"D:\data\001_fzc_优化相关资料\dataset_fzc\001_train_data_step_1.5\jieya\zd\extend"

# append '_extend' to the stem of every file under img_dir (extension unchanged)
for each_img_path in FileOperationUtil.re_all_file(img_dir):
    print(each_img_path)
    # FIX: the original unpacked the parent dir into `img_dir`, clobbering the
    # module-level config variable above; use a distinct local name
    parent_dir, img_name, suffix = FileOperationUtil.bang_path(each_img_path)
    new_img_path = os.path.join(parent_dir, img_name + '_extend.' + suffix)
    shutil.move(each_img_path, new_img_path)
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import random
import os
import shutil
import cv2
import PIL.Image as Image
from JoTools.operateDeteRes import OperateDeteRes
from JoTools.txkjRes.deteRes import DeteRes, DeteObj
from JoTools.utils.FileOperationUtil import FileOperationUtil
from JoTools.utils.RandomUtil import RandomUtil

xml_dir = r"/home/ldq/tj_dete/merge"
save_dir = r"/home/ldq/tj_dete/merge_new"

# class histogram before filtering
OperateDeteRes.get_class_count(xml_dir, print_count=True)

# keep only objects tagged "2"; drop xmls that end up with no objects
for each_xml_path in FileOperationUtil.re_all_file(xml_dir, endswitch=['.xml']):
    dete_res = DeteRes(each_xml_path)
    dete_res.filter_by_tags(need_tag=["2"])
    filtered_path = os.path.join(save_dir, os.path.split(each_xml_path)[1])
    if len(dete_res) > 0:
        dete_res.save_to_xml(filtered_path)

# class histogram after filtering
OperateDeteRes.get_class_count(save_dir, print_count=True)
# -*- author: jokker -*- import os from JoTools.utils.FileOperationUtil import FileOperationUtil from JoTools.utils.PrintUtil import PrintUtil from JoTools.utils.HashlibUtil import HashLibUtil # todo 添加移动的记录,这样方便数据的还原 region_img_dir_list = [ r"D:\data\001_fzc_优化相关资料\dataset_fzc\000_train_data_step_1\JPEGImages", ] new_img_dir = r"F:\20211019_防震锤锈蚀数据清洗\fix_data" index = 0 for img_index, each_img_path in enumerate(FileOperationUtil.re_all_file(new_img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG'])): # 计算 md5 值 md5_str = HashLibUtil.get_file_md5(each_img_path) for each_img_dir in region_img_dir_list: # 数据集中的名字 region_img_path = os.path.join(each_img_dir, md5_str + '.jpg') # if os.path.exists(region_img_path): # os.remove(each_img_path) index += 1 print("{0} | {2} remove : {1}".format(index, each_img_path, img_index))
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

from JoTools.utils.FileOperationUtil import FileOperationUtil

assign_dir = r"\\192.168.3.80\大金具-算法\qfm\连接件训练数据集"
save_dir = r"C:\Users\14271\Desktop\连接件"

# gather every xml on the share and transfer it into the local folder
# (is_clicp=False — presumably copy rather than cut; verify in FileOperationUtil)
xml_path_list = FileOperationUtil.re_all_file(assign_dir, endswitch=['.xml'])
FileOperationUtil.move_file_to_folder(xml_path_list, save_dir, is_clicp=False)

config_path = r"D:\Algo\saturn_database\config.ini"
import numpy as np
from labelme import utils
import labelme
import cv2
from PIL import Image
import os

"""
* the ideal mask marks every object with a distinct int value
* the next-best mask marks every object with the same int value
"""

img_dir = r"C:\Users\14271\Desktop\mask_test_res_019\img"
mask_dir = r"C:\Users\14271\Desktop\mask_test_res_019\mask"
save_dir = r"C:\Users\14271\Desktop\mask_test_res_019\json"

# pair each jpg with its "<stem>_mask.png" and emit one labelme json per image
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg']):
    each_mask_path = os.path.join(mask_dir, FileOperationUtil.bang_path(each_img_path)[1] + '_mask.png')
    each_save_path = os.path.join(save_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.json')
    if not os.path.exists(each_mask_path):
        print("* mask 文件不存在")
        continue
    else:
        print(each_mask_path)
    # NOTE(review): FileOperationUtil and SegmentRes are used but their imports
    # are not visible in this chunk; the loop body also continues past it
    a = SegmentRes()
    a.img_path = each_img_path
# # landmark detection
# # model download: http://dlib.net/files/
# predictor = dlib.shape_predictor(r'C:\Users\14271\Desktop\del\shape_predictor_68_face_landmarks.dat')
#
# for det in dets:
#     shape = predictor(img, det)
#     print(shape.parts())
#
# # face alignment
# my_img = dlib.get_face_chip(img, shape, size=150)
#
# plt.imshow(my_img)
# plt.show()

img_dir = r"/home/ldq/20220112_img_from_iphone/img"
save_dir = r"/home/ldq/20220112_img_from_iphone/xml"

# run dlib's frontal face detector on every image and write one VOC xml each
face_detector = dlib.get_frontal_face_detector()
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    print(each_img_path)
    dete_res = DeteRes(assign_img_path=each_img_path)
    # dlib expects RGB; cv2 loads BGR
    rgb_img = cv2.cvtColor(cv2.imread(each_img_path), cv2.COLOR_BGR2RGB)
    # second argument = number of image upsamples before detection
    for each_rect in face_detector(rgb_img, 1):
        dete_res.add_obj(x1=int(each_rect.left()), y1=int(each_rect.top()),
                         x2=int(each_rect.right()), y2=int(each_rect.bottom()), tag='face')
    dete_res.save_to_xml(os.path.join(save_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml'))
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

import os
from JoTools.utils.FileOperationUtil import FileOperationUtil

img_dir = r"\\192.168.3.80\数据\9eagle数据库\peiyu_06.library\images"
img_dir_2 = r"\\192.168.3.80\数据\9eagle数据库\peiyu_07.library\images"
img_dir_3 = r"\\192.168.3.80\数据\9eagle数据库\peiyu_11.library\images"

# collect the folder ids of library 06, then report any folder in 07 / 11 whose
# id collides with one from 06.
# NOTE(review): ids from 07 are never added to the set, so a collision between
# 07 and 11 alone would go unreported — confirm that is the intent
id_set = set()
for folder_index, each_folder in enumerate(FileOperationUtil.re_all_folder(img_dir)):
    print(folder_index, each_folder)
    id_set.add(os.path.split(each_folder)[1])

for folder_index, each_folder in enumerate(FileOperationUtil.re_all_folder(img_dir_2)):
    if os.path.split(each_folder)[1] in id_set:
        print("* 重复 :", each_folder)

for folder_index, each_folder in enumerate(FileOperationUtil.re_all_folder(img_dir_3)):
    if os.path.split(each_folder)[1] in id_set:
        print("* 重复 :", each_folder)

print("over")
import time


def print_img_shape(img_path, times=5):
    """Read `img_path` `times` times and print its shape (cv2 read micro-benchmark).

    :param img_path: path to an image readable by cv2.imread
    :param times: how many repeated reads to time
    """
    for _ in range(times):
        img = cv2.imread(img_path)
        # FIX: cv2.imread returns None (no exception) for unreadable paths, so
        # img.shape would raise AttributeError silently inside the pool worker
        if img is None:
            print("* cannot read : {0}".format(img_path))
            return
        print(img.shape)


if __name__ == "__main__":

    start_time = time.time()

    img_dir = r"C:\Users\14271\Desktop\del\pillow_cv2"
    image_list = list(FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg']))

    # fan the repeated reads out over 4 worker processes
    pool = Pool(4)
    for each_img_path in image_list:
        pool.apply_async(print_img_shape, (each_img_path, 5,))
    pool.close()
    pool.join()

    end_time = time.time()
    print("use time : {0} s".format(end_time - start_time))
from JoTools.utils.PrintUtil import PrintUtil
from JoTools.utils.HashlibUtil import HashLibUtil

img_dir = r"C:\Users\14271\Desktop\20220112_img_from_iphone\img"
xml_dir = r"C:\Users\14271\Desktop\20220112_img_from_iphone\xml"
save_dir = r"C:\Users\14271\Desktop\20220112_img_from_iphone\data"

save_img_dir = os.path.join(save_dir, 'JPEGImages')
os.makedirs(save_img_dir, exist_ok=True)
save_xml_dir = os.path.join(save_dir, 'Annotations')
os.makedirs(save_xml_dir, exist_ok=True)

# rename image+xml pairs to <md5>.* and gather them into a VOC-style folder
for each_img_path in FileOperationUtil.re_all_file(img_dir, endswitch=['.jpg', '.JPG', '.png', '.PNG']):
    print(each_img_path)
    each_md5 = HashLibUtil.get_file_md5(each_img_path)
    img_new_path = os.path.join(save_img_dir, "{0}.jpg".format(each_md5))
    xml_new_path = os.path.join(save_xml_dir, "{0}.xml".format(each_md5))
    # the xml shares the image's stem
    each_xml_path = os.path.join(xml_dir, FileOperationUtil.bang_path(each_img_path)[1] + '.xml')
    if os.path.exists(each_xml_path):
        # shutil.copy(each_img_path, img_new_path)
        # shutil.copy(each_xml_path, xml_new_path)
        a = DeteRes(each_xml_path)
        # skip annotations holding no objects
        if len(a) < 1:
            continue
        # NOTE(review): reconstructed nesting — the collapsed original makes it
        # ambiguous whether the exists-check is live; the loop body also
        # continues beyond this chunk