customer_dir = r"C:\Users\14271\Desktop\fzc分类验证结果\fzc_test_res_006" # OperateDeteRes.cal_acc_classify(standard_dir, customer_dir) label_list = ["yt", "sm", "gt", "zd_yt", "fzc_broken"] # todo 解析 json 文件,对比每一个类型在 各个模型上的正确率和召回率 model_dir = r"C:\Users\14271\Desktop\003_test_res" model_list = FileOperationUtil.re_all_file(model_dir, lambda x: str(x).endswith('.json')) all_res = {} for each_json_path in model_list: epoch_num = int(each_json_path.split('_')[-2]) js_file = JsonUtil.load_data_from_json_file(each_json_path) each_res = {'rec': {}, 'acc': {}} for each in js_file: type_str, label, val = each[0], each[1], each[3] each_res[type_str][label] = val all_res[epoch_num] = each_res epoch_num_list = list(all_res.keys()) epoch_num_list.sort() tb = prettytable.PrettyTable() # 增加标题 tb.add_column(" ", ["acc"] * len(label_list) + ["rec"] * len(label_list)) tb.add_column(" ", label_list * 2)
# -*- coding: utf-8 -*- # -*- author: jokker -*- # todo 两个字典 (1)最难接上的成语 from JoTools.utils.JsonUtil import JsonUtil from JoTools.utils.CsvUtil import CsvUtil from pypinyin import pinyin, Style import pypinyin a = JsonUtil.load_data_from_json_file(r"chengyuzidian.json") # #普通模式 # print(pinyin('中心', style=Style.NORMAL)) def chinese2pingyin(chinese): res = [] a = pinyin(chinese, style=Style.NORMAL) for each in a: res.append(each[0]) return res # 最好接的字 head_cy_head = {} # 代表的是每一个字被对上的概率大小 head_cy_tail = {} # 代表的是每一个字被对上的概率大小 word_hard_index_head = {} word_hard_index_tail = {} for each in a: each = chinese2pingyin(each)
# -*- author: jokker -*- from JoTools.utils.JsonUtil import JsonUtil from JoTools.txkjRes.deteObj import PointObj, LineObj, LineStripObj, CricleObj, PolygonObj, RectangleObj json_path = r"C:\Users\14271\Desktop\关键点\images.json" json_info = { "polygon": None, "rectangle": None, "circle": None, "line_strip": None, "line": None, "point": None } a = JsonUtil.load_data_from_json_file(json_path, encoding='GBK') # # parse attr # self.version = a["version"] if "version" in a else "" # self.width = a["imageWidth"] if "imageWidth" in a else "" # self.height = a["imageHeight"] if "imageWidth" in a else "" # self.file_name = a["imagePath"] if "imagePath" in a else "" # self.image_data_bs64 = a["imageData"] obj_index = -1 for each_shape in a["shapes"]: each_shape_type = each_shape["shape_type"] # 数据的类型 point, # obj_index += 1 each_label = each_shape["label"] # point
# NOTE(review): whitespace-mangled chunk, kept byte-identical. It is the tail of
# a function (its `def` line is outside this view — presumably
# `transform_img_with_4_point`, which the __main__ driver below calls) plus the
# script entry point. Visible behavior: when `img_mat` is a str it is decoded
# from the file path with np.fromfile + cv2.imdecode (handles non-ASCII Windows
# paths), the result image `rect` is written with cv2.imencode(...).tofile
# (again non-ASCII-path safe, replacing the commented cv2.imwrite), and `rect`
# is returned. The __main__ block walks the json annotation files, pairs each
# with its .png image, reads shapes[0]['points'] as the four corner points, and
# crops each image to a .jpg in save_dir.
# NOTE(review): `rect` looks like it should come from four_point_transform, but
# that call appears commented out here — verify against the un-mangled file
# that `rect` is actually assigned before use.
if isinstance(img_mat, str): img_mat = cv2.imdecode(np.fromfile(img_mat, dtype=np.uint8), 1) # rect = four_point_transform(img_mat, np.array(four_points)) # if save_path: # cv2.imwrite(save_path, rect) cv2.imencode('.jpg', rect)[1].tofile(save_path) return rect if __name__ == "__main__": xml_point_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" img_dir = r"C:\Users\14271\Desktop\jizhuangxiang\img" save_dir = r"C:\Users\14271\Desktop\jizhuangxiang\crop" for each_json_path in FileOperationUtil.re_all_file(xml_point_dir, endswitch=['.json']): img_path = os.path.join( img_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.png') four_points = JsonUtil.load_data_from_json_file( each_json_path)["shapes"][0]['points'] each_save_path = os.path.join( save_dir, FileOperationUtil.bang_path(each_json_path)[1] + '.jpg') # transform transform_img_with_4_point(img_path, four_points, each_save_path)
def save_dz_to_json(self, save_path):
    """Serialize the duanzi (joke) data held in self.dz_data to *save_path* as json."""
    payload = self.dz_data
    JsonUtil.save_data_to_json_file(payload, save_path)
def load_dz_from_json(self, file_path):
    """Populate self.dz_data with the duanzi (joke) data read from *file_path*."""
    loaded = JsonUtil.load_data_from_json_file(file_path)
    self.dz_data = loaded
# -*- coding: utf-8 -*-
# -*- author: jokker -*-

from JoTools.utils.CsvUtil import CsvUtil
from JoTools.utils.TxtUtil import TxtUtil
from JoTools.utils.JsonUtil import JsonUtil
import zhconv

# Read the idiom dictionary csv, convert each idiom from traditional to
# simplified characters, and dump the result both as a txt table
# ("idiom,explanation" rows) and as a plain json word list.
a = CsvUtil.read_csv_to_list(
    r"C:\Users\14271\Desktop\成语词典\dict_idioms_2020_20211229(2).csv")

cy_list = []
cy_json_list = []
for each in a:
    print(each[3])
    # column 1 is the idiom (traditional), column 3 its explanation
    simplified = zhconv.convert(each[1], 'zh-hans')
    cy_list.append([simplified + ',' + each[3] + '\n'])
    cy_json_list.append(simplified)

print(len(a))
JsonUtil.save_data_to_json_file(cy_json_list, r"chengyuzidian.json")
TxtUtil.write_table_to_txt(cy_list, r"chengyu.txt")