def classification_images():
    """Sort raw images into DESTINATION_PATH/<label length>/<label>/.

    The label is the part of the file name before the first '-', stripped of
    '#' and spaces; files with an empty label go under an 'unrecognize'
    directory. Each copy is renamed to '<label>-<md5>.jpg' so duplicate
    source files collapse onto one destination name.
    """
    images_path = get_all_files_under_directory(ORIGINAL_PATH)
    images_size = len(images_path)
    for index, src_path in enumerate(images_path):
        try:
            # progress report every 100 files
            if index % 100 == 0:
                print(str(index) + '/' + str(images_size))
            _, image_name = os.path.split(src_path)
            label = str(image_name.split('-')[0]).replace('#', '').replace(' ', '')
            # bucket by label length; unlabeled files get their own bucket
            dir_name = 'unrecognize' if label == '' else str(len(label))
            new_path = os.path.join(DESTINATION_PATH, dir_name, label)
            create_dir(new_path)
            # content hash makes the destination file name collision-free
            md5 = GetFileMd5(src_path)
            des_path = os.path.join(new_path, label + '-' + md5 + '.jpg')
            shutil.copy(src_path, des_path)
        except Exception as e:
            # best-effort batch job: report the failing file and keep going
            print(e)
def save_orig_char(im, char):
    """Persist a generated original character image.

    The image is written under GENERATE_ORIG_CHAR_DIE_PATH/<char>/ with a
    random UUID file name so repeated saves never collide.

    :param im: image object accepted by save_img
    :param char: character label, used as the sub-directory name
    :return: None
    """
    target_dir = GENERATE_ORIG_CHAR_DIE_PATH + char + '/'
    create_dir(target_dir)
    file_name = str(uuid.uuid4()) + '.jpg'
    save_img(im, target_dir + file_name)
def __init__(self, mode='console', log_path=None): """ :param mode: console or file :param log_path: """ assert mode in ['console', 'file'] self.__mode = mode if log_path is None: dir_path = combine_file_path('logs') create_dir(dir_path) log_path = os.path.join(dir_path, 'log.log') if not os.path.exists(path=log_path): file = open(log_path, encoding='utf8', mode='w') file.close() self.__log_path = log_path
2018/8/31: ------------------------------------------------- """
import json
import os
from utility.file_io_utility import read_all_content
from utility.file_path_utility import get_all_file_from_dir, create_dir
from xml.dom.minidom import Document

__author__ = 'li'

# NOTE(review): hard-coded Windows path with mixed separators — this script
# only runs as-is on the author's machine; confirm before reusing.
result_dir_path = 'C:/Users\lr\Desktop/123'
result_paths = get_all_file_from_dir(result_dir_path)
save_annotation_dir = './xml/'
create_dir(save_annotation_dir)


def load_result(result_paths):
    """
    load result
    :param result_paths:
    :return:
    """
    # Only files whose path contains 'txt' are considered; empty files are
    # skipped.
    for p in result_paths:
        if p.find('txt') > 0:
            with open(p, mode='r', encoding='utf8') as file:
                lines = file.readlines()
                if len(lines) == 0:
                    continue
                dir_path, file_name = os.path.split(p)
                # NOTE(review): the function appears truncated in this chunk —
                # (dir_path, file_name) is split but never used here; the rest
                # of the body is presumably outside this view.
__author__ = 'li'

upload_path = 'F:/BaiduNetdiskDownload/upload/'
annotation_path = 'F:\BaiduNetdiskDownload/annotation'
img_path = 'F:\BaiduNetdiskDownload/uuid_image'


def load_map(path):
    """Map file basename (without extension) to its full path.

    :param path: directory scanned by get_all_file_from_dir
    :return: dict of {basename: full path}
    """
    # renamed from `map` to avoid shadowing the builtin
    file_map = {}
    for p in get_all_file_from_dir(path):
        _, name = os.path.split(p)
        key = name.split('.')[0]
        file_map[key] = p
    return file_map


img_map = load_map(img_path)
anns_map = load_map(annotation_path)
# Pair every image with its annotation; files are bucketed under a directory
# named after the first character of the key so no single folder gets huge.
for k in img_map:
    dir_name = upload_path + str(k[0])
    img_dir = dir_name + '/img/'
    ann_dir = dir_name + '/ann/'
    create_dir(img_dir)
    create_dir(ann_dir)
    # copy only the pairs that actually have an annotation file
    if k in anns_map:
        shutil.copy(img_map[k], img_dir + str(k) + '.jpg')
        shutil.copy(anns_map[k], ann_dir + str(k) + '.xml')
------------------------------------------------- """
import base64
import json
import math
import os
import cv2
from chinese_project.move_file.rename_file_md5 import GetFileMd5
from llib.cv_utility.image_opt_utility import read_image, write_image
from utility.file_io_utility import read_all_content
from utility.file_path_utility import get_all_file_from_dir, create_dir
import numpy as np

# Source directory of JSON records and destination directory for the
# generated training data (both machine-specific).
JSON_DIR = '/data/data/dangerous_bb/tt/project/daokou/data/json/'
TRAINING_DATA_DIR = 'G:/tmp/txt/'
create_dir(TRAINING_DATA_DIR)
# Scale factor applied to the decoded image; 1 means "keep original size".
enlarge_radio = 0.9


def save_image(image_str, i, file_name):
    # Decode the base64 image payload and write it into TRAINING_DATA_DIR.
    # When enlarge_radio != 1 the ratio is embedded in the output name
    # (e.g. 'foo0_9.jpg') so differently scaled copies can coexist.
    # NOTE(review): parameter `i` is unused in the visible portion, and the
    # function appears truncated in this chunk — it ends right after
    # re-reading the freshly written image.
    if enlarge_radio == 1:
        image_path = TRAINING_DATA_DIR + file_name.replace('.json', '') + '.jpg'
    else:
        image_path = TRAINING_DATA_DIR + file_name.replace('.json', '') + \
            str(enlarge_radio).replace('.', '_') + '.jpg'
    fh = open(image_path, "wb")
    fh.write(base64.b64decode(image_str))
    fh.close()
    img = read_image(image_path)