Code Example #1
 def start_paste(self):
     parent_dir, tail = os.path.split(os.path.abspath(
         self.bg_dir))  # normalize the path with abspath first
     utils.create_new_empty_dir(parent_dir + '/magic_imgs/')
     for i in range(self.total_num):
         magic_pic = self.paste_one_bg()
         img_name = parent_dir + '/magic_imgs/' + str(i) + '.jpg'
         cv2.imwrite(img_name, magic_pic)
         print('magic saved:', img_name)
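Every example on this page calls utils.create_new_empty_dir, but the utils module itself is not part of this listing. The following is only a minimal sketch, assuming the helper removes the directory if it already exists and then recreates it empty (example #6 also uses its return value as the created path); the real implementation may differ.

import os
import shutil

def create_new_empty_dir(dir_path):
    # Drop any existing contents so the directory starts out empty.
    if os.path.exists(dir_path):
        shutil.rmtree(dir_path)
    os.makedirs(dir_path)
    return dir_path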
Code Example #2
File: T_scale.py  Project: watson8544/tools
 def __init__(self, images_dir, scale=0, width=0, height=0, img_ext='bmp'):
     self.images_dir = images_dir
     self.scale = scale
     self.width = width
     self.height = height
     self.img_ext = img_ext
     parent_dir, tail = utils.get_parent_dir(images_dir)
     self.out_dir = os.path.join(parent_dir, tail + '_scale_' + str(scale) + '/')
     utils.create_new_empty_dir(self.out_dir)
     self.start_scale()
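utils.get_parent_dir is not shown either. Judging from how it is used (it returns a (parent_dir, tail) pair, and in example #9 the tail of a file path is its file name), it presumably behaves like os.path.split on the normalized path. A sketch under that assumption:

import os

def get_parent_dir(path):
    # Split a path into its parent directory and its last component,
    # e.g. '/data/images' -> ('/data', 'images').
    return os.path.split(os.path.abspath(path))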
Code Example #3
 def __init__(self, images_dir, ratio_limit=(0.5, 2), min_width=128, min_height=128, img_ext='bmp'):
     self.images_dir = images_dir
     self.ratio_limit = ratio_limit   # aspect ratio = w/h
     self.min_width = min_width
     self.min_height = min_height
     self.img_ext = img_ext
     parent_dir, tail = utils.get_parent_dir(images_dir)
     self.out_dir = os.path.join(parent_dir, tail + '_sifted_w' + str(self.min_width) + '_h' + str(self.min_height) + '/')
     utils.create_new_empty_dir(self.out_dir)
     self.start_sift()
Code Example #4
 def start_split(self):
     dir_A = self.parent_dir + '/train'
     dir_B = self.parent_dir + '/test'
     utils.create_new_empty_dir(dir_A)
     utils.create_new_empty_dir(dir_B)
     for i, file in enumerate(self.file_list):
         _, name = os.path.split(file)
         if i < self.train_count:
             dst = dir_A + '/' + name
         else:
             dst = dir_B + '/' + name
         shutil.copy(file, dst)
         print('copied:', dst)
Code Example #5
 def translate(self,
               model_path=None,
               total_num=1,
               output_dir='./translation'):
     # np.random.seed(0)
     if model_path is not None:
         self.g_AB.load_weights(model_path)
     imgs_A = self.data_loader.load_data(domain='A', batch_size=total_num)
     fake_B = self.g_AB.predict(imgs_A)
     translation_dir = output_dir
     utils.create_new_empty_dir(translation_dir)
     names = self.data_loader.get_imgs_name()
     for i, img in enumerate(fake_B):
         print('img:', i)
         img = (img + 1) * 127.5
         cv2.imwrite(translation_dir + '/' + names[i], img)
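The (img + 1) * 127.5 step assumes the data loader normalizes pixel values to [-1, 1] (the usual convention for tanh-output generators); it maps them back to the [0, 255] range that cv2.imwrite expects. A quick check:

import numpy as np

img = np.array([-1.0, 0.0, 1.0])
print((img + 1) * 127.5)  # -> [0., 127.5, 255.]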
Code Example #6
def export_image_and_label_selfdefine(labels_folder_path,images_folder):
    parent_dir, _ = utils.get_parent_dir(labels_folder_path)
    target_dir = utils.create_new_empty_dir(parent_dir + '/images/')
    file_list = utils.get_dir_filelist_by_extension(dir=labels_folder_path, ext='xml')
    for file in file_list:
        tree = ET.ElementTree(file=file)

        for elem in tree.iter(tag='filename'):
            file_name = elem.text
        full_path = images_folder+'/'+file_name
        try:
            shutil.copy2(src=full_path, dst=target_dir + '/' + file_name)
        except Exception:
            print(full_path + ' copy failed')
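utils.get_dir_filelist_by_extension is used throughout with keyword arguments dir and ext, and its results are passed straight to ET.ElementTree(file=...) and shutil, so it presumably returns full paths. A minimal sketch under that assumption:

import os

def get_dir_filelist_by_extension(dir, ext):
    # Full paths of all files in `dir` whose extension matches `ext`.
    return [os.path.join(dir, f) for f in sorted(os.listdir(dir))
            if f.lower().endswith('.' + ext.lower())]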
Code Example #7
 def __init__(self, target_dir):
     parent_dir, tail = utils.get_parent_dir(target_dir)
     self.out_dir = parent_dir + '/' + tail + '_centercut/'
     utils.create_new_empty_dir(self.out_dir)
     self.load_images(target_dir)
     self.start_crop()
Code Example #8
File: T_merge_datasets.py  Project: watson8544/tools
# Put the bbox-labeled images from every class together in one place and normalize their file names
import sys
import utils
import os
import shutil
import random

if __name__ == "__main__":
    root_dir = sys.argv[1]
    parent_dir, _ = utils.get_parent_dir(root_dir)
    target_dir = os.path.join(parent_dir, 'results')
    utils.create_new_empty_dir(target_dir)
    os.mkdir(os.path.join(target_dir, 'images'))
    os.mkdir(os.path.join(target_dir, 'annotations'))
    sub_dirs = utils.get_immediate_subdirectories(root_dir)

    total_count = 0
    for sub_dir in sub_dirs:
        sub_dir = os.path.join(root_dir, sub_dir)
        image_dir = os.path.join(sub_dir, 'images')
        image_file_list = utils.get_dir_filelist_by_extension(image_dir, 'bmp')
        total_count += len(image_file_list)
    print('Total number of images: %d' % total_count)
    file_paths = []
    for i in range(1, total_count + 1):
        new_name = utils.fixed_length(i, 4)
        a = {
            'image':
            os.path.join(target_dir, 'images', new_name + '.bmp'),
            'annotation':
            os.path.join(target_dir, 'annotations', new_name + '.xml')
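Example #8 also relies on utils.fixed_length (a zero-padded, fixed-width name such as '0001') and utils.get_immediate_subdirectories (first-level subdirectory names, since they are re-joined with root_dir afterwards). Neither helper is shown in the listing; the sketches below are plausible assumptions, not the project's actual code.

import os

def fixed_length(number, length):
    # Zero-pad a number to a fixed-width string, e.g. fixed_length(7, 4) -> '0007'.
    return str(number).zfill(length)

def get_immediate_subdirectories(root):
    # Names (not full paths) of the directories directly under `root`.
    return [d for d in os.listdir(root) if os.path.isdir(os.path.join(root, d))]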
Code Example #9

def change_tag_text(file_list, target_dir):

    for file in file_list:
        tree = ET.ElementTree(file=file)
        root = tree.getroot()
        # print(root.tag)
        # print(root.attrib)
        print(file)
        for elem in tree.iter(tag='path'):
            # file_name = elem.text
            # index = file_name.find('.')
            # ext = file_name[index:]
            _, fn = utils.get_parent_dir(file)
            fn = fn.replace('xml', 'bmp')
            elem.text = ''
            # print(elem.text)

        tree.write(target_dir + os.path.basename(file))


if __name__ == "__main__":
    xmls_dir = '/Users/shidanlifuhetian/All/data/KHB_ANNO/USTB中厚板检测数据集/test/xmls1'

    # Purpose of this tool: normalize the XML content; set filename to the real file name
    file_list = utils.get_dir_filelist_by_extension(dir=xmls_dir, ext='xml')
    parent_dir, _ = utils.get_parent_dir(xmls_dir)
    target_dir = parent_dir + '/xmls2/'
    utils.create_new_empty_dir(target_dir)
    change_tag_text(file_list, target_dir)
Code Example #10
            img = cv2.imread(k, flags=cv2.IMREAD_GRAYSCALE)
            pics.append(img)
        print('%s processed' % subd)

    b = utils.get_dir_filelist_by_extension(d[0], 'jpg')
    a = len(b)
    utils.view_pics(pics,
                    a,
                    b,
                    output_full_path='./compareresult/' +
                    working_dir.replace('./', '').replace(' ', '-') + '.png')


if __name__ == "__main__":
    # Control GPU visibility
    # os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    # cls = ['Cr','In','Pa','PS','RS','Sc']
    # cls = ['Cr']
    with open('list.txt') as f:
        txt = f.read().split('\n')
    utils.create_new_empty_dir('./compareresult')

    # task1: load the json model under saved_model together with its weight file;
    # feed in an image, run the translation, and display the result. Done
    for t in txt:
        step1(t)

        # task2: stitch the results together
        step2('./tmp.txt')
Code Example #11
    def __init__(self, dataset_name, appendix='', lamb=(1, 10, 0)):
        # Input shape
        self.img_rows = 128
        self.img_cols = 128
        self.channels = 1
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        self.dataset_name = dataset_name  #'/plates/单独训练集/Sc'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))

        # Calculate output shape of D (PatchGAN)
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 32
        self.df = 64

        # Loss weights
        self.lambda_gan = lamb[0]
        self.lambda_cycle = lamb[1]  # Cycle-consistency loss
        self.lambda_id = lamb[2]  # Identity loss

        optimizer = Adam(0.0002, 0.5)
        # optimizer = keras.optimizers.RMSprop()
        # Build and compile the discriminators
        # Two discriminators, A and B
        self.d_A = self.build_discriminator()
        self.d_B = self.build_discriminator()
        self.d_A.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])
        self.d_B.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

        # Build and compile the generators
        # Two generators
        self.g_AB = self.build_generator()
        self.g_BA = self.build_generator()
        self.g_AB.compile(loss='binary_crossentropy', optimizer=optimizer)
        self.g_BA.compile(loss='binary_crossentropy', optimizer=optimizer)

        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Translate images to the other domain
        # Image translation
        fake_B = self.g_AB(img_A)  # generate fake image B from real image A
        fake_A = self.g_BA(img_B)  # generate fake image A from real image B
        # Translate images back to original domain
        # translate the freshly generated fake images back to their original domain
        reconstr_A = self.g_BA(fake_B)
        reconstr_B = self.g_AB(fake_A)

        # For the combined model we will only train the generators
        self.d_A.trainable = False
        self.d_B.trainable = False

        # Discriminators determines validity of translated images
        # judge how good the translated fake images are
        valid_A = self.d_A(fake_A)
        valid_B = self.d_B(fake_B)

        self.combined = Model(
            [img_A, img_B],
            [valid_A, valid_B, fake_B, fake_A, reconstr_A, reconstr_B])
        self.combined.compile(loss=['mse', 'mse', 'mae', 'mae', 'mae', 'mae'],
                              loss_weights=[
                                  self.lambda_gan, self.lambda_gan,
                                  self.lambda_id, self.lambda_id,
                                  self.lambda_cycle, self.lambda_cycle
                              ],
                              optimizer=optimizer)

        current_time = time.strftime('%Y-%m-%d %H_%M_%S', time.localtime())
        self.output_dir = './' + current_time
        if appendix != '':
            self.output_dir += '_' + appendix
        self.saved_model_dir = self.output_dir + '/saved_model'
        self.predicts_dir = self.output_dir + '/predict_result'
        utils.create_new_empty_dir(self.output_dir)
        utils.create_new_empty_dir(self.saved_model_dir)
        utils.create_new_empty_dir(self.predicts_dir)
        self.save_structure()
        self.g_losses = []
        self.d_losses = []
Code Example #12
    def train(self, epochs, batch_size=128, save_interval=50):

        half_batch = int(batch_size / 2)

        start_time = datetime.datetime.now()

        for epoch in range(epochs):

            # ----------------------
            #  Train Discriminators
            # ----------------------

            imgs_A = self.data_loader.load_data(domain="A",
                                                batch_size=half_batch)
            imgs_B = self.data_loader.load_data(domain="B",
                                                batch_size=half_batch)

            # Translate images to opposite domain
            fake_B = self.g_AB.predict(imgs_A)
            fake_A = self.g_BA.predict(imgs_B)

            valid = np.ones((half_batch, ) + self.disc_patch)
            fake = np.zeros((half_batch, ) + self.disc_patch)

            # Train the discriminators (original images = real / translated = Fake)
            dA_loss_real = self.d_A.train_on_batch(imgs_A, valid)
            dA_loss_fake = self.d_A.train_on_batch(fake_A, fake)
            dA_loss = 0.5 * np.add(dA_loss_real, dA_loss_fake)

            dB_loss_real = self.d_B.train_on_batch(imgs_B, valid)
            dB_loss_fake = self.d_B.train_on_batch(fake_B, fake)
            dB_loss = 0.5 * np.add(dB_loss_real, dB_loss_fake)

            # Total discriminator loss
            d_loss = 0.5 * np.add(dA_loss, dB_loss)
            print('d loss:', d_loss)
            self.d_losses.append(float(d_loss[0]))

            # ------------------
            #  Train Generators
            # ------------------

            # Sample a batch of images from both domains
            imgs_A = self.data_loader.load_data(domain="A",
                                                batch_size=batch_size)
            imgs_B = self.data_loader.load_data(domain="B",
                                                batch_size=batch_size)

            # The generators want the discriminators to label the translated images as real
            # !! the generators need the fakes to be classified as real
            valid = np.ones((batch_size, ) + self.disc_patch)

            # Train the generators
            # combined model structure: Model([img_A, img_B], [valid_A, valid_B, fake_B, fake_A, reconstr_A, reconstr_B])

            g_loss = self.combined.train_on_batch(
                [imgs_A, imgs_B],
                [valid, valid, imgs_A, imgs_B, imgs_A, imgs_B])
            print('g loss:', g_loss)
            # self.g_losses.append(float(g_loss[1]+g_loss[2]+g_loss[5]+g_loss[6]))
            self.g_losses.append(float(g_loss[0]))
            elapsed_time = datetime.datetime.now() - start_time
            # Plot the progress
            print("%d time: %s" % (epoch, elapsed_time))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.epoch = epoch
                self.save_imgs(epoch)
                self.g_AB.save(self.saved_model_dir + '/model_gAB_epoch_' +
                               utils.fixed_length(epoch, 5) + '.h5')
                self.combined.save(self.saved_model_dir +
                                   '/model_combined_epoch_' +
                                   utils.fixed_length(epoch, 5) + '.h5')
                utils.create_new_empty_dir(self.predicts_dir + '/epoch_%d/' %
                                           (self.epoch))
                self.predicts_from_A_to_B()
            self.save_loss_img(epoch)
            self.save_loss()
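The class behind examples #5, #11 and #12 appears to be a Keras CycleGAN-style model, but its name is not shown in the listing, so CycleGAN below is a placeholder and the dataset name, epoch count and other arguments are illustrative only. The call signatures do follow the __init__, train and translate methods shown above.

gan = CycleGAN(dataset_name='plates', appendix='demo', lamb=(1, 10, 0))
gan.train(epochs=20000, batch_size=1, save_interval=200)
gan.translate(model_path=None, total_num=8, output_dir='./translation')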