Example #1
def create_style_dataset(output_dir, input_dir, b_dir):
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    src_paths = []
    dst_paths = []

    skipped = 0
    for src_path in im.find(input_dir):
        name, _ = os.path.splitext(os.path.basename(src_path))
        dst_path = os.path.join(output_dir, name + ".png")
        if os.path.exists(dst_path):
            skipped += 1
        else:
            src_paths.append(src_path)
            dst_paths.append(dst_path)

    print("skipping %d files that already exist" % skipped)

    global total
    total = len(src_paths)

    print("processing %d files" % total)

    global start
    start = time.time()

    with tf.compat.v1.Session() as sess:
        for src_path, dst_path in zip(src_paths, dst_paths):
            src = im.load(src_path)
            dst = combine(src, src_path, b_dir)
            im.save(dst, dst_path)
            complete()
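
The loop above relies on a complete() progress callback that is defined elsewhere in the source file. A minimal sketch of such a helper, assuming the total and start globals set in create_style_dataset() (the processed counter and the exact output format are assumptions, not part of the original code):

import time

processed = 0  # hypothetical counter; the original helper may track progress differently

def complete():
    global processed
    processed += 1
    elapsed = time.time() - start
    rate = processed / elapsed if elapsed > 0 else 0.0
    remaining = (total - processed) / rate if rate > 0 else 0.0
    print("processed %d/%d images (%0.1f images/sec, %0.1f seconds remaining)"
          % (processed, total, rate, remaining))
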
Example #2
def combine(pairing, A_path, B_path, dst_path):
    # clear output path
    if os.path.exists(dst_path):
        shutil.rmtree(dst_path)
    os.mkdir(dst_path)
    with tf.Session() as sess:
        for imgA_name, imgB_name in pairing.items():
            imgA = im.load(os.path.join(A_path, imgA_name))
            imgB = im.load(os.path.join(B_path, imgB_name))

            # make sure that dimensions are correct
            height, width, _ = imgA.shape
            if height != imgB.shape[0] or width != imgB.shape[1]:
                raise Exception("differing sizes")

            # convert both images to RGB if necessary
            if imgA.shape[2] == 1:
                imgA = im.grayscale_to_rgb(images=imgA)

            if imgB.shape[2] == 1:
                imgB = im.grayscale_to_rgb(images=imgB)

            # remove alpha channel
            if imgA.shape[2] == 4:
                imgA = imgA[:, :, :3]

            if imgB.shape[2] == 4:
                imgB = imgB[:, :, :3]

            imgC = np.concatenate([imgA, imgB], axis=1)
            im.save(imgC, os.path.join(dst_path, imgA_name))
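
For context, a hypothetical invocation of this pairing-based combine; the directory names and the identity pairing below are placeholders, not taken from the original code:

import os

# pair files that share the same name in directories A/ and B/ (placeholder paths)
pairing = {name: name for name in os.listdir("A")}
combine(pairing, "A", "B", "combined")
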
Example #3
def process(src_path, dst_path, image_dir):
    global total_count

    total_count += 1
    src = im.load(src_path)

    if args.operation == "combine":
        dst = combine(src, src_path)
    else:
        raise Exception("invalid operation")
    im.save(dst, dst_path)
Example #4
def process(src_path, dst_path):
    src = im.load(src_path)

    if a.operation == "grayscale":
        dst = grayscale(src)
    elif a.operation == "resize":
        dst = resize(src)
    elif a.operation == "blank":
        dst = blank(src)
    elif a.operation == "combine":
        dst = combine(src, src_path)
    elif a.operation == "edges":
        dst = edges(src)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
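
The a namespace used by process() comes from command-line parsing that is not shown here. A hypothetical sketch of that setup (the flag names are assumptions and may differ from the original tool):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="directory containing source images")
parser.add_argument("--output_dir", required=True, help="directory for processed images")
parser.add_argument("--operation", required=True,
                    choices=["grayscale", "resize", "blank", "combine", "edges"])
a = parser.parse_args()
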
Example #5
def process(src_path, dst_path):
    src = im.load(src_path)

    if a.operation == "grayscale":
        dst = grayscale(src)
    elif a.operation == "resize":
        dst = resize(src)
    elif a.operation == "blank":
        dst = blank(src)
    elif a.operation == "combine":
        dst = combine(src, src_path)
    elif a.operation == "edges":
        dst = edges(src)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
Example #6
def main():
    if not os.path.exists(a.output_dir):
        os.makedirs(a.output_dir)

    src_paths = []
    dst_paths = []

    for src_path in im.find(a.input_dir):
        name, _ = os.path.splitext(os.path.basename(src_path))
        dst_path = os.path.join(a.output_dir, name + ".png")
        src_paths.append(src_path)
        dst_paths.append(dst_path)

    with tf.Session() as sess:
        for src_path, dst_path in zip(src_paths, dst_paths):
            src = im.load(src_path)
            dst = combine(src, src_path)
            im.save(dst, dst_path)
Example #7
def process(src_path, dst_path):
    src = im.load(src_path)

    if a.operation == "grayscale":  # convert to grayscale
        dst = grayscale(src)
    elif a.operation == "resize":  # resize to a uniform size
        dst = resize(src)
    elif a.operation == "blank":  # blank out the middle
        dst = blank(src)
    elif a.operation == "combine":  # combine images
        dst = combine(src, src_path)
    elif a.operation == "edges":  # detect edges
        dst = edges(src)
    elif a.operation == "blur":  # blur
        dst = blur(src)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
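
As an illustration of what one of the dispatched helpers might do, here is a grayscale sketch written with plain NumPy; the original helpers likely use TensorFlow image ops inside the surrounding session, so this is a stand-in rather than the tool's implementation:

import numpy as np

def grayscale_np(img):
    # luminance-weighted average, replicated to 3 channels so the output
    # keeps the same HxWx3 shape convention as the other operations
    gray = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]
    return np.stack([gray, gray, gray], axis=2)
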
Example #8
def process(src_path, dst_path):  # process() applies the operation chosen via the command-line arguments to the image
    src = im.load(src_path)

    if a.operation == "grayscale":
        dst = grayscale(src)
    elif a.operation == "resize":
        dst = resize(src)
    elif a.operation == "blank":
        dst = blank(src)
    elif a.operation == "combine":
        dst = combine(src, src_path)
    elif a.operation == "edges":
        dst = edges(src)
    elif a.operation == "blur":
        dst = blur(src)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
Example #9
def process(src_path, dst_path):
    src = im.load(src_path)
    im.save(src, "/data12/liyh/tools/test_output/src.png")
    if a.operation == "grayscale":
        dst = grayscale(src)
    elif a.operation == "resize":
        dst = resize(src)
    elif a.operation == "blank":
        dst = blank(src)
    elif a.operation == "combine":
        dst = combine(src, src_path)
    elif a.operation == "edges":
        dst = edges(src)
    elif a.operation == "combine_color_binary":
        dst = combine_color_binary(src, src_path)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
Example #10
def process(src_path, dst_path, label_dict, labels, labels_csv, image_dir):
    global index
    global total_count

    # Read the Hangul image at the given source path
    total_count += 1
    src = im.load(src_path)

    # If the operation passed as a command-line argument is "combine", run combine
    if args.operation == "combine":
        dst = combine(src, src_path)
    else:
        raise Exception("invalid operation")
    # Save the result of combine to a file
    im.save(dst, dst_path)

    # Map the saved combine result image to its label and record it in the CSV file
    file_string = '{}.png'.format(total_count)
    file_path = os.path.join(image_dir, file_string)

    character = list(label_dict.keys())[labels[index]]
    labels_csv.write(u'{},{}\n'.format(file_path, character))
    index += 1
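
The character lookup above only works if label_dict maps each character to a consecutive class index in insertion order. A hypothetical setup with placeholder characters and indices:

label_dict = {u'가': 0, u'나': 1, u'다': 2}  # placeholder character-to-index mapping
labels = [2, 0, 1]                           # one class index per generated image
# list(label_dict.keys())[labels[index]] then recovers the character for image `index`
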
Example #11
def process(src_path, dst_path):
    src = im.load(src_path)
    if a.operation == "edges":
        if a.crop:
            name = dst_path.split("/")[-1]
            src, dst = edges(src)
            im.save(src, os.path.join(a.crop_dir, name))
        else:
            dst = edges(src)
    elif a.operation == "grayscale":
        dst = grayscale(src)
    elif a.operation == "resize":
        dst = resize(src)
    elif a.operation == "blank":
        dst = blank(src)
    elif a.operation == "combine":
        dst = combine(src, src_path)
    elif a.operation == "skeletonize":
        dst = skeletonize_edge(src)
    else:
        raise Exception("invalid operation")

    im.save(dst, dst_path)
Example #12
def split(src1, src2):
    # NOTE: the original definition is truncated here; the signature and the
    # midpoint cutoff below are inferred from the call site split(srcA, srcB).
    width_cutoff = src1.shape[1] // 2

    # convert both images to RGB if necessary
    if src1.shape[2] == 1:
        src1 = im.grayscale_to_rgb(images=src1)
        src2 = im.grayscale_to_rgb(images=src2)

    # remove alpha channel
    if src2.shape[2] == 4:
        src1 = src1[:, :, :3]
        src2 = src2[:, :, :3]

    im1 = src1[:, :width_cutoff]
    im2 = src2[:, width_cutoff:]

    im_dest = np.concatenate([im1, im2], axis=1)

    return im_dest


with tf.Session() as sess:
    for src_path1, src_path2, dst_path_new in zip(src_paths1, src_paths2,
                                                  dst_paths):

        srcA = im.load(src_path1)
        srcB = im.load(src_path2)
        new_im = split(srcA, srcB)
        #print(src_path1)
        #print(src_path2)
        #print(dst_path_new)
        test = 1
        im.save(new_im, dst_path_new)
def main():

    for output_type in output_types:
        full_subfolder_path = os.path.join(a.output_dir, output_type)
        if not os.path.exists(full_subfolder_path):
            os.makedirs(full_subfolder_path)

    seg_input_files = im.findContainingSubtext(a.input_dir, 'sample_inputs')
    gen_input_files = im.findContainingSubtext(a.input_dir, 'sample_outputs')

    output_paths = {}
    seg_src_paths = {}
    gen_src_paths = {}

    skipped = 0
    for seg_input_file, gen_input_file in zip(seg_input_files,
                                              gen_input_files):
        name, _ = os.path.splitext(os.path.basename(seg_input_file))
        name = name.split('_')[0]

        output_paths[name] = {}
        name_out_dict = {}
        output_file_exists_for_name = False

        for output_type in output_types:
            output_path = os.path.join(a.output_dir, output_type)
            output_path = os.path.join(output_path, name + '.png')

            if os.path.exists(output_path):
                output_file_exists_for_name = True
                break

            name_out_dict[output_type] = output_path

        if not output_file_exists_for_name:
            output_paths[name] = name_out_dict
            seg_src_paths[name] = seg_input_file
            gen_src_paths[name] = gen_input_file
        else:
            skipped += 1

    print("skipping %d files that already exist" % skipped)

    global total
    total = len(output_paths)

    print("processing %d files" % total)

    global start
    start = time.time()

    skin_color = getSkinColor()
    background_color = getBackgroundColor()
    positive_colors = getPositiveColors()

    upscale_interpolation = getUpscaleInterpolation()

    with tf.Session() as sess:
        for name, name_out_dict in output_paths.items():
            print(name)
            print(name_out_dict)

            gen_image = im.load(gen_src_paths[name])
            seg_image = im.load(seg_src_paths[name])

            #Save inputs
            im.save(seg_image, name_out_dict['humanseginputs'])
            im.save(gen_image, name_out_dict['humangeninputs'])

            #Crop reference
            crop_reference = getFaceCropReference(seg_image, skin_color)

            #Cropped face segmentation
            temp_image_seg = crop(seg_image, crop_reference)
            temp_image_seg = addBackground(temp_image_seg, skin_color,
                                           positive_colors, background_color)
            im.save(temp_image_seg, name_out_dict['croppedsegfaces'])
            #Resize face segmentation
            temp_image_seg = resize(
                temp_image_seg,
                tfUpscaleInterpolationType=upscale_interpolation)
            im.save(temp_image_seg, name_out_dict['resizedsegfaces'])

            #Cropped face generated
            temp_image_gen = crop(gen_image, crop_reference)
            im.save(temp_image_gen, name_out_dict['croppedgenfaces'])
            #Resize face generated
            temp_image_gen = resize(
                temp_image_gen,
                tfUpscaleInterpolationType=upscale_interpolation)
            im.save(temp_image_gen, name_out_dict['resizedgenfaces'])

            complete()

    #Combine resized seg and gen images to input face module
    inputDirA = os.path.join(a.output_dir, 'resizedsegfaces')
    inputDirB = os.path.join(a.output_dir, 'resizedgenfaces')
    outputDir = os.path.join(a.output_dir, 'combinedseggenfaces')
    combineImagePairs(inputDirA, inputDirB, outputDir)

    #Now lets generate face images from extracted face segmentation maps
    inputDir = os.path.join(a.output_dir, 'combinedseggenfaces')
    outputDir = os.path.join(a.output_dir, 'generatedfaces')
    runFacePix2PixModule(inputDir, outputDir)

    with tf.Session() as sess:
        for name, name_out_dict in output_paths.items():
            gen_image = im.load(name_out_dict['humangeninputs'])
            seg_image = im.load(name_out_dict['humanseginputs'])
            #Crop reference
            crop_reference = getFaceCropReference(seg_image, skin_color)

            #Get crop reference point and size
            [rmin, rmax, cmin, cmax] = cropRectangle(crop_reference)
            width = rmax - rmin
            height = cmax - cmin

            #Load generated face
            gen_face_path = os.path.join(a.output_dir, 'generatedfaces')
            gen_face_path = os.path.join(gen_face_path, 'images')
            gen_face_path = os.path.join(gen_face_path, name + '-outputs.png')

            gen_face_image = im.load(gen_face_path)
            gen_face_image = downscaleToSize(gen_face_image, [width, height])

            updated_full_body_image = updateFaceResults(
                gen_image, gen_face_image, rmin, rmax, cmin, cmax,
                crop_reference)
            im.save(updated_full_body_image, name_out_dict['mergedgenoutputs'])

    #Combine original generated full body images and face updated generated images
    inputDirA = os.path.join(a.output_dir, 'humangeninputs')
    inputDirB = os.path.join(a.output_dir, 'mergedgenoutputs')
    outputDir = os.path.join(a.output_dir, 'finalcomparisonimages')
    combineImagePairs(inputDirA, inputDirB, outputDir)
def main():
    if not os.path.exists(a.output_dir_labels):
        os.makedirs(a.output_dir_labels)
    if not os.path.exists(output_train_directory_labels):
        os.makedirs(output_train_directory_labels)
    if not os.path.exists(output_test_directory_labels):
        os.makedirs(output_test_directory_labels)
    if not os.path.exists(output_val_directory_labels):
        os.makedirs(output_val_directory_labels)
        
    processInputImages = a.resize or a.crop
    
    if not os.path.exists(a.output_dir_images) and processInputImages:
        os.makedirs(a.output_dir_images)
    if not os.path.exists(output_train_directory_images) and processInputImages:
        os.makedirs(output_train_directory_images)
    if not os.path.exists(output_test_directory_images) and processInputImages:
        os.makedirs(output_test_directory_images)
    if not os.path.exists(output_val_directory_images) and processInputImages:
        os.makedirs(output_val_directory_images)
        
    #cropped images directory

    splits = ['train', 'test', 'val']
    
    src_paths = []
    dst_paths_labels = []
    dst_paths_images = []
    
    skipped = 0
    for split in splits:
        split_folder = os.path.join(a.input_dir, split)
        for src_path in im.find(split_folder):
    
            name, _ = os.path.splitext(os.path.basename(src_path))
            dst_path_label = os.path.join(a.output_dir_labels, split)
            dst_path_label = os.path.join(dst_path_label, name + ".png")
            dst_path_image = os.path.join(a.output_dir_images, split)
            dst_path_image = os.path.join(dst_path_image, name + ".png")
            
            if os.path.exists(dst_path_label) or os.path.exists(dst_path_image):
                skipped += 1
            else:
                src_paths.append(src_path)
                dst_paths_labels.append(dst_path_label)
                dst_paths_images.append(dst_path_image)
            
    print("skipping %d files that already exist" % skipped)
    
    global total
    total = len(src_paths)
    
    print("processing %d files" % total)

    global start
    start = time.time()
    
    if a.workers == 1:
        with tf.Session() as sess:
            for src_path, dst_path_label, dst_path_image in zip(src_paths, dst_paths_labels, dst_paths_images):
            
                name, _ = os.path.splitext(os.path.basename(src_path))
            
                print('Name: ' + name)
            
                label_folder = os.path.join(a.label_images_dir, name)
            
                label_image_paths = getLabelImages(label_folder)
            
                print(label_image_paths)
            
                color_dict = getLabelToColorDictionary()
            

                label_img = getLabelImage(label_image_paths, color_dict)
                
                if processInputImages:
                    processedImage = im.load(src_path)
                    
                    if a.crop:
                        crop_reference = getCropReference(label_image_paths)

                        processedImage = crop(processedImage, crop_reference)
                        label_img = crop(label_img, crop_reference)
                        
                        if a.resize:
                            processedImage = resize(processedImage)
                            label_img = resize(label_img)
                        
                    im.save(processedImage, dst_path_image)
                    
                im.save(label_img, dst_path_label)
                complete()