def train_gan(discriminator: nn.Module, generator: nn.Module, name, train_loader, val_loader=None, epochs=50, num_imgs=9, Optimizer_fn=optim.Adam, loss_type=0, k=1):
    os.makedirs(os.path.join(CKPT_PATH, name), exist_ok=True)
    # Truncate any previous loss log for this run.
    with open(os.path.join(CKPT_PATH, name, "loss_log.txt"), "w") as loss_log:
        pass
    latent_sample = torch.randn(size=(num_imgs, generator.latent_size,), device=device)
    latent_sample[0] = 0
    losses = dict(
        train_d_real_losses=[],
        train_d_fake_losses=[],
        train_g_losses=[],
        val_d_real_losses=[],
    )
    d_optimizer, d_scheduler = get_optimizer_scheduler(Optimizer_fn, discriminator.parameters())
    g_optimizer, g_scheduler = get_optimizer_scheduler(Optimizer_fn, generator.parameters())

    for epoch in tqdm.trange(epochs):  # loop over the dataset multiple times
        train_d_real_loss, train_d_fake_loss, train_g_loss = train_epoch(discriminator, generator, train_loader, device, d_optimizer, d_scheduler, g_optimizer, g_scheduler, loss_type, k)
        val_d_real_loss = val_epoch(discriminator, generator, val_loader, device, loss_type, 50)

        losses["train_d_real_losses"].append(train_d_real_loss)
        losses["train_d_fake_losses"].append(train_d_fake_loss)
        losses["train_g_losses"].append(train_g_loss)
        losses["val_d_real_losses"].append(val_d_real_loss)
        loss_msg = f"epoch {epoch}: train_d_real_loss={train_d_real_loss}, train_d_fake_loss={train_d_fake_loss}, train_g_loss={train_g_loss}, val_d_real_loss={val_d_real_loss}"
        logging.info(loss_msg)
        with open(os.path.join(CKPT_PATH, name, "loss_log.txt"),"a") as loss_log:
            loss_log.write(loss_msg + "\n")
        plot_losses(losses, name)
        generator.eval().to(device)
        save_image(generator(latent_sample).detach().to("cpu"), "generated_images", name, epoch)   
        save_model(discriminator, generator, name)
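
A minimal usage sketch for train_gan. The model classes and loaders here are hypothetical placeholders; train_gan itself only requires that the generator expose a latent_size attribute, and that get_optimizer_scheduler, device, and CKPT_PATH be defined as above.

# Hypothetical models and loaders, for illustration only.
G = Generator(latent_size=64).to(device)
D = Discriminator().to(device)
train_gan(D, G, name="dcgan_run1", train_loader=train_loader,
          val_loader=val_loader, epochs=50, loss_type=0, k=1)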
Example No. 2
def colorTransferToColor(inputFile, inputDataFile, outputFileName, destColor,
                         srcColor):
    '''
    Changes the color of the designated part of inputFile (the part similar
    to srcColor) to destColor.
    '''
    if utility.is_exist(inputDataFile):
        [divided_class, class_number, class_total, class_border] = \
        utility.load_result(inputDataFile)
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        divided_class, class_number, class_total, class_border, class_count, class_length, class_color, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    class_color = image_processing.get_class_color(
        utility.read_image(inputFile), class_total, class_count)

    destArea = styler.get_similar_color_area(
        divided_class, class_number, class_total, class_color, srcColor,
        240)  # Similar-color threshold of 240.
    part_change_image = styler.change_area_color(inputFile, outputFileName,
                                                 destColor, divided_class,
                                                 destArea)
    utility.save_image(part_change_image, outputFileName)
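
A hypothetical call, assuming BGR color triples as elsewhere in this codebase: recolor the regions close to a beige srcColor to navy. The file names are placeholders.

colorTransferToColor("sofa.jpg", "sofa_seg.bin", "sofa_navy.jpg",
                     destColor=[128, 0, 0], srcColor=[200, 210, 220])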
Example No. 3
def generate_image(count, gen_batch_size=16):

    obj_list = []
    z_where_list = []
    rot_mat_list = []
    for i in range(T - 1):

        obj = np.random.randint(3)
        obj_tensor = torch.zeros(gen_batch_size, cat_size).scatter_(
            1,
            torch.tensor(obj).expand(gen_batch_size, 1), 1).float().to(device)
        obj_list.append(obj_tensor)

        scale = np.random.uniform(1.8, 2.3, size=(gen_batch_size, 1))
        pos = np.random.uniform(-1.7, 1.7, size=(gen_batch_size, 2))
        angle = np.random.uniform(-3.14, 3.14, size=(gen_batch_size))
        z_where = torch.from_numpy(np.concatenate([scale, pos],
                                                  axis=-1)).float().to(device)
        z_where_list.append(z_where)
        rot_mat = rotation_matrix(angle, gen_batch_size)
        rot_mat_list.append(rot_mat)
        print('object num{}, obj: {}, scale:{}, pos:{}'.format(
            i, obj, scale[0], pos[0]))

    gen_x = model.generate(obj_list,
                           z_where_list,
                           rot_mat_list,
                           batch_size=gen_batch_size)
    save_image(gen_x, count, 'gen', path='X_image/')
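
rotation_matrix is not shown in this snippet; a plausible sketch, assuming it maps per-sample angles (radians) to a batch of 2-D rotation matrices on the device:

import numpy as np
import torch

def rotation_matrix(angle, batch_size):
    # Hypothetical helper: angle has shape (batch_size,); returns a
    # (batch_size, 2, 2) float tensor of 2-D rotation matrices.
    cos, sin = np.cos(angle), np.sin(angle)
    mats = np.stack([np.stack([cos, -sin], axis=-1),
                     np.stack([sin, cos], axis=-1)], axis=-2)
    return torch.from_numpy(mats).float().to(device)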
Example No. 4
def saveParameters(fileDir):
    # Model name '1' refers to the dataset's folder 1.
    model_name = '1'
    detection_model = objectDetector.load_model(model_name)
    # fileDir is a directory; list the files inside it.
    files = utility.get_filenames(fileDir)
    fileNames = []
    domColors = []
    wallColors = []
    floorColors = []

    for f in files:
        if "." not in f:
            continue
        print("Now proceeding ", f, " [ ", files.index(f), " ]")

        coord, str_tag, number_tag, score = objectDetector.inference(
            detection_model, f)

        # Build the save file name.
        save_file_name = utility.add_name(f, "_od", extension="bin")
        dirs = save_file_name.split("/")

        save_image_name = ""
        for d in dirs[0:-1]:
            save_image_name += d + "/"
        save_image_name += f.split("/")[-1].split(".")[0] + "/"

        utility.make_dir(save_image_name)

        rect_files = []
        additional_infor = []

        for i in range(len(str_tag)):
            additional_infor.append(-1)
            rect_image = image_processing.get_rect_image(
                f, int(coord[i][0]), int(coord[i][1]), int(coord[i][2]),
                int(coord[i][3]))
            rect_image_name = save_image_name + f.split("/")[-1]
            rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
            rect_files.append(rect_image_name)
            utility.save_image(rect_image, rect_image_name)

        dom_color = image_processing.get_dominant_color(f)
        n_color = utility.get_remarkable_color_n(dom_color, MAX_COLOR_LENGTH)
        fileNames.append(os.path.basename(f))
        domColors.append(n_color)
        wallColors.append([])
        floorColors.append([])
        utility.save_result([
            coord, str_tag, number_tag, score, rect_files, additional_infor,
            n_color
        ], save_file_name)

    utility.save_result([files, domColors, wallColors, floorColors],
                        config.RESEARCH_BASE_FILE)
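
The save_image_name construction above (split on "/", re-join, then append the file stem) can be written more robustly with os.path; a sketch of an equivalent helper (the name rect_dir_for is hypothetical):

import os

def rect_dir_for(f, save_file_name):
    # Directory of the save file plus the stem of f, with the trailing "/"
    # the original code expects.
    base = os.path.dirname(save_file_name)
    stem = os.path.splitext(os.path.basename(f))[0]
    return os.path.join(base, stem) + "/"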
Example No. 5
def segment(inputFile, outputFile, outputDataFile, total=True):
    divided_class, class_number, class_total, class_border, _, _, class_color, largest_mask, width, height = \
    segmentation.get_divided_class(inputFile, total=total)
    utility.save_result(
        [divided_class, class_number, class_total, class_border, largest_mask],
        outputDataFile)

    dc_image = utility.divided_class_into_image(divided_class, class_number,
                                                class_color, width, height,
                                                class_number)
    if outputFile is not None:
        utility.save_image(dc_image, outputFile)
Example No. 6
def change_area_style(input_file, output_file, texture_file, area):
    stylized_image = set_style(input_file, texture_file)
    stylized_image = np.array((stylized_image * 255)[0], np.uint8)
    stylized_image = cv2.cvtColor(stylized_image, cv2.COLOR_BGR2RGB)

    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape

    # Replace the given area of the original image with the stylized image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      stylized_image, area,
                                                      width, height)
    utility.save_image(part_change_image, output_file)
Example No. 7
def getFurnitureShape(inputFile, inputDataFile, outputFile):
    '''
    Produces grayscale and segmented data from the given inputFile and its
    analysis file inputDataFile.
    The resulting data is output to outputFile (a grayscale image).
    '''
    if utility.is_exist(inputDataFile):
        [divided_class, _, class_total, _] = utility.load_result(inputDataFile)
    else:
        segment(inputFile, None, inputDataFile)
        [divided_class, _, class_total, _] = utility.load_result(inputDataFile)

    gray_image = image_processing.to_gray_scale(inputFile)
    utility.print_image(gray_image)
    utility.save_image(gray_image, outputFile)
Example No. 8
def getPartChangedImage(inputFile,
                        outputFile,
                        str_tag,
                        coord,
                        rect_files,
                        selectedPreferenceImage,
                        i,
                        j,
                        ratio=(0.5, 0.5)):
    partChangedOutFile = utility.add_name(outputFile,
                                          "_changed_" + str(i) + str(j))
    original_image = utility.read_image(inputFile)
    resized_coord = utility.change_arrcoords(coord, ratio=ratio)
    recommand_furniture = []
    changed_log = []

    for k in range(len(str_tag)):
        if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
            inpaintingRandomValue = random.randint(0, 9)
            furniture_file = rect_files[k]
            # If user input exists, use it instead.
            if utility.is_exist(utility.get_userinput_bin(furniture_file)):
                furniture_data_file = utility.get_userinput_bin(furniture_file)
            else:
                furniture_data_file = utility.get_bin(furniture_file)
            styled_furniture, change_color = styleTransfer(
                furniture_file,
                furniture_data_file,
                selectedPreferenceImage,
                inpaintingRandomValue,
                ratio=ratio)
            original_image = image_processing.add_up_image_to(original_image, styled_furniture, \
             int(resized_coord[k][0]), int(resized_coord[k][1]), int(resized_coord[k][2]), int(resized_coord[k][3]))
            rec_furn = getRecommandFurnitureForImage(selectedPreferenceImage,
                                                     str_tag[k])
            if len(rec_furn) < 3:
                utility.logging(selectedPreferenceImage)
                utility.logging(str(rec_furn))
                recommand_furniture.append(["", "", ""])
            else:
                recommand_furniture.append(random.sample(rec_furn, 3))
            changed_log.append([resized_coord[k], change_color])

    out_res_file = utility.add_name(partChangedOutFile,
                                    "_result",
                                    extension=".bin")
    utility.save_result([changed_log, recommand_furniture], out_res_file)
    utility.save_image(original_image, partChangedOutFile)
    return partChangedOutFile, out_res_file
Example No. 9
def segment(inputFile, outputFile, outputDataFile, total=False):
    '''
    Segments the given file and outputs the result.
    The output is a collection of image fragments.
    '''
    divided_class, class_number, class_total, class_border, _, _, class_color, largest_mask, width, height = \
    segmentation.get_divided_class(inputFile, total=total)
    utility.save_result(
        [divided_class, class_number, class_total, class_border, largest_mask],
        outputDataFile)

    dc_image = utility.divided_class_into_image(divided_class, class_number,
                                                class_color, width, height,
                                                class_number)
    if outputFile is not None:
        utility.save_image(dc_image, outputFile)
    return divided_class, class_number, class_total, class_border
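
A hypothetical round trip, assuming utility.save_result and utility.load_result serialize and restore the same list (file names are placeholders):

divided_class, class_number, class_total, class_border = segment(
    "room.jpg", "room_seg.png", "room_seg.bin", total=True)
# Later the saved data can be restored without re-running segmentation:
[divided_class, class_number, class_total, class_border,
 largest_mask] = utility.load_result("room_seg.bin")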
Example No. 10
def change_dest_texture(input_file, output_file, texture_file, divided_class,
                        class_total, touch_list):
    stylized_image = set_style(input_file, texture_file)
    stylized_image = np.array((stylized_image * 255)[0], np.uint8)
    stylized_image = cv2.cvtColor(stylized_image, cv2.COLOR_BGR2RGB)

    ret_class_total = utility.get_class_with_given_coord(
        class_total, touch_list)
    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape

    # Change ret_class_total's part with the stylized image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      stylized_image,
                                                      ret_class_total, width,
                                                      height)
    utility.save_image(part_change_image, output_file)
Example No. 11
def colorTransferWithImage(inputFile, inputDataFile, outputFileName,
                           destImage):
    '''
    Changes the colors of inputFile to resemble destImage and saves the
    result to outputFileName.
    If segmentation succeeds, only the segmented part is changed.
    '''
    if utility.is_exist(inputDataFile):
        [_, _, class_total, _] = \
        utility.load_result(inputDataFile)
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        _, _, class_total, _, class_count, _, _, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    _, _, mask_map, (width, height) = segmentation.get_segmented_image(inputFile)
    changed_image = styler.set_color_with_image(inputFile, destImage, mask_map)
    utility.save_image(changed_image, outputFileName)
Example No. 12
def saveParameter(fileName, detection_model):
    coord, str_tag, number_tag, score = objectDetector.inference(
        detection_model, fileName)

    # Build the save file name.
    save_file_name = config.RESEARCH_BASE_DIR + "/" + os.path.basename(
        utility.get_od_bin(fileName))
    dirs = save_file_name.split("/")

    save_image_name = ""
    for d in dirs[0:-1]:
        save_image_name += d + "/"
    save_image_name += fileName.split("/")[-1].split(".")[0] + "/"

    utility.make_dir(save_image_name)

    rect_files = []
    additional_infor = []

    for i in range(len(str_tag)):
        additional_infor.append(-1)
        rect_image = image_processing.get_rect_image(fileName,
                                                     int(coord[i][0]),
                                                     int(coord[i][1]),
                                                     int(coord[i][2]),
                                                     int(coord[i][3]))
        rect_image_name = save_image_name + fileName.split("/")[-1]
        rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
        rect_files.append(rect_image_name)
        utility.save_image(rect_image, rect_image_name)

    dom_color = image_processing.get_dominant_color(fileName)
    n_color = utility.get_remarkable_color_n(dom_color, MAX_COLOR_LENGTH)
    utility.save_result([
        coord, str_tag, number_tag, score, rect_files, additional_infor,
        n_color
    ], save_file_name)
    return [
        coord, str_tag, number_tag, score, rect_files, additional_infor,
        n_color
    ]
Example No. 13
    def perform_adaptive_non_maximum_supression(self, corner_responses,
                                                number_of_NMS_keypoints,
                                                offset):
        print("Performing adaptive non-maximum supression")
        corner_responses = corner_responses[
            abs(offset[0]):corner_responses.shape[0] - abs(offset[0]),
            abs(offset[0]):corner_responses.shape[1] - abs(offset[0])]
        # Cap the number of keypoints at 500.
        number_of_keypoints = min(number_of_NMS_keypoints, 500)
        # self.adaptive_non_maximum_supression(number_of_keypoints)

        # # number_of_keypoints = 500
        robust_factor = 1.1
        self.perform_ANMS(corner_responses, number_of_keypoints, robust_factor)

        # show corner response
        utl.show_image('Corner response after ANMS', corner_responses)

        # display keypoints
        print("Number of keypoints: ", len(self.corners))
        result_image_path = utl.result_image_name(self.image_name)
        utl.save_image(self.corners, self.image, result_image_path)
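
perform_ANMS is not shown here; a sketch of the classic adaptive non-maximum suppression (Brown et al.) it presumably implements, using the same robust_factor:

import numpy as np

def anms(corners, responses, num_keypoints, robust_factor=1.1):
    # corners: (N, 2) float array; responses: (N,) array.
    # For each corner, the suppression radius is the distance to the nearest
    # corner whose robustified response is strictly stronger; keep the
    # num_keypoints corners with the largest radii.
    radii = np.full(len(corners), np.inf)
    for idx, (p, r) in enumerate(zip(corners, responses)):
        stronger = responses > robust_factor * r
        if stronger.any():
            radii[idx] = np.linalg.norm(corners[stronger] - p, axis=1).min()
    keep = np.argsort(-radii)[:num_keypoints]
    return corners[keep]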
Example No. 14
    def saveData(self):
        global nowIndex  # index currently being added
        global divided_class  # class-number map
        global class_number  # the set of class numbers
        global class_total  # total coords of each class
        global class_border  # class borders

        img = cv2.imread(IMAGE_NAME)
        (height, width, _) = img.shape

        class_total, class_number, divided_class = mergeGroup(
            class_total, class_number, divided_class, nowIndex)
        utility.save_result(
            [divided_class, class_number, class_total, class_border],
            SEG_SAVE_NAME)
        class_count = [len(class_total[i]) for i in range(len(class_total))]
        class_color = image_processing.get_class_color(
            utility.read_image(IMAGE_NAME), class_total, class_count)
        dc_image = utility.divided_class_into_image(divided_class,
                                                    class_number, class_color,
                                                    width, height,
                                                    class_number)
        utility.save_image(dc_image, CHANGE_DIVIED)
        self.imageLabel.changePixmap(CHANGE_DIVIED)
Example No. 15
def show_and_save(stream, target, predict, save_path_f, save_path_d,
                  save_path_o):
    batch = 0
    t_r = np.exp(target)
    y_r = np.exp(predict)
    for it in stream.get_epoch_iterator():
        e_l = target[batch] - predict[batch]
        e_r = t_r[batch] - y_r[batch]
        for i in range(len(it[0])):
            img = it[0][i]
            dis_img = utility.change_aspect_ratio(img, t_r[batch][i], 1)
            fix_img = utility.change_aspect_ratio(dis_img, 1 / y_r[batch][i],
                                                  1)

            print('[test_data]:', i + 1)
            print('[t_l]:', round(target[batch][i], 4),
                  '\t[t_r]:', round(t_r[batch][i], 4))
            print('[y_l]:', round(predict[batch][i], 4),
                  '\t[y_r]:', round(y_r[batch][i], 4))
            print('[e_l]:', round(e_l[i], 4), '\t[e_r]:', round(e_r[i], 4))

            plt.figure(figsize=(16, 16))
            plt.subplot(131)
            plt.title('Distorted image')
            plt.tick_params(labelbottom='off',
                            labeltop='off',
                            labelleft='off',
                            labelright='off')
            plt.tick_params(bottom='off', top='off', left='off', right='off')
            plt.imshow(dis_img)
            plt.subplot(132)
            plt.title('Fixed image')
            plt.tick_params(labelbottom='off',
                            labeltop='off',
                            labelleft='off',
                            labelright='off')
            plt.tick_params(bottom='off', top='off', left='off', right='off')
            plt.imshow(fix_img)
            plt.subplot(133)
            plt.title('Normal image')
            plt.tick_params(labelbottom='off',
                            labeltop='off',
                            labelleft='off',
                            labelright='off')
            plt.tick_params(bottom='off', top='off', left='off', right='off')
            plt.imshow(img)
            plt.show()

            utility.save_image(dis_img, save_path_d, ('%.18f' % e_l[i]))
            utility.save_image(fix_img, save_path_f, ('%.18f' % e_l[i]))
            utility.save_image(img, save_path_o, ('%.18f' % e_l[i]))

        batch += 1
    make_html.make_html(save_path_d)
    make_html.make_html(save_path_f)
    make_html.make_html(save_path_o)
Example No. 16
def change_dest_color(input_file,
                      output_file,
                      setting_color,
                      divided_class,
                      class_total,
                      touch_list,
                      touch_hint=None,
                      a=5,
                      b=1,
                      change_style="median",
                      save_flag=True,
                      ratio=(1.0, 1.0)):
    colored_image = set_color_with_color(input_file,
                                         setting_color,
                                         a=a,
                                         b=b,
                                         change_style=change_style,
                                         ratio=ratio)

    if touch_hint is None:
        ret_class_total = utility.get_class_with_given_coord(
            class_total, touch_list)
    else:
        ret_class_total = class_total[touch_hint]
    original_image = utility.read_image(input_file)
    original_image = utility.resize_image(original_image, ratio=ratio)
    (height, width, _) = original_image.shape

    # Change ret_class_total's part with the colored image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      colored_image,
                                                      ret_class_total, width,
                                                      height)
    if save_flag:
        utility.save_image(part_change_image, output_file)
    return part_change_image
Example No. 17
def make_images(decoder, device, encoder, original_images, name, epoch,
                latent_sample):
    decoder.eval().to(device)
    mean, log_var = encoder(original_images)
    sample_ls = torch.distributions.Normal(mean,
                                           torch.exp(log_var / 2)).sample()
    save_image(
        decoder(mean).detach().to("cpu"), "compressed_images", name, epoch)
    save_image(
        decoder(sample_ls).detach().to("cpu"), "compressed_sampled_images",
        name, epoch)
    save_image(
        decoder(latent_sample).detach().to("cpu"), "generated_images", name,
        epoch)
Example No. 18
def make_images(decoder, encoder, original_images, name, epoch, latent_sample,
                bunch, device):
    decoder.eval().to(device)
    save_image(
        decoder(encoder(original_images)).detach().to("cpu"),
        "compressed_images", name, epoch)
    save_image(
        decoder(latent_sample).detach().to("cpu"), "generated_images", name,
        epoch)
    #mean, cov = latent_space_pca(encoder, train_loader)
    torch.cuda.empty_cache()
    mean, cov = latent_space_pca(encoder, bunch)
    save_image(
        decoder(normal_to_pc(latent_sample, mean.to(device),
                             cov.to(device))).detach().to("cpu"),
        "pca_gen_images", name, epoch)
    save_labeled_pca_gen_images(encoder, decoder, latent_sample, bunch, name,
                                epoch)
Example No. 19
            train_step = optimizer.minimize(total_loss)

            sess.run(tf.global_variables_initializer())
            sess.run(model['input'].assign(initial_image))
            for it in range(ITERATIONS + 1):
                sess.run(train_step)

                if it % 100 == 0:
                    # Print every 100 iterations.
                    mixed_image = sess.run(model['input'])
                    print('Iteration %d' % (it))
                    print('sum         : ',
                          sess.run(tf.reduce_sum(mixed_image)))
                    print('total_loss  : ', sess.run(total_loss))
                    print("content_loss: ", alpha * sess.run(content_loss))
                    print("style_loss  : ", beta * sess.run(style_loss))
                    print("shape loss  : ", gamma * sess.run(shape_loss))

                    if not os.path.exists(OUTPUT_DIR):
                        os.mkdir(OUTPUT_DIR)

                    filename = OUTPUT_DIR + '/%d.jpg' % (it)
                    utility.save_image(filename,
                                       mixed_image,
                                       invert=result_invert)
                if sess.run(total_loss) < 1:
                    break
        sess.close()
    end_time = time.time()
    print("Time taken = ", end_time - start_time)
Example No. 20
def objectDetect(inputFile, outputFile):
    '''
    Saves the object-detection result for the furniture in inputFile to
    outputFile, stored in JSON form.
    Currently input/output is only possible as bin files.
    If a directory is given, outputFile is ignored.
    '''
    if "." not in inputFile:
        # File is directory
        files = utility.get_filenames(inputFile)
        for f in files:
            if "." not in f:
                continue

            coord, str_tag, number_tag, score = objectDetector.inference(
                detection_model, f)
            # Build the save file name.
            save_file_name = utility.add_name(f, "_od", extension="bin")
            dirs = save_file_name.split("/")
            save_image_name = ""
            for d in dirs[0:-1]:
                save_image_name += d + "/"
            save_image_name += f.split("/")[-1].split(".")[0] + "/"
            utility.make_dir(save_image_name)
            rect_files = []

            additional_infor = []
            for i in range(len(str_tag)):
                additional_infor.append(-1)
                rect_image = image_processing.get_rect_image(
                    f, int(coord[i][0]), int(coord[i][1]), int(coord[i][2]),
                    int(coord[i][3]))
                rect_image_name = save_image_name + f.split("/")[-1]
                rect_image_name = utility.add_name(rect_image_name,
                                                   "_" + str(i))
                rect_files.append(rect_image_name)
                utility.save_image(rect_image, rect_image_name)
            utility.save_result([
                coord, str_tag, number_tag, score, rect_files, additional_infor
            ], save_file_name)

    else:
        coord, str_tag, number_tag, score = objectDetector.inference(
            detection_model, inputFile)
        # Build the save file name.
        save_file_name = utility.add_name(inputFile, "_od", extension="bin")
        dirs = save_file_name.split("/")
        save_image_name = ""
        for d in dirs[0:-1]:
            save_image_name += d + "/"
        save_image_name += inputFile.split("/")[-1].split(".")[0] + "/"
        utility.make_dir(save_image_name)
        rect_files = []
        additional_infor = []
        for i in range(len(str_tag)):
            additional_infor.append(-1)
            rect_image = image_processing.get_rect_image(
                inputFile, int(coord[i][0]), int(coord[i][1]),
                int(coord[i][2]), int(coord[i][3]))
            rect_image_name = save_image_name + inputFile.split("/")[-1]
            rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
            rect_files.append(rect_image_name)
            utility.save_image(rect_image, rect_image_name)
        utility.save_result(
            [coord, str_tag, number_tag, score, rect_files, additional_infor],
            outputFile)
Example No. 21
def getStyleChangedImage_past(inputFile, preferenceImages, tempdata="temp"):
    '''
    Outputs preference-styled variants of inputFile for preferenceImages.
    Prints the name of each converted image, and finally prints how many
    images will be returned.
    1. Segment the furniture found by object detection.
    2. Take the color / material / segments of the furniture from interiors
       the user marked as liked, plus standalone furniture the user is
       likely to like.
    3. Apply them appropriately to the furniture of the original interior.
        3-1. Change the material and color of the original furniture
             (for every sofa and chair) -> 40%
        3-2. Replace the original furniture with furniture the user might
             like (every sofa and chair, with a color filter applied) -> 40%
        3-3. Apply only a color filter to the original interior
             (the 0.2 fraction above)
    '''
    if "\\" in inputFile:
        dirs = inputFile.split("\\")
        inputFile = ""
        for d in dirs[:-1]:
            inputFile += d + "/"
        inputFile += dirs[-1]
    outputFile = utility.get_add_dir(inputFile, tempdata)
    # fav_furniture_list = "Image/InteriorImage/test_furniture/sofa"
    # fav_furniture_list = utility.get_filenames(fav_furniture_list)
    # Load the existing data.
    base_name = inputFile.split("/")[-1].split("Z")[-1]
    researched_files = utility.get_only_jpg_files(
        "C:/workspace/IOU-Backend/util/IOU-ML/Image/InteriorImage/test")
    checked_file = ""
    for rf in researched_files:
        if base_name in rf:
            checked_file = rf

    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = utility.get_od_data(checked_file)
    '''
	segment_data = []
	for f in rect_files:
		segment_data.append(utility.get_segment_data(f))
	fav_furniture_seg_data = []
	for f in fav_furniture_list:
		fav_furniture_seg_data.append(utility.get_segment_data(f))
	'''
    returnImageList = []
    for i in range(MAX_OUT_IMAGE):
        now_index = random.randint(0, len(preferenceImages) - 1)
        saveOutputFile = utility.add_name(outputFile, "_" + str(i))
        if i < MAX_OUT_IMAGE * 0.2:
            original_image = utility.read_image(inputFile)
            decrese_ratio = (1.0, 1.0)
            if original_image.shape[0] * original_image.shape[1] > 1200 * 960:
                decrese_ratio = (0.3, 0.3)
            changed_image = styler.set_color_with_image(
                inputFile, preferenceImages[now_index], mask_map=None)
            utility.save_image(changed_image, saveOutputFile)
        elif i < MAX_OUT_IMAGE * 1.0:
            original_image = utility.read_image(inputFile)
            # If larger than a certain size, shrink via the decrease ratio to about 1/3.
            decrese_ratio = (1.0, 1.0)
            if original_image.shape[0] * original_image.shape[1] > 1200 * 960:
                decrese_ratio = (0.3, 0.3)
                original_image = cv2.resize(original_image,
                                            None,
                                            fx=decrese_ratio[0],
                                            fy=decrese_ratio[1],
                                            interpolation=cv2.INTER_AREA)
            for k in range(len(str_tag)):
                if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
                    styled_furniture = styler.set_color_with_image(
                        "C:/workspace/IOU-Backend/util/IOU-ML/" +
                        rect_files[k], preferenceImages[now_index], None,
                        decrese_ratio)
                    original_image = image_processing.add_up_image_to(original_image, styled_furniture, \
                     int(coord[k][0] * decrese_ratio[0]), int(coord[k][1] * decrese_ratio[0]), int(coord[k][2] * decrese_ratio[0]), int(coord[k][3] * decrese_ratio[0]))
            utility.save_image(original_image, saveOutputFile)
        else:
            original_image = utility.read_image(inputFile)
            for k in range(len(str_tag)):
                if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
                    stylized_image = styler.set_style(
                        "C:/workspace/IOU-Backend/util/IOU-ML/" +
                        rect_files[k], preferenceImages[now_index])
                    stylized_image = np.array((stylized_image * 255)[0],
                                              np.uint8)
                    styled_furniture = cv2.cvtColor(stylized_image,
                                                    cv2.COLOR_BGR2RGB)
                    original_image = image_processing.add_up_image_to(
                        original_image, styled_furniture, int(coord[k][0]),
                        int(coord[k][1]), int(coord[k][2]), int(coord[k][3]))
            utility.save_image(original_image, saveOutputFile)
        returnImageList.append(saveOutputFile)
    returnImageList.append(MAX_OUT_IMAGE)
    return returnImageList
Example No. 22
def getStyleChangedImage(inputFile,
                         preferenceImages,
                         od_model,
                         baseLight=[255, 255, 255],
                         changeLight=[178, 220, 240]):
    '''
    The input color is BGR ([178, 220, 240] is an orange light).
    Even just 4 preferenceImages are sufficient.
    '''
    if len(preferenceImages) <= 2:
        preferenceImages = preferenceImages + preferenceImages
    print(preferenceImages)
    inputBaseFile, preferenceBaseFile = utility.file_basify(
        inputFile, preferenceImages)

    now = time.time()
    detection_model = pspnet_50_ADE_20K()
    outputFile = utility.get_add_dir(inputFile, "temp")

    # Object Detect & Segmentation
    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = getODandSegment(inputBaseFile, od_model)

    (imgHeight, imgWidth, _) = utility.read_image(inputFile).shape
    if imgWidth > destSize[0] and imgHeight > destSize[1]:
        ratio = (destSize[0] / imgWidth, destSize[1] / imgHeight)
    else:
        ratio = (1, 1)
    print("Loading Finished")

    temp = time.time()
    print("Loading Time : ", temp - now)

    # Wall Detection with input image.
    wall_divided = segmentation.detect_wall_floor(inputFile, detection_model)
    wall_divided = utility.resize_2darr(wall_divided, ratio=ratio)
    wall_total, wall_number = matrix_processing.divided_class_into_class_total(
        wall_divided)
    print("Wall Divided.")

    # Get preference image`s data.
    preferWallColor = []
    preferFloorColor = []
    selectedPreferenceImages = []
    [files, domColors, wallColors, floorColors] = utility.load_result(
        config.RESEARCH_BASE_FILE
    )  # Each files` dom color, wall color, floor color will be saved.
    baseNameFiles = [os.path.basename(files[f]) for f in range(len(files))]

    print("Wall Color start.")
    indx = list(range(0, len(preferenceBaseFile)))
    random.shuffle(indx)
    # Select 2 color of above to preferWallColor and preferFloorColor
    for i in range(MAX_WALL_IMAGE):
        ind = indx[i]
        preferImage = preferenceBaseFile[ind]
        loadIndex = baseNameFiles.index(os.path.basename(
            preferImage))  # We do only compare with base name.
        preferWallColor.append(wallColors[loadIndex])
        preferFloorColor.append(floorColors[loadIndex])
        selectedPreferenceImages.append(files[loadIndex])
    print("Wall Colored Selected.")

    # Change wall & floor
    wfColorChangeImage = []
    for i in range(MAX_WALL_IMAGE):
        wfOutputFile = changeWallFloor(inputFile,
                                       outputFile,
                                       wall_divided,
                                       wall_total,
                                       wall_number,
                                       i,
                                       preferWallColor,
                                       preferFloorColor,
                                       ratio=ratio)
        wfColorChangeImage.append(wfOutputFile)
    print("Wall Color Changed")

    temp = time.time()
    print("Wall Coloring Time : ", temp - now)

    # Change Object ( Table and Chair )
    partChangedFiles = []
    procs = []
    recommandFurnitureList = []
    changeFurnitureLocation = []
    changeFurnitureColor = []

    for i in range(MAX_WALL_IMAGE):
        for j in range(MAX_PART_CHANGE_IMAGE):
            # Save the arguments to pass, then run a subprocess for speed.
            argvFile = utility.add_name(
                config.SUBPROCESS_ARGV,
                "_" + str(MAX_PART_CHANGE_IMAGE * i + j))
            utility.save_result([
                selectedPreferenceImages, wfColorChangeImage, outputFile,
                str_tag, coord, rect_files, i, j, ratio
            ], argvFile)

            # Subprocess need to calculate with given ratio.
            proc = subprocess.Popen(
                ['python', 'getPartChangedImage.py', argvFile],
                shell=True,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                encoding="cp949")
            procs.append(proc)

    for i in range(len(procs)):
        out = procs[i].communicate()[0]
        out = str(out).split("\n")
        tout = []
        for j in range(len(out)):
            if len(out[j]) > 0:
                tout.append(out[j])
        [changed_log, recommand_furniture] = utility.load_result(tout[-1])
        partChangedFiles.append(tout[-2])
        recommandFurnitureList.append(recommand_furniture)
        for j in range(len(changed_log)):
            changeFurnitureLocation.append(changed_log[j][0])
            changeFurnitureColor.append(changed_log[j][1])

    print("Part Changed Finished")
    # Add some plant.
    # partChangedFiles = print() # Image number will not be changed.

    temp = time.time()
    print("Part Changing Time : ", temp - now)

    lightList = []
    # Change Light
    for i in range(MAX_OUT_IMAGE):
        print("Now Proceed : ", i)
        files = utility.add_name(partChangedFiles[i], "_lighter")
        if random.randint(1, MAX_OUT_IMAGE) > 4:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, changeLight)
            lightList.append(changeLight)
        else:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, baseLight)
            lightList.append(baseLight)
        utility.save_image(changed_file, files)
        partChangedFiles[i] = files
    # partChangedFiles now holds the changed files.
    temp = time.time()
    print("Total Time : ", temp - now)
    changeLog = makeChangeInfor(preferWallColor, preferFloorColor, [preferenceImages[indx[0]], preferenceImages[indx[1]]], partChangedFiles, lightList, changeFurnitureLocation, changeFurnitureColor, \
     recommandFurnitureList, [])

    resultDictionary = utility.save_log_dictionary(inputFile, partChangedFiles,
                                                   changeLog)
    utility.logging(str(resultDictionary))
    with open(FILE_OUTQUEUE, 'a') as f:
        f.write(str(resultDictionary) + "\n")
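
For reference, the subprocess protocol above implies a worker script roughly like the following sketch. This is hypothetical (the real getPartChangedImage.py is not shown); the parent reads the last two stdout lines as the changed-image path (tout[-2]) and the result-bin path (tout[-1]).

# getPartChangedImage.py -- hypothetical sketch of the worker script.
import sys
import utility

[selectedPreferenceImages, wfColorChangeImage, outputFile, str_tag, coord,
 rect_files, i, j, ratio] = utility.load_result(sys.argv[1])
partChangedOutFile, out_res_file = getPartChangedImage(
    wfColorChangeImage[i], outputFile, str_tag, coord, rect_files,
    selectedPreferenceImages[i], i, j, ratio=ratio)
print(partChangedOutFile)  # read by the parent as tout[-2]
print(out_res_file)        # read by the parent as tout[-1]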
Example No. 23
    data_ix = np.empty((1, 785))
    #  train()

    avg_loss = 0
    count = 0
    for epoch in range(epoch_num):
        for data, labels in train_loader:
            bs = data.size()[0]
            data = Variable(data.float()).view(bs, -1).cuda()
            optimizer.zero_grad()
            loss = model.loss(data)
            avg_loss += loss.cpu().data.numpy()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
            optimizer.step()
            count += 1
            print('Epoch-{}; Count-{}; loss: {};'.format(
                epoch, count, avg_loss / 100))
            #x=generate_image(count)
            #save_image(x,count)
            if count % 100 == 0:
                print('Epoch-{}; Count-{}; loss: {};'.format(
                    epoch, count, avg_loss / 100))
                torch.save(model.state_dict(), 'save/weights_%d.tar' % (count))

                x = generate_image(count)
                save_image(x, count)
                avg_loss = 0
    torch.save(model.state_dict(), 'save/weights_final.tar')
    generate_image(count)
Example No. 24
def inference_on_events(pothole_events,
                        clean=False,
                        remove_useless=True,
                        verbose=1):
    ''' Filter out events in which no potholes are found.
    Each selected image is unique:
    if two events share the same pothole images, only the last event is taken.
    Events are analyzed in LIFO order.
    '''
    # List of already-analyzed images
    analyzed_images = []
    # Frames that can be assigned to events
    useful_frames = []
    # Events that will be returned
    taken_events = []
    # Images saved to file during analysis
    saved_images = []
    verbose_time = []
    stats = {
        "key_images": 0,
        "lower_prob": 0,
        "found_target": 0,
        "not_found_target": 0,
        "not_found_path": 0,
        "found_path": 0
    }
    # Analyze events in reversed order (from last to first)
    for event in tqdm(list(reversed(pothole_events))):
        # Join image name to path location
        if verbose:
            print("# EVENT", event.bumpID)
        if len(event.attached_images) == 0:
            if verbose: print("No attached frames, skip to next event.")
            continue
        image_paths = [
            os.path.join(config.in_dir, image['filename'])
            for image in event.attached_images
        ]
        # Filter out already-analyzed images
        not_analyzed = [x for x in image_paths if x not in analyzed_images]
        # Check integrity of images
        ok_images_path = check_frame_integrity(not_analyzed)
        # Remove similar, redundant images.
        # Reverse, extract, then reverse back, so the nearest image is kept.
        key_images_path = list(
            reversed(
                extract_key_images(list(reversed(ok_images_path)),
                                   lambda_match=0.76)))
        stats['key_images'] += len(key_images_path)
        stats['not_found_path'] += len(not_analyzed) - len(ok_images_path)
        stats['found_path'] += len(ok_images_path)

        print("-- Extracted key images not yet analyzed:",
              len(key_images_path))
        # Detect pothole in key images
        for image_path in key_images_path:
            if verbose:
                print("-> Image: {}".format(image_path))
                start = time()
            out = model.detect(image_path, save=False)
            # out['detections'] is [(label, prob, (x,y, width, height)), (...), ...]
            # get higher probability found
            prob = max([x[1] for x in out['detections']
                        ]) if len(out['detections']) > 0 else 0
            if prob > config.threshold:
                filename = os.path.basename(image_path)
                save_image(out['image'], os.path.join(config.out_dir,
                                                      filename))
                useful_frames.append({
                    'probability': prob,
                    'filename': filename
                })
                saved_images.append(filename)
                stats['found_target'] += 1
            else:
                if prob == 0:
                    stats['not_found_target'] += 1
                else:
                    stats['lower_prob'] += 1
            if verbose:
                verbose_time.append(time() - start)
        if verbose:
            print("|i| Analyzed {} sec/image".format(np.mean(verbose_time)))

        # Mark all images in this event as analyzed
        analyzed_images += not_analyzed

        # Find the frame with the highest probability among the event's images
        find_best_frame = False
        best_frame = None
        while not find_best_frame and len(useful_frames) > 0:
            best_frame_index = np.argmax(
                [x['probability'] for x in useful_frames])
            best_frame = useful_frames[best_frame_index]
            if any(best_frame['filename'] == img['filename']
                   for img in event.attached_images):
                find_best_frame = True
            del useful_frames[best_frame_index]

        # If a best frame was found, it replaces the event's other images;
        # otherwise the event is dropped with a warning
        if find_best_frame:
            event.attached_images = [best_frame]
            taken_events.append(event)
        else:
            print("|WARNING| Pothole not detected for event")

    # Remove useless images from disk that were not assigned to any event
    if remove_useless:
        new_events_images = list(
            np.array([[e['filename'] for e in event.attached_images]
                      for event in taken_events]).flat)
        for filename in saved_images:
            file_path = os.path.join(config.out_dir, filename)
            if filename not in new_events_images and os.path.isfile(file_path):
                os.remove(file_path)
                if (verbose):
                    print("Useless image {} deleted.".format(file_path))

    # Remove all analyzed original images
    if clean:
        for image_path in analyzed_images:
            if os.path.isfile(image_path):
                os.remove(image_path)
                if (verbose):
                    print("Image {} deleted.".format(image_path))

    print("|Stats|")
    print(stats)
    print("Unique images in events {}".format(len(analyzed_images)))
    print("Input events: {} - Output events {}".format(len(pothole_events),
                                                       len(taken_events)))
    # Restore list to original order
    return list(reversed(taken_events))
Example No. 25
    print("Input events: {} - Output events {}".format(len(pothole_events),
                                                       len(taken_events)))
    # Restore list to original order
    return list(reversed(taken_events))


if __name__ == '__main__':
    frames = glob.glob(os.path.join('./test/data', "*.jpg"))
    frames.sort(key=sort_frame_names, reverse=False)
    frames = extract_key_images(frames)
    print("Frames to be analyzed, ", len(frames))
    optim_detections = {'detections': [], 'image': None, 'caption': None}
    for frame in frames[10:]:
        start = time()
        detections = model.detect(frame, save=False)
        print("Running detection in {}s".format(time() - start))

        print(detections['detections'], detections['caption'])

        if len(detections['detections']) > 0:
            if len(optim_detections['detections']) > 0:
                if is_pothole_nearest(optim_detections['detections'],
                                      detections['detections']):
                    optim_detections = detections
                else:
                    save_image(
                        optim_detections['image'],
                        os.path.join(config.out_dir, os.path.basename(frame)))
                    optim_detections = detections
            else:
                optim_detections = detections
Example No. 26
def generate_image(count):
    x = model.generate(batch_size)
    save_image(x, count)
Example No. 27
def generate_image(count, tau, gen_batch_size=64):
    #x = model.generate(batch_size)
    train_iter = iter(train_loader)
    (data, _) = next(train_iter)
    data = data.to(device)
    recon_x, z_pres, cats = model.recon(data, tau)
    z_zip = model.get_where_pres()
    z_obj = tensor_to_objs(latents_to_tensor(z_zip)[:5])
    #print(len(recon_x))
    recon_x_final = recon_x[-1]
    print(np.min(recon_x_final), np.max(recon_x_final))
    cats_array = []
    z_pres_array = []

    if count >= 60000:
        for i in range(3):
            cats_numpy = cats[i].detach().cpu().numpy()
            index = np.argmax(cats_numpy, axis=-1)

            z_pres_np = np.squeeze(
                torch.round(z_pres[i].detach()).cpu().numpy())
            z_pres_np_raw = np.squeeze(z_pres[i].detach().cpu().numpy())
            cats_array.append(cats_numpy)
            z_pres_array.append(z_pres_np_raw)

            print(index.shape, z_pres_np.shape)
            index = index * z_pres_np + z_pres_np - 1
            print('i=', i, ' cat index:')
            print(np.reshape(index, (8, 8)))

    np.savez_compressed('codes/recon_code_{}.csv'.format(count),
                        cats=np.array(cats_array),
                        z_pres=np.array(z_pres_array))

    vis.images(draw_many(data[:5].view(-1, A, B), z_obj))
    # Show reconstructions of data.
    vis.images(draw_many(
        torch.tensor(recon_x_final[:5]).view(-1, A, B), z_obj))
    #save_image(x,count,'gen')
    save_image(recon_x, count, 'recon', path='image/')
    save_image_single(data.cpu().numpy(), count, 'origin', path='image/')
    #####generate image from scratch
    #first_obj = np.random.randint(cat_size)
    first_obj = 0
    first_obj_tensor = torch.zeros(gen_batch_size, cat_size).scatter_(
        1,
        torch.tensor(first_obj).expand(gen_batch_size, 1),
        1).float().to(device)

    #second_obj = np.random.randint(cat_size)
    second_obj = 1
    second_obj_tensor = torch.zeros(gen_batch_size, cat_size).scatter_(
        1,
        torch.tensor(second_obj).expand(gen_batch_size, 1),
        1).float().to(device)

    #third_obj = np.random.randint(cat_size)
    third_obj = 2
    third_obj_tensor = torch.zeros(gen_batch_size, cat_size).scatter_(
        1,
        torch.tensor(third_obj).expand(gen_batch_size, 1),
        1).float().to(device)

    first_scale = np.random.uniform(1.5, 4, size=(gen_batch_size, 1))
    first_pos = np.random.uniform(-0.3, 0.3, size=(gen_batch_size, 2))
    second_scale = np.random.uniform(1.5, 4, size=(gen_batch_size, 1))
    second_pos = np.random.uniform(-0.3, 0.3, size=(gen_batch_size, 2))
    third_scale = np.random.uniform(1.5, 4, size=(gen_batch_size, 1))
    third_pos = np.random.uniform(-0.3, 0.3, size=(gen_batch_size, 2))
    #third_pos = np.concatenate([np.random.uniform(0.6,0.8,size=(gen_batch_size,1)),np.random.uniform(-0.6,-0.8,size=(gen_batch_size,1))],axis=-1)
    first_z_where = torch.from_numpy(
        np.concatenate([first_scale, first_pos], axis=-1)).float().to(device)
    second_z_where = torch.from_numpy(
        np.concatenate([second_scale, second_pos], axis=-1)).float().to(device)
    third_z_where = torch.from_numpy(
        np.concatenate([third_scale, third_pos], axis=-1)).float().to(device)

    gen_x = model.generate(
        [first_obj_tensor, second_obj_tensor, third_obj_tensor],
        [first_z_where, second_z_where, third_z_where])
    save_image(gen_x, count, 'gen', path='image/')
Example No. 28
def generate():
    x = model.generate(batch_size)
    save_image(x)
Example No. 29
def train_stacked_ae(encoder: nn.Module,
                     decoder: nn.Module,
                     train_loader,
                     device,
                     name,
                     latent_size=4,
                     epochs=50,
                     num_imgs=9,
                     Optimizer=optim.Adam):
    train_losses = []
    bunch = get_bunch(train_loader)
    latent_sample = torch.randn(size=(
        num_imgs,
        latent_size,
    ), device=device)
    latent_sample[0] = 0
    original_images = next(iter(train_loader))[0].to(device)
    original_images = original_images[:min(num_imgs,
                                           original_images.size()[0])]
    save_img(
        get_grid(original_images.to("cpu")),
        os.path.join(CKPT_PATH, name, "compressed_images", "original.png"))
    criterion = nn.MSELoss()
    # Optimizer may be a scheduler factory that wraps an optimizer; unwrap
    # until the innermost optimizer is reached.
    scheduler = optimizer = Optimizer(
        list(encoder.parameters()) + list(decoder.parameters()))
    while True:
        try:
            optimizer = optimizer.optimizer
        except AttributeError:
            break
    if scheduler == optimizer:
        scheduler = False
    for epoch in tqdm.trange(epochs):  # loop over the dataset multiple times
        torch.cuda.empty_cache()
        decoder = decoder.train().to(device)
        encoder = encoder.train().to(device)
        running_loss = 0.0
        print(epoch)
        for i, data in tqdm.tqdm(enumerate(train_loader, 0)):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            inputs = inputs.to(device=device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = decoder(encoder(inputs, epoch), epoch)
            loss = criterion(outputs, inputs)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
        if scheduler:
            scheduler.step()
        train_losses.append(running_loss / (i + 1))
        logging.info(f"epoch {epoch}: loss={running_loss / (i + 1)}")
        decoder = decoder.eval()
        save_image(
            decoder(encoder(original_images, epoch), epoch).detach().to("cpu"),
            "compressed_images", name, epoch)
        save_image(
            decoder(latent_sample).detach().to("cpu"), "generated_images",
            name, epoch)
        torch.cuda.empty_cache()
        mean, cov = latent_space_pca(encoder, bunch)
        save_image(
            decoder(
                normal_to_pc(latent_sample, mean.to(device),
                             cov.to(device))).detach().to("cpu"),
            "pca_gen_images", name, epoch)
        del mean, cov
        torch.cuda.empty_cache()
        save_labeled_pca_gen_images(encoder, decoder, latent_sample, bunch,
                                    name, epoch)
        save_model(encoder, decoder, name)
    return train_losses
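
A hypothetical invocation. Note that the encoder and decoder used above take the current epoch as a second forward argument (a stacked/progressive convention in this code), so the placeholder classes here would have to do the same.

encoder = StackedEncoder(latent_size=4)   # placeholder model classes
decoder = StackedDecoder(latent_size=4)
losses = train_stacked_ae(encoder, decoder, train_loader, device,
                          name="stacked_ae", latent_size=4, epochs=50)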
Example No. 30
def styleTransfer(inputFile,
                  inputDataFile,
                  destFile,
                  inpaintingRandomValue,
                  ratio=(1.0, 1.0)):
    '''
    Randomly transforms the color and texture of inputFile toward the color
    and texture of destFile.
    '''
    if utility.is_exist(inputDataFile):
        loadData = utility.load_result(inputDataFile)
        if len(loadData) == 5:
            # Newer Version of segmentation.
            [divided_class, class_number, class_total, _,
             largest_mask] = loadData
        else:
            [divided_class, class_number, class_total, _] = loadData
            largest_mask = None
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        divided_class, class_number, class_total, _, class_count, _, class_color, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    # Init Variables. - TODO : Change this part with largest mask.
    # largest_mask, _, _, (width, height) = segmentation.get_segmented_image(inputFile)
    # class_color = image_processing.get_class_color(utility.read_image(inputFile), class_total, class_count)
    img = utility.read_image(inputFile)
    (height, width, _) = img.shape

    file_extension = "." + inputFile.split(".")[1]
    file_base_name = inputFile.split(".")[0]

    resized_class_total = utility.changed_coords2d(class_total, ratio=ratio)
    # Remove duplicates.
    temp_class_total = resized_class_total
    resized_class_total = []
    for tc in temp_class_total:
        if tc not in resized_class_total:
            resized_class_total.append(tc)

    input_sample = [
        resized_class_total[i][0] for i in range(len(resized_class_total))
    ]
    if len(input_sample) < MAX_CHANGE_COLOR:
        input_sample *= int(MAX_CHANGE_COLOR // len(input_sample)) + 1
    dest_color = image_processing.get_dominant_color(destFile, clusters=8)

    next_file_name = file_base_name + "_" + str(0) + file_extension
    now_input_sample = random.sample(input_sample, MAX_CHANGE_COLOR)
    now_dest_color = random.sample(dest_color, MAX_CHANGE_COLOR)
    part_change_image = utility.read_image(inputFile)
    part_change_image = utility.resize_image(part_change_image, ratio=ratio)
    randomValue = inpaintingRandomValue

    if randomValue < -1:
        # Image Inpainting
        masking_coord = []
        for ct in resized_class_total:
            masking_coord += ct
        tempFile = utility.add_name(next_file_name, "_temp")
        tempFile = config.RESEARCH_BASE_DIR + "/temp/" + tempFile.split(
            "/")[-1]

        utility.logging("Image Inpainting Starting." + str(randomValue))
        utility.save_image(
            utility.make_whitemask_image(part_change_image, masking_coord),
            tempFile)
        change_image = image_processing.inpainting(part_change_image, tempFile)
        part_change_image = image_processing.add_up_image(
            part_change_image, change_image, masking_coord, width, height)
        now_dest_color = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    else:
        utility.logging("Image Inpainting Do not proceed. : " +
                        str(randomValue))
        # If not earse, recoloring.
        for j in range(MAX_CHANGE_COLOR):
            change_image = styler.change_dest_color(inputFile, next_file_name, now_dest_color[j], divided_class, resized_class_total,\
             [now_input_sample[j]], save_flag=False, ratio=ratio)
            part_change_image = image_processing.add_up_image(
                part_change_image, change_image,
                resized_class_total[input_sample.index(now_input_sample[j])],
                width, height)
    return part_change_image, now_dest_color