Example #1
def set_color_with_image(input_file,
                         color_file,
                         mask_map,
                         decrease_ratio=(0.1, 0.1)):
    source = utility.read_image(input_file)
    source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB)
    (h, w, _) = source.shape
    source = cv2.resize(source,
                        None,
                        fx=decrease_ratio[0],
                        fy=decrease_ratio[1],
                        interpolation=cv2.INTER_AREA)

    target = utility.read_image(color_file)
    target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB)
    (h, w, _) = target.shape
    target = cv2.resize(target,
                        None,
                        fx=decrease_ratio[0],
                        fy=decrease_ratio[1],
                        interpolation=cv2.INTER_AREA)

    s_mean, s_std = image_processing.get_mean_and_std(source)
    t_mean, t_std = image_processing.get_mean_and_std(target)

    # Adjust the output color using the input's mean and standard deviation.
    height, width, channel = source.shape
    for h in range(height):
        for w in range(width):
            for c in range(channel):
                x = source[h, w, c]
                x = ((x - s_mean[c]) * (t_std[c] / s_std[c])) + t_mean[c]

                source[h, w, c] = utility.check_bound(round(x))

    original_image = utility.read_image(input_file)
    (h, w, _) = original_image.shape
    original_image = cv2.resize(original_image,
                                None,
                                fx=decrease_ratio[0],
                                fy=decrease_ratio[1],
                                interpolation=cv2.INTER_AREA)

    all_class_total = []
    if mask_map is None:
        for h in range(len(original_image)):
            for w in range(len(original_image[0])):
                all_class_total.append((w, h))
    else:
        for h in range(len(mask_map)):
            for w in range(len(mask_map[0])):
                if mask_map[h][w]:
                    all_class_total.append((w, h))

    source = cv2.cvtColor(source, cv2.COLOR_LAB2BGR)

    part_change_image = image_processing.add_up_image(original_image, source,
                                                      all_class_total, width,
                                                      height)
    return part_change_image
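The triple loop above is a per-channel, Reinhard-style statistics transfer in LAB space. A minimal vectorized sketch of the same step, assuming utility.check_bound simply clamps values to [0, 255]:

import numpy as np

def transfer_statistics(source_lab, s_mean, s_std, t_mean, t_std):
    # Vectorized form of the per-pixel loop above: shift each LAB channel of the
    # source to the target's mean and standard deviation.
    # np.clip stands in for utility.check_bound (assumed to clamp to [0, 255]).
    src = source_lab.astype(np.float32)
    out = (src - np.asarray(s_mean)) * (np.asarray(t_std) / np.asarray(s_std)) + np.asarray(t_mean)
    return np.clip(np.round(out), 0, 255).astype(np.uint8)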
Example #2
def gather_data(pipe):
    saved_images = []
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client_socket.connect(("localhost", 8080))
    print("Connected")
    client_socket.send('CONSUMER,MOTOR_DATA'.encode())
    print("Registered")
    control_data = '[+0.00,+0.00]'.encode()
    cv2.namedWindow('Video feed')
    cv2.moveWindow('Video feed', 850, 20)

    while True:
        control_data = __maybe_update_control_data(client_socket, control_data)
        image = utility.read_image(pipe)
        if image is not None:
            grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            uncropped_median = np.median(grey_image)
            cropped_image = grey_image[70:240, 0:320]
            blurred_image = cv2.bilateralFilter(cropped_image, 7, 75, 75)
            canny_edge = utility.auto_canny(blurred_image, uncropped_median)
            output = cv2.resize(canny_edge, (160, 85))
            saved_images.append([control_data, output])
            
            display = np.copy(output)
            cv2.putText(display, str(control_data.decode()), FONT_POSITION, FONT, .5, FONT_COLOR)
            cv2.imshow('Video feed', display)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        pipe.stdout.flush()
    cv2.destroyAllWindows()
    saved_images = np.asarray(saved_images)
    np.savez('/home/aaron/Downloads/training_data.npz', saved_images)
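A minimal sketch of reading the saved archive back. np.savez stores a positional array under the default key 'arr_0', and allow_pickle=True is assumed to be needed because each row pairs a bytes control string with an image array:

import numpy as np

# Load the training pairs saved above; each row is [control_data (bytes), edge image].
data = np.load('/home/aaron/Downloads/training_data.npz', allow_pickle=True)
saved_images = data['arr_0']
labels = [row[0].decode() for row in saved_images]   # e.g. '[+0.00,+0.00]'
frames = [row[1] for row in saved_images]            # 85x160 Canny-edge crops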
Example #3
def change_area_color_multi(input_file,
                            output_file,
                            setting_color,
                            divided_class,
                            area,
                            a=5,
                            b=1,
                            change_style="median"):
    colored_image = []
    for i in range(len(area)):
        colored_image.append(
            set_color_with_color(input_file,
                                 setting_color[i],
                                 a=a,
                                 b=b,
                                 change_style=change_style))
    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape
    for i in range(len(area)):
        original_image = image_processing.add_up_image(original_image,
                                                       colored_image[i],
                                                       area[i], width, height)

    # Change each area's part with the colored image.
    return original_image
Example #4
def colorTransferToColor(inputFile, inputDataFile, outputFileName, destColor,
                         srcColor):
    '''
    Change the color of the specified region of inputFile (the parts similar to srcColor) to destColor.
    '''
    if utility.is_exist(inputDataFile):
        [divided_class, class_number, class_total, class_border] = \
        utility.load_result(inputDataFile)
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        divided_class, class_number, class_total, class_border, class_count, class_length, class_color, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    class_color = image_processing.get_class_color(
        utility.read_image(inputFile), class_total, class_count)

    destArea = styler.get_similar_color_area(
        divided_class, class_number, class_total, class_color, srcColor,
        240)  # Similar color threshold set to 240.
    part_change_image = styler.change_area_color(inputFile, outputFileName,
                                                 destColor, divided_class,
                                                 destArea)
    utility.save_image(part_change_image, outputFileName)
Example #5
def change_area_style(input_file, output_file, texture_file, area):
    stylized_image = set_style(input_file, texture_file)
    stylized_image = np.array((stylized_image * 255)[0], np.uint8)
    stylized_image = cv2.cvtColor(stylized_image, cv2.COLOR_BGR2RGB)

    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape

    # Change the given area's part with the stylized image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      stylized_image, area,
                                                      width, height)
    utility.save_image(part_change_image, output_file)
Example #6
def getPartChangedImage(inputFile,
                        outputFile,
                        str_tag,
                        coord,
                        rect_files,
                        selectedPreferenceImage,
                        i,
                        j,
                        ratio=(0.5, 0.5)):
    partChangedOutFile = utility.add_name(outputFile,
                                          "_changed_" + str(i) + str(j))
    original_image = utility.read_image(inputFile)
    resized_coord = utility.change_arrcoords(coord, ratio=ratio)
    recommand_furniture = []
    changed_log = []

    for k in range(len(str_tag)):
        if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
            inpaintingRandomValue = random.randint(0, 9)
            furniture_file = rect_files[k]
            # If user input exists, use it instead.
            if utility.is_exist(utility.get_userinput_bin(furniture_file)):
                furniture_data_file = utility.get_userinput_bin(furniture_file)
            else:
                furniture_data_file = utility.get_bin(furniture_file)
            styled_furniture, change_color = styleTransfer(
                furniture_file,
                furniture_data_file,
                selectedPreferenceImage,
                inpaintingRandomValue,
                ratio=ratio)
            original_image = image_processing.add_up_image_to(original_image, styled_furniture, \
             int(resized_coord[k][0]), int(resized_coord[k][1]), int(resized_coord[k][2]), int(resized_coord[k][3]))
            rec_furn = getRecommandFurnitureForImage(selectedPreferenceImage,
                                                     str_tag[k])
            if len(rec_furn) < 3:
                utility.logging(selectedPreferenceImage)
                utility.logging(str(rec_furn))
                recommand_furniture.append(["", "", ""])
            else:
                recommand_furniture.append(random.sample(rec_furn, 3))
            changed_log.append([resized_coord[k], change_color])

    out_res_file = utility.add_name(partChangedOutFile,
                                    "_result",
                                    extension=".bin")
    utility.save_result([changed_log, recommand_furniture], out_res_file)
    utility.save_image(original_image, partChangedOutFile)
    return partChangedOutFile, out_res_file
Example #7
def change_dest_texture(input_file, output_file, texture_file, divided_class,
                        class_total, touch_list):
    stylized_image = set_style(input_file, texture_file)
    stylized_image = np.array((stylized_image * 255)[0], np.uint8)
    stylized_image = cv2.cvtColor(stylized_image, cv2.COLOR_BGR2RGB)

    ret_class_total = utility.get_class_with_given_coord(
        class_total, touch_list)
    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape

    # Change ret_class_total's part with the stylized image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      stylized_image,
                                                      ret_class_total, width,
                                                      height)
    utility.save_image(part_change_image, output_file)
Example #8
def detect_wall_floor(file_name, model):
    # Simple segmentation for region separation.

    out = model.predict_segmentation(inp=file_name)
    (height, width, _) = utility.read_image(file_name).shape
    resized_out = utility.resize_arr(out, width, height)
    mp.set_start_method("spawn", force=True)
    args_list = [
        "modules/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml",
        file_name, 0.3,
        [
            "MODEL.WEIGHTS",
            "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
        ], "no_name"
    ]
    cfg = setup_cfg(args_list)
    masks, instance_number, width, height = get_segment_mask(file_name, cfg)
    mask = image_processing.get_total_instance_image(masks,
                                                     width,
                                                     height,
                                                     base=False)

    floor_class = []
    wall_class = []

    for w in range(width):
        if resized_out[0][w] not in wall_class:
            wall_class.append(resized_out[0][w])
        if resized_out[-1][w] not in floor_class:
            floor_class.append(resized_out[-1][w])

    wall_divided = np.zeros((height, width), dtype=np.uint8)
    for h in range(height):
        for w in range(width):
            if h < 2 / 3 * height and mask[h][w] and resized_out[h][
                    w] in wall_class:
                wall_divided[h][w] = 1
            elif h > 2 / 3 * height and mask[h][w] and resized_out[h][
                    w] in floor_class:
                wall_divided[h][w] = 2

    # wall_divided holds class labels 0/1/2: 0 is background, 1 is wall, 2 is floor.
    return wall_divided
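The returned map uses labels 0/1/2 for background, wall, and floor. A minimal inspection sketch, with file_name and model as in the signature above, arbitrary colors, and a hypothetical output filename:

import cv2
import numpy as np

# Color the 0/1/2 labels from detect_wall_floor for visual inspection.
labels = detect_wall_floor(file_name, model)
vis = np.zeros(labels.shape + (3,), dtype=np.uint8)
vis[labels == 1] = (0, 255, 0)   # wall
vis[labels == 2] = (255, 0, 0)   # floor
cv2.imwrite("wall_floor_labels.png", vis)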
Example #9
def change_area_color(input_file,
                      output_file,
                      setting_color,
                      divided_class,
                      area,
                      a=5,
                      b=1,
                      change_style="median"):
    colored_image = set_color_with_color(input_file,
                                         setting_color,
                                         a=a,
                                         b=b,
                                         change_style=change_style)

    original_image = utility.read_image(input_file)
    (height, width, _) = original_image.shape

    # Change the given area's part with the colored image.
    return image_processing.add_up_image(original_image, colored_image, area,
                                         width, height)
Example #10
def display_video(pipe):
    print('Displaying video feed...')
    cv2.namedWindow('Video feed')
    cv2.moveWindow('Video feed', 850, 20)

    while True:
        image = utility.read_image(pipe)
        if image is not None:
            grey_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            uncropped_median = np.median(grey_image)
            cropped_image = grey_image[70:240, 0:320]
            blurred_image = cv2.bilateralFilter(cropped_image, 7, 75, 75)
            canny_edge = utility.auto_canny(blurred_image, uncropped_median)
            combined_images = np.vstack(
                (image, cv2.cvtColor(canny_edge, cv2.COLOR_GRAY2RGB)))
            cv2.imshow('Video feed', combined_images)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        pipe.stdout.flush()

    cv2.destroyAllWindows()
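utility.auto_canny is not shown in these examples; a common median-based variant looks like the sketch below. This is an assumption about the usual technique, not necessarily this project's implementation:

import cv2

def auto_canny(image, median, sigma=0.33):
    # Derive Canny thresholds from the image median, as used in the calls above.
    lower = int(max(0, (1.0 - sigma) * median))
    upper = int(min(255, (1.0 + sigma) * median))
    return cv2.Canny(image, lower, upper)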
Example #11
def textureTransferArea(inputFile, inputDataFile, outputFileName, destTexture,
                        srcColor):
    '''
    Change the texture of the specified region of inputFile (areas with color similar to srcColor) to destTexture.
    '''
    if utility.is_exist(inputDataFile):
        [divided_class, class_number, class_total, _] = \
        utility.load_result(inputDataFile)
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        divided_class, class_number, class_total, _, class_count, _, class_color, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    class_color = image_processing.get_class_color(
        utility.read_image(inputFile), class_total, class_count)

    destArea = styler.get_similar_color_area(
        divided_class, class_number, class_total, class_color, srcColor,
        240)  # Similar color threshold set to 240.
    styler.change_area_style(inputFile, outputFileName, destTexture, destArea)
Example #12
def change_dest_color(input_file,
                      output_file,
                      setting_color,
                      divided_class,
                      class_total,
                      touch_list,
                      touch_hint=None,
                      a=5,
                      b=1,
                      change_style="median",
                      save_flag=True,
                      ratio=(1.0, 1.0)):
    colored_image = set_color_with_color(input_file,
                                         setting_color,
                                         a=a,
                                         b=b,
                                         change_style=change_style,
                                         ratio=ratio)

    if touch_hint is None:
        ret_class_total = utility.get_class_with_given_coord(
            class_total, touch_list)
    else:
        ret_class_total = class_total[touch_hint]
    original_image = utility.read_image(input_file)
    original_image = utility.resize_image(original_image, ratio=ratio)
    (height, width, _) = original_image.shape

    # Change ret_class_total's part with the colored image.
    part_change_image = image_processing.add_up_image(original_image,
                                                      colored_image,
                                                      ret_class_total, width,
                                                      height)
    if save_flag:
        utility.save_image(part_change_image, output_file)
    return part_change_image
Example #13
    def saveData(self):
        global nowIndex  # Index currently being added
        global divided_class  # Class number map
        global class_number  # The set of class numbers
        global class_total  # Total coords of each class
        global class_border  # Class border.

        img = cv2.imread(IMAGE_NAME)
        (height, width, _) = img.shape

        class_total, class_number, divided_class = mergeGroup(
            class_total, class_number, divided_class, nowIndex)
        utility.save_result(
            [divided_class, class_number, class_total, class_border],
            SEG_SAVE_NAME)
        class_count = [len(class_total[i]) for i in range(len(class_total))]
        class_color = image_processing.get_class_color(
            utility.read_image(IMAGE_NAME), class_total, class_count)
        dc_image = utility.divided_class_into_image(divided_class,
                                                    class_number, class_color,
                                                    width, height,
                                                    class_number)
        utility.save_image(dc_image, CHANGE_DIVIED)
        self.imageLabel.changePixmap(CHANGE_DIVIED)
Example #14
def get_divided_class(inputFile,
                      total=False,
                      clipLimit=16.0,
                      tileGridSize=(16, 16),
                      start=60,
                      diff=150,
                      delete_line_n=20,
                      border_n=6,
                      border_k=2,
                      merge_min_value=180,
                      sim_score=30,
                      out_bound_check=False,
                      merge_mode_color=False):
    '''
    Predict the masking image and get divided_class.
    '''
    if not total:
        try:
            largest_mask, largest_index, mask_map, (
                width, height) = get_segmented_image(inputFile)
        except RuntimeError:
            largest_index = -1
        # If Detectron failed to detect anything
        if largest_index == -1:
            largest_mask = utility.read_image(inputFile)
            (height, width, _) = largest_mask.shape
            mask_map = [[True for _ in range(width)] for _ in range(height)]
    else:
        largest_mask = utility.read_image(inputFile)
        (height, width, _) = largest_mask.shape
        mask_map = [[True for _ in range(width)] for _ in range(height)]

    # Get the contours from the cropped image and proceed.
    contours, _ = image_processing.get_contours(largest_mask,
                                                clipLimit=clipLimit,
                                                tileGridSize=tileGridSize,
                                                start=start,
                                                diff=diff)
    coords = matrix_processing.contours_to_coord(contours)

    # Delete small lines.
    coords = matrix_processing.delete_line_threshold(coords,
                                                     line_n=delete_line_n)
    cycle_list = []
    noncycle_list = []

    for c in coords:
        cycled, noncycled = matrix_processing.divide_cycle(c)
        if len(cycled) != 0:
            cycle_list += cycled
        if len(noncycled) != 0:
            noncycle_list += noncycled

    # Convert the cut contours into a True/False map and connect each to its nearest point.
    tf_map = utility.make_tf_map(noncycle_list, width, height)
    for nc in noncycle_list:
        print("Now proceed ", noncycle_list.index(nc), " Total ",
              len(noncycle_list))
        # Pick the points that will become the border.
        border_point = matrix_processing.find_border_k_tf_map(tf_map,
                                                              nc,
                                                              width,
                                                              height,
                                                              n=border_n,
                                                              k=border_k,
                                                              hard_check=False)
        for b in border_point:
            # Connect the border point to the nearest contour.
            matrix_processing.connect_nearest_point(tf_map, b, width, height,
                                                    nc)

    # Collect each divided area with DFS; tf_map turns from a true/false map into a map of numeric labels.
    divided_class, class_total, class_border, class_count, class_length = matrix_processing.get_image_into_divided_plate(
        tf_map, width, height)
    # Also assign the dividing lines themselves to their respective areas.
    matrix_processing.contours_to_divided_class(tf_map, divided_class,
                                                class_total, class_border,
                                                class_count, width, height)

    class_number = list(range(1, class_length + 1))
    # Merge small regions back into their surroundings, starting from small thresholds and slowly increasing.
    for min_value in range(30, merge_min_value, 30):
        class_number, class_total, class_border, class_count, class_length = \
        merge_small_size(divided_class, class_number, class_total, class_border, class_count, width, height, min_value=min_value)

    if out_bound_check:
        # Delete regions that extend outside the original segmentation mask.
        class_number, class_total, class_border, class_count, class_length = \
        out_mask_delete(mask_map, class_number, class_total, class_border, class_count, class_length, out_pixel_threshold=0)

    class_color = image_processing.get_class_color(largest_mask, class_total,
                                                   class_count)
    if merge_mode_color:
        # Also merge regions of similar color.
        class_number, class_total, class_border, class_count, class_length, class_color = \
        merge_same_color(divided_class, class_number, class_total, class_border, class_count, largest_mask, width, height, sim_score=sim_score)

    return divided_class, class_number, class_total, class_border, class_count, class_length, class_color, largest_mask, width, height
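A minimal usage sketch of the ten return values above; "room.jpg" is a hypothetical path, and total=True takes the branch above that skips the Detectron mask:

# Unpack the segmentation results for one image (illustrative call).
(divided_class, class_number, class_total, class_border,
 class_count, class_length, class_color,
 largest_mask, width, height) = get_divided_class("room.jpg", total=True)
print("Regions found:", class_length, "image size:", width, "x", height)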
Example #15
def styleTransfer(inputFile,
                  inputDataFile,
                  destFile,
                  inpaintingRandomValue,
                  ratio=(1.0, 1.0)):
    '''
    Randomly transform the color and texture of inputFile toward the color and texture of destFile.
    '''
    if utility.is_exist(inputDataFile):
        loadData = utility.load_result(inputDataFile)
        if len(loadData) == 5:
            # Newer Version of segmentation.
            [divided_class, class_number, class_total, _,
             largest_mask] = loadData
        else:
            [divided_class, class_number, class_total, _] = loadData
            largest_mask = None
        class_count = []
        for ct in class_total:
            class_count.append(len(ct))
    else:
        divided_class, class_number, class_total, _, class_count, _, class_color, _, _, _ = \
        segmentation.get_divided_class(inputFile)

    # Init Variables. - TODO : Change this part with largest mask.
    # largest_mask, _, _, (width, height) = segmentation.get_segmented_image(inputFile)
    # class_color = image_processing.get_class_color(utility.read_image(inputFile), class_total, class_count)
    img = utility.read_image(inputFile)
    (height, width, _) = img.shape

    file_extension = "." + inputFile.split(".")[1]
    file_base_name = inputFile.split(".")[0]

    resized_class_total = utility.changed_coords2d(class_total, ratio=ratio)
    # Remove duplicates
    temp_class_total = resized_class_total
    resized_class_total = []
    for tc in temp_class_total:
        if tc not in resized_class_total:
            resized_class_total.append(tc)

    input_sample = [
        resized_class_total[i][0] for i in range(len(resized_class_total))
    ]
    if len(input_sample) < MAX_CHANGE_COLOR:
        input_sample *= int(MAX_CHANGE_COLOR // len(input_sample)) + 1
    dest_color = image_processing.get_dominant_color(destFile, clusters=8)

    next_file_name = file_base_name + "_" + str(0) + file_extension
    now_input_sample = random.sample(input_sample, MAX_CHANGE_COLOR)
    now_dest_color = random.sample(dest_color, MAX_CHANGE_COLOR)
    part_change_image = utility.read_image(inputFile)
    part_change_image = utility.resize_image(part_change_image, ratio=ratio)
    randomValue = inpaintingRandomValue

    if randomValue < -1:
        # Image Inpainting
        masking_coord = []
        for ct in resized_class_total:
            masking_coord += ct
        tempFile = utility.add_name(next_file_name, "_temp")
        tempFile = config.RESEARCH_BASE_DIR + "/temp/" + tempFile.split(
            "/")[-1]

        utility.logging("Image Inpainting Starting." + str(randomValue))
        utility.save_image(
            utility.make_whitemask_image(part_change_image, masking_coord),
            tempFile)
        change_image = image_processing.inpainting(part_change_image, tempFile)
        part_change_image = image_processing.add_up_image(
            part_change_image, change_image, masking_coord, width, height)
        now_dest_color = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    else:
        utility.logging("Image Inpainting Do not proceed. : " +
                        str(randomValue))
        # If not erasing, recolor instead.
        for j in range(MAX_CHANGE_COLOR):
            change_image = styler.change_dest_color(inputFile, next_file_name, now_dest_color[j], divided_class, resized_class_total,\
             [now_input_sample[j]], save_flag=False, ratio=ratio)
            part_change_image = image_processing.add_up_image(
                part_change_image, change_image,
                resized_class_total[input_sample.index(now_input_sample[j])],
                width, height)
    return part_change_image, now_dest_color
Example #16
print('L2 error: %5.3e' % (np.linalg.norm(diff) / (n - 1)))

# Part 2: Finite difference to examine the adjoint gradient

import utility


def force(x, y):
    return 20 / pi * (np.exp(-20 * ((x - 0.3)**2 +
                                    (y - 0.3)**2)) + np.exp(-20 *
                                                            ((x - 0.3)**2 +
                                                             (y - 0.7)**2)))


manualSeed = 20180629
synthetic_u = utility.read_image('symmetry', n)
Sscale = np.linalg.norm(synthetic_u, ord=np.inf)
modelSolver = model.pde_adj_solver(n, Sscale, SNR, force, True)
true_solution = modelSolver.solve(synthetic_u)
observation, noise = utility.construct_ob(true_solution, Sscale, n, SNR,
                                          manualSeed, True)
u0 = np.random.normal(0, 1, (n - 1)**2)
#u0 = synthetic_u.copy()
p0, F0, duF0 = modelSolver.lnprob(u0, observation)

fnerror = np.zeros((6, 5))
for i in range(6):
    idx = np.random.randint(0, (n - 1)**2, 1)[0]
    mask = np.eye(1, (n - 1)**2, idx)[0]
    for j in range(5):
        u_new = u0 + 10**(-j - 2) * mask
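The excerpt cuts off inside the inner loop. Below is a generic finite-difference gradient check in the same spirit, shown only as an illustrative sketch: f and grad_f stand in for the objective F and adjoint gradient duF returned by modelSolver.lnprob, and this is not the original continuation.

import numpy as np

def fd_check(f, grad_f, u0, idx, eps):
    # Perturb a single component and compare the one-sided difference quotient
    # against the corresponding adjoint-gradient component.
    mask = np.eye(1, u0.size, idx)[0]
    fd = (f(u0 + eps * mask) - f(u0)) / eps
    return abs(fd - grad_f(u0)[idx])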
Example #17
def getStyleChangedImage(inputFile,
                         preferenceImages,
                         od_model,
                         baseLight=[255, 255, 255],
                         changeLight=[178, 220, 240]):
    '''
    Input colors are BGR ([178, 220, 240] is an orange light).
    Even four preferenceImages are enough.
    '''
    if len(preferenceImages) <= 2:
        preferenceImages = preferenceImages + preferenceImages
    print(preferenceImages)
    inputBaseFile, preferenceBaseFile = utility.file_basify(
        inputFile, preferenceImages)

    now = time.time()
    detection_model = pspnet_50_ADE_20K()
    outputFile = utility.get_add_dir(inputFile, "temp")

    # Object Detect & Segmentation
    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = getODandSegment(inputBaseFile, od_model)

    (imgHeight, imgWidth, _) = utility.read_image(inputFile).shape
    if imgWidth > destSize[0] and imgHeight > destSize[1]:
        ratio = (destSize[0] / imgWidth, destSize[1] / imgHeight)
    else:
        ratio = (1, 1)
    print("Loading Finished")

    temp = time.time()
    print("Loading Time : ", temp - now)

    # Wall Detection with input image.
    wall_divided = segmentation.detect_wall_floor(inputFile, detection_model)
    wall_divided = utility.resize_2darr(wall_divided, ratio=ratio)
    wall_total, wall_number = matrix_processing.divided_class_into_class_total(
        wall_divided)
    print("Wall Divided.")

    # Get the preference images' data.
    preferWallColor = []
    preferFloorColor = []
    selectedPreferenceImages = []
    [files, domColors, wallColors, floorColors] = utility.load_result(
        config.RESEARCH_BASE_FILE
    )  # Each files` dom color, wall color, floor color will be saved.
    baseNameFiles = [os.path.basename(files[f]) for f in range(len(files))]

    print("Wall Color start.")
    indx = list(range(0, len(preferenceBaseFile)))
    random.shuffle(indx)
    # Select two of the colors above for preferWallColor and preferFloorColor.
    for i in range(MAX_WALL_IMAGE):
        ind = indx[i]
        preferImage = preferenceBaseFile[ind]
        loadIndex = baseNameFiles.index(os.path.basename(
            preferImage))  # We do only compare with base name.
        preferWallColor.append(wallColors[loadIndex])
        preferFloorColor.append(floorColors[loadIndex])
        selectedPreferenceImages.append(files[loadIndex])
    print("Wall Colored Selected.")

    # Change wall & floor
    wfColorChangeImage = []
    for i in range(MAX_WALL_IMAGE):
        wfOutputFile = changeWallFloor(inputFile,
                                       outputFile,
                                       wall_divided,
                                       wall_total,
                                       wall_number,
                                       i,
                                       preferWallColor,
                                       preferFloorColor,
                                       ratio=ratio)
        wfColorChangeImage.append(wfOutputFile)
    print("Wall Color Changed")

    temp = time.time()
    print("Wall Coloring Time : ", temp - now)

    # Change Object ( Table and Chair )
    partChangedFiles = []
    procs = []
    recommandFurnitureList = []
    changeFurnitureLocation = []
    changeFurnitureColor = []

    for i in range(MAX_WALL_IMAGE):
        for j in range(MAX_PART_CHANGE_IMAGE):
            # Save the arguments to pass, then run them in a subprocess for speed.
            argvFile = utility.add_name(
                config.SUBPROCESS_ARGV,
                "_" + str(MAX_PART_CHANGE_IMAGE * i + j))
            utility.save_result([
                selectedPreferenceImages, wfColorChangeImage, outputFile,
                str_tag, coord, rect_files, i, j, ratio
            ], argvFile)

            # The subprocess needs to compute with the given ratio.
            proc = subprocess.Popen(
                ['python', 'getPartChangedImage.py', argvFile],
                shell=True,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                encoding="cp949")
            procs.append(proc)

    for i in range(len(procs)):
        out = procs[i].communicate()[0]
        out = str(out).split("\n")
        tout = []
        for line in out:
            if len(line) > 0:
                tout.append(line)
        [changed_log, recommand_furniture] = utility.load_result(tout[-1])
        partChangedFiles.append(tout[-2])
        recommandFurnitureList.append(recommand_furniture)
        for k in range(len(changed_log)):
            changeFurnitureLocation.append(changed_log[k][0])
            changeFurnitureColor.append(changed_log[k][1])

    print("Part Changed Finished")
    # Add some plant.
    # partChangedFiles = print() # Image number will not be changed.

    temp = time.time()
    print("Part Changing Time : ", temp - now)

    lightList = []
    # Change Light
    for i in range(MAX_OUT_IMAGE):
        print("Now Proceed : ", i)
        files = utility.add_name(partChangedFiles[i], "_lighter")
        if random.randint(1, MAX_OUT_IMAGE) > 4:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, changeLight)
            lightList.append(changeLight)
        else:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, baseLight)
            lightList.append(baseLight)
        utility.save_image(changed_file, files)
        partChangedFiles[i] = files
    # partChangedFiles now holds the changed files.
    temp = time.time()
    print("Total Time : ", temp - now)
    changeLog = makeChangeInfor(preferWallColor, preferFloorColor, [preferenceImages[indx[0]], preferenceImages[indx[1]]], partChangedFiles, lightList, changeFurnitureLocation, changeFurnitureColor, \
     recommandFurnitureList, [])

    resultDictionary = utility.save_log_dictionary(inputFile, partChangedFiles,
                                                   changeLog)
    utility.logging(str(resultDictionary))
    with open(FILE_OUTQUEUE, 'a') as f:
        f.write(str(resultDictionary) + "\n")
Example #18
def getStyleChangedImage_past(inputFile, preferenceImages, tempdata="temp"):
    '''
    Produce preference-styled output images for inputFile.
    Print the name of each converted image, and finally print how many images will be returned.
    1. Turn the furniture found by object detection into segmentations.
    2. Take the color / texture / segment of the furniture from interiors the user liked, plus standalone furniture the user is likely to like.
    3. Place them appropriately onto the furniture of the original interior.
        3-1. Change the texture and color of the original furniture (for every sofa and chair) -> 40%
        3-2. Replace the original furniture with furniture the user would like (for every sofa and chair, with a color filter applied) -> 40%
        3-3. Apply only a color filter to the original interior (the 0.2 portion above).
    '''
    if "\\" in inputFile:
        dirs = inputFile.split("\\")
        inputFile = ""
        for d in dirs[:-1]:
            inputFile += d + "/"
        inputFile += dirs[-1]
    outputFile = utility.get_add_dir(inputFile, tempdata)
    # fav_furniture_list = "Image/InteriorImage/test_furniture/sofa"
    # fav_furniture_list = utility.get_filenames(fav_furniture_list)
    # Output the existing data.
    base_name = inputFile.split("/")[-1].split("Z")[-1]
    researched_files = utility.get_only_jpg_files(
        "C:/workspace/IOU-Backend/util/IOU-ML/Image/InteriorImage/test")
    checked_file = ""
    for rf in researched_files:
        if base_name in rf:
            checked_file = rf

    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = utility.get_od_data(checked_file)
    '''
	segment_data = []
	for f in rect_files:
		segment_data.append(utility.get_segment_data(f))
	fav_furniture_seg_data = []
	for f in fav_furniture_list:
		fav_furniture_seg_data.append(utility.get_segment_data(f))
	'''
    returnImageList = []
    for i in range(MAX_OUT_IMAGE):
        now_index = random.randint(0, len(preferenceImages) - 1)
        saveOutputFile = utility.add_name(outputFile, "_" + str(i))
        if i < MAX_OUT_IMAGE * 0.2:
            original_image = utility.read_image(inputFile)
            decrese_ratio = (1.0, 1.0)
            if original_image.shape[0] * original_image.shape[1] > 1200 * 960:
                decrese_ratio = (0.3, 0.3)
            changed_image = styler.set_color_with_image(
                inputFile, preferenceImages[now_index], mask_map=None)
            utility.save_image(changed_image, saveOutputFile)
        elif i < MAX_OUT_IMAGE * 1.0:
            original_image = utility.read_image(inputFile)
            # If the image exceeds a certain size, adjust the decrease ratio to shrink it to about 1/3.
            decrese_ratio = (1.0, 1.0)
            if original_image.shape[0] * original_image.shape[1] > 1200 * 960:
                decrese_ratio = (0.3, 0.3)
                original_image = cv2.resize(original_image,
                                            None,
                                            fx=decrese_ratio[0],
                                            fy=decrese_ratio[1],
                                            interpolation=cv2.INTER_AREA)
            for k in range(len(str_tag)):
                if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
                    styled_furniture = styler.set_color_with_image(
                        "C:/workspace/IOU-Backend/util/IOU-ML/" +
                        rect_files[k], preferenceImages[now_index], None,
                        decrese_ratio)
                    original_image = image_processing.add_up_image_to(original_image, styled_furniture, \
                     int(coord[k][0] * decrese_ratio[0]), int(coord[k][1] * decrese_ratio[0]), int(coord[k][2] * decrese_ratio[0]), int(coord[k][3] * decrese_ratio[0]))
            utility.save_image(original_image, saveOutputFile)
        else:
            original_image = utility.read_image(inputFile)
            for k in range(len(str_tag)):
                if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
                    stylized_image = styler.set_style(
                        "C:/workspace/IOU-Backend/util/IOU-ML/" +
                        rect_files[k], preferenceImages[now_index])
                    stylized_image = np.array((stylized_image * 255)[0],
                                              np.uint8)
                    styled_furniture = cv2.cvtColor(stylized_image,
                                                    cv2.COLOR_BGR2RGB)
                    original_image = image_processing.add_up_image_to(
                        original_image, styled_furniture, int(coord[k][0]),
                        int(coord[k][1]), int(coord[k][2]), int(coord[k][3]))
            utility.save_image(original_image, saveOutputFile)
        returnImageList.append(saveOutputFile)
    returnImageList.append(MAX_OUT_IMAGE)
    return returnImageList