Example #1
def saveParameters(fileDir):
    # Model name '1' refers to the dataset's folder "1".
    model_name = '1'
    detection_model = objectDetector.load_model(model_name)
    # fileDir is a directory; collect the files inside it.
    files = utility.get_filenames(fileDir)
    fileNames = []
    domColors = []
    wallColors = []
    floorColors = []

    for f in files:
        if "." not in f:
            continue
        print("Now proceeding ", f, " [ ", files.index(f), " ]")

        coord, str_tag, number_tag, score = objectDetector.inference(
            detection_model, f)

        # Build the save file name.
        save_file_name = utility.add_name(f, "_od", extension="bin")
        dirs = save_file_name.split("/")

        save_image_name = ""
        for d in dirs[0:-1]:
            save_image_name += d + "/"
        save_image_name += f.split("/")[-1].split(".")[0] + "/"

        utility.make_dir(save_image_name)

        rect_files = []
        additional_infor = []

        for i in range(len(str_tag)):
            additional_infor.append(-1)
            rect_image = image_processing.get_rect_image(
                f, int(coord[i][0]), int(coord[i][1]), int(coord[i][2]),
                int(coord[i][3]))
            rect_image_name = save_image_name + f.split("/")[-1]
            rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
            rect_files.append(rect_image_name)
            utility.save_image(rect_image, rect_image_name)

        dom_color = image_processing.get_dominant_color(f)
        n_color = utility.get_remarkable_color_n(dom_color, MAX_COLOR_LENGTH)
        fileNames.append(os.path.basename(f))
        domColors.append(n_color)
        wallColors.append([])
        floorColors.append([])
        utility.save_result([
            coord, str_tag, number_tag, score, rect_files, additional_infor,
            n_color
        ], save_file_name)

    utility.save_result([files, domColors, wallColors, floorColors],
                        config.RESEARCH_BASE_FILE)
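
A minimal usage sketch, not part of the original example: the path is hypothetical, and it assumes utility.load_result mirrors utility.save_result as used in Example #10.

# Hypothetical call: run detection on every image under ./dataset/images and
# write per-image "_od.bin" results plus the aggregated research file.
saveParameters("./dataset/images")
files, domColors, wallColors, floorColors = utility.load_result(
    config.RESEARCH_BASE_FILE)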
Example #2
def segment(inputFile, outputFile, outputDataFile, total=True):
    divided_class, class_number, class_total, class_border, _, _, class_color, largest_mask, width, height = \
    segmentation.get_divided_class(inputFile, total=total)
    utility.save_result(
        [divided_class, class_number, class_total, class_border, largest_mask],
        outputDataFile)

    dc_image = utility.divided_class_into_image(divided_class, class_number,
                                                class_color, width, height,
                                                class_number)
    if outputFile is not None:
        utility.save_image(dc_image, outputFile)
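
A minimal usage sketch with hypothetical file names; this variant returns nothing and only writes the two output files.

# Hypothetical call: segment room.jpg, saving the colored class image and the raw data.
segment("room.jpg", "room_segmented.jpg", "room_segdata.bin", total=True)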
Example #3
def getPartChangedImage(inputFile,
                        outputFile,
                        str_tag,
                        coord,
                        rect_files,
                        selectedPreferenceImage,
                        i,
                        j,
                        ratio=(0.5, 0.5)):
    partChangedOutFile = utility.add_name(outputFile,
                                          "_changed_" + str(i) + str(j))
    original_image = utility.read_image(inputFile)
    resized_coord = utility.change_arrcoords(coord, ratio=ratio)
    recommand_furniture = []
    changed_log = []

    for k in range(len(str_tag)):
        if (str_tag[k] == "sofa" or str_tag[k] == "chair"):
            inpaintingRandomValue = random.randint(0, 9)
            furniture_file = rect_files[k]
            # If a user-input file exists, use it instead.
            if utility.is_exist(utility.get_userinput_bin(furniture_file)):
                furniture_data_file = utility.get_userinput_bin(furniture_file)
            else:
                furniture_data_file = utility.get_bin(furniture_file)
            styled_furniture, change_color = styleTransfer(
                furniture_file,
                furniture_data_file,
                selectedPreferenceImage,
                inpaintingRandomValue,
                ratio=ratio)
            original_image = image_processing.add_up_image_to(original_image, styled_furniture, \
             int(resized_coord[k][0]), int(resized_coord[k][1]), int(resized_coord[k][2]), int(resized_coord[k][3]))
            rec_furn = getRecommandFurnitureForImage(selectedPreferenceImage,
                                                     str_tag[k])
            if len(rec_furn) < 3:
                utility.logging(selectedPreferenceImage)
                utility.logging(str(rec_furn))
                recommand_furniture.append(["", "", ""])
            else:
                recommand_furniture.append(random.sample(rec_furn, 3))
            changed_log.append([resized_coord[k], change_color])

    out_res_file = utility.add_name(partChangedOutFile,
                                    "_result",
                                    extension=".bin")
    utility.save_result([changed_log, recommand_furniture], out_res_file)
    utility.save_image(original_image, partChangedOutFile)
    return partChangedOutFile, out_res_file
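
A minimal usage sketch; it assumes str_tag, coord and rect_files come from an earlier object-detection pass such as saveParameter in Example #6, and the file names are hypothetical.

# Hypothetical call: restyle the detected sofa/chair crops of room.jpg with the
# preference image and composite them back into the (i=0, j=1) output variant.
changed_file, result_bin = getPartChangedImage(
    "room.jpg", "room_out.jpg", str_tag, coord, rect_files,
    "preference.jpg", 0, 1, ratio=(0.5, 0.5))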
Example #4
def get_color_system(directory):
    '''
	Survey the color systems of every image inside directory, using remarkable colors.
	The results are pickled and saved under directory.
	'''
    fileNames = utility.get_filenames(directory)
    colors = []
    baseName = []

    for f in fileNames:
        print(f, " now processing .. ", str(fileNames.index(f)))
        baseName.append(utility.get_base_name(f))
        colors.append(getDominantColor(f))

    utility.save_result([baseName, colors],
                        RESEARCH_BASE_DIR + "/" + COLOR_SYSTEM_FILE)
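
A minimal usage sketch with a hypothetical directory; it assumes RESEARCH_BASE_DIR and COLOR_SYSTEM_FILE are module-level constants as the code above implies, and that utility.load_result mirrors utility.save_result.

# Hypothetical call: pickle the dominant colors of every image in ./interiors.
get_color_system("./interiors")
baseName, colors = utility.load_result(RESEARCH_BASE_DIR + "/" + COLOR_SYSTEM_FILE)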
Example #5
def segment(inputFile, outputFile, outputDataFile, total=False):
    '''
	Segment the given input file and write the output.
	The output is the collection of segmented image pieces.
	'''
    divided_class, class_number, class_total, class_border, _, _, class_color, largest_mask, width, height = \
    segmentation.get_divided_class(inputFile, total=total)
    utility.save_result(
        [divided_class, class_number, class_total, class_border, largest_mask],
        outputDataFile)

    dc_image = utility.divided_class_into_image(divided_class, class_number,
                                                class_color, width, height,
                                                class_number)
    if outputFile is not None:
        utility.save_image(dc_image, outputFile)
    return divided_class, class_number, class_total, class_border
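
Unlike Example #2, this variant also returns the segmentation; a minimal sketch with hypothetical file names, passing outputFile=None to skip saving the visualization.

# Hypothetical call: keep the segmentation in memory for further processing.
divided_class, class_number, class_total, class_border = segment(
    "room.jpg", None, "room_segdata.bin")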
Example #6
def saveParameter(fileName, detection_model):
    coord, str_tag, number_tag, score = objectDetector.inference(
        detection_model, fileName)

    # Build the save file name.
    save_file_name = config.RESEARCH_BASE_DIR + "/" + os.path.basename(
        utility.get_od_bin(fileName))
    dirs = save_file_name.split("/")

    save_image_name = ""
    for d in dirs[0:-1]:
        save_image_name += d + "/"
    save_image_name += fileName.split("/")[-1].split(".")[0] + "/"

    utility.make_dir(save_image_name)

    rect_files = []
    additional_infor = []

    for i in range(len(str_tag)):
        additional_infor.append(-1)
        rect_image = image_processing.get_rect_image(fileName,
                                                     int(coord[i][0]),
                                                     int(coord[i][1]),
                                                     int(coord[i][2]),
                                                     int(coord[i][3]))
        rect_image_name = save_image_name + fileName.split("/")[-1]
        rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
        rect_files.append(rect_image_name)
        utility.save_image(rect_image, rect_image_name)

    dom_color = image_processing.get_dominant_color(fileName)
    n_color = utility.get_remarkable_color_n(dom_color, MAX_COLOR_LENGTH)
    utility.save_result([
        coord, str_tag, number_tag, score, rect_files, additional_infor,
        n_color
    ], save_file_name)
    return [
        coord, str_tag, number_tag, score, rect_files, additional_infor,
        n_color
    ]
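
A minimal usage sketch; it assumes objectDetector.load_model takes a model-folder name as in Example #1, and the image path is hypothetical.

# Hypothetical call: detect objects in room.jpg with model folder '1' and
# persist the per-object crops plus the detection results.
od_model = objectDetector.load_model('1')
coord, str_tag, number_tag, score, rect_files, additional_infor, n_color = \
    saveParameter("room.jpg", od_model)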
Example #7
    def saveData(self):
        global nowIndex  # Index currently being added
        global divided_class  # Class number map
        global class_number  # The set of class numbers
        global class_total  # Total coordinates of each class
        global class_border  # Class borders.

        img = cv2.imread(IMAGE_NAME)
        (height, width, _) = img.shape

        class_total, class_number, divided_class = mergeGroup(
            class_total, class_number, divided_class, nowIndex)
        utility.save_result(
            [divided_class, class_number, class_total, class_border],
            SEG_SAVE_NAME)
        class_count = [len(class_total[i]) for i in range(len(class_total))]
        class_color = image_processing.get_class_color(
            utility.read_image(IMAGE_NAME), class_total, class_count)
        dc_image = utility.divided_class_into_image(divided_class,
                                                    class_number, class_color,
                                                    width, height,
                                                    class_number)
        utility.save_image(dc_image, CHANGE_DIVIED)
        self.imageLabel.changePixmap(CHANGE_DIVIED)
Example #8
label_min, label_max, label_median, label_var = get_label_args(label)
plot_label_distribution(label)

# Convert dataframe to array and normalize the data
train_array, label_array, test_array = scale_data(train, test, label)

# Train and predict

# Simple 3-layer neural net
print("\n[Simple 3-layer neural net]\n")
NeuralNet1 = load_model_if_exists(NeuralNet1, SIMPLE_3LAYERS_FILENAME)
NeuralNet1.fit(train_array, label_array)
NeuralNet1.save_params_to(SIMPLE_3LAYERS_FILENAME)
submission, prediction, score = predict(NeuralNet1, test_array, IMAGE_SIZE,
                                        label.columns.values)
save_result(submission)
plot_lasagne_learning_curves(NeuralNet1)
plot_images(test, prediction)

# Count running time
endtime = datetime.datetime.now()
print("Simple 3-layers neural nets score: %f, spend %d seconds" %
      (score, (endtime - starttime).seconds))

# LeNet5-style convolutional neural net
print("\n[LeNet5-style convolutional neural net]\n")
train_array, test_array = reshape_data(train_array, test_array, IMAGE_SIZE)
NeuralNet2 = load_model_if_exists(NeuralNet2, LENET5_CNN_FILENAME)
NeuralNet2.fit(train_array, label_array)
NeuralNet2.save_params_to(LENET5_CNN_FILENAME)
submission, prediction, score = predict(NeuralNet2, test_array, IMAGE_SIZE,
                                        label.columns.values)
Example #9
def eval(args):
    # parameters from arguments
    model_name = args.model
    pretrained_model = args.pretrained_model
    save_npy = args.save_npy
    image_shape = [int(m) for m in args.image_shape.split(",")]

    assert model_name in model_list, "{} is not in lists: {}".format(
        args.model, model_list)

    image = fluid.data(name='image',
                       shape=[None] + image_shape,
                       dtype='float32')
    group = fluid.data(name='group', shape=[None, 1], dtype='int64')
    label = fluid.data(name='label', shape=[None, 1], dtype='int64')
    seq_id = fluid.data(name='seq_id', shape=[None, 1], dtype='int64')

    test_loader = fluid.io.DataLoader.from_generator(
        feed_list=[image, group, label, seq_id],
        capacity=64,
        use_double_buffer=True,
        iterable=True)

    # model definition
    model = models.__dict__[model_name]()
    out = model.net(input=image, embedding_size=args.embedding_size)

    test_program = fluid.default_main_program().clone(for_test=True)

    place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    if pretrained_model:
        fluid.load(program=test_program,
                   model_path=pretrained_model,
                   executor=exe)

    test_loader.set_sample_generator(reader.test(args),
                                     batch_size=args.batch_size,
                                     drop_last=False,
                                     places=place)

    fetch_list = [out.name]

    f, l, g, s = [], [], [], []
    for batch_id, data in enumerate(test_loader()):
        t1 = time.time()
        [feas] = exe.run(test_program, fetch_list=fetch_list, feed=data)
        group = np.asarray(data[0]['group'])
        label = np.asarray(data[0]['label'])
        seq_id = np.asarray(data[0]['seq_id'])
        f.append(feas)
        g.append(np.squeeze(group))
        l.append(np.squeeze(label))
        s.append(np.squeeze(seq_id))

        t2 = time.time()
        period = t2 - t1
        if batch_id % 2 == 0:
            print("[%s] testbatch %d, time %2.2f sec" % \
                    (fmt_time(), batch_id, period))

    f = np.vstack(f)
    g = np.hstack(g)
    l = np.hstack(l)
    s = np.hstack(s)

    if save_npy:
        np.save(args.output_path + '_' + model_name, f)
        np.save(args.output_path + '_' + 'g', g)
        np.save(args.output_path + '_' + 'l', l)
        np.save(args.output_path + '_' + 's', s)

    res_final, res_score = post_process(f, g, l, s, args.thresh, k=args.top_k)

    output_path = args.output_path
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    print("Saving result to {}".format(output_path))
    save_result(res_final, res_score, output_path, args.detect_path)
    sys.stdout.flush()
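
eval() only reads attributes of args; below is a hedged argparse sketch covering exactly those attributes. The flag names match the code above, but the defaults are illustrative assumptions, not the project's actual values.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='ResNet50')  # must appear in model_list
parser.add_argument('--pretrained_model', type=str, default=None)
parser.add_argument('--save_npy', action='store_true')
parser.add_argument('--image_shape', type=str, default='3,224,224')
parser.add_argument('--embedding_size', type=int, default=256)
parser.add_argument('--use_gpu', action='store_true')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--thresh', type=float, default=0.5)
parser.add_argument('--top_k', type=int, default=10)
parser.add_argument('--output_path', type=str, default='./output')
parser.add_argument('--detect_path', type=str, default=None)
eval(parser.parse_args())  # assumed defaults; adjust to the real evaluation setup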
Example #10
def getStyleChangedImage(inputFile,
                         preferenceImages,
                         od_model,
                         baseLight=[255, 255, 255],
                         changeLight=[178, 220, 240]):
    '''
	Input colors are BGR ( [178, 220, 240] is an orange light ).
	Four preferenceImages are already enough.
	'''
    if len(preferenceImages) <= 2:
        preferenceImages = preferenceImages + preferenceImages
    print(preferenceImages)
    inputBaseFile, preferenceBaseFile = utility.file_basify(
        inputFile, preferenceImages)

    now = time.time()
    detection_model = pspnet_50_ADE_20K()
    outputFile = utility.get_add_dir(inputFile, "temp")

    # Object Detect & Segmentation
    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = getODandSegment(inputBaseFile, od_model)

    (imgHeight, imgWidth, _) = utility.read_image(inputFile).shape
    if imgWidth > destSize[0] and imgHeight > destSize[1]:
        ratio = (destSize[0] / imgWidth, destSize[1] / imgHeight)
    else:
        ratio = (1, 1)
    print("Loading Finished")

    temp = time.time()
    print("Loading Time : ", temp - now)

    # Wall Detection with input image.
    wall_divided = segmentation.detect_wall_floor(inputFile, detection_model)
    wall_divided = utility.resize_2darr(wall_divided, ratio=ratio)
    wall_total, wall_number = matrix_processing.divided_class_into_class_total(
        wall_divided)
    print("Wall Divided.")

    # Get preference image`s data.
    preferWallColor = []
    preferFloorColor = []
    selectedPreferenceImages = []
    [files, domColors, wallColors, floorColors] = utility.load_result(
        config.RESEARCH_BASE_FILE
    )  # Each file's dominant color, wall color, and floor color were saved here.
    baseNameFiles = [os.path.basename(files[f]) for f in range(len(files))]

    print("Wall Color start.")
    indx = list(range(0, len(preferenceBaseFile)))
    random.shuffle(indx)
    # Select two of the colors above into preferWallColor and preferFloorColor.
    for i in range(MAX_WALL_IMAGE):
        ind = indx[i]
        preferImage = preferenceBaseFile[ind]
        loadIndex = baseNameFiles.index(os.path.basename(
            preferImage))  # We do only compare with base name.
        preferWallColor.append(wallColors[loadIndex])
        preferFloorColor.append(floorColors[loadIndex])
        selectedPreferenceImages.append(files[loadIndex])
    print("Wall Colored Selected.")

    # Change wall & floor
    wfColorChangeImage = []
    for i in range(MAX_WALL_IMAGE):
        wfOutputFile = changeWallFloor(inputFile,
                                       outputFile,
                                       wall_divided,
                                       wall_total,
                                       wall_number,
                                       i,
                                       preferWallColor,
                                       preferFloorColor,
                                       ratio=ratio)
        wfColorChangeImage.append(wfOutputFile)
    print("Wall Color Changed")

    temp = time.time()
    print("Wall Coloring Time : ", temp - now)

    # Change Object ( Table and Chair )
    partChangedFiles = []
    procs = []
    recommandFurnitureList = []
    changeFurnitureLocation = []
    changeFurnitureColor = []

    for i in range(MAX_WALL_IMAGE):
        for j in range(MAX_PART_CHANGE_IMAGE):
            # Save the arguments to pass, then launch a subprocess to speed things up.
            argvFile = utility.add_name(
                config.SUBPROCESS_ARGV,
                "_" + str(MAX_PART_CHANGE_IMAGE * i + j))
            utility.save_result([
                selectedPreferenceImages, wfColorChangeImage, outputFile,
                str_tag, coord, rect_files, i, j, ratio
            ], argvFile)

            # The subprocess needs to compute with the given ratio.
            proc = subprocess.Popen(
                ['python', 'getPartChangedImage.py', argvFile],
                shell=True,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                encoding="cp949")
            procs.append(proc)

    for i in range(len(procs)):
        out = procs[i].communicate()[0]
        out = str(out).split("\n")
        tout = []
        for i in range(len(out)):
            if len(out[i]) > 0:
                tout.append(out[i])
        [changed_log, recommand_furniture] = utility.load_result(tout[-1])
        partChangedFiles.append(tout[-2])
        recommandFurnitureList.append(recommand_furniture)
        for i in range(len(changed_log)):
            changeFurnitureLocation.append(changed_log[i][0])
            changeFurnitureColor.append(changed_log[i][1])

    print("Part Changed Finished")
    # Add some plant.
    # partChangedFiles = print() # Image number will not be changed.

    temp = time.time()
    print("Part Changing Time : ", temp - now)

    lightList = []
    # Change Light
    for i in range(MAX_OUT_IMAGE):
        print("Now Proceed : ", i)
        files = utility.add_name(partChangedFiles[i], "_lighter")
        if random.randint(1, MAX_OUT_IMAGE) > 4:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, changeLight)
            lightList.append(changeLight)
        else:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, baseLight)
            lightList.append(baseLight)
        utility.save_image(changed_file, files)
        partChangedFiles[i] = files
    # partChangedFiles now holds the final changed files.
    temp = time.time()
    print("Total Time : ", temp - now)
    changeLog = makeChangeInfor(preferWallColor, preferFloorColor, [preferenceImages[indx[0]], preferenceImages[indx[1]]], partChangedFiles, lightList, changeFurnitureLocation, changeFurnitureColor, \
     recommandFurnitureList, [])

    resultDictionary = utility.save_log_dictionary(inputFile, partChangedFiles,
                                                   changeLog)
    utility.logging(str(resultDictionary))
    with open(FILE_OUTQUEUE, 'a') as f:
        f.write(str(resultDictionary) + "\n")
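
A minimal usage sketch with hypothetical file names; it assumes od_model is loaded as in Example #1 and that the preference images already have entries in config.RESEARCH_BASE_FILE.

# Hypothetical call: generate restyled variants of room.jpg guided by four
# preference images; the result dictionary is appended to FILE_OUTQUEUE.
od_model = objectDetector.load_model('1')
getStyleChangedImage("room.jpg",
                     ["pref_1.jpg", "pref_2.jpg", "pref_3.jpg", "pref_4.jpg"],
                     od_model)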
Example #11
def objectDetect(inputFile, outputFile):
    '''
	Run object detection on the furniture in inputFile and save the result to outputFile, in json form.
	Currently only bin files can be used for input/output.
	If a directory is given, outputFile is ignored.
	'''
    if "." not in inputFile:
        # inputFile is a directory.
        files = utility.get_filenames(inputFile)
        for f in files:
            if "." not in f:
                continue

            coord, str_tag, number_tag, score = objectDetector.inference(
                detection_model, f)
            # Build the save file name.
            save_file_name = utility.add_name(f, "_od", extension="bin")
            dirs = save_file_name.split("/")
            save_image_name = ""
            for d in dirs[0:-1]:
                save_image_name += d + "/"
            save_image_name += f.split("/")[-1].split(".")[0] + "/"
            utility.make_dir(save_image_name)
            rect_files = []

            additional_infor = []
            for i in range(len(str_tag)):
                additional_infor.append(-1)
                rect_image = image_processing.get_rect_image(
                    f, int(coord[i][0]), int(coord[i][1]), int(coord[i][2]),
                    int(coord[i][3]))
                rect_image_name = save_image_name + f.split("/")[-1]
                rect_image_name = utility.add_name(rect_image_name,
                                                   "_" + str(i))
                rect_files.append(rect_image_name)
                utility.save_image(rect_image, rect_image_name)
            utility.save_result([
                coord, str_tag, number_tag, score, rect_files, additional_infor
            ], save_file_name)

    else:
        coord, str_tag, number_tag, score = objectDetector.inference(
            detection_model, inputFile)
        # Build the save file name.
        save_file_name = utility.add_name(inputFile, "_od", extension="bin")
        dirs = save_file_name.split("/")
        save_image_name = ""
        for d in dirs[0:-1]:
            save_image_name += d + "/"
        save_image_name += inputFile.split("/")[-1].split(".")[0] + "/"
        utility.make_dir(save_image_name)
        rect_files = []
        additional_infor = []
        for i in range(len(str_tag)):
            additional_infor.append(-1)
            rect_image = image_processing.get_rect_image(
                inputFile, int(coord[i][0]), int(coord[i][1]),
                int(coord[i][2]), int(coord[i][3]))
            rect_image_name = save_image_name + inputFile.split("/")[-1]
            rect_image_name = utility.add_name(rect_image_name, "_" + str(i))
            rect_files.append(rect_image_name)
            utility.save_image(rect_image, rect_image_name)
        utility.save_result(
            [coord, str_tag, number_tag, score, rect_files, additional_infor],
            outputFile)
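
A minimal usage sketch with hypothetical paths; it assumes detection_model is a module-level model loaded elsewhere, as the function body above implies.

# Hypothetical calls: a single file writes to outputFile, while a directory
# writes one "_od.bin" file next to each image and ignores outputFile.
objectDetect("room.jpg", "room_od.bin")
objectDetect("./dataset/images", "")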