Example #1
import os
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = ""

from keras_segmentation.models.model_utils import transfer_weights
from keras_segmentation.pretrained import pspnet_50_ADE_20K
from keras_segmentation.models.pspnet import pspnet_50

pretrained_model = pspnet_50_ADE_20K()

keji_model1 = pspnet_50(n_classes=150)

transfer_weights(
    pretrained_model,
    keji_model1)  # transfer weights from pre-trained model to your model

keji_model1.train(train_images="../VGdata/images_prepped_train_png/",
                  train_annotations="../VGdata/annotations_prepped_train_png/",
                  checkpoints_path="./keji1check",
                  epochs=5)
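
A possible follow-up, and a sketch only: once training has written checkpoints under ./keji1check, the fine-tuned model can be reloaded for inference (the sample image path below is a placeholder).

from keras_segmentation.predict import model_from_checkpoint_path

# Reload the latest checkpoint written by the training run above.
reloaded = model_from_checkpoint_path("./keji1check")
out = reloaded.predict_segmentation(
    inp="../VGdata/images_prepped_train_png/some_image.png",  # placeholder path
    out_fname="keji1_out.png")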
Example #2
from keras_segmentation.pretrained import pspnet_50_ADE_20K, pspnet_101_cityscapes, pspnet_101_voc12
import tensorflow as tf

# Let TensorFlow grow GPU memory on demand instead of reserving it all at once.
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)

model = pspnet_50_ADE_20K()  # load the pretrained model trained on the ADE20K dataset

# model = pspnet_101_cityscapes() # load the pretrained model trained on the Cityscapes dataset

# model = pspnet_101_voc12() # load the pretrained model trained on Pascal VOC 2012 dataset

# load any of the 3 pretrained models

out = model.predict_segmentation(inp="data/ADE_val_00001054.jpg",
                                 out_fname="out.png")
Example #3
def test_pretrained():

    model = pspnet_50_ADE_20K()

    # te_im is defined elsewhere in the library's test suite as the test-image directory.
    out = model.predict_segmentation(inp=te_im + "/0016E5_07959.png",
                                     out_fname="/tmp/out.png")
Example #4
def getStyleChangedImage(inputFile,
                         preferenceImages,
                         od_model,
                         baseLight=[255, 255, 255],
                         changeLight=[178, 220, 240]):
    '''
    Input colors are BGR ([178, 220, 240] is an orange light).
    Even just four preferenceImages are sufficient.
    '''
    if len(preferenceImages) <= 2:
        # Duplicate the list so enough preference images are available below.
        preferenceImages = preferenceImages + preferenceImages
    print(preferenceImages)
    inputBaseFile, preferenceBaseFile = utility.file_basify(
        inputFile, preferenceImages)

    now = time.time()
    detection_model = pspnet_50_ADE_20K()
    outputFile = utility.get_add_dir(inputFile, "temp")

    # Object Detect & Segmentation
    [coord, str_tag, number_tag, score, rect_files, additional_infor,
     n_color] = getODandSegment(inputBaseFile, od_model)

    # destSize and the MAX_* constants below are module-level settings defined elsewhere.
    (imgHeight, imgWidth, _) = utility.read_image(inputFile).shape
    if imgWidth > destSize[0] and imgHeight > destSize[1]:
        ratio = (destSize[0] / imgWidth, destSize[1] / imgHeight)
    else:
        ratio = (1, 1)
    print("Loading Finished")

    temp = time.time()
    print("Loading Time : ", temp - now)

    # Wall Detection with input image.
    wall_divided = segmentation.detect_wall_floor(inputFile, detection_model)
    wall_divided = utility.resize_2darr(wall_divided, ratio=ratio)
    wall_total, wall_number = matrix_processing.divided_class_into_class_total(
        wall_divided)
    print("Wall Divided.")

    # Get the preference images' data.
    preferWallColor = []
    preferFloorColor = []
    selectedPreferenceImages = []
    [files, domColors, wallColors, floorColors] = utility.load_result(
        config.RESEARCH_BASE_FILE
    )  # Each file's dominant, wall and floor colors were saved here.
    baseNameFiles = [os.path.basename(f) for f in files]

    print("Wall Color start.")
    indx = list(range(0, len(preferenceBaseFile)))
    random.shuffle(indx)
    # Select MAX_WALL_IMAGE of the colors above for preferWallColor and preferFloorColor.
    for i in range(MAX_WALL_IMAGE):
        ind = indx[i]
        preferImage = preferenceBaseFile[ind]
        loadIndex = baseNameFiles.index(os.path.basename(
            preferImage))  # Compare by base name only.
        preferWallColor.append(wallColors[loadIndex])
        preferFloorColor.append(floorColors[loadIndex])
        selectedPreferenceImages.append(files[loadIndex])
    print("Wall Colored Selected.")

    # Change wall & floor
    wfColorChangeImage = []
    for i in range(MAX_WALL_IMAGE):
        wfOutputFile = changeWallFloor(inputFile,
                                       outputFile,
                                       wall_divided,
                                       wall_total,
                                       wall_number,
                                       i,
                                       preferWallColor,
                                       preferFloorColor,
                                       ratio=ratio)
        wfColorChangeImage.append(wfOutputFile)
    print("Wall Color Changed")

    temp = time.time()
    print("Wall Coloring Time : ", temp - now)

    # Change Object ( Table and Chair )
    partChangedFiles = []
    procs = []
    recommandFurnitureList = []
    changeFurnitureLocation = []
    changeFurnitureColor = []

    for i in range(MAX_WALL_IMAGE):
        for j in range(MAX_PART_CHANGE_IMAGE):
            # Save the arguments to hand over, then spawn a subprocess to speed things up.
            argvFile = utility.add_name(
                config.SUBPROCESS_ARGV,
                "_" + str(MAX_PART_CHANGE_IMAGE * i + j))
            utility.save_result([
                selectedPreferenceImages, wfColorChangeImage, outputFile,
                str_tag, coord, rect_files, i, j, ratio
            ], argvFile)

            # The subprocess needs to compute with the given ratio.
            proc = subprocess.Popen(
                ['python', 'getPartChangedImage.py', argvFile],
                shell=True,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                encoding="cp949")
            procs.append(proc)

    for proc in procs:
        out = proc.communicate()[0]
        out = str(out).split("\n")
        tout = []
        for line in out:
            if len(line) > 0:
                tout.append(line)
        [changed_log, recommand_furniture] = utility.load_result(tout[-1])
        partChangedFiles.append(tout[-2])
        recommandFurnitureList.append(recommand_furniture)
        for entry in changed_log:
            changeFurnitureLocation.append(entry[0])
            changeFurnitureColor.append(entry[1])

    print("Part Changed Finished")
    # TODO: add some plants. (The number of images will not change.)

    temp = time.time()
    print("Part Changing Time : ", temp - now)

    lightList = []
    # Change Light
    for i in range(MAX_OUT_IMAGE):
        print("Now Proceed : ", i)
        lighterFile = utility.add_name(partChangedFiles[i], "_lighter")
        if random.randint(1, MAX_OUT_IMAGE) > 4:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, changeLight)
            lightList.append(changeLight)
        else:
            changed_file = styler.get_light_change(partChangedFiles[i],
                                                   baseLight, baseLight)
            lightList.append(baseLight)
        utility.save_image(changed_file, lighterFile)
        partChangedFiles[i] = lighterFile
    # partChangedFiles now holds the final, modified files.
    temp = time.time()
    print("Total Time : ", temp - now)
    changeLog = makeChangeInfor(
        preferWallColor, preferFloorColor,
        [preferenceImages[indx[0]], preferenceImages[indx[1]]],
        partChangedFiles, lightList, changeFurnitureLocation,
        changeFurnitureColor, recommandFurnitureList, [])

    resultDictionary = utility.save_log_dictionary(inputFile, partChangedFiles,
                                                   changeLog)
    utility.logging(str(resultDictionary))
    with open(FILE_OUTQUEUE, 'a') as f:
        f.write(str(resultDictionary) + "\n")
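
A hypothetical invocation, assuming an object-detection model od_model is already loaded and the helper modules are on the path; all file names are placeholders.

getStyleChangedImage("input/room.jpg",
                     ["pref/a.jpg", "pref/b.jpg", "pref/c.jpg", "pref/d.jpg"],
                     od_model)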
Example #5
import numpy as np

from keras_segmentation.data_utils.data_loader import get_segmentation_array
from keras_segmentation.pretrained import pspnet_50_ADE_20K
from keras_segmentation.predict import predict_multiple


def evaluate(model=None, inp_images=None, annotations=None):
    # This snippet originally began mid-function; the imports, signature and
    # accumulators here are reconstructed from keras_segmentation's standard
    # evaluate() so the example is self-contained.
    tp = np.zeros(model.n_classes)
    fp = np.zeros(model.n_classes)
    fn = np.zeros(model.n_classes)
    n_pixels = np.zeros(model.n_classes)

    for inp, ann in zip(inp_images, annotations):
        pr = model.predict_segmentation(inp=inp)
        gt = get_segmentation_array(ann,
                                    model.n_classes,
                                    model.output_width,
                                    model.output_height,
                                    no_reshape=True)
        gt = gt.argmax(-1)
        pr = pr.flatten()
        gt = gt.flatten()

        for cl_i in range(model.n_classes):

            tp[cl_i] += np.sum((pr == cl_i) * (gt == cl_i))
            fp[cl_i] += np.sum((pr == cl_i) * ((gt != cl_i)))
            fn[cl_i] += np.sum((pr != cl_i) * ((gt == cl_i)))
            n_pixels[cl_i] += np.sum(gt == cl_i)

    cl_wise_score = tp / (tp + fp + fn + 1e-12)  # epsilon guards against division by zero
    n_pixels_norm = n_pixels / np.sum(n_pixels)
    frequency_weighted_IU = np.sum(cl_wise_score * n_pixels_norm)
    mean_IU = np.mean(cl_wise_score)
    return {
        "frequency_weighted_IU": frequency_weighted_IU,
        "mean_IU": mean_IU,
        "class_wise_IU": cl_wise_score
    }
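
For reference, keras_segmentation also exposes these metrics through the model API; a minimal sketch, with paths assumed to match the data used below:

# model.evaluate_segmentation returns the same IU metrics dictionary.
print(pspnet_50_ADE_20K().evaluate_segmentation(
    inp_images_dir="../VGdata/images_prepped_train_png/",
    annotations_dir="../VGdata/annotations_prepped_train_png/"))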


predict_multiple(model=pspnet_50_ADE_20K(),
                 inp_dir='../VGdata/images_prepped_train_png',
                 out_dir='../VGdata/annotations_prepped_train_png')

#predict_multiple(model='pspnet_50', inp_dir='VGdata/imgtrain', out_dir='VGdata/anntrain')
Example #6
"""
@author: craig
"""
import matplotlib.pyplot as plt
import numpy as np
import cv2
from PIL import Image, ImageOps 
import os.path

from keras.models import load_model
from keras_segmentation.pretrained import pspnet_50_ADE_20K, pspnet_101_cityscapes, pspnet_101_voc12


model = pspnet_50_ADE_20K()  # in-between detail - load the pretrained model trained on the ADE20K dataset

#model = pspnet_101_cityscapes() # too much detail - load the pretrained model trained on Cityscapes dataset

#model = pspnet_101_voc12() # Just the People - load the pretrained model trained on Pascal VOC 2012 dataset

#### model = load_model('vgg_unet_1.h5')

# Use any of the 3 pretrained models above



out = model.predict_segmentation(
    inp="sample_images/1_input.jpg",
    out_fname="bed_out.png"
)
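
Since matplotlib is already imported above, the returned class map (a 2D array of class ids) can be displayed directly; a minimal sketch:

plt.imshow(out)
plt.axis("off")
plt.title("PSPNet-50 ADE20K segmentation")
plt.show()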
Example #7
def get_semantic_segments(img):
    model = pspnet_50_ADE_20K()
    # predict_segmentation is exposed as a method bound to the model.
    out = model.predict_segmentation(inp=img)
    return out
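
Reloading pspnet_50_ADE_20K on every call is expensive; a minimal sketch of a cached variant (the module-level _model name is an assumption, not part of the original):

_model = None  # hypothetical module-level cache

def get_semantic_segments_cached(img):
    global _model
    if _model is None:
        _model = pspnet_50_ADE_20K()  # load the weights once, reuse afterwards
    return _model.predict_segmentation(inp=img)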
Example #8
def setup():
    # The snippet originally began mid-function; this minimal head is assumed
    # so the initialization code below parses on its own.
    global model_scene_parsing, model_cityscapes, model_visual_object
    global ALLOWED_EXTENSIONS
    global prewarm

    ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

    result_directory = '/src/results/'
    create_directory(result_directory)

    upload_directory = '/src/upload/'
    create_directory(upload_directory)

    prewarm = os.getenv('PREWARM', 'TRUE') == 'TRUE'

    if prewarm:
        # Load all three pretrained models up front.
        model_scene_parsing = pretrained.pspnet_50_ADE_20K()  # ADE20K scene parsing
        model_cityscapes = pretrained.pspnet_101_cityscapes()  # Cityscapes
        model_visual_object = pretrained.pspnet_101_voc12()  # Pascal VOC 2012
    else:
        get_model_bin(
            "https://www.dropbox.com/s/0uxn14y26jcui4v/pspnet50_ade20k.h5?dl=1",
            "/root/.keras/dataset/pspnet50_ade20k.h5")
        get_model_bin(
            "https://www.dropbox.com/s/c17g94n946tpalb/pspnet101_cityscapes.h5?dl=1",
            "/root/.keras/dataset/pspnet101_cityscapes.h5")
        get_model_bin(
            "https://www.dropbox.com/s/uvqj2cjo4b9c5wg/pspnet101_voc2012.h5?dl=1",
            "/root/.keras/dataset/pspnet101_voc2012.h5")