Example #1
def train_vgg_model(weight_save, checkpoints_path, epochs=n_epochs):
    model = vgg_unet(n_classes=n_classes,
                     input_height=input_height,
                     input_width=input_width)
    # model.load_weights(os.path.join(checkpt_paths, '.38'))

    # Set a cyclical learning rate as a callback: the learning rate varies between
    # 0.001 (base_lr) and 0.01 (max_lr), following a triangular cycle shape.
    cyclic_lr = CyclicLR(base_lr=0.001,
                         max_lr=0.01,
                         step_size=1024,
                         mode='triangular')
    print_lr = PrintLR()

    model.train(train_images=os.path.join(AUGMENTED_IMAGES_DIRECTORY, 'img'),
                train_annotations=os.path.join(AUGMENTED_LABEL_DIRECTORY,
                                               'img'),
                auto_resume_checkpoint=True,
                checkpoints_path=checkpoints_path,
                verify_dataset=False,
                epochs=epochs,
                more_callbacks=[cyclic_lr, print_lr])

    model.save_weights(os.path.join(MODELS_DIRECTORY, weight_save))
    return model
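The PrintLR callback used above is not defined in this excerpt; a minimal sketch, assuming the standard Keras Callback API (not the author's implementation), could simply print the optimizer's current learning rate at the end of each epoch:

import tensorflow as tf

class PrintLR(tf.keras.callbacks.Callback):
    """Hypothetical stand-in for the PrintLR callback referenced above."""
    def on_epoch_end(self, epoch, logs=None):
        # Read the optimizer's current learning rate and print it.
        lr = tf.keras.backend.get_value(self.model.optimizer.lr)
        print(f"Epoch {epoch + 1}: learning rate = {lr:.6f}")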
Example #2
def load_vgg16_model(model_path):
    model = vgg_unet(n_classes,
                     input_width=input_width,
                     input_height=input_height)
    # model.build((input_width, input_height))
    model.load_weights(model_path)
    return model
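A hypothetical call to the loader above (the weight path is illustrative; n_classes, input_width and input_height must already be defined at module level):

# model = load_vgg16_model("models/vgg_unet_weights.h5")  # path is illustrative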
Example #3
    def predict_folder(self):
        self.model = vgg_unet(n_classes=256,
                              input_height=1024,
                              input_width=1024)
        old_model = load_model(self.model_file)
        transfer_weights(old_model, self.model)
        self.model.predict_multiple(inp_dir=self.dir_images, out_dir="outputs")

        self.mask = 'outputs/' + os.path.basename(os.path.normpath(self.image))
        im = PIL.Image.open(self.mask)
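        # Note: PIL.Image.ANTIALIAS was removed in Pillow 10; newer Pillow versions use PIL.Image.LANCZOS.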
        im.thumbnail((600, 400), PIL.Image.ANTIALIAS)
        ph = PIL.ImageTk.PhotoImage(im)
        self.canvas = Canvas(self.window, width=ph.width(), height=ph.height())
        self.canvas.pack()
        self.canvas.create_image(0, 0, anchor=W, image=ph)
        self.canvas.image = ph
Example #4
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 18:08:34 2020

@author: craig
"""

from keras_segmentation.models.unet import vgg_unet
from keras_segmentation.predict import model_from_checkpoint_path

model = vgg_unet(n_classes=51, input_height=416, input_width=608)

model = model_from_checkpoint_path("weights/vgg_unet_1")
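# Note: model_from_checkpoint_path rebuilds the architecture from the checkpoint's
# saved config and loads the latest weights, so the vgg_unet instance created above
# is replaced by this call.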

model.train(
    train_images="dataset1/images_prepped_train/",
    train_annotations="dataset1/annotations_prepped_train/",
    val_images="dataset1/images_prepped_test/",
    val_annotations="dataset1/annotations_prepped_test/",
    verify_dataset=True,
    #    load_weights="weights/vgg_unet_1.4" ,
    optimizer_name='adadelta',
    do_augment=True,
    augmentation_name="aug_all",
    checkpoints_path="weights/vgg_unet_1",
    epochs=10)

# Display the model's architecture
model.summary()

Example #5
# %% Folder paths used for training
# images
images_folder_val = "D:\\Dataset\\coco2017\\val2017\\val2017\\"
images_folder_train = "D:\\Dataset\\coco2017\\train2017\\train2017\\"
# annotations
annotations_images_folder_val = "D:\\Dataset\\new_seg\\semantic_validation\\semantic_validation\\"
annotations_images_folder_train = "D:\\Dataset\\new_seg\\semantic_train\\semantic_train\\"
# Altered images
dist_images_path = "C:\\Users\\Arnaud\\Google Drive\\shared\\ECE6258\\SAMPLE"
# model folder for storage on the hard drive
model_folder_savings = "D:\\Dataset\\models\\"

# %% Model declaration
model = vgg_unet(n_classes=134, input_height=224, input_width=224)
winsound.Beep(1000, 1000)

# %% Load a model
model.load_weights("D:\\Dataset\\models\\model day 27 month 11 year 19 at 12 45 25\\weights.hdf5")


# %% Test
# validation image
os.chdir(images_folder_val)
img_file="000000000285.jpg"
img = mpimg.imread(img_file)
# annotation of the image
os.chdir(annotations_images_folder_val)
semantic_file = img_file[:-3]+'png'
img_semantic = plt.imread(semantic_file)
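The excerpt stops after loading the validation image and its annotation. A minimal visual check, sketched here and not part of the original script, could compare the input, the ground-truth mask and the model's prediction side by side:

out = model.predict_segmentation(inp=os.path.join(images_folder_val, img_file),
                                 out_fname="prediction.png")
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
axes[0].imshow(img)
axes[0].set_title("input")
axes[1].imshow(img_semantic)
axes[1].set_title("ground truth")
axes[2].imshow(out)
axes[2].set_title("prediction")
plt.show()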
Example #6
from keras_segmentation.models.unet import vgg_unet
import tensorflow as tf

config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)

model = vgg_unet(n_classes=19, input_height=416, input_width=256)
# Display the model's architecture
model.summary()

model.load_weights("models/vgg_unet_1.99")

out = model.predict_segmentation(
    inp="dataset2/images_prepped_test/person_240_0.png", out_fname="out1.png")
# print(model.evaluate_segmentation(inp_images_dir="dataset2/images_prepped_test/", annotations_dir="dataset2/annotations_prepped_test_convert/"))
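The ConfigProto/Session pattern above is the TF1 compatibility way of enabling GPU memory growth; under TF2 the equivalent setup (a sketch with the same effect, shown commented out like the note in Example #14) would be:

# gpus = tf.config.experimental.list_physical_devices('GPU')
# if gpus:
#     tf.config.experimental.set_memory_growth(gpus[0], True)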
Example #7
# import matplotlib.pyplot as plt
from IPython.display import Image
import os
model_name = input("Enter model name: ")
if model_name == "vgg_unet":
    from keras_segmentation.models.unet import vgg_unet
    model = vgg_unet(n_classes=50, input_height=320, input_width=640)
elif model_name == "vgg_unet_dt" or model_name == "vgg_unet_dt_mid_dataset" or model_name == "vgg_unet_dt_big_dataset"\
        or model_name == "vgg_unet_dt_mid_dataset_dt4" or model_name == "vgg_unet_dt_big_dataset_dt5":
    from keras_segmentation.models.unet import vgg_unet
    model = vgg_unet(n_classes=2, input_height=480, input_width=640)
elif model_name == "segnet_dt" or model_name == "segnet_dt_mid_dataset" or model_name == "segnet_dt_big_dataset" \
        or model_name == "segnet_dt_mid_dataset_dt4" or model_name == "segnet_dt_big_dataset_dt5":
    from keras_segmentation.models.segnet import segnet
    model = segnet(n_classes=2, input_height=480, input_width=640)
elif model_name == "densenet_unet_dt" or model_name == "densenet_unet_dt_big_dataset":
    from keras_segmentation.models.unet import densenet_unet
    model = densenet_unet(n_classes=2, input_height=480, input_width=640)
else:
    raise Exception("UnknownModelError")

#path = "C:/Serge/Desktop/files/project files/engewiki duckietown NN course/keras segmentation data/"
#path = "C:/Users/Serg/Desktop/some_files/engewiki duckietown NN course/keras segmentation data/"
path = "H:/some_files/engewiki duckietown NN course/keras segmentation data/"

model.load_weights(path + "tmp/" + model_name + "/weights/" + model_name + "_weights")

# Sky, Building, Pole, Road, Pavement, Tree, SingSymbol, Fence, Car, Pedestrian, Bicyclist
# Other, RoadMark
classes = input("enter model classes: ").split(", ")
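The excerpt ends after reading the class names. A sketch of how the loaded model and class list might then be used (the test-image path is illustrative), mirroring the predict_segmentation options used in Example #13 below:

out = model.predict_segmentation(
    inp=path + "tmp/" + model_name + "/test_image.png",  # illustrative path
    out_fname="prediction.png",
    overlay_img=True,
    show_legends=True,
    class_names=classes)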
Example #8
from keras_segmentation.models.unet import vgg_unet
import matplotlib.pyplot as plt
import os

here = os.path.abspath(os.path.dirname(__file__))
path_to_repo = os.path.join(
    here.split("fashion_segmentation")[0], "fashion_segmentation")

model = vgg_unet(n_classes=46, input_height=1024, input_width=1024)

model.train(
    train_images=os.path.join(path_to_repo, "data/train"),
    train_annotations=os.path.join(path_to_repo, "data/train_annotations"),
    checkpoints_path="/tmp/vgg_unet_1",
    epochs=5,
    val_images=os.path.join(path_to_repo, "data/val_train"),
    val_annotations=os.path.join(path_to_repo, "data/val_annotations"),
)

out = model.predict_segmentation(
    inp=os.path.join(path_to_repo, "data/test",
                     "00000663ed1ff0c4e0132b9b9ac53f6e.jpg"),
    out_fname=os.path.join(path_to_repo, "data", "out_vgg_unet.png"))
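# out is a 2D array of per-pixel class indices, returned at the model's output
# resolution (half the input size for vgg_unet).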

plt.imshow(out)
plt.show()

# evaluating the model
print(
    model.evaluate_segmentation(
        inp_images_dir=os.path.join(path_to_repo, "data/test"),
Example #9
from keras_segmentation.models.unet import vgg_unet
from keras_segmentation.models.unet import resnet50_unet

import cv2 as cv
import numpy as np
import sys
np.set_printoptions(threshold=sys.maxsize)

# The model input image size must be a multiple of 32.
ModelInputHeight = 640
ModelInputWidth = 640
Model_Path = "E:\\SKKU\\PillRecog\\PillRecogApp\\Engine\\Models\\Background\\background.hdf4"

model = vgg_unet(n_classes=2,
                 input_height=ModelInputHeight,
                 input_width=ModelInputWidth)
#model = resnet50_unet(n_classes=2 ,  input_height=ModelInputHeight, input_width= ModelInputWidth )

model.load_weights(Model_Path)

OneDataFlag = True

# As a test, apply the model to a single file
if OneDataFlag:
    FilePath = 'E:\\198200157.jpg'
    #FilePath = 'E:\\2.jpg'
    out = model.predict_segmentation(
        inp=FilePath, out_fname="E:\\Study\\Pill\\SegTest\\Data\\dd\\out.png")

    org = cv.imread(FilePath)
    # The model output size is half of the input size.
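    # (Sketch, not in the original excerpt: upscale the half-resolution class map
    #  back to the original image size so it lines up with `org`. Nearest-neighbour
    #  interpolation keeps the integer label values intact.)
    mask = cv.resize(out.astype(np.uint8),
                     (org.shape[1], org.shape[0]),
                     interpolation=cv.INTER_NEAREST)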
"train_annotations": annotations_images_folder_train,
"val_images":images_folder_val,
"val_annotations": annotations_images_folder_val,
"epochs": 20,
"batch_size":batch_size,
"val_batch_size": 22,
"steps_per_epoch":5300,
"verify_dataset":False,
"n_classes":134,
"load_weights":None,
"callbacks": [tensorboard_callback,checkpointer],
"validate":True,
"optimizer_name":'adadelta'}

# %% Model declaration
model = vgg_unet(n_classes=kwargs["n_classes"], input_height=224, input_width=224)
winsound.Beep(1000,1000)

# %% Load a model
model.load_weights("D:\\Dataset\\models\\model day 27 month 11 year 19 at 12 45 25\\weights.hdf5")

# %% Training
try:
  history = model.train(**kwargs)
  # notification for end of epoch.
  notification()
except Exception as e:
  notification()
  time.sleep(1)
  notification()
  save_model(model,model_folder_savings)
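notification() and save_model() are not defined in this excerpt. A minimal stand-in for the beep notification, assuming it only plays a tone (an assumption, not the author's helper):

def notification():
    # Hypothetical helper: short 1 kHz beep to signal the end of a run.
    winsound.Beep(1000, 500)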
Example #11
# (The start of this excerpt is truncated; it sets up paired image and mask data generators.)
                                                     class_mode=None)

train_generator = zip(train_image_generator, train_mask_generator)
val_generator = zip(val_image_generator, val_mask_generator)

#Training
from keras.models import Sequential

nb_train_samples = 1
nb_validation_samples = 1
epoches = 2

from keras import backend as K
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

from keras_segmentation.models.unet import vgg_unet

model = vgg_unet(n_classes=2, input_height=img_height, input_width=img_width)

model.compile(loss='mse', optimizer='adam', metrics=['accuracy', 'mse'])

history = model.fit_generator(train_generator,
                              steps_per_epoch=nb_train_samples // batch_size,
                              epochs=epoches,
                              validation_data=val_generator,
                              validation_steps=nb_validation_samples //
                              batch_size)
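fit_generator is deprecated in recent tf.keras releases; the same zipped generators can be passed straight to fit (a sketch with identical arguments):

# history = model.fit(train_generator,
#                     steps_per_epoch=nb_train_samples // batch_size,
#                     epochs=epoches,
#                     validation_data=val_generator,
#                     validation_steps=nb_validation_samples // batch_size)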
Example #12
steps_per_epoch = 512
val_steps_per_epoch = 512
gen_use_multiprocessing = False
ignore_zero_class = False
lr = 0.0001
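# Adam is assumed to be imported earlier in the full script (e.g. from keras.optimizers import Adam).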
optimizer_name = Adam(lr=lr, decay=1e-6)
do_augment = False
augmentation_name = "aug_all"
pred_dir = '/home/GDDC-CV2/Desktop/data_1024/pred_x_presentation/'
out_dir = '/home/GDDC-CV2/Desktop/data_1024/pred_out/'
patience = 10
model_save_path = '/home/GDDC-CV2/Desktop/CV-Semantic-Segmentation/model_transfer_learning/checkpoint/whole_model/'

#### physical GPU (device: 0, name: Tesla V100-PCIE-16GB, pci bus id: f114:00:00.0, compute capability: 7.0) ####
model = vgg_unet(n_classes=n_classes,
                 input_height=input_height,
                 input_width=input_width)

model.train(train_images=train_images,
            train_annotations=train_annotations,
            checkpoints_path=checkpoints_path,
            epochs=epoch,
            batch_size=batch_size,
            validate=validate,
            val_images=val_images,
            val_annotations=val_annotations,
            val_batch_size=batch_size,
            auto_resume_checkpoint=auto_resume_checkpoint,
            optimizer_name=optimizer_name,
            patience=patience,
            steps_per_epoch=steps_per_epoch,
Example #13
import cv2
import os
import matplotlib.pyplot as plt

from keras.models import load_model
from keras_segmentation.models.unet import vgg_unet
from IPython.display import Image
from keras_segmentation.metrics import get_iou
from keras_segmentation.models.model_utils import transfer_weights
from keras_segmentation.predict import predict_multiple

model = vgg_unet(n_classes=6, input_height=320, input_width=640)
m = load_model(os.path.dirname(os.getcwd()) + "/datasets/op/vgg_unet")

transfer_weights(m, model, verbose=True)
'''
# TESTING THE MODEL ON TEST IMAGES (#1)
out = model.predict_segmentation(
    inp=os.path.dirname(os.getcwd()) + "/datasets/data/images_prepped_test/b1-09517_Clipped.jpg",
    out_fname=os.path.dirname(os.getcwd()) + "/datasets/op/outout1.png",
    checkpoints_path=os.path.dirname(os.getcwd()) + "/datasets/op/vgg_unet_1")
print('Done')

# TESTING THE MODEL ON TEST IMAGES (#2)
o = model.predict_segmentation(
    inp=os.path.dirname(os.getcwd()) + "/datasets/data/images_prepped_test/b1-09517_Clipped.jpg",
    out_fname=os.path.dirname(os.getcwd()) + "/datasets/op/outout2.png", overlay_img=False, show_legends=True,
    class_names=["Sky", "Forest Floor", "Vegetation", "Grass", "Obstacle", "Tree"],
    checkpoints_path=os.path.dirname(os.getcwd()) + "/datasets/op/vgg_unet_1")
Example #14
#%%
label_image_semantic

# %% Train on training data
epochs = 1
import time  # used below to time the prediction
from keras_segmentation.models.unet import vgg_unet

# If utilizing the GPU, this may be needed to prevent OOM errors.
# import tensorflow as tf
# gpus= tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(gpus[0], True)

n_classes = 16  # iSAID Dataset
# See https://github.com/divamgupta/image-segmentation-keras/blob/master/keras_segmentation/models/unet.py
model = vgg_unet(n_classes=n_classes, input_height=800, input_width=800)

model.train(train_images="./data/iSAID_patches/train/rgb_source_images",
            train_annotations="./data/iSAID_patches/train/sem_label_mask_cat",
            checkpoints_path="./checkpoints/iSAID/vgg_unet",
            epochs=epochs,
            verify_dataset=False,
            batch_size=8,
            steps_per_epoch=3500,
            auto_resume_checkpoint=True)
# %% Predict
start = time.time()

input_image = "./data/iSAID_patches/train/rgb_source_images/P0000_600_1400_2400_3200.png"
out = model.predict_segmentation(inp=input_image, out_fname="out.png")
done = time.time()
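A small follow-up sketch (not in the original) that reports the measured inference time:

print(f"Prediction for {input_image} took {done - start:.2f} s")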