def train_net(img, msk, x_val, y_val):
    print("start train net")
    s = 0
    model = Deeplabv3(input_shape=(160, 160, 8),
                      classes=6,
                      backbone='xception')
    model.load_weights('weights/deeplab_x_jk0.6541', by_name=True)
    #model = Deeplabv3(input_shape=(160,160,8), classes=6,backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_m_jk0.6479', by_name=True)
    model.compile(optimizer=Adam(),
                  loss='binary_crossentropy',
                  metrics=[jaccard_coef, jaccard_coef_int, 'accuracy'])
    #model_checkpoint = ModelCheckpoint('weights/deeplab_tmp.h5', monitor='loss', save_best_only=True)
    for i in range(6):
        model.fit(img,
                  msk,
                  batch_size=8,
                  epochs=1,
                  verbose=1,
                  shuffle=True,
                  validation_data=(x_val, y_val))
        #model.fit(img, msk, batch_size=8, epochs=1, verbose=1, shuffle=True, validation_split=0.2)
        score, trs = calc_jacc(model, x_val, y_val)
        print('val jk', score)
        if score > s:
            model.save_weights('weights/deeplab_x_jk%.4f' % score)
            s = score
    return model, trs
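# Note: jaccard_coef, jaccard_coef_int and calc_jacc are project helpers that are
# not shown here.  The following is only a minimal sketch of a smoothed Jaccard
# metric in Keras backend terms (an assumption, not this project's exact code).
from keras import backend as K

SMOOTH = 1e-12

def jaccard_coef(y_true, y_pred):
    # Soft intersection-over-union, averaged over the class channels.
    intersection = K.sum(y_true * y_pred, axis=[0, 1, 2])
    union = K.sum(y_true + y_pred, axis=[0, 1, 2]) - intersection
    return K.mean((intersection + SMOOTH) / (union + SMOOTH))

def jaccard_coef_int(y_true, y_pred):
    # Same as above, but on binarized (rounded) predictions.
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    intersection = K.sum(y_true * y_pred_pos, axis=[0, 1, 2])
    union = K.sum(y_true + y_pred_pos, axis=[0, 1, 2]) - intersection
    return K.mean((intersection + SMOOTH) / (union + SMOOTH))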
Example n. 2
def generate_image_labels(image,
                          trained_image_width=512,
                          mean_subtraction_value=127.5):
    """# Generates labels using most basic setup.  Supports various image sizes.  Returns image labels in same format
     as original image."""

    # resize to max dimension of images from training dataset
    w, h, _ = image.shape
    ratio = float(trained_image_width) / np.max([w, h])
    resized_image = np.array(
        Image.fromarray(image.astype('uint8')).resize(
            (int(ratio * h), int(ratio * w))))

    # apply normalization for trained dataset images
    resized_image = (resized_image / mean_subtraction_value) - 1.

    # pad array to square image to match training images
    pad_x = int(trained_image_width - resized_image.shape[0])
    pad_y = int(trained_image_width - resized_image.shape[1])
    resized_image = np.pad(resized_image, ((0, pad_x), (0, pad_y), (0, 0)),
                           mode='constant')

    # make prediction
    deeplab_model = Deeplabv3()
    res = deeplab_model.predict(np.expand_dims(resized_image, 0))
    labels = np.argmax(res.squeeze(), -1)

    # remove padding and resize back to original image
    if pad_x > 0:
        labels = labels[:-pad_x]
    if pad_y > 0:
        labels = labels[:, :-pad_y]
    labels = np.array(Image.fromarray(labels.astype('uint8')).resize((h, w)))

    return labels
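# Hypothetical usage of the function above; 'street.jpg' is an illustrative path.
import numpy as np
from PIL import Image

image = np.array(Image.open('street.jpg'))
labels = generate_image_labels(image)  # per-pixel class indices, same height/width as the input
print(labels.shape, np.unique(labels))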
Example n. 3
def get_deeplab():
    deeplab_model = Deeplabv3(input_shape=(512, 512, 1),
                              weights=None,
                              classes=1,
                              OS=16)
    deeplab_model.compile(optimizer=Adam(lr=1e-3),
                          loss=dice_coef_loss,
                          metrics=[dice_coef, 'accuracy', precision, recall])
    deeplab_model.summary()
    return deeplab_model
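# dice_coef, dice_coef_loss, precision and recall are project-specific metrics that
# are not shown here.  A common soft-Dice formulation (an assumption about what they
# look like) is:
from keras import backend as K

def dice_coef(y_true, y_pred, smooth=1.0):
    # Soft Dice coefficient over the flattened masks.
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coef_loss(y_true, y_pred):
    return 1. - dice_coef(y_true, y_pred)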
Example n. 4
    def segment_and_recognize(input):
        trained_image_width = 512
        mean_subtraction_value = 127.5
        image = np.array(Image.open(input))

        # get the YOLOv3 bounding boxes, labels and confidences
        bbox, YOLO_label, conf = cv.detect_common_objects(image)

        # resize to max dimension of images from training dataset
        w, h, _ = image.shape
        ratio = float(trained_image_width) / np.max([w, h])
        resized_image = np.array(
            Image.fromarray(image.astype('uint8')).resize(
                (int(ratio * h), int(ratio * w))))

        # apply normalization for trained dataset images
        resized_image = (resized_image / mean_subtraction_value) - 1.

        # pad array to square image to match training images
        pad_x = int(trained_image_width - resized_image.shape[0])
        pad_y = int(trained_image_width - resized_image.shape[1])
        resized_image = np.pad(resized_image, ((0, pad_x), (0, pad_y), (0, 0)),
                               mode='constant')

        # make prediction for Deeplab
        deeplab_model = Deeplabv3()
        res = deeplab_model.predict(np.expand_dims(resized_image, 0))
        labels = np.argmax(res.squeeze(), -1)

        # remove padding and resize back to original image
        if pad_x > 0:
            labels = labels[:-pad_x]
        if pad_y > 0:
            labels = labels[:, :-pad_y]
        labels = Image.fromarray(labels.astype('uint8')).resize(
            (h, w)).convert()

        # converts PIL greyscale image to numpy array compatible with cvlib
        # and overlays the segment over original input image
        temp = 'temp.png'
        plt.imsave(temp, labels)
        background = Image.open(input).convert('RGBA')
        segments = Image.open(temp)
        overlay = Image.blend(background, segments, 0.5)
        img = np.array(overlay)

        # display the YOLOv3 bounding boxes and labels on the overlay
        img = draw_bbox(img=img, bbox=bbox, labels=YOLO_label, confidence=conf)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        os.remove(temp)

        # show final result
        plt.imshow(img)
        plt.waitforbuttonpress()
        plt.show()
    def create_seg_model(self,
                         net,
                         n=183,
                         backbone='mobilenetv2',
                         load_weights=False,
                         multi_gpu=True):
        '''
        net selects the decoder head:
        1. 'original' - the original DeepLab v3+ (bilinear upsampling)
        2. 'subpixel' - DeepLab v3+ with a subpixel upsampling layer
        '''
        model = Deeplabv3(weights='pascal_voc',
                          input_shape=self.sz + (3, ),
                          classes=n,
                          backbone=backbone,
                          OS=16)

        base_model = Model(model.input, model.layers[-5].output)
        self.net = net
        self.modelpath = '{}_{}.h5'.format(backbone, net)
        if backbone == 'xception':
            scale = 4
        else:
            scale = 8
        if net == 'original':
            x = Conv2D(n, (1, 1), padding='same',
                       name='conv_upsample')(base_model.output)
            x = Lambda(lambda x: tf.compat.v1.image.resize_bilinear(
                x, size=(self.sz[0], self.sz[1])))(x)
            x = Reshape((self.sz[0] * self.sz[1], n))(x)
            x = Activation('softmax', name='pred_mask')(x)
            model = Model(base_model.input, x, name='deeplabv3p')
        elif net == 'subpixel':
            x = Subpixel(n, 1, scale, padding='same')(base_model.output)
            x = Reshape((self.sz[0] * self.sz[1], n))(x)
            x = Activation('softmax', name='pred_mask')(x)
            model = Model(base_model.input, x, name='deeplabv3p_subpixel')
        # Do ICNR
        for layer in model.layers:
            if type(layer) == Subpixel:
                c, b = layer.get_weights()
                w = icnr_weights(scale=scale, shape=c.shape)
                layer.set_weights([w, b])

        if load_weights:
            model.load_weights('{}_{}.h5'.format(backbone, net))

        # if multi_gpu:
        #     print("Multi GPU Mode activated")
        # from tensorflow.keras.utils import multi_gpu_model
        # model = multi_gpu_model(model, gpus = 2)

        self.model = model
        return model
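# Subpixel and icnr_weights come from this repository's helper modules and are not
# shown.  The sketch below only illustrates the ICNR initialisation idea (start the
# pre-shuffle kernel so that pixel-shuffling reproduces nearest-neighbour
# upsampling), under an assumed kernel layout of (kh, kw, in_ch, filters * scale**2).
import numpy as np
import tensorflow as tf

def icnr_weights(scale, shape):
    kh, kw, in_ch, out_ch = shape
    base_filters = out_ch // (scale ** 2)
    base = tf.keras.initializers.GlorotNormal()((kh, kw, in_ch, base_filters)).numpy()
    # Duplicate each base filter scale**2 times so every sub-pixel position starts
    # from the same weights.  Whether np.repeat or np.tile is the right grouping
    # depends on how the Subpixel layer orders its output channels.
    return np.repeat(base, scale ** 2, axis=-1)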
Example n. 6
 def __init__(self, lambda_adv, target_size, n_class, gen_model_path=None, disc_model_path=None):
   super(AdverGAN, self).__init__()
   if gen_model_path is not None:
     self.gen = load_model(gen_model_path)
   else:
     self.gen = Deeplabv3(input_shape=(target_size[0], target_size[1], 3), backbone='xception', activation='softmax')
   if disc_model_path is not None:
     self.disc = load_model(disc_model_path)
   else:
     self.disc = get_discriminator_tiny(target_size[0], target_size[1], n_class)
   self.lambda_adv = lambda_adv
   self.t_size = target_size
Example n. 7
def EmbeddingModel(params):
    side = params.SIDE
    deeplab_model       = Deeplabv3(input_shape = (side, side, 3), backbone = params.BACKBONE)
    inputs              = deeplab_model.input
    middle              = deeplab_model.get_layer(deeplab_model.layers[-3].name).output
    front_class         = softmax_module(middle, params.NUM_FILTER, params.CLASS_NUM)
    back_class          = softmax_module(middle, params.NUM_FILTER, params.CLASS_NUM)
    front_embedding     = embedding_module(middle, params.NUM_FILTER, params.EMBEDDING_DIM)
    back_embedding      = embedding_module(middle, params.NUM_FILTER, params.EMBEDDING_DIM)
    final_results       = Concatenate(axis=-1)([front_class,
                                                back_class,
                                                front_embedding, 
                                                back_embedding])
    embedding_model = Model(inputs = inputs, outputs = final_results)
    return embedding_model
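# softmax_module and embedding_module are not defined in this snippet.  A plausible
# sketch (an assumption) of small convolutional heads on the shared feature map:
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D

def softmax_module(x, n_filter, n_class):
    # Classification head: 3x3 conv -> BN -> ReLU -> 1x1 softmax.
    x = Conv2D(n_filter, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Conv2D(n_class, 1, padding='same', activation='softmax')(x)

def embedding_module(x, n_filter, embedding_dim):
    # Embedding head: 3x3 conv -> BN -> ReLU -> 1x1 linear projection.
    x = Conv2D(n_filter, 3, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return Conv2D(embedding_dim, 1, padding='same')(x)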
 def __init__(self, lambda_adv, lambda_semi, threshold_semi, target_size, n_class, gen_model_path=None, disc_model_path=None):
   super(AdverGAN, self).__init__()
   if gen_model_path is not None:
     self.gen = load_model(gen_model_path)
   else:
     self.gen = Deeplabv3(input_shape=(target_size[0], target_size[1], 3), backbone='xception', activation='softmax')
   if disc_model_path is not None:
     self.disc = load_model(disc_model_path)
   else:
     # Use get_discriminator for the original implementation
     self.disc = get_discriminator_tiny(target_size[0], target_size[1], n_class)
   self.lambda_adv = lambda_adv
   self.t_size = target_size
   self.lambda_semi = lambda_semi
   self.threshold_semi = threshold_semi
   self.n_class = n_class
Example n. 9
    def __init__(self, args):
        self.args = args
        input_shape = (args.input_height, args.input_width, args.input_channel)
        self.model = Deeplabv3(input_shape=input_shape,
                               classes=args.classes,
                               backbone='xception',
                               activation=args.activation)
        self.model.compile(optimizer=optimizers.Adam(lr=args.learning_rate),
                           loss='sparse_categorical_crossentropy',
                           metrics=[MeanIoU(num_classes=args.classes)])
        self.image_read()
        if not os.path.isdir(self.args.save_dir): os.mkdir(self.args.save_dir)

        if self.args.image_path and self.args.mask_path:
            self.train()

        if self.args.test_path and self.args.model_path:
            self.test()
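# Hypothetical argparse setup matching the attributes this trainer reads; the
# argument names below are inferred from the __init__ above, not taken from the
# original project.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--input_height', type=int, default=512)
parser.add_argument('--input_width', type=int, default=512)
parser.add_argument('--input_channel', type=int, default=3)
parser.add_argument('--classes', type=int, default=21)
parser.add_argument('--activation', default='softmax')
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--save_dir', default='checkpoints')
parser.add_argument('--image_path', default=None)
parser.add_argument('--mask_path', default=None)
parser.add_argument('--test_path', default=None)
parser.add_argument('--model_path', default=None)
args = parser.parse_args()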
Example n. 10
def start_video():
    # deeplab_model = Deeplabv3(backbone='xception', OS=8)

    deeplab_model = Deeplabv3(OS=8)
    vid = WebcamVideoStream(src=0).start()
    cv2.namedWindow("result", cv2.WINDOW_NORMAL)
    blurValue = (3, 3)
    blur_bg_value = 81

    while True:
        frame = vid.read()
        if frame is None:
            break
        w, h, _ = frame.shape
        ratio = 512. / np.max([w, h])

        resized = cv2.resize(frame, (int(ratio * h), int(ratio * w)))
        resized = resized / 127.5 - 1.
        pad_x = int(512 - resized.shape[0])
        resized2 = np.pad(resized, ((0, pad_x), (0, 0), (0, 0)), mode='constant')
        res = deeplab_model.predict(np.expand_dims(resized2, 0))
        labels = np.argmax(res.squeeze(), -1)

        labels = labels[:-pad_x]
        mask = labels == 0
        mask_person = labels != 0

        resizedFrame = cv2.resize(frame, (labels.shape[1], labels.shape[0]))
        blur = cv2.GaussianBlur(resizedFrame, (blur_bg_value,blur_bg_value), 0)

        blur_person = cv2.GaussianBlur(resizedFrame, blurValue, 0)


        resizedFrame[mask] = blur[mask]
        resizedFrame[mask_person] = blur_person[mask_person]

        cv2.imshow("result", resizedFrame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.stop()
    cv2.destroyAllWindows()
Example n. 11
def train(X_train, Y_train, X_dev, Y_dev, batch_size=4,
          freeze=False, pretrained=True, model="test"):

    model_dir = os.path.join(DIRNAME, "models/deeplabv3/results/{}".format(model))
    os.makedirs(model_dir, exist_ok=True)

    if pretrained:
        weights = "pascal_voc"
    else:
        weights = None

    deeplab_model = Deeplabv3(weights=weights, input_shape=(X_train.shape[1:]),
                              classes=2)

    if freeze:
        for layer in deeplab_model.layers[:147]:
            layer.trainable = False

    optimizer = SGD(momentum=0.9, clipnorm=1.)

    deeplab_model.compile(loss='binary_crossentropy',
                          optimizer=optimizer,
                          metrics=['accuracy', jacard_coef])

    tensorboard = TensorBoard(log_dir=model_dir,
                              batch_size=batch_size, update_freq="batch")
    saver = ModelCheckpoint("{}/model.hdf5".format(model_dir), verbose=1,
                            save_best_only=True, monitor="val_jacard_coef",
                            mode="max")
    stopper = EarlyStopping(patience=50, verbose=1, monitor="val_acc",
                            mode="max")
    reduce_lr = ReduceLROnPlateau(monitor="loss", factor=0.5,
                                  patience=5, verbose=1, min_lr=0.001)

    deeplab_model.fit(X_train, Y_train, batch_size=batch_size, verbose=2,
                      validation_data=(X_dev, Y_dev),
                      epochs=100,
                      callbacks=[tensorboard, saver, stopper, reduce_lr])
    print("Modelo {} treinado!".format(model))
Example n. 12
def blur_image(image_path):
    blurValue = (3, 3)
    blur_bg_value = 31

    img = plt.imread(image_path)
    w, h, _ = img.shape
    # deeplab_model = Deeplabv3(OS=8)
    deeplab_model = Deeplabv3(backbone='xception', OS=8)

    ratio = 512. / np.max([w, h])

    resized = cv2.resize(img, (int(ratio * h), int(ratio * w)))
    resized = resized / 127.5 - 1
    pad_x = int(512 - resized.shape[0])
    resized2 = np.pad(resized, ((0, pad_x), (0, 0), (0, 0)), mode='constant')

    res = deeplab_model.predict(np.expand_dims(resized2, 0))
    labels = np.argmax(res.squeeze(), -1)
    labels = labels[:-pad_x]

    # # print(np.unique(labels))

    mask = labels == 0
    mask_person = labels != 0

    resizedFrame = cv2.resize(img, (labels.shape[1], labels.shape[0]))
    blur_person = cv2.GaussianBlur(resizedFrame, blurValue, 0)

    blur_bg = cv2.medianBlur(resizedFrame,blur_bg_value)

    resizedFrame[mask] = blur_bg[mask]
    resizedFrame[mask_person] = blur_person[mask_person]

    # plt.imshow(resizedFrame)
    # plt.waitforbuttonpress()

    cv2.imshow("result", resizedFrame)
    cv2.waitKey(0)
Example n. 13
def imagePreprocessing(pathImage):
    """ Return image processada"""
    deeplab_model = Deeplabv3()

    # resize the input so that its longest side is 512 px
    input_img = cv2.imread(pathImage)
    w, h, _ = input_img.shape
    ratio = 512. / np.max([w, h])

    resized = cv2.resize(input_img, (int(ratio * h), int(ratio * w)))
    resized = resized / 127.5 - 1.

    pad_x = int(512 - resized.shape[0])
    resized2 = np.pad(resized, ((0, pad_x), (0, 0), (0, 0)), mode='constant')
    res = deeplab_model.predict(np.expand_dims(resized2, 0))
    labels = np.argmax(res.squeeze(), -1)

    labels = labels[:-pad_x - 25]
    mask = labels == 0
    resizedFrame = cv2.resize(input_img, (labels.shape[1], labels.shape[0]))
    resizedFrame[mask] = 0

    cv2.imwrite('./imgs/input.jpg', resizedFrame)
Example n. 14
# In[13]:


DiscSeg_model = DiscModel.DeepModel(size_set=DiscSeg_size)
DiscSeg_model.load_weights('Model_DiscSeg_ORIGA_pretrain_Gen_ROI.h5')


# In[14]:


imagePaths = sorted(list(paths.list_images(dataset)))
# load the trained convolutional neural network
print("[INFO] loading network...")

model_1=Deeplabv3(input_shape=(image_width,image_high,3), classes=2)
model_1.load_weights(modelName[0])




# In[15]:


header = ['FileName','Glaucoma Risk']
with open('result_DeepLabv3_21_train_ROI600.csv', 'wb') as csvfile:
    # loop over the input images
    #writer = csv.DictWriter(csvfile, fieldnames =header)
    #writer.writeheader()
    for imagePath in imagePaths:
Example n. 15
from model import Deeplabv3
import numpy as np

channels = 2
deeplab_model = Deeplabv3(classes=channels)
# deeplab_model.load_weights("model_irobot_synthetic.h5")
res = deeplab_model.predict(np.empty((1,512,512,3)))

print (res.shape)

print ('worked!')
Example n. 16
    # adaptively change the learning rate
    reduce_on_plateau = ReduceLROnPlateau(monitor="val_acc",
                                          mode="max",
                                          factor=0.1,
                                          patience=20,
                                          verbose=1)

    tbCallBack = TensorBoard(log_dir='./logs/' + basestr,
                             histogram_freq=0,
                             write_graph=True,
                             write_images=True)

    callbacks_list = [checkpoint, reduce_on_plateau, tbCallBack]

    model = Deeplabv3(weights=None,
                      input_shape=(im_reshape_height, im_reshape_width,
                                   im_chan),
                      classes=1)
    accum_factor = 1
    # opt = AccumOptimizer(Adam(), accum_factor)  # accumulation gradient(soft batch)
    model.compile(optimizer='Adam',
                  loss='binary_crossentropy',
                  metrics=[dice_coef, 'acc', 'mse'])
    model.summary()
    model.fit(X_train,
              Y_train,
              validation_split=.2,
              batch_size=8,
              epochs=30 * accum_factor,
              callbacks=callbacks_list)

    #################################
Example n. 17
from __future__ import print_function

import os
import numpy as np
from tqdm import tqdm
from keras.models import Model
from model import Deeplabv3


WEIGHTS_DIR = 'weights'
MODEL_DIR = 'models'
OUTPUT_WEIGHT_FILENAME = 'deeplabv3_weights_tf_dim_ordering_tf_kernels.h5'


print('Instantiating an empty Deeplabv3+ model...')
model = Deeplabv3(input_shape=(512, 512, 3), classes=21)

WEIGHTS_DIR = 'weights/'
print('Loading weights from', WEIGHTS_DIR)

for layer in tqdm(model.layers):
    if layer.weights:
        weights = []
        for w in layer.weights:
            weight_name = os.path.basename(w.name).replace(':0', '')
            weight_file = layer.name + '_' + weight_name + '.npy'
            weight_arr = np.load(os.path.join(WEIGHTS_DIR, weight_file))
            weights.append(weight_arr)
        layer.set_weights(weights)

Example n. 18
#%%
import pickle

import numpy as np
from sklearn.model_selection import train_test_split

from model import Deeplabv3

deeplab_model = Deeplabv3(input_shape=(512, 512, 3), classes=4)

deeplab_model.summary()

# %%
int_masks = pickle.load( open( "PASCAL_VOC2012_int_mask.p", "rb" ) )
#pickle.dump( int_masks, open( "PASCAL_VOC2012_int_mask.p", "wb" ) )


"""
from keras.utils.np_utils import to_categorical
one_hot_masks = to_categorical(int_masks, num_classes=22)
del int_masks
"""

x_train, x_valid, y_train, y_valid = train_test_split(
   np.array(train_images).reshape(-1, 101, 101, 3),
   np.array(int_masks).reshape(-1, 101, 101, 1),
   test_size=0.2, random_state=1337)

model = Deeplabv3(input_shape=(101, 101, 3), backbone="mobilenetv2", classes=22)
#model = Deeplabv3(input_shape=(101, 101, 3), backbone="xception", classes=22)
model.compile(loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy", "sparse_categorical_crossentropy"])


# In[33]:


print(model.summary())



x_train = np.append(x_train, [np.fliplr(x) for x in x_train], axis=0)
y_train = np.append(y_train, [np.fliplr(x) for x in y_train], axis=0)

Example n. 20
def main():
    # GPU
    limit_keras_gpu_usage(settings.gpu_limit)
    # configurations
    global exp_name
    exp_name = datetime.strftime(datetime.now(), '%y%m%d-%H%M%S')
    opt = {
        'width': settings.W,
        'height': settings.H,
        'n_classes': settings.n_classes,
        'batch_size': settings.batch_size,
        'epochs': settings.epochs,
        'workers': settings.workers,
        'wandb': settings.use_wandb,
        'monitor': settings.monitor,
        'mode': settings.mode
    }

    if settings.use_wandb:
        wandb.init(
            project="seg_keras",
            name=exp_name,
            config=opt,  #TODO: opt
            sync_tensorboard=True)

    # Setup model directory
    if not os.path.exists("trainings"):
        os.makedirs("trainings")
    if not os.path.exists(os.path.join('.', 'trainings', exp_name)):
        os.makedirs(os.path.join('.', 'trainings', exp_name))

    config_file_dst = os.path.join('.', 'trainings', exp_name,
                                   os.path.basename(settings.CONFIG_PATH))
    with open(config_file_dst, 'w') as f:
        yaml.dump(opt, f, default_flow_style=False, default_style='')

    if settings.use_wandb:
        wandb.save(config_file_dst)

    # Build data generators
    train_gen = BatchGenerator(settings.DATA_PATH,
                               settings.batch_size,
                               mode='train',
                               n_classes=settings.n_classes)
    valid_gen = BatchGenerator(settings.DATA_PATH,
                               settings.batch_size,
                               mode='valid',
                               n_classes=settings.n_classes)

    # Initialize a model
    cce = categorical_crossentropy
    metrics = [MIOU(settings.n_classes), categorical_accuracy]

    model_path = os.path.join('.', 'trainings', exp_name, exp_name + '.h5')
    model = Deeplabv3(weights=None,
                      input_shape=(settings.H, settings.W, 3),
                      classes=settings.n_classes,
                      activation='softmax',
                      backbone='mobilenetv2')
    model.summary()
    model.compile(optimizer=Adam(lr=settings.lr, epsilon=1e-8, decay=1e-6),
                  sample_weight_mode="temporal",
                  loss=cce,
                  metrics=metrics)
    #model.summary()

    # training
    model.fit_generator(train_gen,
                        steps_per_epoch=len(train_gen),
                        epochs=settings.epochs,
                        verbose=1,
                        callbacks=get_callbacks(model_path),
                        validation_data=valid_gen,
                        validation_steps=len(valid_gen),
                        max_queue_size=10,
                        workers=settings.workers,
                        use_multiprocessing=False)

    # save trflite model
    new_path = os.path.join('.', 'trainings', exp_name, exp_name + '.tflite')
    convert_to_tflite(model_path, new_path)
    if settings.use_wandb:
        wandb.save(os.path.join('trainings', exp_name))
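# MIOU used above is a project-specific metric class.  A minimal sketch (an
# assumption) that wraps tf.keras.metrics.MeanIoU so it accepts one-hot / softmax
# tensors:
import tensorflow as tf

class MIOU(tf.keras.metrics.MeanIoU):
    # MeanIoU expects integer label maps, so argmax both tensors first.
    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.argmax(y_true, axis=-1)
        y_pred = tf.argmax(y_pred, axis=-1)
        return super().update_state(y_true, y_pred, sample_weight)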
import os
import numpy as np
from tqdm import tqdm
from model import Deeplabv3
import coremltools
from coremltools.proto import NeuralNetwork_pb2
from matplotlib import pyplot as plt
import cv2  # used for resize; if you don't have it, use anything else
from PIL import Image

MODEL_DIR = 'models'

backbone = 'mobilenetv2'
print('Instantiating an empty Deeplabv3+ model...')
keras_model = Deeplabv3(input_shape=(512, 512, 3),
                        classes=21,
                        backbone=backbone,
                        weights=None)

WEIGHTS_DIR = 'weights/' + backbone
print('Loading weights from', WEIGHTS_DIR)
for layer in tqdm(keras_model.layers):
    if layer.weights:
        weights = []
        for w in layer.weights:
            weight_name = os.path.basename(w.name).replace(':0', '')
            weight_file = layer.name + '_' + weight_name + '.npy'
            weight_arr = np.load(os.path.join(WEIGHTS_DIR, weight_file))
            weights.append(weight_arr)
        layer.set_weights(weights)

# CoreML model needs to normalize the input (by converting image bits from (-1,1)), which is
from __future__ import print_function

import os
import numpy as np
from tqdm import tqdm
from model import Deeplabv3

MODEL_DIR = 'models'

print('Instantiating an empty Deeplabv3+ model...')
model = Deeplabv3(input_shape=(512, 512, 3),
                  classes=151,
                  backbone='xception',
                  weights=None)

WEIGHTS_DIR = 'weights/' + 'xception_ade'
print('Loading weights from', WEIGHTS_DIR)
for layer in tqdm(model.layers):
    if layer.weights:
        weights = []
        for w in layer.weights:
            weight_name = os.path.basename(w.name).replace(':0', '')
            weight_file = layer.name + '_' + weight_name + '.npy'
            weight_arr = np.load(os.path.join(WEIGHTS_DIR, weight_file))
            weights.append(weight_arr)
        layer.set_weights(weights)

print('Saving model weights...')
OUTPUT_WEIGHT_FILENAME = 'deeplabv3_' + \
    'xception_ade' + '_tf_dim_ordering_tf_kernels.h5'
if not os.path.exists(MODEL_DIR):
    os.makedirs(MODEL_DIR)
model.save_weights(os.path.join(MODEL_DIR, OUTPUT_WEIGHT_FILENAME))
os.makedirs(folder)
current_path = os.path.abspath(os.getcwd())
video_file = input('Enter file path/name: ')
file = str(dt.datetime.now()).replace(':', '.')[:19]
save_file = os.path.join(current_path, folder, f'Cleaned_{file}.avi')

os.chdir('keras-deeplab-v3-plus')

from matplotlib import pyplot as plt
import cv2
import numpy as np
from model import Deeplabv3
from imutils.video import WebcamVideoStream

deeplab_model = Deeplabv3()

vid = cv2.VideoCapture(video_file)
try:
    blur_value = int(input('Enter odd blur integer value for kernel: '))
    blurValue = (blur_value, blur_value)
except:
    print(
        'The provided blur value is not acceptable. Using default value of 35x35.'
    )
    blurValue = (35, 35)

width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
codec = cv2.VideoWriter_fourcc(*'DIVX')
writer = cv2.VideoWriter(save_file, codec, 15, (width, height))
Example n. 24
import numpy as np
from matplotlib import pyplot as plt

from model import Deeplabv3

if __name__ == '__main__':
    deeplab_model = Deeplabv3(weights='pascal_voc', backbone='xception', OS=8)
    img = plt.imread("./test/test-0.PNG")
    img = img[..., :3]
    # w, h, _ = img.shape
    # ratio = 512. / np.max([w, h])
    # resized = cv2.resize(img, (int(ratio * h), int(ratio * w)))
    # resized = resized / 127.5 - 1.
    pad_x = int(512 - img.shape[0])
    pad_y = int(512 - img.shape[1])
    resized2 = np.pad(img, ((0, pad_x), (0, pad_y), (0, 0)), mode='constant')
    score = deeplab_model.predict(np.expand_dims(resized2, 0))
    preds = np.argmax(score.squeeze(), -1)[:-pad_x, :-pad_y]
    plt.imshow(img)
    plt.show()
    print(np.unique(preds))
    mask = np.zeros_like(img)
    mask[preds == 15] = 1
    alpha = 0.5
    img_ = (1 - mask) * img + mask * (np.array([1, 0, 0]) * 0.5 + 0.5 * img)
    plt.imshow(img_)
    plt.show()
        map_func=lambda x, y: load_data(
            image_path=x, mask_path=y, augment=AUGMENT),
        batch_size=BATCH_SIZE,
        num_parallel_calls=tf.data.experimental.AUTOTUNE,
        drop_remainder=True))
# val_dataset = val_dataset.repeat()
val_dataset = val_dataset.prefetch(tf.data.experimental.AUTOTUNE)

print(val_dataset)

# pdb.set_trace()

from model import Deeplabv3

model = Deeplabv3(input_shape=(IMG_SIZE, IMG_SIZE, 3),
                  classes=2,
                  backbone='xception',
                  activation='softmax')
# model.load_weights("/home/jli/examode/camelyon-master/CAMELYON16_PREPROCESSING/level_1_patch/tmp_cam16/Deeplabv3_level2_768_10per_step25000.h5", by_name= True)

if HOROVOD:
    # Horovod: (optional) compression algorithm.
    compression = hvd.Compression.fp16 if fp16_ALLREDUCE else hvd.Compression.none

    opt = tf.optimizers.Adam(0.0001 * hvd.size(), epsilon=1e-1)
# Horovod: add Horovod DistributedOptimizer.

print("Compiling model...")

model.build(input_shape=(IMG_SIZE, IMG_SIZE, 3))
model.summary()
    #trs = [0.4,0.4,0.4,0.4,0.5,0.3]
    
    for i in range(N_Cls):
        prd[:,:,i] = prd[:,:,i] > trs[i]
    return prd

def check_predict(id='6120_2_3'):
    model = Deeplabv3(backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_x_jk0.5970')
    msk = predict_id(id, model)
    plt.show()

if __name__ == '__main__':
    data=np.load('test.npy')
    data2=np.load('612022.npy')   
    model = Deeplabv3(backbone='xception')
    model.load_weights('weights/deeplab_x_jk0.6617', by_name=True)
    #model = Deeplabv3(backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_m_jk0.6350')
    msk1 = predict_id(data, model)
    msk2 = predict_id(data2, model)
    tiff.imshow(msk2[:,:,1])
    model2 = get_unet()
    model2.load_weights('weights/unet_jk0.6198')
    msk2 = predict_id(data2, model2)
    for i in range(N_Cls):
        plt.imshow(msk1[:, :, i])
        #plt.imsave('picturem/_{}.tif'.format(i),msk[:,:,i])
    #model.summary()
    #plot_model(model,to_file='deeplab_x.png',show_shapes=True)
Example n. 27
import cv2
from model import Deeplabv3
import numpy as np

np.set_printoptions(precision=3)
img_size = 512  # the model is pretrained on a dataset with image size 512
#--------------------------------------

image = cv2.imread("1.jpeg", 1)
# np.set_printoptions(threshold=np.inf)  # option to print every value without truncation
weights = 'pascal_voc'  #{'pascal_voc','cityscapes'}
input_tensor = None
input_shape = (img_size, img_size, 3)
classes = 21
backbone = 'mobilenetv2'  #{'xception','mobilenetv2'}
activation = None  #{'softmax', 'sigmoid',None}
OS = 16  #{8,16}
#---------------------------------------

resized_image = cv2.resize(image, (img_size, img_size))
# make prediction
deeplab_model = Deeplabv3(weights, input_tensor, input_shape, classes,
                          backbone, activation, OS)
res = deeplab_model.predict(np.expand_dims(resized_image, 0))
res = res.squeeze()
print(res)

# labels = np.argmax(res, -1)
def check_predict(id='6120_2_3'):
    model = Deeplabv3(backbone='mobilenetv2')
    #model.load_weights('weights/deeplab_x_jk0.5970')
    msk = predict_id(id, model)
    plt.show()

import numpy as np

from data_gen import data_generator, x_test, y_test
from model import Deeplabv3

import matplotlib.pyplot as plt

batch_size = 10
gen = data_generator(x_test, y_test, batch_size=batch_size)
x, y, _ = next(gen)

print('load model...')
basemodel = Deeplabv3(input_shape=(28, 28, 3),
                      classes=11,
                      backbone='mobilenetv2')
basemodel.load_weights('checkpoints/DeepLabV3+-Weights-120.hdf5', by_name=True)
print('load model done.')

logits = basemodel.predict(x)
logits = np.argmax(logits, axis=-1)

for i in range(batch_size):
    img = x[i]
    logit = logits[i]
    label = np.max(logit)

    # Show the image together with the predicted mask and the classification label
    plt.subplot(121)
    plt.title('Image', fontsize='large', fontweight='bold')
Example n. 30
import tensorflow as tf
from keras.layers import Input

print(tf.__version__)  # must be 2.0.0 for this to work




from matplotlib import pyplot as plt
import cv2  # used for resize; if you don't have it, use anything else
import numpy as np
from model import Deeplabv3

new_deeplab_model = Deeplabv3(weights='cityscapes', backbone='xception', input_shape=(512, 512, 3), OS=16, classes=19)  # the Cityscapes dataset has 19 classes

img = plt.imread("imgs/image1.jpg")
w, h, _ = img.shape
ratio = 512. / np.max([w,h])
resized = cv2.resize(img, (int(ratio * h), int(ratio * w)))  # resize while keeping the aspect ratio
resized = resized / 127.5 - 1.  # normalize to [-1, 1]; keeping the aspect ratio means the short side must be padded afterwards
pad_x = int(512 - resized.shape[0])
resized2 = np.pad(resized, ((0, pad_x), (0, 0), (0, 0)), mode='constant')  # pad at the end (bottom of the image)
res = new_deeplab_model.predict(np.expand_dims(resized2,0))
# Note: np.argmax(axis=t) removes axis t from the result.
labels = np.argmax(res.squeeze(), -1)  # 512x512 map of the class index of each pixel; plotting the indices directly guarantees the same class always gets the same colour

plt.imshow(labels[:-pad_x])  # crop away the padding added at the bottom; it is not part of the output
plt.savefig("output.png")