def main():
    with CustomObjectScope(custom_objects()):
        model = load_model(SAVED_MODEL)

    # scipy.misc.imread was removed in SciPy 1.2+; imageio.imread is a drop-in replacement
    img_in = imageio.imread("data/border.jpg")

    i_width = 224
    i_height = 224

    img_resize = skimage.transform.resize(img_in, (i_width, i_height),
                                          preserve_range=True)
    img = np.copy(img_resize).astype('uint8')

    # use the local resize dimensions rather than an undefined `size`
    img_reshape = img.reshape(1, i_height, i_width, 3).astype(float)

    t1 = time.time()
    pred = model.predict(standardize(img_reshape)).reshape(i_height, i_width)
    elapsed = time.time() - t1
    print('elapsed1: ', elapsed)

    plt.subplot(2, 2, 1)
    plt.imshow(img)

    plt.subplot(2, 2, 2)
    plt.imshow(pred)

    plt.show()
def main():
    """
    Generate CoreML model for benchmark by using non-trained model.
    It's useful if you just want to measure the inference speed
    of your model
    """
    hack_coremltools()

    sizes = [224, 192, 160, 128]
    alphas = [1., .75, .50, .25]
    name_fmt = 'mobile_unet_{0:}_{1:03.0f}_{2:03.0f}'

    experiments = [{
        'name': name_fmt.format(s, a * 100, a * 100),
        'model': MobileUNet(input_shape=(s, s, 3),
                            input_tensor=Input(shape=(s, s, 3)),
                            alpha=a,
                            alpha_up=a),
    } for s, a in product(sizes, alphas)]

    for e in experiments:
        model = e['model']
        name = e['name']

        model.summary()

        with CustomObjectScope(custom_objects()):
            coreml_model = coremltools.converters.keras.convert(
                model, input_names='data')
        coreml_model.save('artifacts/{}.mlmodel'.format(name))
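For reference, `product(sizes, alphas)` expands to 16 size/alpha configurations, and the `{:03.0f}` fields zero-pad the alpha percentages. A quick standalone check of the naming scheme (standard library only):

from itertools import product

sizes = [224, 192, 160, 128]
alphas = [1., .75, .50, .25]
name_fmt = 'mobile_unet_{0:}_{1:03.0f}_{2:03.0f}'

# 4 sizes x 4 alphas = 16 experiment names
for s, a in product(sizes, alphas):
    print(name_fmt.format(s, a * 100, a * 100))
# mobile_unet_224_100_100, mobile_unet_224_075_075, ...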
def main(img_dir, only_weight, img_size):
    if only_weight != 0:
        with CustomObjectScope(custom_objects()):
            model = load_model(SAVED_MODEL)
            print("loading model file: ", SAVED_MODEL, img_size)

    else:
        weight_file = SAVED_WEIGHT
        model = MobileUNet(input_shape=(img_size, img_size, 3),
                           alpha=1,
                           alpha_up=0.25)

        model.load_weights(weight_file, by_name=True)
        print("loading weight file: ", weight_file, img_size)

    model.summary()

    img_files = glob(img_dir + '/*.jpg')
    img_files = sorted(img_files, key=os.path.getmtime)

    for img_file in reversed(img_files):
        # scipy.misc imread/imresize were removed; imageio + skimage cover both
        img = imageio.imread(img_file)
        img = skimage.transform.resize(img, (img_size, img_size),
                                       preserve_range=True).astype('uint8')

        mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

        mask = imageio.imread(mask_file)

        # order=0 reproduces interp='nearest'
        mask = skimage.transform.resize(mask, (img_size, img_size), order=0,
                                        preserve_range=True).astype('uint8')
        mask1 = mask[:, :, 0]

        batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
        pred1 = model.predict(standardize(batched1)).reshape(
            img_size, img_size)

        mask1 = mask1.astype(float) / 255

        dice = np_dice_coef(mask1, pred1)

        print('dice1: ', dice)
        print('sum:  ', np.sum(mask1))
        print('shape:', mask1.shape)

        pred1 = beautify(pred1)

        plt.subplot(2, 2, 1)
        plt.imshow(img)
        # plt.subplot(2, 2, 2)
        # plt.imshow(mask)
        plt.subplot(2, 2, 3)
        plt.imshow(pred1)
        plt.subplot(2, 2, 4)
        plt.imshow(mask1)
        plt.show()
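`np_dice_coef` is a project helper; a minimal NumPy sketch, assuming it implements the standard smoothed Dice coefficient 2|A∩B| / (|A| + |B|):

import numpy as np

def np_dice_coef(y_true, y_pred, smooth=1.0):
    # Dice: 2 * intersection over the summed mask areas; the smoothing
    # term avoids division by zero on empty masks
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    intersection = np.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (np.sum(y_true_f) + np.sum(y_pred_f) + smooth)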
def main(input_model_path):
    """
    Convert hdf5 file to CoreML model.
    :param input_model_path:
    :return:
    """
    out_path = re.sub(r"h5$", 'mlmodel', input_model_path)

    hack_coremltools()

    with CustomObjectScope(custom_objects()):
        model = load_model(input_model_path)
        coreml_model = coremltools.converters.keras.convert(model,
                                                            input_names='data')
    coreml_model.save(out_path)

    print('CoreML model is created at %s' % out_path)
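Usage is a single call; since `re.sub(r"h5$", ...)` only swaps the extension, the .mlmodel file lands next to the input (the path below is hypothetical):

main('artifacts/model-224.h5')
# -> CoreML model is created at artifacts/model-224.mlmodel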
def main():
    with CustomObjectScope(custom_objects()):
        model1 = load_model(SAVED_MODEL1)
        model2 = load_model(SAVED_MODEL2)

    images = np.load('data/images-224.npy')
    masks = np.load('data/masks-224.npy')
    # keep only the first (hair) channel
    masks = masks[:, :, :, 0].reshape(-1, size, size)

    _, images, _, masks = train_test_split(images,
                                           masks,
                                           test_size=0.2,
                                           random_state=seed)

    for img, mask in zip(images, masks):
        batched1 = img.reshape(1, size, size, 3).astype(float)
        batched2 = img.reshape(1, size, size, 3).astype(float)

        t1 = time.time()
        pred1 = model1.predict(standardize(batched1)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed1: ', elapsed)

        t1 = time.time()
        pred2 = model2.predict(standardize(batched2)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed2: ', elapsed)

        dice = np_dice_coef(mask.astype(float) / 255, pred1)
        print('dice1: ', dice)

        dice = np_dice_coef(mask.astype(float) / 255, pred2)
        print('dice2: ', dice)

        plt.subplot(2, 2, 1)
        plt.imshow(img)
        plt.subplot(2, 2, 2)
        plt.imshow(mask)
        plt.subplot(2, 2, 3)
        plt.imshow(pred1)
        plt.subplot(2, 2, 4)
        plt.imshow(pred2)
        plt.show()
def main(img_file):
    with CustomObjectScope(custom_objects()):
        model1 = load_model(SAVED_MODEL1)

    # scipy.misc imread/imresize were removed; imageio + skimage cover both
    img = imageio.imread(img_file)
    img = skimage.transform.resize(img, (img_size, img_size),
                                   preserve_range=True).astype('uint8')

    mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

    mask = imageio.imread(mask_file)
    mask = skimage.transform.resize(mask, (img_size, img_size),
                                    preserve_range=True).astype('uint8')

    batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
    pred1 = model1.predict(standardize(batched1)).reshape(img_size, img_size)

    plt.subplot(2, 2, 1)
    plt.imshow(img)
    plt.subplot(2, 2, 2)
    plt.imshow(pred1)
    plt.subplot(2, 2, 3)
    plt.imshow(mask)
    plt.show()
def main(input_model_path, output_dir, output_fn):
    """
    Convert hdf5 file to protocol buffer file to be used with TensorFlow.
    :param input_model_path:
    :param output_dir:
    :param output_fn:
    :return:
    """
    K.set_learning_phase(0)

    model = load_model(input_model_path, custom_objects=custom_objects())

    pred_node_names = ['output_%s' % n for n in range(num_output)]
    print('output nodes names are: ', pred_node_names)

    for idx, name in enumerate(pred_node_names):
        # model.outputs is always a list; model.output would be a bare tensor
        # for single-output models, and indexing it would slice the batch dim
        tf.identity(model.outputs[idx], name=name)

    sess = K.get_session()
    constant_graph = convert_variables_to_constants(sess,
                                                    sess.graph.as_graph_def(),
                                                    pred_node_names)
    graph_io.write_graph(constant_graph, output_dir, output_fn, as_text=False)
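To sanity-check the exported graph, it can be re-imported with the TensorFlow 1.x API (the file name below is a placeholder for output_dir/output_fn):

import tensorflow as tf

with tf.gfile.GFile('artifacts/model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    # the frozen graph carries the weights as constants; no checkpoint needed
    tf.import_graph_def(graph_def, name='')
    output = graph.get_tensor_by_name('output_0:0')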
def main(input_model_path):
    """
    Convert hdf5 file to CoreML model.
    :param input_model_path:
    :return:
    """
    out_path = re.sub(r"h5$", 'mlmodel', input_model_path)

    hack_coremltools()

    with CustomObjectScope(custom_objects()):
        model = load_model(input_model_path)
        # https://github.com/akirasosa/mobile-semantic-segmentation/issues/6#issuecomment-344508193
        coreml_model = coremltools.converters.keras.convert(
            model,
            input_names='image',
            image_input_names='image',
            red_bias=29.24429131 / 64.881128947,
            green_bias=29.24429131 / 64.881128947,
            blue_bias=29.24429131 / 64.881128947,
            image_scale=1. / 64.881128947)
    coreml_model.save(out_path)

    print('CoreML model is created at %s' % out_path)
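The bias/scale arguments encode CoreML's per-channel linear preprocessing, out = image_scale * in + bias. Assuming the project's `standardize` applies the same linear transform, the NumPy equivalent is:

import numpy as np

STD = 64.881128947
BIAS_NUM = 29.24429131  # numerator shared by the three channel biases above

def standardize_equivalent(img):
    # image_scale = 1/STD and bias = BIAS_NUM/STD, so
    # out = img/STD + BIAS_NUM/STD = (img + BIAS_NUM) / STD
    return (img.astype(np.float32) + BIAS_NUM) / STD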
def train(epochs, batch_size, img_size):
    fresh_training = True
    alpha_value = 0.9

    img_file = 'data/id_pack/images-{}.npy'.format(img_size)
    mask_file = 'data/id_pack/masks-{}.npy'.format(img_size)
    trained_model_path = 'artifacts/model-{}.h5'.format(img_size)

    print("training on image file:")
    print(img_file)
    print(mask_file)

    # Load the data
    train_gen, validation_gen, img_shape, train_len, val_len = load_data(
        img_file, mask_file)

    img_height = img_shape[0]
    img_width = img_shape[1]

    print(img_height, img_width)

    lr_base = 0.01 * (float(batch_size) / 16)

    if fresh_training:
        model = MobileUNet(input_shape=(img_height, img_width, 3),
                           alpha=alpha_value,
                           alpha_up=0.25)
    else:
        with CustomObjectScope(custom_objects()):
            model = load_model(SAVED_MODEL)

    model.summary()
    model.compile(
        optimizer=optimizers.Adam(lr=0.0001),
        # alternatives tried: optimizers.SGD(lr=0.0001, momentum=0.9),
        # optimizers.RMSprop(), dice_coef_loss, loss_gu
        loss='mean_absolute_error',
        metrics=[
            dice_coef, recall, precision, dice_coef_loss, 'mean_absolute_error'
        ],
    )

    # callbacks
    scheduler = callbacks.LearningRateScheduler(
        create_lr_schedule(epochs, lr_base=lr_base, mode='progressive_drops'))
    tensorboard = callbacks.TensorBoard(log_dir='./logs')
    csv_logger = callbacks.CSVLogger('logs/training.csv')
    checkpoint = callbacks.ModelCheckpoint(filepath=checkpoint_path,
                                           save_weights_only=True,
                                           save_best_only=True)
    # Note on legacy Keras generator methods: in the old interface,
    # steps_per_epoch and validation_steps were actually the number of
    # samples rather than batches. The legacy argument mapping was:
    #   samples_per_epoch -> steps_per_epoch
    #   val_samples       -> steps
    #   nb_epoch          -> epochs
    #   nb_val_samples    -> validation_steps
    nb_train_samples = train_len
    nb_validation_samples = val_len

    print("training sample is ", nb_train_samples)

    if fresh_training:
        cb_list = [scheduler, tensorboard, checkpoint, csv_logger]
    else:
        cb_list = [tensorboard, checkpoint, csv_logger]

    model.fit_generator(
        generator=train_gen(),
        steps_per_epoch=nb_train_samples // batch_size,
        epochs=epochs,
        validation_data=validation_gen(),
        validation_steps=nb_validation_samples // batch_size,
        callbacks=cb_list,
    )

    model.save(trained_model_path)
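`create_lr_schedule` is a project helper consumed by `callbacks.LearningRateScheduler`, which expects a function mapping the epoch index to a learning rate. A minimal sketch, assuming 'progressive_drops' means step drops at fixed fractions of the run (the drop points here are illustrative, not the project's actual values):

def create_lr_schedule(epochs, lr_base=0.01, mode='progressive_drops'):
    def schedule(epoch):
        if mode == 'progressive_drops':
            # drop the base rate by 10x at 50% and again at 75% of training
            if epoch > 0.75 * epochs:
                return lr_base * 0.01
            if epoch > 0.5 * epochs:
                return lr_base * 0.1
        return lr_base
    return schedule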
SAVED_MODEL1 = 'artifacts/model.h5'

size = 224

width = size
height = size

# crop offsets into the resized frame
x = 40
y = 0

# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = VideoStream().start()
time.sleep(2.0)

with CustomObjectScope(custom_objects()):
    model1 = load_model(SAVED_MODEL1)

# loop over the frames from the video stream
while True:
    frame = vs.read()
    resized = imutils.resize(frame, height=size)

    crop_frame = resized[y:y + height, x:x + width]
    # note: OpenCV frames are BGR while training images were loaded as RGB;
    # cv2.cvtColor(crop_frame, cv2.COLOR_BGR2RGB) may be needed before predicting
    reshaped = crop_frame.reshape(1, size, size, 3).astype(float)

    pred1 = model1.predict(standardize(reshaped)).reshape(size, size)

    # show the frame and the predicted mask
    cv2.imshow("Frame", crop_frame)
    cv2.imshow("Segment", pred1)

    # cv2.imshow needs waitKey to actually render; press 'q' to quit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cv2.destroyAllWindows()
vs.stop()