def main():
    with CustomObjectScope(custom_objects()):
        model = load_model(SAVED_MODEL)

    img_in = misc.imread("data/border.jpg")

    i_width = 224
    i_height = 224

    img_resize = skimage.transform.resize(img_in, (i_width, i_height),
                                          preserve_range=True)
    img = np.copy(img_resize).astype('uint8')

    img_reshape = img.reshape(1, i_height, i_width, 3).astype(float)

    t1 = time.time()
    pred = model.predict(standardize(img_reshape)).reshape(i_height, i_width)
    elapsed = time.time() - t1
    print('elapsed1: ', elapsed)

    plt.subplot(2, 2, 1)
    plt.imshow(img)

    plt.subplot(2, 2, 2)
    plt.imshow(pred)

    plt.show()
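The standardize helper these snippets call is imported from elsewhere in each repository and never shown. A minimal sketch, assuming simple zero-mean, unit-variance scaling of the image batch (the real preprocessing may differ, e.g. fixed ImageNet statistics):

import numpy as np

def standardize(batch):
    # Assumed implementation: zero-mean, unit-variance scaling of the batch.
    batch = batch - np.mean(batch)
    return batch / (np.std(batch) + 1e-7)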
Example 2
def main(img_file, weight_file):
    model = MobileUNet(input_shape=(128, 128, 3), alpha=1, alpha_up=0.25)

    model.summary()

    model.load_weights(weight_file, by_name=True)

    img = imread(img_file)
    img = imresize(img, (img_size, img_size))

    mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

    mask = imread(mask_file)
    mask = imresize(mask, (img_size, img_size))

    batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
    pred1 = model.predict(standardize(batched1)).reshape(img_size, img_size)

    if True:
        plt.subplot(2, 2, 1)
        plt.imshow(img)
        plt.subplot(2, 2, 2)
        plt.imshow(pred1)
        plt.subplot(2, 2, 3)
        plt.imshow(mask)
        plt.show()
Example 3
def test(model, train, test):
    m = tf.keras.models.load_model(model)
    x_test, y_test = data.load(test)

    # normalize test set relative to the train set
    x_train, y_train = data.load(train)
    mean = np.mean(x_train, axis=(0,1,2))
    std = np.std(x_train, axis=(0,1,2))
    x_test = data.standardize(x_test, mean, std)

    y_pred = m.predict(
        x=x_test,
        batch_size=32,
        verbose=1,
    )

    # Keep a copy of the raw probability scores before thresholding,
    # since the thresholding below modifies y_pred in place
    y_pred = y_pred.reshape(-1)
    y_scores = y_pred.copy()

    # Threshold default predictions at 0.5
    threshold = 0.5
    y_pred[y_pred <= threshold] = 0. # not melanoma
    y_pred[y_pred > threshold] = 1. # melanoma

    np.savez_compressed(os.path.join(os.path.dirname(model), 'predictions'),
                        y_true=y_test, y_pred=y_pred, y_scores=y_scores)
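np.savez_compressed appends a .npz suffix, so the archive written above can be read back for evaluation. A short usage sketch (key names taken from the call above):

import numpy as np

# Load the arrays written by test(); ravel both so shapes match.
saved = np.load('predictions.npz')
accuracy = np.mean(saved['y_true'].ravel() == saved['y_pred'].ravel())
print('accuracy:', accuracy)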
Example 4
def main(pb_file, img_file):
    """
    Predict and visualize with TensorFlow (frozen graph).
    :param pb_file: path to the frozen graph (.pb) file
    :param img_file: path to a .npy file of input images
    :return: None
    """
    with tf.gfile.GFile(pb_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name=prefix)

    for op in graph.get_operations():
        print(op.name)

    x = graph.get_tensor_by_name('%s/input_1:0' % prefix)
    y = graph.get_tensor_by_name('%s/output_0:0' % prefix)

    images = np.load(img_file).astype(float)
    img_h = images.shape[1]
    img_w = images.shape[2]

    with tf.Session(graph=graph) as sess:
        for img in images:
            batched = img.reshape(-1, img_h, img_w, 3)
            normalized = standardize(batched)

            pred = sess.run(y, feed_dict={
                x: normalized
            })
            plt.imshow(pred.reshape(img_h, img_w))
            plt.show()
Example 5
def main():
    with CustomObjectScope(custom_objects()):
        model1 = load_model(SAVED_MODEL1)
        model2 = load_model(SAVED_MODEL2)

    images = np.load('data/images-224.npy')
    masks = np.load('data/masks-224.npy')
    # only hair
    masks = masks[:, :, :, 0].reshape(-1, size, size)

    _, images, _, masks = train_test_split(images,
                                           masks,
                                           test_size=0.2,
                                           random_state=seed)

    for img, mask in zip(images, masks):
        batched1 = img.reshape(1, size, size, 3).astype(float)
        batched2 = img.reshape(1, size, size, 3).astype(float)

        t1 = time.time()
        pred1 = model1.predict(standardize(batched1)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed1: ', elapsed)

        t1 = time.time()
        pred2 = model2.predict(standardize(batched2)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed2: ', elapsed)

        dice = np_dice_coef(mask.astype(float) / 255, pred1)
        print('dice1: ', dice)

        dice = np_dice_coef(mask.astype(float) / 255, pred2)
        print('dice2: ', dice)

        if True:
            plt.subplot(2, 2, 1)
            plt.imshow(img)
            plt.subplot(2, 2, 2)
            plt.imshow(mask)
            plt.subplot(2, 2, 3)
            plt.imshow(pred1)
            plt.subplot(2, 2, 4)
            plt.imshow(pred2)
            plt.show()
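np_dice_coef is another repository helper that is not reproduced here. A minimal sketch, assuming the standard soft Dice coefficient on flattened masks:

import numpy as np

def np_dice_coef(y_true, y_pred, smooth=1.0):
    # Assumed implementation: soft Dice over the flattened masks.
    y_true = y_true.flatten()
    y_pred = y_pred.flatten()
    intersection = np.sum(y_true * y_pred)
    return (2.0 * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)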
Example 6
def save_dataset(dir_out, x, y, x_test, y_test, standardize=True):
    if not os.path.exists(dir_out):
        os.makedirs(dir_out)
    if standardize:
        x, y, x_test, y_test = data.standardize(x, y, x_test, y_test)
    np.savetxt(os.path.join(dir_out, 'x.csv'), x)
    np.savetxt(os.path.join(dir_out, 'y.csv'), y)
    np.savetxt(os.path.join(dir_out, 'x_test.csv'), x_test)
    np.savetxt(os.path.join(dir_out, 'y_test.csv'), y_test)
Example 7
def main(img_dir, only_weight, img_size):
    if only_weight != 0:
        with CustomObjectScope(custom_objects()):
            model = load_model(SAVED_MODEL)
            print("loading model file: ", SAVED_MODEL, img_size)

    else:
        weight_file = SAVED_WEIGHT
        model = MobileUNet(input_shape=(img_size, img_size, 3),
                           alpha=1,
                           alpha_up=0.25)

        model.load_weights(weight_file, by_name=True)
        print("loading weight file: ", weight_file, img_size)

    model.summary()

    img_files = glob(img_dir + '/*.jpg')
    img_files = sorted(img_files, key=os.path.getmtime)

    for img_file in reversed(img_files):
        img = imread(img_file)
        img = imresize(img, (img_size, img_size))

        mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

        mask = imread(mask_file)

        mask = imresize(mask, (img_size, img_size), interp='nearest')
        mask1 = mask[:, :, 0]

        batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
        pred1 = model.predict(standardize(batched1)).reshape(
            img_size, img_size)

        mask1 = mask1.astype(float) / 255

        dice = np_dice_coef(mask1, pred1)

        print('dice1: ', dice)
        print('sum ', np.sum(mask1))
        print('shap ', mask1.shape)

        pred1 = beautify(pred1)

        if True:
            plt.subplot(2, 2, 1)
            plt.imshow(img)
            #plt.subplot(2, 2, 2)
            #plt.imshow(mask)
            plt.subplot(2, 2, 3)
            plt.imshow(pred1)
            plt.subplot(2, 2, 4)
            plt.imshow(mask1)
            plt.show()
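beautify is likewise not shown. A rough sketch, assuming it only cleans the soft mask up for display; the actual helper may do something different:

from scipy.ndimage import median_filter

def beautify(pred, threshold=0.5):
    # Assumed post-processing: binarize the soft mask, then smooth speckle.
    binary = (pred > threshold).astype(float)
    return median_filter(binary, size=5)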
Example 8
def main(img_file):
    with CustomObjectScope(custom_objects()):
        model1 = load_model(SAVED_MODEL1)

    img = imread(img_file)
    img = imresize(img, (img_size, img_size))

    mask_file = re.sub('img2.jpg$', 'msk1.png', img_file)

    mask = imread(mask_file)
    mask = imresize(mask, (img_size, img_size))

    batched1 = img.reshape(1, img_size, img_size, 3).astype(float)
    pred1 = model1.predict(standardize(batched1)).reshape(img_size, img_size)

    if True:
        plt.subplot(2, 2, 1)
        plt.imshow(img)
        plt.subplot(2, 2, 2)
        plt.imshow(pred1)
        plt.subplot(2, 2, 3)
        plt.imshow(mask)
        plt.show()
Example 9
def main(weight_file):
    model = MobileUNet(input_shape=(128, 128, 3), alpha=1, alpha_up=0.25)

    model.summary()

    model.load_weights(weight_file, by_name=True)

    images = np.load('data/id_pack/images-128.npy')
    masks = np.load('data/id_pack/masks-128.npy')
    # only hair
    masks = masks[:, :, :, 0].reshape(-1, size, size)

    _, images, _, masks = train_test_split(images,
                                           masks,
                                           test_size=0.2,
                                           random_state=seed)

    for img, mask in zip(images, masks):
        batched1 = img.reshape(1, size, size, 3).astype(float)

        t1 = time.time()
        pred1 = model.predict(standardize(batched1)).reshape(size, size)
        elapsed = time.time() - t1
        print('elapsed1: ', elapsed)

        dice = np_dice_coef(mask.astype(float) / 255, pred1)
        print('dice1: ', dice)

        if True:
            plt.subplot(2, 2, 1)
            plt.imshow(img)
            plt.subplot(2, 2, 2)
            plt.imshow(mask)
            plt.subplot(2, 2, 3)
            plt.imshow(pred1)
            plt.show()
Example 10
elif args.dataset == 'mimic':  # assumed branch; the snippet starts mid-chain
    x, y = utils_load_dataset.load_mimic_patient(
        patient_id=int(args.dataset_option),
        csv_filename=os.path.join(dir_data, 'hr.csv'))
    x, y, x_test, y_test = util.format_data_torch(
        *data.train_val_split(x, y, frac_train=0.75, seed=args.seed))
else:
    print('dataset not found')

if args.validation_mode:
    # split training set into smaller training set and validation set, use as training set and test set going forward
    x, y, x_test, y_test = data.train_val_split(x,
                                                y,
                                                frac_train=0.8,
                                                seed=args.seed + 50)

x, y, x_test, y_test = data.standardize(x, y, x_test, y_test)

# center around zero
x = x - 0.5
x_test = x_test - 0.5

## check prior
x_plot = torch.linspace(-0.5, 0.5, 100).reshape(-1, 1)

y_samp_prior = net.sample_functions_prior(x_plot, n_samp=1000)
fig, ax = util.plot_functions(x_plot,
                              y_samp_prior,
                              x=x,
                              y=y,
                              x_test=x_test,
                              y_test=y_test)
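The four-argument data.standardize used by these regression scripts (and by run_gp in Example 15) is a different helper from the image version above. A minimal sketch, assuming it z-scores inputs and targets with training-set statistics:

def standardize(x, y, x_test, y_test):
    # Assumed behaviour: scale every split by the training set's moments.
    x_mean, x_std = x.mean(axis=0), x.std(axis=0)
    y_mean, y_std = y.mean(axis=0), y.std(axis=0)
    return ((x - x_mean) / x_std, (y - y_mean) / y_std,
            (x_test - x_mean) / x_std, (y_test - y_mean) / y_std)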
Example 11
def main(pb_file, img_file):
    """
    Convert the frozen graph to TFLite, predict, and visualize.
    :param pb_file: path to the frozen graph (.pb) file
    :param img_file: path to the input image
    :return: None
    """
    with tf.gfile.GFile(pb_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name=prefix)

    for op in graph.get_operations():
        print(op.name)

    x = graph.get_tensor_by_name('%s/input_1:0' % prefix)
    y = graph.get_tensor_by_name('%s/output_0:0' % prefix)

    loaded_image = cv2.cvtColor(cv2.imread(img_file, -1), cv2.COLOR_BGR2RGB)
    resized_image = cv2.resize(loaded_image, (128, 128))
    input_image = np.expand_dims(np.float32(resized_image[:128, :128]),
                                 axis=0) / 255.0

    # images = np.load(img_file).astype(float)
    # img_h = images.shape[1]
    # img_w = images.shape[2]

    with tf.Session(graph=graph) as sess:
        # for img in images:
        # batched = img.reshape(-1, img_h, img_w, 3)
        normalized = standardize(input_image)

        converter = tf.contrib.lite.TFLiteConverter.from_session(
            sess, [x], [y])
        tflite_model = converter.convert()
        with open("artifacts/converted_model.tflite", "wb") as f_out:
            f_out.write(tflite_model)

        # Load TFLite model and allocate tensors.
        interpreter = tf.contrib.lite.Interpreter(model_content=tflite_model)
        interpreter.allocate_tensors()

        # Get input and output tensors.
        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # Test model on random input data.
        # input_shape = input_details[0]['shape']
        input_data = np.array(normalized, dtype=np.float32)
        interpreter.set_tensor(input_details[0]['index'], input_data)

        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])
        # print(output_data)

        # pred = sess.run(y, feed_dict={
        #     x: normalized
        # })
        plt.imshow(output_data.reshape(128, 128))
        plt.show()
Example 12
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] camera sensor warming up...")
vs = VideoStream().start()
time.sleep(2.0)

with CustomObjectScope(custom_objects()):
    model1 = load_model(SAVED_MODEL1)

# loop over the frames from the video stream
while True:
    frame = vs.read()
    resized = imutils.resize(frame, height=size)

    crop_frame = resized[y:y + height, x:x + width]
    reshaped = crop_frame.reshape(1, size, size, 3).astype(float)

    pred1 = model1.predict(standardize(reshaped)).reshape(size, size)

    # show the frame
    cv2.imshow("Frame", crop_frame)
    cv2.imshow("Segment", pred1)

    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

cv2.destroyAllWindows()
vs.stop()
Example 13
def main(experiments, train_set, epochs, batch_size, l2, units, dropout,
         patience):
    # Set PRNG seeds so that all runs have the same initial conditions
    helpers.seed()

    # Standardize and split data
    x, y = data.load(train_set)
    x = data.standardize(x, np.mean(x, axis=(0, 1, 2)),
                         np.std(x, axis=(0, 1, 2)))
    x_train, x_validation, y_train, y_validation = sklearn.model_selection.train_test_split(
        x, y, test_size=0.15, shuffle=True, stratify=y)
    del x
    del y

    # Settings
    run = os.path.join(
        experiments,
        f'lambda{l2}_units{units}_dropout{dropout}_patience{patience}')
    helpers.create_or_recreate_dir(run)
    model_filename = os.path.join(run, 'model.h5')
    csv_filename = os.path.join(run, 'train.csv')

    callbacks = [
        helpers.TrainingTimeLogger(),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                             factor=0.1,
                                             patience=10,
                                             verbose=1,
                                             mode='max',
                                             min_delta=EPSILON,
                                             cooldown=0,
                                             min_lr=0),
        tf.keras.callbacks.EarlyStopping(monitor='val_acc',
                                         min_delta=EPSILON,
                                         patience=patience,
                                         verbose=1,
                                         mode='max',
                                         baseline=None),
        tf.keras.callbacks.CSVLogger(filename=csv_filename,
                                     separator=',',
                                     append=False),
    ]

    # Train
    model = custom2(l2, units, dropout)
    model.compile(loss='binary_crossentropy',
                  optimizer=tf.keras.optimizers.SGD(lr=10e-4,
                                                    momentum=0.9,
                                                    decay=0.0,
                                                    nesterov=False),
                  metrics=['accuracy'])
    model.fit(x=x_train,
              y=y_train,
              validation_data=(x_validation, y_validation),
              batch_size=batch_size,
              epochs=epochs,
              verbose=1,
              callbacks=callbacks,
              shuffle=True)
    model.save(model_filename)
    del model
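helpers.seed() is not shown either. A minimal sketch, assuming it pins every relevant PRNG for reproducible runs:

import random
import numpy as np
import tensorflow as tf

def seed(value=42):
    # Assumed implementation: fix Python, NumPy and TensorFlow seeds.
    random.seed(value)
    np.random.seed(value)
    tf.random.set_seed(value)  # tf.set_random_seed(value) on TF 1.x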
Example 14
# -*- coding: utf-8 -*-
import numpy as np
import tests
import data
import matplotlib

# %matplotlib auto  # view plots in a separate window (run from the console)
matplotlib.rcParams.update({'font.size': 30})

folder = 'datasets'
file_names = ['iris', 'wine', 'glass', 'pima', 'seeds']

metrics = ['euclidean', 'manhattan']
weights = ['uniform', 'distance']
scoring = ['accuracy', 'f1_macro']

# Prepare data
X, y = data.read_data(folder, file_names[4])
X = data.standardize(X)

# Run tests
k_test_set = np.arange(1, 21)
weight = 'uniform'
#tests.test_metrics_and_k(X, y, scoring, k_test_set, metrics, weight, True, 5)
#tests.test_weights_and_k(X, y, scoring, k_test_set, weights, 'manhattan', True, 5)

n_test_set = np.arange(2, 15)
tests.test_ncrossvalidation_with_const_k(X, y, n_test_set, scoring, 9,
                                         'manhattan', 'uniform')
#tests.test_ncrossvalidation_with_const_n(X, y, k_test_set, scoring, n, metric, weight)
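This script's one-argument data.standardize is yet another variant. A minimal sketch, assuming column-wise z-scoring of the feature matrix:

def standardize(X):
    # Assumed behaviour: zero mean, unit variance per feature column.
    return (X - X.mean(axis=0)) / X.std(axis=0)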
Example 15
def run_gp(dataset, dataset_option, seed, x_plot, sig2, dir_data, dir_out):

    ## dataset
    if dataset == 'motorcycle':
        x, y, x_test, y_test = reshape_dataset(
            *data.load_motorcycle(dir_data, seed=seed))
    elif dataset == 'finance':
        x, y, x_test, y_test = reshape_dataset(
            *data.load_finance2(dir_data, seed=seed))
    elif dataset == 'mimic_gap':
        x, y, x_test, y_test = reshape_dataset(
            *utils_load_dataset.load_mimic_patient_gap(
                dir_data, subject_id=dataset_option))
    elif dataset == 'GPdata':
        x, y, x_val, y_val, x_test, y_test, _, _ = data.load_GPdata(
            dir_data, lengthscale=dataset_option)
        x, y, x_test, y_test = reshape_dataset(x, y, x_test, y_test)
    elif dataset == 'finance_nogap':
        x, y, x_test, y_test = reshape_dataset(
            *data.load_finance2(dir_data, gaps=None, seed=seed))
    elif dataset == 'mimic':
        x, y = utils_load_dataset.load_mimic_patient(
            patient_id=int(dataset_option),
            csv_filename=os.path.join(dir_data, 'hr.csv'))
        x, y, x_test, y_test = reshape_dataset(
            *data.train_val_split(x, y, frac_train=0.75, seed=seed))

    x, y, x_test, y_test = data.standardize(x, y, x_test, y_test)

    ## fit gp
    kernel = GPy.kern.RBF(input_dim=1, variance=1.0, lengthscale=1.0)
    m = GPy.models.GPRegression(x, y, kernel)
    m.Gaussian_noise.variance.fix()
    m.Gaussian_noise.variance = sig2
    m.kern.lengthscale.constrain_bounded(1 / (5 * 2 * np.pi), 10)
    m.optimize_restarts(num_restarts=10, verbose=False)

    # posterior metrics
    y_plot_samp = m.posterior_samples_f(x_plot, size=1000)
    y_plot_samp = np.moveaxis(y_plot_samp, [0, 1, 2], [1, 2, 0])

    y_plot_pred, y_plot_pred_var = m.predict(x_plot,
                                             full_cov=False,
                                             include_likelihood=False)
    y_test_pred, y_test_pred_var = m.predict(x_test,
                                             full_cov=False,
                                             include_likelihood=False)

    y_test_samp = m.posterior_samples_f(x_test, size=1000)
    y_test_samp = np.moveaxis(y_test_samp, [0, 1, 2], [1, 2, 0])

    ll_test = np.mean(m.log_predictive_density(x_test, y_test))

    rmse_test = np.sqrt(np.mean((y_test_pred - y_test)**2))

    # save
    samples = {
        'x_plot': x_plot,
        'y_plot_pred': np.squeeze(y_plot_pred),
        'y_plot_pred_var': y_plot_pred_var,
        'y_plot_samp': y_plot_samp,
        'y_test_pred': np.squeeze(y_test_pred),
        'y_test_pred_var': y_test_pred_var,
        'y_test_samp': y_test_samp,
        'x': x,
        'y': y,
        'x_test': x_test,
        'y_test': y_test,
        'sig2': m.Gaussian_noise.variance.item(),
        'll_test': ll_test,
        'rmse_test': rmse_test,
        'kernel_lengthscale': m.kern.lengthscale.item(),
        'kernel_variance': m.kern.variance.item()
    }

    if not os.path.exists(dir_out):
        os.makedirs(dir_out)
    np.save(os.path.join(dir_out, 'samples.npy'), samples)
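reshape_dataset is not reproduced here. A minimal sketch, assuming it only coerces each split into the 2-D (n, 1) column shape that GPy.models.GPRegression expects:

import numpy as np

def reshape_dataset(x, y, x_test, y_test):
    # Assumed helper: GPy expects 2-D (n, 1) arrays for inputs and targets.
    return tuple(np.asarray(a).reshape(-1, 1) for a in (x, y, x_test, y_test))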