Code example #1
#%% Load model
# Imports used below (load_model may come from `keras.models` in older setups)
from tensorflow.keras.models import load_model
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

model = load_model(filepath + 'unet_exp_' + str(exp) + '.h5', compile=False)
area = 11
# Prediction over the whole image
ref_final, pre_final, prob_reconstructed, ref_reconstructed, mask_no_considered_, mask_ts, time_ts = prediction(
    model, image_array, image_ref, final_mask, mask_ts_, patch_size, area)

# Metrics
cm = confusion_matrix(ref_final, pre_final)
metrics = compute_metrics(ref_final, pre_final)
print('Confusion matrix \n', cm)
print('Accuracy: ', metrics[0])
print('F1score: ', metrics[1])
print('Recall: ', metrics[2])
print('Precision: ', metrics[3])

# Alarm area: share of pixels flagged as positive, (TP + FP) / total
total = (cm[1, 1] + cm[0, 1]) / len(ref_final) * 100
print('Area to be analyzed (%):', total)

print('training time', end_training)
print('test time', time_ts)

#%% Show the results
# Prediction over the whole image
fig1 = plt.figure('whole prediction')
plt.imshow(prob_reconstructed)
# Show the test tiles only
fig2 = plt.figure('prediction of test set')
plt.imshow(prob_reconstructed * mask_ts)
plt.show()
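
# `compute_metrics` is not defined in this snippet. A minimal sketch of what it
# plausibly looks like, assuming it returns accuracy, F1, recall and precision
# in the order the prints above expect (the body below is an assumption, not
# the project's actual helper):
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

def compute_metrics(true_labels, predicted_labels):
    accuracy = accuracy_score(true_labels, predicted_labels)
    f1 = f1_score(true_labels, predicted_labels)
    recall = recall_score(true_labels, predicted_labels)
    precision = precision_score(true_labels, predicted_labels)
    return [accuracy, f1, recall, precision]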
Code example #2
# ref_final, pre_final, prob_reconstructed, ref_reconstructed, mask_no_considered_, mask_ts, time_ts = prediction(model, image_array, image_ref, final_mask, mask_ts_, patch_size, area)

# Metrics
true_labels = np.reshape(patches_test_ref, (patches_test_ref.shape[0] * patches_test_ref.shape[1] * patches_test_ref.shape[2]))

predicted_labels = np.reshape(patches_pred, (patches_pred.shape[0] * patches_pred.shape[1] * patches_pred.shape[2]))

cm = confusion_matrix(true_labels, predicted_labels)
metrics = compute_metrics(true_labels, predicted_labels)
print('Confusion matrix \n', cm)
print('Accuracy: ', metrics[0])
print('F1score: ', metrics[1])
print('Recall: ', metrics[2])
print('Precision: ', metrics[3])

# Alarm area: share of pixels flagged as positive, (TP + FP) / total
total = (cm[1, 1] + cm[0, 1]) / len(true_labels) * 100
print('Area to be analyzed (%):', total)

print('training time', end_training)
print('test time', time_ts)

#%% Show the results
# Prediction over the whole image
fig1 = plt.figure('whole prediction')
plt.imshow(prob_reconstructed)
plt.show()

# Show the test tiles
# fig2 = plt.figure('prediction of test set')
# plt.imshow(prob_reconstructed * mask_ts)
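
# Note: the reshapes above flatten (num_patches, height, width) label maps into
# the 1-D vectors confusion_matrix expects; reshape(-1) is equivalent and less
# error-prone:
# true_labels = patches_test_ref.reshape(-1)
# predicted_labels = patches_pred.reshape(-1)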
Code example #3
# Imports assumed by this snippet
from deepexplain.tensorflow import DeepExplain
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import filters

# `reach`, `tss_trainx`, `tss_trainy`, `sess`, `x`, `x1` and the outer-loop
# index `i` are assumed to be defined earlier
grads = []  # collects the attribution map of each sampled input
for j in range(50):
    idx = int(reach[j])
    trainx = tss_trainx[idx + i * 100:idx + 1 + i * 100]
    trainy = tss_trainy[idx + i * 100:idx + 1 + i * 100]
    if not trainy:
        trainy = -1  # encode the negative class as -1 so `x1 * trainy` flips the explained sign
    # We use the DeepExplain repository (https://github.com/marcoancona/DeepExplain)
    # to interpret the CNN model's predictions
    with DeepExplain(session=sess) as de:
        # We run `explain()` several times to compare different attribution methods
        attributions = {
            # Gradient-based
            'Saliency maps': de.explain('saliency', x1 * trainy, x, trainx),
            #'Gradient * Input':     de.explain('grad*input', x1 * trainy, x, trainx),
            'Integrated Gradients': de.explain('intgrad', x1 * trainy, x, trainx),
            #'Epsilon-LRP':          de.explain('elrp', logits * yi, X, xi),
            #'DeepLIFT (Rescale)':   de.explain('deeplift', logits * yi, X, xi),
            # Perturbation-based
            #'_Occlusion [1x1]':      de.explain('occlusion', logits * yi, X, xi),
            #'_Occlusion [3x3]':      de.explain('occlusion', logits * yi, X, xi, window_shape=(3,))
        }
        print('Done', i)
    grads.append(attributions['Integrated Gradients'][0])
grads = np.stack(grads)
grads_sum = np.mean(grads, axis=0)  # average attribution map over the sampled inputs
im = filters.gaussian_filter(grads_sum, 4)  # smooth for display (Gaussian, sigma = 4)
# pdb.set_trace()  # optional: drop into the debugger before plotting
plt.imshow(im)
plt.show()
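
# The snippet assumes a TF1-style session `sess`, the model input placeholder
# `x`, and a pre-softmax target tensor `x1` were created earlier. A minimal
# sketch of that setup for a Keras `model` (variable names are assumptions;
# per the DeepExplain README, override-based methods such as DeepLIFT also
# need the model graph built inside a DeepExplain context, while the gradient
# methods used above do not):
from keras import backend as K

sess = K.get_session()           # TF1-style Keras session
x = model.layers[0].input        # model input placeholder
x1 = model.layers[-2].output     # pre-softmax scores to explain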
Code example #4
import os
import numpy as np
import matplotlib.pyplot as plt

# The snippet starts mid-function; the enclosing definition below is
# reconstructed from context (function name, signature and dtype are assumptions).
def rgb_to_labels(img_train_ref, label_dict):
    h, w = img_train_ref.shape[0], img_train_ref.shape[1]
    binary_img_train_ref = np.zeros((h, w), dtype=np.uint8)  # integer class map
    for i in range(h):
        for j in range(w):
            r = img_train_ref[i][j][0]
            g = img_train_ref[i][j][1]
            b = img_train_ref[i][j][2]
            rgb = (r, g, b)
            rgb_key = str(rgb)
            binary_img_train_ref[i][j] = label_dict[rgb_key]

    return binary_img_train_ref


root_path = './DATASETS/homework3'
# Load images
img_train_path = 'Image_Train.tif'
# load_tiff_image is assumed to be a project helper returning a (bands, H, W) array
img_train = load_tiff_image(os.path.join(root_path, img_train_path))
img_train = img_train.transpose((1, 2, 0))  # (bands, H, W) -> (H, W, bands)
plt.imshow(img_train)
plt.show()
print(f'Img train max: {img_train.max()}, Img train min: {img_train.min()}')
# img_train = plt.imread(os.path.join(root_path,
#                                    img_train_path))
# print(type(img_train))
# Normalizes the image
# scaler = StandardScaler()
# scaler = MinMaxScaler(feature_range=(-1,1))
# img_reshaped = img_train.reshape((img_train.shape[0]*img_train.shape[1]), img_train.shape[2])
# scaler.fit(img_reshaped)
# image_normalized = scaler.transform(img_reshaped)
# img_normalized = image_normalized.reshape(img_train.shape[0], img_train.shape[1], img_train.shape[2])
img_normalized = img_train / 127.5 - 1  # map 8-bit values in [0, 255] to [-1, 1]
print(f'Img norm max: {img_normalized.max()}, Img norm min: {img_normalized.min()}')
# img_normalized = img_normalized.transpose((1, 2, 0))
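
# The fixed /127.5 - 1 mapping assumes the image spans the full 8-bit range
# [0, 255]. The commented-out scikit-learn path above scales to the data's
# actual min/max instead; a minimal working sketch of it (stored under a
# separate name so it does not clobber img_normalized):
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler(feature_range=(-1, 1))
h, w, c = img_train.shape
img_normalized_mm = scaler.fit_transform(img_train.reshape(-1, c)).reshape(h, w, c)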
Code example #5
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 24 20:37:24 2017

@author: ZSHUJON
"""

# `utils` is a project-local module assumed to expose imread and matplotlib's pyplot
from utils import imread, plt

road_image = imread('test_images/solidYellowCurve2.jpg')

plt.imshow(road_image)
plt.show()
Code example #6
                (60, 60), font, 1, (255, 255, 255), 2)
    cv2.putText(blend_on_road,
                'Offset from center: {:.02f}m'.format(offset_meter), (60, 90),
                font, 1, (255, 255, 255), 2)

    processed_frames += 1

    return blend_on_road


if __name__ == '__main__':

    # step 1: calibrate the camera
    mtx, dist = calibrate('camera_cal')
    mode = "video"
    processed_frames = 0
    line_lt, line_rt = Line(buffer_len=10), Line(buffer_len=10)

    if mode == "image":
        for test_img in glob.glob('test_images/*.jpg'):
            img = mpimg.imread(test_img)
            img_out = process_pipeline(img, True)
            plt.imshow(img_out)
            plt.show()

    elif mode == "video":
        selector = 'project'
        clip = VideoFileClip(
            '{}_video.mp4'.format(selector)).fl_image(process_pipeline)
        clip.write_videofile('out_{}.mp4'.format(selector), audio=False)
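
# Note: fl_image expects a function mapping a single RGB frame to a frame, so
# passing process_pipeline directly only works if its second argument (True in
# the image branch above) has a default value. Otherwise, wrap it:
# clip = VideoFileClip('{}_video.mp4'.format(selector)).fl_image(
#     lambda frame: process_pipeline(frame, True))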