Example #1
def process_image(image_path, output_path):
    explainer = GradCAM()

    img = image.load_img(image_path, target_size=(224, 224))
    img = image.img_to_array(img)
    data = ([img], None)

    original_image = Image.open(image_path)
    width = int(original_image.size[0] / 4)
    height = int(original_image.size[1] / 4)
    original_image.thumbnail((width, height), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10

    class_index, class_name, prob_value = get_predictions(img, model)
    print('class_index:', class_index)
    heatmap = explainer.explain(data, model, "block5_conv3", class_index)

    # overlay the text prediction on the heatmap overlay
    heatmap_with_prediction_overlayed = overlay_prediction_on_image(
        array_to_PIL(heatmap), class_name[-1], prob_value[0] * 100, width,
        height)

    # place the images side by side
    joined_image = join_images(original_image,
                               heatmap_with_prediction_overlayed)
    joined_image.save(output_path)
Example #2
    def grad_cam(self, value, index):
        for classe in self.labels:
            grad_cam = GradCAM()
            grid = grad_cam.explain((value, self.setY[index]),
                                    self.model,
                                    class_index=classe,
                                    layer_name=self.target_layers)
            name = "n_{}_".format(index) + str(classe) + "_grad_cam.png"
            grad_cam.save(grid, ".", sg.PATH_GRAD + name)
Example #3
    def on_epoch_end(self, epoch, logs=None):
        """
        Draw GradCAM outputs at each epoch end to Tensorboard.

        Args:
            epoch (int): Epoch index
            logs (dict): Additional information on epoch
        """
        explainer = GradCAM()
        heatmap = explainer.explain(self.validation_data, self.model,
                                    self.layer_name, self.class_index)

        # Using the file writer, log the reshaped image.
        with self.file_writer.as_default():
            tf.summary.image("Grad CAM", np.array([heatmap]), step=epoch)
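
A note on wiring: the on_epoch_end hook above belongs to a custom tf.keras.callbacks.Callback. A minimal sketch of how such a callback might be assembled and passed to model.fit follows; the class name, constructor arguments and log directory are assumptions, and the explain call mirrors the snippet above (keyword arguments are used so it works across tf_explain versions).

import numpy as np
import tensorflow as tf
from tf_explain.core.grad_cam import GradCAM


class GradCAMLogger(tf.keras.callbacks.Callback):
    """Hypothetical callback: log a Grad CAM heatmap to TensorBoard after each epoch."""

    def __init__(self, validation_data, layer_name, class_index,
                 log_dir="./logs/grad_cam"):
        super().__init__()
        self.validation_data = validation_data  # (images, labels) tuple for explain()
        self.layer_name = layer_name            # e.g. "block5_conv3" for VGG16
        self.class_index = class_index
        self.file_writer = tf.summary.create_file_writer(log_dir)

    def on_epoch_end(self, epoch, logs=None):
        explainer = GradCAM()
        heatmap = explainer.explain(self.validation_data, self.model,
                                    layer_name=self.layer_name,
                                    class_index=self.class_index)
        with self.file_writer.as_default():
            tf.summary.image("Grad CAM", np.array([heatmap]), step=epoch)


# Usage sketch (x_train, x_val and the class index are placeholders):
# model.fit(x_train, y_train, epochs=5,
#           callbacks=[GradCAMLogger((x_val[:8], None), "block5_conv3", 0)])
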
Example #4
def test_should_explain_output(mocker):
    mock_get_gradients = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.get_gradients_and_filters",
        return_value=(
            [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
            [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
        ),
    )
    mocker.sentinel.cam_1.numpy = lambda: mocker.sentinel.cam_1
    mocker.sentinel.cam_2.numpy = lambda: mocker.sentinel.cam_2
    mock_generate_output = mocker.patch(
        "tf_explain.core.grad_cam.GradCAM.generate_ponderated_output",
        return_value=[mocker.sentinel.cam_1, mocker.sentinel.cam_2],
    )
    mocker.patch(
        "tf_explain.core.grad_cam.heatmap_display",
        side_effect=[mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2],
    )
    mocker.patch("tf_explain.core.grad_cam.grid_display",
                 side_effect=lambda x: x)

    explainer = GradCAM()
    data = ([mocker.sentinel.image_1,
             mocker.sentinel.image_2], mocker.sentinel.labels)
    grid = explainer.explain(
        data,
        mocker.sentinel.model,
        mocker.sentinel.class_index,
        mocker.sentinel.layer_name,
        mocker.sentinel.use_guided_grads,
    )

    for heatmap, expected_heatmap in zip(
            grid, [mocker.sentinel.heatmap_1, mocker.sentinel.heatmap_2]):
        assert heatmap == expected_heatmap

    mock_get_gradients.assert_called_once_with(
        mocker.sentinel.model,
        [mocker.sentinel.image_1, mocker.sentinel.image_2],
        mocker.sentinel.layer_name,
        mocker.sentinel.class_index,
        mocker.sentinel.use_guided_grads,
    )

    mock_generate_output.assert_called_once_with(
        [mocker.sentinel.conv_output_1, mocker.sentinel.conv_output_2],
        [mocker.sentinel.guided_grads_1, mocker.sentinel.guided_grads_2],
    )
Example #5
def predict_with_gradcam(model,
                         imgNP,
                         labels,
                         selected_labels,
                         layer_name='bn',
                         expected=None,
                         predictions=None,
                         showPlot=False):
    '''
    TODO: The explainer requires the model to be sent on every call, which may be
          expensive. Need to find a way to initialize the model once or share it
          with the predict engine; otherwise the memory required may double.
          (See the sketch after this example.)
    '''
    #preprocessed_input = load_image(img, image_dir, df)
    if predictions is None:
        predictions = model.predict(imgNP)

    #print("Loading original image")
    plt.figure(figsize=(15, 10))
    plt.subplot(151)
    plt.title(("Original - " +
               expected) if expected is not None else "Original")
    plt.axis('off')
    plt.imshow(imgNP[0], cmap='gray')

    explainer = GradCAM()

    j = 1
    for i in range(len(labels)):
        if labels[i] in selected_labels:
            #print("Generating gradcam for class",labels[i])
            #gradcam = grad_cam(model, imgNP, i, layer_name)
            #print("the class index is :", i)
            gradcam = explainer.explain(validation_data=(imgNP, labels),
                                        model=model,
                                        layer_name=layer_name,
                                        class_index=i)
            plt.subplot(151 + j)
            #plt.title(labels[i]+": p="+str(predictions[0][i]))
            plt.title("{:}: p={:.2f}%".format(labels[i],
                                              predictions[0][i] * 100))
            plt.axis('off')
            plt.imshow(imgNP[0], cmap='gray')
            plt.imshow(gradcam, cmap='jet', alpha=min(0.5, predictions[0][i]))
            j += 1
    if showPlot:
        plt.show()
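
The TODO above points out that handing the model to the explainer on every call may be expensive and could double memory use. A minimal sketch of one way to load the model once and share it between prediction and explanation (the module-level cache, weights path and helper names are assumptions, not part of the original code):

from functools import lru_cache

from tensorflow.keras.models import load_model
from tf_explain.core.grad_cam import GradCAM

_EXPLAINER = GradCAM()  # one shared explainer instance; the model is passed per call


@lru_cache(maxsize=1)
def get_shared_model(weights_path="model.h5"):
    # Hypothetical helper: load the Keras model a single time and hand the same
    # object to both the predict engine and the explainer.
    return load_model(weights_path)


def explain_label(img_np, class_index, layer_name='bn'):
    # img_np is a batch-shaped image array, as in predict_with_gradcam above.
    model = get_shared_model()
    return _EXPLAINER.explain((img_np, None), model,
                              layer_name=layer_name, class_index=class_index)
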
Example #6
def save_all_explainer(validation_data,
                       model,
                       name_conv,
                       n_conv=1,
                       dir_save_im='./',
                       save_name='outputs'):
    explainerGradCam = GradCAM()
    explainerActiv = ExtractActivations()
    explainerOccl = OcclusionSensitivity()
    explainerSmoothGrad = SmoothGrad()

    for i in range(1, n_conv + 1):
        output = explainerActiv.explain(validation_data, model,
                                        '{}_{}'.format(name_conv, i))
        explainerActiv.save(output, dir_save_im,
                            '{}-activ-conv{}.jpg'.format(save_name, i))

        output = explainerGradCam.explain(validation_data, model,
                                          '{}_{}'.format(name_conv, i), 0)
        explainerGradCam.save(output, dir_save_im,
                              '{}-gradCam0-conv{}.jpg'.format(save_name, i))

    output = explainerSmoothGrad.explain(validation_data, model, 0)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth0.jpg'.format(save_name))

    output = explainerSmoothGrad.explain(validation_data, model, 1)
    explainerSmoothGrad.save(output, dir_save_im,
                             '{}-smooth1.jpg'.format(save_name))

    output = explainerOccl.explain(validation_data, model, 0, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens0.jpg'.format(save_name))
    output = explainerOccl.explain(validation_data, model, 1, 5)
    explainerOccl.save(output, dir_save_im,
                       '{}-occlSens1.jpg'.format(save_name))
Example #7
        img_explain1 = data_input1_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain2 = data_input2_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain3 = data_input3_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')
        img_explain4 = data_input4_vali[
            ind_in_images_vali[0:25], :, :, :].astype('float32')

        data = ([img_explain1, img_explain2, img_explain3, img_explain4], None)

        # Save output
        output_dir = '.'
        output_name = 'grad_cam_class_%d_input_%d.png' % (class_index,
                                                          input_index)

        output = explainer.explain(data, model, name_of_conv, class_index,
                                   input_index)
        explainer.save(output, output_dir, output_name)

#%%
for class_index in range(7):
    for input_index in range(4):
        ind_in_images_vali = np.where(labels_vali == class_index)[0]

        fig = plt.figure(figsize=(10, 10))
        for m in range(25):
            plt.subplot(5, 5, m + 1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            plt.imshow(images_vali[ind_in_images_vali[m], :, :, input_index],
                       cmap='gray')
Example #8
shuffle(files)

# f = "2019-11-12_07-25-14_840.jpg"
for f in files:

    img_pil = image.load_img(path=testDir + f, target_size=(224, 224))

    img = image.img_to_array(img_pil)

    im_np = preprocess_input(img)

    pred = model.predict(np.expand_dims(im_np, axis=0))
    prob = np.round(np.max(pred) * 100)
    ti = shortNamesList[np.argmax(pred)]

    grid1 = explainer.explain((np.expand_dims(im_np, axis=0), None), model,
                              'mixed10', np.argmax(pred))
    grid_occ = explainer_occ.explain((np.expand_dims(im_np, axis=0), None),
                                     model, np.argmax(pred), 75)

    fig = plt.figure(figsize=(18, 8))

    ax1 = fig.add_subplot(1, 2, 1)
    ax1.imshow(img_pil)
    ax1.imshow(grid1, alpha=0.6)

    # ax2 = fig.add_subplot(1, 3, 2)
    # ax2.imshow(grid1, alpha=0.6)

    ax2 = fig.add_subplot(1, 2, 2)
    ax2.imshow(img_pil)
    ax2.imshow(grid_occ, alpha=0.6)
Example #9
import tensorflow as tf

from tf_explain.core.grad_cam import GradCAM

IMAGE_PATH = './cat.jpg'

if __name__ == '__main__':
    model = tf.keras.applications.vgg16.VGG16(weights='imagenet',
                                              include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = GradCAM()
    # Compute GradCAM on VGG16
    grid = explainer.explain(data, model, 'block5_conv3',
                             tabby_cat_class_index)
    explainer.save(grid, '.', 'grad_cam.png')
Example #10
def gradCAM(X, model, layer_name, class_index):
    explainerGradCam = GradCAM()
    outputs = explainerGradCam.explain((X, None), model, layer_name,
                                       class_index)
    return cv2.cvtColor(outputs, cv2.COLOR_RGB2BGR)
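
A possible call site for the helper above, assuming a preprocessed image array and a loaded Keras model (the layer name, class index and output path are placeholders):

import cv2
import numpy as np

# img is a single preprocessed image; the helper returns a BGR array ready for OpenCV.
heatmap_bgr = gradCAM(np.expand_dims(img, axis=0), model, "block5_conv3", 281)
cv2.imwrite("grad_cam_bgr.png", heatmap_bgr)
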
Example #11
    test_generator = eval_test_datagen.flow_from_directory(
        testDir,
        target_size=(224, 224),
        batch_size=1,
        class_mode='categorical')

    labels = test_generator.class_indices
    labels = dict((v, k) for k, v in labels.items())

    count = 0
    filepaths = test_generator.filepaths
    for img, label in test_generator:

        pred = model.predict(img)

        grid1 = explainer.explain((img, None), model, 'mixed10',
                                  np.argmax(pred))

        c = LIST_OF_CLASSES[np.argmax(pred)]

        im = Image.open(
            test_generator.filepaths[test_generator.index_array[count]])
        plt.figure()
        # plt.subplot(211)
        # plt.imshow(np.squeeze(img))
        # # plt.title(c)
        # plt.subplot(212)
        img2 = np.squeeze(img)
        img2 = img2 + 1
        img2 = img2 / 2

        fig = plt.figure(figsize=(18, 8))
Example #12
def explain_tfexplain():

    global graph
    data = {"success": "failed"}
    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.form.get("image"):
            explainer = flask.request.args.get("explainer")
            #with graph.as_default():
            model_path = flask.request.form.get("model_path")
            model = load_model(model_path)
            # read the image in PIL format
            image64 = flask.request.form.get("image")
            image = base64.b64decode(image64)
            image = Image.open(io.BytesIO(image))
            image = prepare_image(image, target=(224, 224))
            image = image * (1. / 255)
            #img = tf.keras.preprocessing.image.img_to_array(image)
            prediction = model.predict(image)
            topClass = getTopXpredictions(prediction, 1)
            print(topClass[0])
            image = np.squeeze(image)

            if explainer == "GRADCAM":
                im = ([image], None)
                from tf_explain.core.grad_cam import GradCAM
                exp = GradCAM()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "grad_cam.png")

            elif explainer == "OCCLUSIONSENSITIVITY":
                im = ([image], None)
                from tf_explain.core.occlusion_sensitivity import OcclusionSensitivity
                exp = OcclusionSensitivity()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0], patch_size=10)
                #exp.save(imgFinal, ".", "grad_cam.png")  

            elif explainer == "GRADIENTSINPUTS":
                im = (np.array([image]), None)
                from tf_explain.core.gradients_inputs import GradientsInputs
                exp = GradientsInputs()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "VANILLAGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.vanilla_gradients import VanillaGradients
                exp = VanillaGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "SMOOTHGRAD":
                im = (np.array([image]), None)
                from tf_explain.core.smoothgrad import SmoothGrad
                exp = SmoothGrad()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            elif explainer == "INTEGRATEDGRADIENTS":
                im = (np.array([image]), None)
                from tf_explain.core.integrated_gradients import IntegratedGradients
                exp = IntegratedGradients()
                imgFinal = exp.explain(im, model, class_index=topClass[0][0])
                #exp.save(imgFinal, ".", "gradients_inputs.png")
            
            elif explainer == "ACTIVATIONVISUALIZATION":
                # need some solution to find out and submit layer names (see the sketch after this example)
                im = (np.array([image]), None)
                from tf_explain.core.activations  import ExtractActivations
                exp = ExtractActivations()
                imgFinal = exp.explain(im, model, layers_name=["activation_1"])
                #exp.save(imgFinal, ".", "gradients_inputs.png")

            img = pilimage.fromarray(imgFinal)
            imgByteArr = inputoutput.BytesIO()
            img.save(imgByteArr, format='JPEG')
            imgByteArr = imgByteArr.getvalue()

            img64 = base64.b64encode(imgByteArr)
            img64_string = img64.decode("utf-8")

            data["explanation"] = img64_string
            data["prediction"] = str(topClass[0][0])
            data["prediction_score"] = str(topClass[0][1])
            data["success"] = "success"
                    
    return flask.Response(json.dumps(data), mimetype="text/plain")
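
The ACTIVATIONVISUALIZATION branch above still needs a way to discover and submit layer names. A minimal sketch of one way to enumerate candidate layer names from the loaded Keras model, so the client can pick one to pass as layers_name (the layer-type filter is an assumption; any name the model exposes would work):

import tensorflow as tf


def candidate_layer_names(model):
    # Hypothetical helper: list layers that are usually worth visualizing.
    interesting = (tf.keras.layers.Conv2D, tf.keras.layers.Activation)
    return [layer.name for layer in model.layers if isinstance(layer, interesting)]


# e.g. return candidate_layer_names(model) to the client alongside the prediction,
# then call ExtractActivations().explain(im, model, layers_name=[chosen_name]).
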
Example #13
    expanded_image = np.expand_dims(image, axis=0)
    
    class_names = ['Focal Leakage', 'Normal', 'Papilledema', 'Punctuate Leakage', 'Vessel Leakage']
    classes = model.predict(expanded_image)[0]
    (focal, normal, papilledema, punctuate, vessel) = classes
    pred = list(classes)
    x = max(pred)          # highest probability
    y = pred.index(x)      # index of the predicted class
    predicted_class_index = y
    classes = class_names[y]
    x = int(x * 100)
    st.subheader(classes + ':  ' + str(x) + '%')

    if grad_cam_button:
        data = ([image], None)
        explainer = GradCAM()
        grad_cam_grid = explainer.explain(
            data, model, class_index=y, layer_name="mixed7"
        )
        st.image(grad_cam_grid, width=200)

    if occlusion_sensitivity_button:
        data = ([image], None)
        explainer = OcclusionSensitivity()
        sensitivity_occlusion_grid = explainer.explain(data, model, y, patch_size_value)
        st.image(sensitivity_occlusion_grid, width=200)

Example #14
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')

test_loss, test_acc = model.evaluate(images_vali, labels_vali, verbose=2)

print(test_acc)
"""Our simple CNN has achieved a test accuracy of over 70%. Not bad for a few lines of code! For another CNN style, 
see an example using the Keras subclassing API and a `tf.GradientTape` 
[here](https://www.tensorflow.org/tutorials/quickstart/advanced)."""

#%%
# Instantiation of the explainer
explainer = GradCAM()

for class_index in range(7):
    ind_in_images_vali = np.where(labels_vali == class_index)[0]

    img_explain = images_vali[ind_in_images_vali[0:25], :, :, :]
    # print(img_single.size)

    data = (img_explain, None)

    # Save output
    output_dir = '.'
    output_name = 'grad_cam_class_%d.png' % class_index

    output = explainer.explain(data, model, "conv3", class_index)
    explainer.save(output, output_dir, output_name)
Example #15
####################################  Load Data #####################################
folder    = './drive/MyDrive/processed_data/'
te_data   = np.load(folder+'data_test.npy')
FOV       = np.load(folder+'FOV_te.npy')
te_mask   = np.load(folder+'mask_test.npy')

te_data  = np.expand_dims(te_data, axis=3)


print('Dataset loaded')
#te_data2  = dataset_normalized(te_data)
te_data2 = te_data / 255.

explainer = GradCAM()

dataaa = (te_data2[1].reshape(1, 512, 512, 1), None)
grid = explainer.explain(dataaa, model, class_index=1)

explainer.save(grid, ".", "grad_cam.png")

explainer = OcclusionSensitivity()

# OcclusionSensitivity.explain also needs a patch_size; 10 here is an arbitrary choice
grid = explainer.explain(dataaa, model, class_index=1, patch_size=10)

explainer.save(grid, ".", "occ_cam.png")
Example #16
            (mask, withoutMask) = model.predict(expanded_face)[0]

            predicted_class = 0
            label = "No Mask"
            if mask > withoutMask:
                label = "Mask"
                predicted_class = 1

            color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
            label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
            cv2.putText(image, label, (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
            cv2.rectangle(image, (startX, startY), (endX, endY), color, 2)
            st.image(image, width=640)
            st.write('### ' + label)

    if grad_cam_button:
        data = ([face], None)
        explainer = GradCAM()
        grad_cam_grid = explainer.explain(
            data, model, class_index=predicted_class, layer_name="Conv_1"
        )
        st.image(grad_cam_grid)

    if occlusion_sensitivity_button:
        data = ([face], None)
        explainer = OcclusionSensitivity()
        sensitivity_occlusion_grid = explainer.explain(data, model, predicted_class, patch_size_value)
        st.image(sensitivity_occlusion_grid)
Example #17
# Show image
plt.figure()
plt.imshow(img)
plt.axis('off')
plt.show()

# Reshape data
img = img.reshape(-1, *img.shape)

# Prepare data
data = (img, None)
label_idx = np.where(np.array(classes) == label)[0][0]

explainer = GradCAM()
grid = explainer.explain(validation_data=data,
                         model=model.model,
                         class_index=label_idx,
                         layer_name='conv5_block3_3_conv')

plt.figure()
plt.imshow(grid)
plt.axis('off')
plt.show()

explainer = OcclusionSensitivity()
grid = explainer.explain(validation_data=data,
                         model=model.model,
                         class_index=label_idx,
                         patch_size=10)

plt.figure()
plt.imshow(grid)
Example #18
import tensorflow as tf

from tf_explain.core.grad_cam import GradCAM

IMAGE_PATH = "./cat.jpg"

if __name__ == "__main__":
    model = tf.keras.applications.vgg16.VGG16(weights="imagenet",
                                              include_top=True)

    img = tf.keras.preprocessing.image.load_img(IMAGE_PATH,
                                                target_size=(224, 224))
    img = tf.keras.preprocessing.image.img_to_array(img)

    model.summary()
    data = ([img], None)

    tabby_cat_class_index = 281
    explainer = GradCAM()
    # Compute GradCAM on VGG16
    grid = explainer.explain(data,
                             model,
                             class_index=tabby_cat_class_index,
                             layer_name="block5_conv3")
    explainer.save(grid, ".", "grad_cam.png")


def write():
    st.title(' Face Mask Detector')

    net = load_face_detector_and_model()
    model = load_cnn_model()

    selected_option = st.radio("Choose", ('File', 'Webcam'))

    if selected_option == 'File':
        #uploaded_image = st.sidebar.file_uploader("Choose a JPG file", type="jpg")
        uploaded_image = st.sidebar.file_uploader("Choose a JPG file",
                                                  type=FILE_TYPES)
        confidence_value = st.sidebar.slider('Confidence:', 0.0, 1.0, 0.5, 0.1)
        if uploaded_image:
            image1 = Image.open(uploaded_image)
            st.sidebar.image(image1,
                             caption='Uploaded Image.',
                             use_column_width=True)
            #st.sidebar.info('Uploaded image:')
            #st.sidebar.image(uploaded_image, width=240)
            #f = open(uploaded_image, 'rb')

            #file = st.file_uploader("Upload file", type=FILE_TYPES)
            show_file = st.empty()
            if not uploaded_image:
                show_file.info("Please upload a file of type: " +
                               ", ".join(FILE_TYPES))
                return
            file_type = get_file_type(uploaded_image)
            if file_type == FileType.IMAGE:
                show_file.image(image1)
            elif file_type == FileType.PYTHON:
                st.code(uploaded_image.getvalue())
            else:
                data = pd.read_csv(uploaded_image)
                st.dataframe(data.head(10))

            f = open(get_file_name(uploaded_image), 'rb')
            img_bytes = f.read()
            f.close()

            grad_cam_button = st.sidebar.button('Grad CAM')
            patch_size_value = st.sidebar.slider('Patch size:', 10, 90, 20, 10)
            occlusion_sensitivity_button = st.sidebar.button(
                'Occlusion Sensitivity')
            image = cv2.imdecode(np.frombuffer(img_bytes, np.uint8), 1)  # frombuffer replaces the deprecated fromstring
            #image = cv2.imdecode(np.fromstring(uploaded_image.read(), np.uint8), 1)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            orig = image.copy()
            (h, w) = image.shape[:2]
            blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                         (104.0, 177.0, 123.0))
            net.setInput(blob)
            detections = net.forward()

            for i in range(0, detections.shape[2]):
                confidence = detections[0, 0, i, 2]
                if confidence > confidence_value:
                    box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
                    (startX, startY, endX, endY) = box.astype("int")
                    (startX, startY) = (max(0, startX), max(0, startY))
                    (endX, endY) = (min(w - 1, endX), min(h - 1, endY))

                    face = image[startY:endY, startX:endX]
                    face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
                    face = cv2.resize(face, (224, 224))
                    face = img_to_array(face)
                    face = preprocess_input(face)
                    expanded_face = np.expand_dims(face, axis=0)

                    (mask, withoutMask) = model.predict(expanded_face)[0]

                    predicted_class = 0
                    label = "No Mask"
                    if mask > withoutMask:
                        label = "Mask"
                        predicted_class = 1

                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    label = "{}: {:.2f}%".format(label,
                                                 max(mask, withoutMask) * 100)
                    cv2.putText(image, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(image, (startX, startY), (endX, endY), color,
                                  2)
                    st.image(image, width=640)
                    st.write('### ' + label)

            if grad_cam_button:
                data = ([face], None)
                explainer = GradCAM()
                grad_cam_grid = explainer.explain(data,
                                                  model,
                                                  class_index=predicted_class,
                                                  layer_name="Conv_1")
                st.image(grad_cam_grid)

            if occlusion_sensitivity_button:
                data = ([face], None)
                explainer = OcclusionSensitivity()
                sensitivity_occlusion_grid = explainer.explain(
                    data, model, predicted_class, patch_size_value)
                st.image(sensitivity_occlusion_grid)

    # PROGRAM FOR WEB CAM
    if selected_option == 'Webcam':
        labels_dict = {0: 'without_mask', 1: 'with_mask'}
        color_dict = {0: (0, 0, 255), 1: (0, 255, 0)}
        size = 4

        webcam = cv2.VideoCapture(0)  #Use camera 0
        st.write("Webcam On")
        stframe_cam = st.empty()
        # We load the xml file
        classifier = cv2.CascadeClassifier(
            'src/pages/Services/frecog/haarcascade_frontalface_default.xml')

        while True:
            (rval, im) = webcam.read()
            stframe_cam.image(im)
            # st.write("Webcam Read")
            #if im:
            #ret, framecar = vf.read()
            im = cv2.flip(im, 1, 1)  #Flip to act as a mirror

            # Resize the image to speed up detection
            mini = cv2.resize(im, (im.shape[1] // size, im.shape[0] // size))

            # detect MultiScale / faces
            faces = classifier.detectMultiScale(mini)

            # Draw rectangles around each face
            for f in faces:
                (x, y, w, h) = [v * size
                                for v in f]  # Scale the shape size back up
                #Save just the rectangle faces in SubRecFaces
                face_img = im[y:y + h, x:x + w]
                resized = cv2.resize(face_img, (150, 150))
                normalized = resized / 255.0
                reshaped = np.reshape(normalized, (1, 150, 150, 3))
                reshaped = np.vstack([reshaped])
                result = model.predict(reshaped)
                #print(result)

                label = np.argmax(result, axis=1)[0]

                cv2.rectangle(im, (x, y), (x + w, y + h), color_dict[label], 2)
                cv2.rectangle(im, (x, y - 40), (x + w, y), color_dict[label],
                              -1)
                cv2.putText(im, labels_dict[label], (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

                # Show the image
                stframe_cam.image(im, caption='LIVE')
                #cv2.imshow('LIVE',   im)
                key = cv2.waitKey(10)
                # if Esc key is press then break out of the loop
                if key == 27:  #The Esc key
                    break
        # Stop video
        webcam.release()

        # Close all started windows
        cv2.destroyAllWindows()


#write()
#uploaded_image.close()