Example #1
    def infer(self, image_file, autocrop=False):
        """Run inference on the given image"""
        # Load and preprocess image
        image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
        if autocrop:
            image, _ = auto_body_crop(image)
        image = cv2.resize(image, (self.input_width, self.input_height), interpolation=cv2.INTER_CUBIC)
        image = image.astype(np.float32) / 255.0
        image = np.expand_dims(np.stack((image, image, image), axis=-1), axis=0)

        # Create feed dict
        feed_dict = {IMAGE_INPUT_TENSOR: image, TRAINING_PH_TENSOR: False}

        # Run inference
        graph, sess, saver = self.load_graph()
        with graph.as_default():
            # Load checkpoint
            self.load_ckpt(sess, saver)

            # Run image through model
            class_, probs = sess.run([CLASS_PRED_TENSOR, CLASS_PROB_TENSOR], feed_dict=feed_dict)
            print('\nPredicted Class: ' + CLASS_NAMES[class_[0]])
            print('Confidences: ' + ', '.join(
                '{}: {}'.format(name, conf) for name, conf in zip(CLASS_NAMES, probs[0])))
            print('**DISCLAIMER**')
            print('Do not use this prediction for self-diagnosis. '
                  'You should check with your local authorities for '
                  'the latest advice on seeking medical assistance.')
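For context, here is a minimal sketch of how this infer() method might be driven from a small command-line wrapper. The COVIDNetCTRunner class name, its constructor signature, and the argument names below are assumptions about the surrounding project, not code taken from it.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI around the infer() method above; COVIDNetCTRunner and
    # its constructor arguments are assumed and may differ in the real code.
    parser = argparse.ArgumentParser(description='Run CT inference on a single image')
    parser.add_argument('image_file', help='Path to the CT slice (PNG/JPEG)')
    parser.add_argument('--meta_file', default='model.meta', help='TF1 metagraph file')
    parser.add_argument('--ckpt', default='model', help='Checkpoint prefix to restore')
    parser.add_argument('--autocrop', action='store_true',
                        help='Crop to the body region before resizing')
    args = parser.parse_args()

    runner = COVIDNetCTRunner(args.meta_file, ckpt=args.ckpt,
                              input_height=512, input_width=512)
    runner.infer(args.image_file, autocrop=args.autocrop)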
Example #2
                def infer(self, image_file, autocrop=False):
                    """Run inference on the given image"""
                    # Load and preprocess image
                    image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
                    if autocrop:
                        image, _ = auto_body_crop(image)
                    image = cv2.resize(image,
                                       (self.input_width, self.input_height),
                                       interpolation=cv2.INTER_CUBIC)
                    image = image.astype(np.float32) / 255.0
                    image = np.expand_dims(np.stack((image, image, image),
                                                    axis=-1),
                                           axis=0)

                    # Create feed dict
                    feed_dict = {
                        IMAGE_INPUT_TENSOR: image,
                        TRAINING_PH_TENSOR: False
                    }

                    # Run inference
                    graph, sess, saver = self.load_graph()
                    with graph.as_default():
                        # Load checkpoint
                        self.load_ckpt(sess, saver)

                        # Run image through model
                        class_, probs = sess.run(
                            [CLASS_PRED_TENSOR, CLASS_PROB_TENSOR],
                            feed_dict=feed_dict)
                        print('\nPredicted Class: ' + CLASS_NAMES[class_[0]])
                        print('Confidences: ' + ', '.join(
                            '{}: {}'.format(name, conf)
                            for name, conf in zip(CLASS_NAMES, probs[0])))

                        N = probs[0][0]
                        P = probs[0][1]
                        C = probs[0][2]

                        result = CLASS_NAMES[class_[0]]
                        print('**DISCLAIMER**')
                        print(
                            'Do not use this prediction for self-diagnosis. '
                            'You should check with your local authorities for '
                            'the latest advice on seeking medical assistance.')
                        confidence = {
                            'normal': str("%.2f" % (N * 100)),
                            'pneumonia': str("%.2f" % (P * 100)),
                            'covid': str("%.2f" % (C * 100))
                        }

                        detection = {
                            'detections': {
                                'prediction': result,
                                'confidence': confidence,
                                'imageType': 'CT Scan'
                            }
                        }
                        my_logger.info(
                            'CT Scan Detection : {}'.format(detection))
                    try:
                        if not firebase_admin._apps:
                            cred = credentials.Certificate(
                                credential_json_file)
                            fa = firebase_admin.initialize_app(
                                cred, {
                                    "databaseURL": databaseURL,
                                    'storageBucket': storageBucket
                                })
                            db = firebase_admin.firestore.client(fa)
                            doc_ref = db.collection(
                                u'stripe_customers/{0}/results'.format(
                                    userId)).document(currentTime)
                            doc_ref.update(detection)
                            my_logger.info(
                                'CT Scan Detection : Saved to firestore and message acknowledged'
                            )
                        else:
                            print('already initialized')
                            db = firestore.client()
                            doc_ref = db.collection(
                                u'stripe_customers/{0}/results'.format(
                                    userId)).document(currentTime)
                            doc_ref.update(detection)
                            my_logger.info(
                                'CT Scan Detection : Saved to firestore and message acknowledged'
                            )
                    except Exception as e:
                        print(e)
                        my_logger.error('CT Scan Detection : NOT Saved: %s', e)
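The Firebase branch above re-checks initialization on every call and duplicates the Firestore write in both branches. A common alternative, sketched below under the assumption that credential_json_file, databaseURL, and storageBucket are defined in the surrounding module as in the example, is to initialize the Admin SDK once at startup and route all writes through one helper.

import firebase_admin
from firebase_admin import credentials, firestore

# One-time setup at application startup; credential_json_file, databaseURL,
# and storageBucket are assumed to come from the surrounding module.
if not firebase_admin._apps:
    cred = credentials.Certificate(credential_json_file)
    firebase_admin.initialize_app(cred, {
        'databaseURL': databaseURL,
        'storageBucket': storageBucket,
    })

def save_detection(user_id, timestamp, detection):
    """Write a detection payload to the per-user results collection."""
    db = firestore.client()
    doc_ref = db.collection(
        u'stripe_customers/{0}/results'.format(user_id)).document(timestamp)
    doc_ref.update(detection)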
Example #3
def uploaded_ct():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect('upload.html')
        file = request.files['file']
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
        if file.filename == '':
            flash('No file selected for uploading')
            return redirect('upload.html')
        if file:
            filename = secure_filename(file.filename)
            file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(file_path)
            # Load and preprocess image
            origin_im = cv2.imread(file_path)  # read file
            origin_im = cv2.cvtColor(origin_im, cv2.COLOR_BGR2RGB)
            print(origin_im.shape)
            h, w, c = origin_im.shape
            image = cv2.imread(file_path, cv2.IMREAD_GRAYSCALE)  # read file
            image, _ = auto_body_crop(image)
            image = cv2.resize(image, (512, 512), interpolation=cv2.INTER_CUBIC)
            image = image.astype(np.float32) / 255.0
            image = np.expand_dims(np.stack((image, image, image), axis=-1),
                                   axis=0)
            with tf.Graph().as_default() as graph:
                tf.import_graph_def(
                    graph_def1,
                    input_map=None,
                    return_elements=None,
                    name="",
                    #op_dict=None,
                    #producer_op_list=None
                )
                image_tensor = graph.get_tensor_by_name("Placeholder:0")
                pred_tensor = graph.get_tensor_by_name("softmax_tensor:0")
                TRAINING_PH_TENSOR = graph.get_tensor_by_name("is_training:0")

                sess = tf.Session(graph=graph)
                gradCam = GradCAM(graph=graph,
                                  classes=[0, 1, 2],
                                  outLayer="softmax_tensor:0",
                                  targetLayer="resnet_model/add_15:0")
                grads = gradCam.compute_grads()
                size_upsample = (w, h)
                # Create feed dict
                feed_dict = {image_tensor: image, TRAINING_PH_TENSOR: False}
                pred = sess.run(pred_tensor, feed_dict)

                output, grads_val = sess.run(
                    [gradCam.target, grads[pred.argmax(axis=1)[0]]], feed_dict)
                cam3 = generate_cam(output[0], grads_val[0], size_upsample)

                # Overlay cam on image
                cam3 = np.uint8(255 * cam3)
                cam3 = cv2.applyColorMap(cam3, cv2.COLORMAP_JET)
                # Convert the RGB copy back to BGR so cv2.imwrite saves correct colors
                new_im = cam3 * 0.3 + cv2.cvtColor(origin_im, cv2.COLOR_RGB2BGR) * 0.5
                filename1 = my_random_string(10) + filename
                filename2 = my_random_string(12) + filename
                os.rename(file_path,
                          os.path.join(app.config['UPLOAD_FOLDER'], filename1))
                cv2.imwrite(
                    os.path.join(app.config['UPLOAD_FOLDER'], filename2),
                    new_im)
                print("GradCAM CT image saved ")

            print(pred)
            inv_mapping = {0: 'normal', 1: 'pneumonia', 2: 'COVID-19'}
            pred_class = inv_mapping[pred.argmax(axis=1)[0]]
            pred_proba = "{:.2f}".format((pred.max(axis=1)[0]) * 100)
            print(pred_proba)
            print(pred_class)
            result = pred_class.capitalize()
            return render_template('results_ct.html',
                                   result=result,
                                   probability=pred_proba,
                                   imagesource='/assets/images/' + filename1,
                                   imagesource1='/assets/images/' + filename2)
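generate_cam() is called above but not shown. The following is a minimal sketch of a standard Grad-CAM helper consistent with how it is used here (target-layer activations, gradients for the predicted class, and an upsample size); it is an assumption, not the project's actual implementation.

import numpy as np
import cv2

def generate_cam(fmap, grads, size_upsample):
    """Standard Grad-CAM: weight feature maps by pooled gradients, ReLU, upsample, normalize."""
    weights = np.mean(grads, axis=(0, 1))        # global-average-pool the gradients per channel
    cam = np.sum(weights * fmap, axis=-1)        # weighted sum over feature-map channels
    cam = np.maximum(cam, 0)                     # keep only positive influence
    cam = cv2.resize(cam, size_upsample)         # upsample to (width, height) of the original image
    cam = cam - cam.min()
    cam = cam / (cam.max() + 1e-8)               # normalize to [0, 1] for colormapping
    return cam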