Example #1
import os

import numpy as np
import tensorflow as tf  # TF 1.x session/graph API (use tf.compat.v1 under TF 2.x)

from data import process_image_file  # COVID-Net preprocessing helper (see the later examples)


def perform_inference(file_path):
    # file_path is the uploaded image path (e.g. request.form['featurea'] in the web form)
    # adapted from inference.py

    mapping = {'normal': 0, 'pneumonia': 1, 'COVID-19': 2}
    inv_mapping = {0: 'normal', 1: 'pneumonia', 2: 'COVID-19'}

    sess = tf.Session()
    tf.get_default_graph()
    saver = tf.train.import_meta_graph(os.path.join('models/COVIDNet-CXR3-B', 'model.meta'))
    saver.restore(sess, os.path.join('models/COVIDNet-CXR3-B', 'model-1014'))

    graph = tf.get_default_graph()

    image_tensor = graph.get_tensor_by_name('input_1:0')
    pred_tensor = graph.get_tensor_by_name('norm_dense_1/Softmax:0')

    x = process_image_file(file_path, 0.08, 480)
    x = x.astype('float32') / 255.0
    pred = sess.run(pred_tensor, feed_dict={image_tensor: np.expand_dims(x, axis=0)})

    print('Prediction: {}'.format(inv_mapping[pred.argmax(axis=1)[0]]))
    all_predictions = ('Normal: {:.3f}, Pneumonia: {:.3f}, COVID-19: {:.3f}'.format(pred[0][0], pred[0][1], pred[0][2]))
    # send the values like render_template('gout.html',values)
    predicted_output = inv_mapping[pred.argmax(axis=1)[0]]
    return predicted_output, all_predictions
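
The comment above hints that the returned values are rendered with render_template. Below is a minimal sketch of how perform_inference() might be wired into a Flask view; the route, upload-field name, and local upload folder are illustrative assumptions, only the template name 'gout.html' comes from the comment in the source.

import os
from flask import Flask, request, render_template

app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    uploaded = request.files['image']                 # hypothetical upload field
    os.makedirs('uploads', exist_ok=True)
    file_path = os.path.join('uploads', uploaded.filename)
    uploaded.save(file_path)                          # save locally before inference

    predicted_output, all_predictions = perform_inference(file_path)
    return render_template('gout.html',
                           prediction=predicted_output,
                           confidences=all_predictions)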
Example #2
def eval(sess, graph, testfile, testfolder, input_tensor, output_tensor, input_size):
    image_tensor = graph.get_tensor_by_name(input_tensor)
    pred_tensor = graph.get_tensor_by_name(output_tensor)

    y_test = []
    pred = []
    for i in range(len(testfile)):
        line = testfile[i].split()
        x = process_image_file(os.path.join(testfolder, line[1]), 0.08, input_size)
        x = x.astype('float32') / 255.0
        y_test.append(mapping[line[2]])
        pred.append(np.array(sess.run(pred_tensor, feed_dict={image_tensor: np.expand_dims(x, axis=0)})).argmax(axis=1))
    y_test = np.array(y_test)
    pred = np.array(pred)

    matrix = confusion_matrix(y_test, pred)
    matrix = matrix.astype('float')
    #cm_norm = matrix / matrix.sum(axis=1)[:, np.newaxis]
    print(matrix)
    #class_acc = np.array(cm_norm.diagonal())
    class_acc = [matrix[i,i]/np.sum(matrix[i,:]) if np.sum(matrix[i,:]) else 0 for i in range(len(matrix))]
    print('Sens Normal: {0:.3f}, Pneumonia: {1:.3f}, COVID-19: {2:.3f}'.format(class_acc[0],
                                                                               class_acc[1],
                                                                               class_acc[2]))
    ppvs = [matrix[i,i]/np.sum(matrix[:,i]) if np.sum(matrix[:,i]) else 0 for i in range(len(matrix))]
    print('PPV Normal: {0:.3f}, Pneumonia {1:.3f}, COVID-19: {2:.3f}'.format(ppvs[0],
                                                                             ppvs[1],
                                                                             ppvs[2]))
def run_inference_tflite(model_file_path, image, top_percent, input_size,
                         label_file, verbose):
    if os.path.exists(model_file_path) and os.path.exists(image):
        interpreter = tflite.Interpreter(model_path=model_file_path)
        interpreter.allocate_tensors()

        input_details = interpreter.get_input_details()
        output_details = interpreter.get_output_details()

        # check the type of the input tensor
        floating_model = input_details[0]['dtype'] == np.float32

        # NxHxWxC, H:1, W:2
        height = input_details[0]['shape'][1]
        width = input_details[0]['shape'][2]

        img = process_image_file(image, top_percent, input_size)
        img = img.astype('float32') / 255.0

        # add N dim
        input_data = np.expand_dims(img, axis=0)

        # The normalization below was suggested by an example script, but it threw off the results compared to the
        # non-tflite model. With it commented out, both models give the same results when classifying the same photos.
        # if floating_model:
        #     input_data = (np.float32(input_data) - args.input_mean) / args.input_std

        interpreter.set_tensor(input_details[0]['index'], input_data)

        start_time = time.time()
        interpreter.invoke()
        stop_time = time.time()

        output_data = interpreter.get_tensor(output_details[0]['index'])
        results = np.squeeze(output_data)

        top_k = results.argsort()[-5:][::-1]
        labels = load_labels(label_file)

        print("Prediction: " + labels[top_k[0]])

        # Print Further details
        if verbose:
            for i in top_k:
                if floating_model:
                    print('{:.3f}: {}'.format(float(results[i]), labels[i]))
                else:
                    print('{:.3f}: {}'.format(float(results[i] / 255.0),
                                              labels[i]))

            print('time: {:.3f}ms'.format((stop_time - start_time) * 1000))

        return labels[top_k[0]]

    else:
        print("One of the input files does not exist or couldn't be found.")
        return None
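
Both functions above rely on names defined elsewhere in the repository. The following is a hedged sketch of how they might be invoked: the test-split file, test-image folder, .tflite model, and labels file are assumptions, while the checkpoint, tensor names, and input size follow Example #1.

import tensorflow as tf  # TF 1.x session/graph API

mapping = {'normal': 0, 'pneumonia': 1, 'COVID-19': 2}   # eval() reads this module-level dict

# hypothetical test split file, one "<id> <filename> <class>" line per image
with open('test_COVIDx.txt') as f:
    testfile = f.read().splitlines()

sess = tf.Session()
saver = tf.train.import_meta_graph('models/COVIDNet-CXR3-B/model.meta')
saver.restore(sess, 'models/COVIDNet-CXR3-B/model-1014')

eval(sess, tf.get_default_graph(), testfile, 'data/test',
     'input_1:0', 'norm_dense_1/Softmax:0', 480)

# TFLite variant; the .tflite model and labels file are placeholders
run_inference_tflite('models/covidnet.tflite', 'assets/ex-covid.jpeg',
                     top_percent=0.08, input_size=480,
                     label_file='labels.txt', verbose=True)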
Example #4

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='COVID-Net Lung Severity Scoring')
    parser.add_argument('--weightspath_geo', default='models/COVIDNet-SEV-GEO', type=str, help='Path to output folder')
    parser.add_argument('--weightspath_opc', default='models/COVIDNet-SEV-OPC', type=str, help='Path to output folder')
    parser.add_argument('--metaname', default='model.meta', type=str, help='Name of ckpt meta file')
    parser.add_argument('--ckptname', default='model', type=str, help='Name of model ckpts')
    parser.add_argument('--imagepath', default='assets/ex-covid.jpeg', type=str,
                        help='Full path to image to perform scoring on')
    parser.add_argument('--input_size', default=480, type=int, help='Size of input (ex: if 480x480, --input_size 480)')
    parser.add_argument('--top_percent', default=0.08, type=float, help='Percent top crop from top of image')

    args = parser.parse_args()

    x = process_image_file(args.imagepath, args.top_percent, args.input_size)
    x = x.astype('float32') / 255.0

    # check if the models exist
    infer_geo = os.path.exists(os.path.join(args.weightspath_geo, args.metaname))
    infer_opc = os.path.exists(os.path.join(args.weightspath_opc, args.metaname))

    if infer_geo:
        model_geo = MetaModel(os.path.join(args.weightspath_geo, args.metaname),
                              os.path.join(args.weightspath_geo, args.ckptname))
        output_geo = model_geo.infer(x)

        print('Geographic severity: {:.3f}'.format(output_geo[0]))
        print('Geographic extent score for right + left lung (0 - 8): {:.3f}'.format(output_geo[0] * 8))
        print('For each lung: 0 = no involvement; 1 = <25%; 2 = 25-50%; 3 = 50-75%; 4 = >75% involvement.')
    def PubsubCallback(self, message):

        msg_id = message.message_id
        filename = 'severity_' + msg_id + '.log'

        my_logger = logging.getLogger(msg_id)
        my_logger.setLevel(logging.INFO)
        handler = logging.handlers.RotatingFileHandler(
            filename, maxBytes=20)  # will create logger file
        my_logger.addHandler(handler)
        my_logger.info('Started with message_id : {}'.format(msg_id))

        #print(message)
        sub_data = message.data.decode('utf-8')  #decoding Pubsub message
        d = json.loads(sub_data)
        userId = d['userId']
        bucket = d['bucket']
        url = d['url']
        fileName = d['fileName']
        currentTime = d['currentTime']
        time = d['time']
        date = d['date']

        credential_json_file = ['file_name']
        databaseURL = ['databaseURL']
        storageBucket = ['storageBucket']

        if (not len(firebase_admin._apps)):
            cred = credentials.Certificate(credential_json_file)
            fa = firebase_admin.initialize_app(cred, {
                "databaseURL": databaseURL,
                'storageBucket': storageBucket
            })
            fc = firebase_admin.firestore.client(fa)
            db = firestore.client()
        blob = storage.bucket(storageBucket).blob(url)
        my_logger.info('image tagged with message_id: {}.jpeg'.format(msg_id))
        blob.download_to_filename('assets/severity_{}.jpeg'.format(msg_id))

        try:

            def score_prediction(softmax, step_size):
                vals = np.arange(3) * step_size + (step_size / 2.)
                vals = np.expand_dims(vals, axis=0)
                return np.sum(softmax * vals, axis=-1)

            class MetaModel:
                def __init__(self, meta_file, ckpt_file):
                    self.meta_file = meta_file
                    self.ckpt_file = ckpt_file

                    self.graph = tf.Graph()
                    with self.graph.as_default():
                        self.saver = tf.compat.v1.train.import_meta_graph(
                            self.meta_file)
                        self.input_tr = self.graph.get_tensor_by_name(
                            'input_1:0')
                        self.phase_tr = self.graph.get_tensor_by_name(
                            'keras_learning_phase:0')
                        self.output_tr = self.graph.get_tensor_by_name(
                            'MLP/dense_1/MatMul:0')

                def infer(self, image):
                    with tf.compat.v1.Session(graph=self.graph) as sess:
                        self.saver.restore(sess, self.ckpt_file)

                        outputs = defaultdict(list)
                        outs = sess.run(self.output_tr,
                                        feed_dict={
                                            self.input_tr:
                                            np.expand_dims(image, axis=0),
                                            self.phase_tr:
                                            False
                                        })
                        outputs['logits'].append(outs)

                        for k in outputs.keys():
                            outputs[k] = np.concatenate(outputs[k], axis=0)

                        outputs['softmax'] = np.exp(
                            outputs['logits']) / np.sum(np.exp(
                                outputs['logits']),
                                                        axis=-1,
                                                        keepdims=True)
                        outputs['score'] = score_prediction(
                            outputs['softmax'], 1 / 3.)

                    return outputs['score']

                    ops.reset_default_graph()  # note: unreachable, it sits after the return above
        except Exception as e:
            print(e)

        if __name__ == '__main__':
            try:
                parser = argparse.ArgumentParser(
                    description='COVID-Net Lung Severity Scoring')
                parser.add_argument('--weightspath_geo',
                                    default='models/COVIDNet-SEV-GEO',
                                    type=str,
                                    help='Path to output folder')
                parser.add_argument('--weightspath_opc',
                                    default='models/COVIDNet-SEV-OPC',
                                    type=str,
                                    help='Path to output folder')
                parser.add_argument('--metaname',
                                    default='model.meta',
                                    type=str,
                                    help='Name of ckpt meta file')
                parser.add_argument('--ckptname',
                                    default='model',
                                    type=str,
                                    help='Name of model ckpts')
                parser.add_argument(
                    '--imagepath',
                    default='assets/severity_{}.jpeg'.format(msg_id),
                    type=str,
                    help='Full path to image to perform scoring on')
                parser.add_argument(
                    '--input_size',
                    default=480,
                    type=int,
                    help='Size of input (ex: if 480x480, --input_size 480)')
                parser.add_argument('--top_percent',
                                    default=0.08,
                                    type=float,
                                    help='Percent top crop from top of image')

                args = parser.parse_args()

                x = process_image_file(args.imagepath, args.top_percent,
                                       args.input_size)
                x = x.astype('float32') / 255.0
                # check if the models exist

                infer_geo = os.path.exists(
                    os.path.join(args.weightspath_geo, args.metaname))
                infer_opc = os.path.exists(
                    os.path.join(args.weightspath_opc, args.metaname))

                if infer_geo:
                    model_geo = MetaModel(
                        os.path.join(args.weightspath_geo, args.metaname),
                        os.path.join(args.weightspath_geo, args.ckptname))
                    output_geo = model_geo.infer(x)

                    print('Geographic severity: {:.3f}'.format(output_geo[0]))
                    Geographic_severity = '{:.3f}'.format(output_geo[0])
                    GS = float(Geographic_severity)
                    print(
                        'Geographic extent score for right + left lung (0 - 8): {:.3f}'
                        .format(output_geo[0] * 8))
                    Geographic_extent_score = '{:.3f}'.format(output_geo[0] *
                                                              8)
                    Geographic_extent_score = float(Geographic_extent_score)
                    print(
                        'For each lung: 0 = no involvement; 1 = <25%; 2 = 25-50%; 3 = 50-75%; 4 = >75% involvement.'
                    )

                if infer_opc:
                    model_opc = MetaModel(
                        os.path.join(args.weightspath_opc, args.metaname),
                        os.path.join(args.weightspath_opc, args.ckptname))
                    output_opc = model_opc.infer(x)

                    print('Opacity severity: {:.3f}'.format(output_opc[0]))
                    Opacity_severity = ('{:.3f}'.format(output_opc[0]))
                    OS = float(Opacity_severity)
                    print(
                        'Opacity extent score for right + left lung (0 - 6): {:.3f}'
                        .format(output_opc[0] * 6))
                    Opacity_extent_score = ('{:.3f}'.format(output_opc[0] * 6))
                    Opacity_extent_score = float(Opacity_extent_score)

                print(
                    'For each lung: 0 = no opacity; 1 = ground glass opacity; 2 = consolidation; 3 = white-out.'
                )
                print('**DISCLAIMER**')
                print(
                    'Do not use this prediction for self-diagnosis. You should check with your local authorities for the latest advice on seeking medical assistance.'
                )

                detection = {
                    'Geograph_and_Opacity': {
                        'Geographic_severity': ("%.2f" % (GS * 100)),
                        'Geographic_extent_score':
                        (round(Geographic_extent_score, 2)),
                        'Opacity_severity': ("%.2f" % (OS * 100)),
                        'Opacity_extent_score': (round(Opacity_extent_score,
                                                       2))
                    }
                }
            except Exception as e:
                my_logger.error('ERROR : %s', e)
                print(e)

            try:
                if (not len(firebase_admin._apps)):
                    cred = credentials.Certificate(credential_json_file)
                    fa = firebase_admin.initialize_app(
                        cred, {
                            "databaseURL": databaseURL,
                            'storageBucket': storageBucket
                        })
                    fc = firebase_admin.firestore.client(fa)
                    db = firestore.client()
                    doc_ref = db.collection(
                        u'stripe_customers/{0}/results'.format(
                            userId)).document(currentTime)
                    doc_ref.update(detection)
                    my_logger.info(detection)
                    my_logger.info(
                        'Geograph : Saved in firestore & message Acknowledge')
                    message.ack()
                else:
                    # print('already initialized')
                    db = firestore.client()
                    doc_ref = db.collection(
                        u'stripe_customers/{0}/results'.format(
                            userId)).document(currentTime)
                    doc_ref.update(detection)
                    my_logger.info(detection)
                    my_logger.info(
                        'Geograph : Saved in Firestore & message acknowledged')
                    message.ack()

            except Exception as e:
                my_logger.error(
                    'Detection : NOT saved, message not acknowledged: %s', e)

            storage_client = store.Client.from_service_account_json(
                credential_json_file)
            log_bucket = storage_client.get_bucket('bucket-name')
            log_blob = log_bucket.blob('{0}/logs/{1}/severity-{2}'.format(
                userId, date, currentTime))
            log_blob.upload_from_filename('severity_{}.log'.format(msg_id))
            os.remove('./severity_{}.log'.format(msg_id))
            os.remove('./assets/severity_{}.jpeg'.format(msg_id))

            print("waiting for new message")
Example #6
# hard-coded here; these values are normally passed in as function arguments
testfile = args.testfile
testfolder = os.path.join(args.datadir, 'test')
from data import process_image_file
from sklearn.metrics import confusion_matrix

image_tensor = args.in_tensorname
pred_tensor = args.out_tensorname

y_test = []
pred = []
for i in range(len(testfile)):
    line = testfile[i].split()
    x = process_image_file(os.path.join(testfolder, line[1]), 0.08,
                           args.input_size)
    x = x.astype('float32') / 255.0
    y_test.append(mapping[line[2]])
    # add the batch dimension the Keras model expects before predicting
    pred.append(np.array(model.predict(np.expand_dims(x, axis=0))).argmax(axis=1))
y_test = np.array(y_test)
pred = np.array(pred)

matrix = confusion_matrix(y_test, pred)
matrix = matrix.astype('float')
#cm_norm = matrix / matrix.sum(axis=1)[:, np.newaxis]
print(matrix)
#class_acc = np.array(cm_norm.diagonal())
class_acc = [
    matrix[i, i] / np.sum(matrix[i, :]) if np.sum(matrix[i, :]) else 0
    for i in range(len(matrix))
]
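
The snippet stops after computing class_acc; the reporting step, mirroring the eval() example earlier (per-class sensitivity and PPV from the same confusion matrix), would look like this:

print('Sens Normal: {0:.3f}, Pneumonia: {1:.3f}, COVID-19: {2:.3f}'.format(
    class_acc[0], class_acc[1], class_acc[2]))

ppvs = [
    matrix[i, i] / np.sum(matrix[:, i]) if np.sum(matrix[:, i]) else 0
    for i in range(len(matrix))
]
print('PPV Normal: {0:.3f}, Pneumonia: {1:.3f}, COVID-19: {2:.3f}'.format(
    ppvs[0], ppvs[1], ppvs[2]))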
Example #7
    def PubsubCallback(self, message):
        msg_id = message.message_id
        filename = msg_id + '.log'

        my_logger = logging.getLogger(msg_id)
        my_logger.setLevel(logging.INFO)
        handler = logging.handlers.RotatingFileHandler(filename, maxBytes=20) # will create logger file
        my_logger.addHandler(handler)
        my_logger.info('Started with message_id : {}'.format(msg_id))

        sub_data = message.data.decode('utf-8') #decoding Pubsub message
        d = json.loads(sub_data)
        userId = d['userId']
        bucket = d['bucket']
        url = d['url']
        fileName = d['fileName']
        currentTime = d['currentTime']
        time = d['time']
        date = d['date']
        path = d['path']

        credential_json_file = ['file_name']
        databaseURL = ['databaseURL']
        storageBucket = ['storageBucket']

        if (not len(firebase_admin._apps)):
            cred = credentials.Certificate(credential_json_file)
            fa=firebase_admin.initialize_app(cred, {"databaseURL": databaseURL,'storageBucket':storageBucket})
            fc=firebase_admin.firestore.client(fa)
            db = firestore.client()
        blob = storage.bucket(storageBucket).blob(url)
        my_logger.info('image tagged with message_id: {}.jpeg'.format(msg_id))
        blob.download_to_filename('assets/{}.jpeg'.format(msg_id)) # tagging image with unique message id

        dirr = 'assets/{}'.format(msg_id)
        os.mkdir(dirr)
        my_logger.info('Processing model now')

        try:
            parser = argparse.ArgumentParser(description='COVID-Net Inference')
            parser.add_argument('--model', default="COVID-Net-Model.json", help="Path to model specification")
            parser.add_argument('--weightspath', default='models/COVIDNet-CXR-Large', type=str, help='Path to output folder')
            parser.add_argument('--metaname', default='model.meta', type=str, help='Name of ckpt meta file')
            parser.add_argument('--ckptname', default='model-8485', type=str, help='Name of model ckpts')
            parser.add_argument('--imagepath', default='assets/{}.jpeg'.format(msg_id), type=str, help='Full path to image to run inference on')
            parser.add_argument('--in_tensorname', default='input_1:0', type=str, help='Name of input tensor to graph')
            parser.add_argument('--out_tensorname', default='dense_3/Softmax:0', type=str, help='Name of output tensor from graph')
            parser.add_argument('--input_size', default=224, type=int, help='Size of input (ex: if 480x480, --input_size 480)')
            parser.add_argument('--top_percent', default=0.08, type=float, help='Percent top crop from top of image')
            parser.add_argument('--outdir', default='assets/{}'.format(msg_id) , help="Output directory")
            parser.add_argument('--image_output_size', default=650, type=int, help='output heatmap image Size 650x650')

            args = parser.parse_args()
            model_info = json.load(open(args.model))
            mapping = {'normal': 0, 'pneumonia': 1, 'COVID-19': 2}
            inv_mapping = {0: 'normal', 1: 'pneumonia', 2: 'COVID-19'}

            sess = tf.compat.v1.Session()
            tf.compat.v1.get_default_graph()
            saver = tf.compat.v1.train.import_meta_graph(os.path.join(args.weightspath, args.metaname))
            saver.restore(sess, os.path.join(args.weightspath, args.ckptname))

            graph = tf.compat.v1.get_default_graph()

            image_tensor = graph.get_tensor_by_name(args.in_tensorname)
            pred_tensor = graph.get_tensor_by_name(args.out_tensorname)

            x = process_image_file(args.imagepath, args.top_percent, args.input_size)
            x = x.astype('float32') / 255.0
            pred = sess.run(pred_tensor, feed_dict={image_tensor: np.expand_dims(x, axis=0)})

            print('Prediction: {}'.format(inv_mapping[pred.argmax(axis=1)[0]]))
            print('Confidence')
            print('Normal: {:.3f}, Pneumonia: {:.3f}, COVID-19: {:.3f}'.format(pred[0][0], pred[0][1], pred[0][2]))
            print('**DISCLAIMER**')
            print('Do not use this prediction for self-diagnosis. You should check with your local authorities for the latest advice on seeking medical assistance.')

            N = pred[0][0] # Normal
            P = pred[0][1] # Pneumonia
            C = pred[0][2] # COVID-19

            result = ('{}'.format(inv_mapping[pred.argmax(axis=1)[0]]))

            confidence = {
                        'normal': str("%.2f"%(N*100)),
                        'pneumonia': str("%.2f"%(P*100)),
                        'covid':str("%.2f"%(C*100))
                    }

            detection = {'detections':
                    {'prediction':result,
                        'confidence' :confidence
                        }
                    }
            heatmap_path = path+"heatmap.png"
            heatmap_path = {'heatmap_path':heatmap_path}
            my_logger.info('finish successfully and message acknowledge')
            my_logger.info('Detection : {}'.format(detection))

        except BaseException as error:
            my_logger.error('{}'.format(error))
            my_logger.info('error occurred need to reprocess message NOT acknowledge')

        # heatmap generation
        try:
            classes = [0,1,2]
            targetLayer=model_info["final_conv_tensor"]
            outLayer=args.out_tensorname
            if targetLayer is None:
                tensor_names = [t.name for op in tf.get_default_graph().get_operations() for t in op.values() if
                           "save" not in str(t.name)]
                for tensor_name in reversed(tensor_names):
                    tensor = graph.get_tensor_by_name(tensor_name)
                    if len(tensor.shape) == 4:
                        target = tensor
            else:
                target = graph.get_tensor_by_name(targetLayer)
            results = {} # grads of classes with keys being classes and values being normalized gradients
            for classIdx in classes:
                one_hot = tf.sparse_to_dense(classIdx, [len(classes)], 1.0)
                signal = tf.multiply(graph.get_tensor_by_name(outLayer),one_hot)
                loss = tf.reduce_mean(signal)

                grads = tf.gradients(loss, target)[0]

                norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads)))+tf.constant(1e-5))

                results[classIdx] = norm_grads

            grads = results
            origin_im = process_image_file(args.imagepath, args.top_percent, args.image_output_size)

            size_upsample = (origin_im.shape[1],origin_im.shape[0]) # (w, h)
            output, grads_val = sess.run([target, grads[mapping[result]]], feed_dict={image_tensor: np.expand_dims(x, axis=0)})

            conv_layer_out = output[0]
            grads_val = grads_val[0]
            upsample_size = size_upsample

            weights = np.mean(grads_val, axis=(0,1))
            cam = np.zeros(conv_layer_out.shape[0:2], dtype=np.float32)
            for i, w in enumerate(weights):
                cam += w*conv_layer_out[:,:,i]
            cam = np.maximum(cam, 0)
            cam = cam/np.max(cam)
            cam = cv2.resize(cam, upsample_size)

            cam3 = np.expand_dims(cam, axis=2)
            cam3 = np.tile(cam3,[1,1,3])

            # Overlay cam on image
            cam3 = np.uint8(255*cam3)
            cam3 = cv2.applyColorMap(cam3, cv2.COLORMAP_JET)

            new_im = cam3*0.3 + origin_im*0.5

            im_name = args.imagepath.split("/")[-1]
            ext = im_name.split(".")[-1]

            # Save the GradCAM
            cv2.imwrite(os.path.join(args.outdir, 'heatmap.png'), new_im)
            print("GradCAM image is save in ", args.outdir)
            ops.reset_default_graph()
            my_logger.info('Heatmaps saved : {}'.format(args.outdir))

            message.ack()

        except Exception as e:
            my_logger.error('heatmaps : not saved: %s', e)

        credential_json_file = ['file_name']
        databaseURL = ['databaseURL']
        storageBucket = ['storageBucket']
        try:
            if (not len(firebase_admin._apps)):
                cred = credentials.Certificate(credential_json_file)
                fa=firebase_admin.initialize_app(cred, {"databaseURL": databaseURL,'storageBucket':storageBucket})
                fc=firebase_admin.firestore.client(fa)
                db = firestore.client()
                blob = storage.bucket(storageBucket).blob('{}heatmap.png'.format(path))
                blob.upload_from_filename('assets/{0}/heatmap.png'.format(msg_id))
                my_logger.info('Heatmaps : Saved to Storage')
                doc_ref = db.collection(u'stripe_customers/{0}/results'.format(userId)).document(currentTime)
                doc_ref.update(detection)
                doc_ref.update(heatmap_path)
                my_logger.info('Detection : Saved to Firestore')
            else:
                # print('already initialized')
                db = firestore.client()
                blob = storage.bucket(storageBucket).blob('{}heatmap.png'.format(path))
                blob.upload_from_filename('assets/{0}/heatmap.png'.format(msg_id))
                my_logger.info('Heatmaps : Saved to Storage')
                doc_ref = db.collection(u'stripe_customers/{0}/results'.format(userId)).document(currentTime)
                doc_ref.update(detection)
                doc_ref.update(heatmap_path)
                my_logger.info('Detection : Saved to Firestore')

        except Exception as e:
            my_logger.error('Detection : NOT saved: %s', e)

        storage_client = store.Client.from_service_account_json(credential_json_file)
        log_bucket = storage_client.get_bucket('logs_bucket')
        log_blob = log_bucket.blob('{0}/logs/{1}/{2}'.format(userId,date,currentTime))
        log_blob.upload_from_filename('{}.log'.format(msg_id))
        #print(msg_id)
        os.remove('./{}.log'.format(msg_id))
        os.remove('./assets/{}.jpeg'.format(msg_id))
        shutil.rmtree('./assets/{}'.format(msg_id))
        print("end of pubsubcallback")
Example #8
def eval(sess, graph, testfile, testfolder, input_tensor, output_tensor,
         input_size):
    image_tensor = graph.get_tensor_by_name(input_tensor)
    pred_tensor = graph.get_tensor_by_name(output_tensor)

    y_test = []
    pred = []

    normal = []
    pneumonia = []
    covid = []
    """I ADD MY COD"""
    pred1 = []
    """END OF MY CODE"""

    for i in range(len(testfile)):
        line = testfile[i].split()
        x = process_image_file(os.path.join(testfolder, line[1]), 0.08,
                               input_size)
        x = x.astype('float32') / 255.0
        y_test.append(mapping[line[2]])
        res = np.array(
            sess.run(pred_tensor,
                     feed_dict={image_tensor:
                                np.expand_dims(x, axis=0)})).argmax(axis=1)
        if res == 0:
            normal.append(line)
        elif res == 1:
            pneumonia.append(line)
        else:
            covid.append(line)
        pred.append(np.array(res))
        #"""I ADD MY COD"""
        # get the probability of each class
        #pred1.append(np.array(sess.run(pred_tensor, feed_dict={image_tensor: np.expand_dims(x, axis=0)})))
        #"""END OF MY CODE"""
    pred = np.array(pred)
    y_test = np.array(y_test)

    normal = pd.DataFrame(np.matrix(normal))
    pneumonia = pd.DataFrame(np.matrix(pneumonia))
    covid = pd.DataFrame(np.matrix(covid))
    normal.to_csv("normal.txt",
                  sep=" ",
                  columns=None,
                  header=False,
                  index=False)
    pneumonia.to_csv("pneunomia.txt",
                     sep=" ",
                     columns=None,
                     header=False,
                     index=False)
    covid.to_csv("covid.txt", sep=" ", columns=None, header=False, index=False)
    #pred = np.array(pred)

    #"""I ADD MY COD"""
    #pred1 = np.array(pred1).reshape(-1, 3)
    #"""END OF MY CODE"""

    matrix = confusion_matrix(y_test, pred)
    matrix = matrix.astype('float')
    #cm_norm = matrix / matrix.sum(axis=1)[:, np.newaxis]
    print(matrix)
    #class_acc = np.array(cm_norm.diagonal())

    #""" I ADD MY CODE """

    #print(f"Accuracy of model: {accuracy_score(y_test, pred)}")

    #class_spec = [(np.sum(matrix) - np.sum(matrix[i, :]) - np.sum(matrix[:, i]) + matrix[i, i]) / (
    #            np.sum(matrix) - np.sum(matrix[i, :])) if np.sum(matrix[i, :]) else 0 for i in range(len(matrix))]

    #print('Spec Normal: {0:.3f}, Pneumonia: {1:.3f}, COVID-19: {2:.3f}'.format(class_spec[0],
    #                                                                           class_spec[1],
    #                                                                           class_spec[2]))

    print(f"Built in methood\n {classification_report(y_test, pred)}")