Example #1
def predict(ns, cluster_ip):
    data = get_image_data()
    images = preprocess_input(data)

    payload = {
        "instances": [images[0].tolist()]
    }

    # send a POST request to the TensorFlow Serving server
    headers = {'Host': 'imagenet.' + ns + '.' + cluster_ip + '.xip.io'}
    print(headers)
    url = PREDICT_TEMPLATE.format(cluster_ip)
    print("Calling ", url)
    r = requests.post(url, json=payload, headers=headers)
    resp_json = json.loads(r.content.decode('utf-8'))
    preds = np.array(resp_json["predictions"])
    label = decode_predictions(preds, top=1)

    plt.imshow(data[0])
    plt.title(label[0])
    plt.show()
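This snippet assumes two project-local names that are not shown, get_image_data and PREDICT_TEMPLATE. A minimal sketch of what they might look like; the image file name and the TF-Serving REST route here are illustrative assumptions, not taken from the source:

# Hypothetical helpers assumed by the snippet above.
import numpy as np
from tensorflow.keras.preprocessing import image

PREDICT_TEMPLATE = 'http://{0}/v1/models/imagenet:predict'  # assumed REST route

def get_image_data():
    # load a single local test image as a (1, 224, 224, 3) float array (assumption)
    img = image.load_img('dogs.jpg', target_size=(224, 224))
    return np.expand_dims(image.img_to_array(img), axis=0)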
Example #2
def upload_image():
    # check if the post request has the file part
    if 'image' not in request.files:
        return jsonify(
            {'error': 'No posted image. Should be attribute named image.'})
    file = request.files['image']

    # if the user does not select a file, the browser may also
    # submit an empty part without a filename
    if file.filename == '':
        return jsonify({'error': 'Empty filename submitted.'})
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        #file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        x = []
        ImageFile.LOAD_TRUNCATED_IMAGES = False
        img = Image.open(BytesIO(file.read()))
        img.load()
        img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.ANTIALIAS)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        x = x[:, :, :, 0:3]
        pred = model.predict(x)
        lst = decode_predictions(pred, top=5)

        items = []
        for itm in lst[0]:
            items.append({'name': itm[1], 'prob': float(itm[2])})

        response = {'pred': items}
        print(response)
        return jsonify(response)
    else:
        return jsonify({'error': 'File has invalid extension'})
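The allowed_file helper is not shown. A plausible sketch following the usual Flask upload pattern; the extension whitelist is an assumption:

# Hypothetical helper; the extension set is an assumption.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

def allowed_file(filename):
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS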
Example #3
def upload_image():
    if 'image' not in request.files:
        return render_template('ImageML.html', prediction='No posted image. Should be attribute named image')
    file = request.files['image']

    if file.filename == '':
        return render_template('ImageML.html', prediction='You did not select an image')

    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        print("***" + filename)
        x = []
        ImageFile.LOAD_TRUNCATED_IMAGES = False
        img = Image.open(BytesIO(file.read()))
        img.load()
        img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.ANTIALIAS)
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        pred = model.predict(x)
        lst = decode_predictions(pred, top=3)

        items = []
        for item in lst[0]:
            items.append({'name': item[1], 'prob': float(item[2])})

        response = {'pred': items}
        return render_template('ImageML.html', prediction='I would say the image is most likely {}'.format(response))
    else:
        return render_template('ImageML.html', prediction='Invalid file extension')
Example #4
def explain(
    img_url=None,
    cluster_ip=None,
    hostname=None,
    model_name=None,
):
    resp_pred, _ = request(
        img_url=img_url,
        cluster_ip=cluster_ip,
        hostname=hostname,
        model_name=model_name,
        op='predict',
    )
    resp = json.loads(resp_pred.content.decode('utf-8'))
    # label = resp['pred_decode']
    preds = np.array(resp['predictions'])
    label = decode_predictions(preds, top=1)
    print(label)
    response, op, input_filename = request(
        img_url=img_url,
        cluster_ip=cluster_ip,
        hostname=hostname,
        model_name=model_name,
        op='explain',
    )
    # decode_response(response, op)
    # save_raw_response(response, op, input_filename)
    msg = get_content_only(response, op)
    print(msg)
    return msg
Example #5
def classify(arg):
    # './data/people'
    data, sample = arg.split('/')
    path = os.path.join(data, sample)
    print(path)
    dirs = os.listdir(path)
    num_test = 4
    classified_list = {}
    model = MobileNet(weights='imagenet')
    if os.path.exists('./cloth_label.txt'):
        with open('./cloth_label.txt', 'r') as f:
            categories = f.read().split()
    else:
        return False
    print(categories)
    for category in categories:
        classified_list[category] = 0

    for dir in dirs:
        dir_path = os.path.join(data, sample, dir)
        files = os.listdir(dir_path)
        for file in files:
            name, extension = os.path.splitext(os.path.join(dir_path, file))
            if extension in ('.jpg', '.png', '.jpeg'):
                src = os.path.join(dir_path, file)
                print("src : " + src)
                img = image.load_img(src, target_size=(224, 224))
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                preds = model.predict(x)
                labels = decode_predictions(preds, top=num_test)[0]
                flag = False

                # keep the file in place if any top prediction is a known category
                for label in labels:
                    if classified_list.get(label[1]) is not None:
                        flag = True
                        break
                # otherwise mark the file as 'etc' by renaming it
                if not flag:
                    new_filename = 'etc_' + file
                    print(new_filename)
                    os.rename(os.path.join(dir_path, file),
                              os.path.join(dir_path, new_filename))
Example #6
    def classify(self, img_storage):
        byte_storage = BytesIO(img_storage.read())
        byte_storage.seek(0)
        img = Image.open(byte_storage)
        img = img.resize((224, 224))
        x = img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        predictions = self.model.predict(x)
        return decode_predictions(predictions)
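For context, a hypothetical sketch of how this method could be wired into a Flask route; the Classifier class name and the endpoint are illustrative, not from the source:

# Hypothetical wiring; assumes a Classifier class exposing the classify method above.
from flask import Flask, request, jsonify

app = Flask(__name__)
classifier = Classifier()  # assumed to load self.model in __init__

@app.route('/classify', methods=['POST'])
def classify_endpoint():
    # request.files['image'] is a werkzeug FileStorage, matching img_storage above
    preds = classifier.classify(request.files['image'])[0]
    return jsonify([{'name': name, 'prob': float(prob)} for _, name, prob in preds])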
Example #7
def upload_image():
    #Do sanity checks
    #Check whether post req has a file which has an img attribute
    if 'image' not in request.files:
        return render_template(
            'ImageML.html',
            prediction='No posted image. Should contain an attribute named image.')

    file = request.files['image']

    if file.filename == '':
        return render_template('ImageML.html',
                               prediction='You did not select an image.')

    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)  # sanitize the filename in case of weird chars
        print("***" + filename)

        #Preprocessing Steps
        x = []
        ImageFile.LOAD_TRUNCATED_IMAGES = False
        img = Image.open(BytesIO(file.read()))
        img.load()
        # Need right size
        img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT), Image.ANTIALIAS)
        # convert img to array
        x = image.img_to_array(img)
        # Expand the dims
        x = np.expand_dims(x, axis=0)  # so that the array shape starts with (1, ...)
        x = preprocess_input(x)
        pred = model.predict(x)
        lst = decode_predictions(pred, top=3)

        items = []
        for item in lst[0]:
            items.append({'name': item[1], 'prob': float(item[2])})

        response = {'pred': items}

        return render_template(
            'ImageML.html',
            prediction='The image is most likely {}'.format(response))

    else:
        return render_template('ImageML.html',
                               prediction='Invalid file extension.')
Example #8
def predict():
    if request.method == 'POST':
        # Get the image from post request
        img = base64_to_pil(request.json)

        # Make prediction
        preds = model_predict(img, model)

        # Process your result for human
        pred_proba = "{:.3f}".format(np.amax(preds))
        pred_class = decode_predictions(preds, top=1)

        result = str(pred_class[0][0][1])
        # result = result.replace('_', ' ').capitalize()

        # Serialize the result, you can add additional fields
        return jsonify(result=result, probability=pred_proba)

    return None
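base64_to_pil and model_predict are helpers from the surrounding app. A plausible sketch of base64_to_pil, assuming the POST body is a base64 data-URL string (the prefix handling is an assumption):

# Plausible sketch of the assumed helper.
import base64
import re
from io import BytesIO
from PIL import Image

def base64_to_pil(img_base64):
    # strip a "data:image/...;base64," prefix if present, then decode
    image_data = re.sub('^data:image/.+;base64,', '', img_base64)
    return Image.open(BytesIO(base64.b64decode(image_data)))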
Example #9
def predict(model_name, namespace, cluster_ip):
    data = get_image_data()
    images = preprocess_input(data)

    payload = {"instances": [images[0].tolist()]}

    # send a POST request to the TensorFlow Serving server
    headers = {
        'Host': model_name + '.' + namespace + '.' + cluster_ip + '.xip.io'
    }
    print(headers)
    url = PREDICT_TEMPLATE.format(cluster_ip)
    print("Calling ", url)
    r = requests.post(url, json=payload, headers=headers)
    resp_json = json.loads(r.content.decode('utf-8'))
    preds = np.array(resp_json["predictions"])
    label = decode_predictions(preds, top=1)

    plt.imshow(data[0])
    plt.title(label[0])
    plt.show()
Example #10
def run_trt_model(model_name):

    model_path = f"{model_name}.onnx"
    img_path = "elephhant2.jpeg"

    engine = build_engine(model_path)
    context = engine.create_execution_context()

    # in_cpu, out_cpu, in_gpu, out_gpu, stream = alloc_buf(engine)
    h_input, h_output, d_input, d_output, stream = alloc_buf(engine)

    load_input(img_path, h_input)

    t1 = time.time()
    # load_input("elephant.png",h_input)
    res = inference(context, h_input, h_output, d_input, d_output, stream)

    out = '{} Time : {} {} - Predicted: {} \n'.format(model_name, time.time() - t1, img_path,
                                                      decode_predictions(res.reshape(1, -1), top=3)[0][0][1])
    with open("trt_eport.txt", 'a+') as fin:
        fin.write(out)
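build_engine, alloc_buf and inference are project-local TensorRT helpers that are not shown. One piece that can be sketched safely is load_input, which only has to fill the page-locked host buffer with the preprocessed image; the preprocessing choice is an assumption:

# Hypothetical sketch of load_input; assumes h_input is a flat page-locked
# numpy buffer sized for one (1, 224, 224, 3) input.
import numpy as np
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet import preprocess_input

def load_input(img_path, h_input):
    img = image.load_img(img_path, target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    np.copyto(h_input, x.ravel())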
Example #11
def decode_response(response, op):
    if response.status_code == 200:
        resp = json.loads(response.content.decode('utf-8'))

        if op == 'predict':
            preds = np.array(resp["predictions"])
            # label = resp['pred_decode']
            label = decode_predictions(preds, top=1)

            SAVEPATH = 'output_predict.jpg'
            show_img(preds, title=str(label), savepath=SAVEPATH)
            print("{}: result saved: {}".format(op, SAVEPATH))

        elif op == 'explain':

            model_input_img = resp['raw']['instance']
            model_output = resp['raw']['prediction']
            # pred = model_output['predictions']
            # label = model_output['pred_decode'][1:]
            label = model_output
            # label = decode_predictions(model_output['predictions'], top=1)[1:]

            explainer_typ = resp['meta']['name']
            exp_superpixels = resp['anchor']
            exp_segment_grid = resp['segments']
            exp_tried_seg = resp['raw']['examples'][0]['covered']
            exp_scores = {
                k: v
                for k, v in resp.items() if k in ['coverage', 'precision']
            }

            # show_img(model_input_img, savepath=None)
            # show_img(exp_superpixels, savepath=None)
            # show_img(exp_segment_grid, savepath=None)

            show_img(resp['segments'])

            explain_score = 'c: {0}, p: {1}'.format(resp['coverage'],
                                                    resp['precision'])
            img_labels = [
                '\n'.join(['input', str(label)]),
                '\n'.join(['superpixel', explain_score]),
                '\n'.join(['segment_grid', explain_score]),
            ]
            img_outputs = [model_input_img, exp_superpixels, exp_segment_grid]

            fig, axes = plt.subplots(nrows=1, ncols=3)
            for ax, _img, _label in zip(axes.ravel(), img_outputs, img_labels):
                ax.set_axis_off()
                ax.imshow(_img, aspect='equal')
                ax.set_title(_label)

            SAVEPATH = 'output_explain.jpg'
            fig.savefig(SAVEPATH)
            print("{}: result saved: {}".format(op, SAVEPATH))

            # # (Optional) Tried segments
            # fig, axes = plt.subplots(nrows=10, ncols=1, figsize=(20*10, 20))
            # for _, ax in zip(resp['raw']['examples'][0]['covered'], axes.ravel()):
            #     ax.imshow(_, aspect='equal')
            #     ax.set_axis_off()
            #     fig.tight_layout(rect=[0, 0.02, 1, 0.97])
            #     fig.suptitle('tried_segments', y=0.98, horizontalalignment='center')

    else:
        print("Received response code and content", response.status_code,
              response.content)
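show_img is another project-local helper; a minimal sketch consistent with how it is called above (array-like image, optional title and save path):

# Minimal sketch of the assumed show_img helper.
import numpy as np
import matplotlib.pyplot as plt

def show_img(img, title=None, savepath=None):
    plt.figure()
    plt.imshow(np.squeeze(np.asarray(img)))
    plt.axis('off')
    if title is not None:
        plt.title(title)
    if savepath is not None:
        plt.savefig(savepath)
    plt.show()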
Example #12
import tensorflow as tf
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2 as Net
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
import numpy as np
import time

model = Net(weights='imagenet')

img_path = 'images/grace_hopper.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
# decode the results into a list of tuples (class, description, probability)
# (one such list for each sample in the batch)
print('Predicted:', decode_predictions(preds, top=3)[0])

times = []
for i in range(20):
    start_time = time.time()
    preds = model.predict(x)
    delta = (time.time() - start_time)
    times.append(delta)
mean_delta = np.array(times).mean()
fps = 1 / mean_delta
print('average(sec):{:.2f},fps:{:.2f}'.format(mean_delta, fps))
Example #13
import numpy as np
import cv2
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions

# include_top=True: the complete model
# include_top=False: drops the final 3 fully connected layers; this variant is
# released specifically for fine-tuning.
model = MobileNet(weights='imagenet')
model.summary()

img_path = "elephant.jpg"
img = image.load_img(img_path, target_size=(224, 224))
# scale the input values into the 0-1 range
img = image.img_to_array(img) / 255.0
# add a 4th (batch) dimension at axis=0, since MobileNet's Input layer shape is (None, 224, 224, 3)
img = np.expand_dims(img, axis=0)
print(img.shape)

predictions = model.predict(img)
print('Predicted:', decode_predictions(predictions, top=3)[0])
print(predictions)

description = decode_predictions(predictions, top=3)[0][0][1]

src = cv2.imread(img_path)
cv2.putText(src, description, (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
            (255, 0, 0), 2)
cv2.imshow("Predicted", src)
cv2.waitKey()
Example #14
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import tag_constants
from tensorflow.keras.applications.mobilenet import preprocess_input, decode_predictions

def preprocess(img_path, img_size):
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)[:, :, ::-1]  # BGR -> RGB
    img = np.float32(img) / 255.0
    img = cv2.resize(img, (img_size, img_size))
    return img

# read the image and preprocess it
img_path = 'tabby_tiger_cat.jpg'
img_size = 224
img = preprocess(img_path, img_size)

input_saved_model = 'mobilenet_saved_model_TFTRT_FP16'
saved_model_loaded = tf.saved_model.load(
    input_saved_model,
    tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
print(signature_keys)

infer = saved_model_loaded.signatures['serving_default']
print(infer.structured_outputs)
keys = list(infer.structured_outputs.keys())

# the signature expects a batched tensor, so add a batch dimension and wrap in tf.constant
preds = infer(tf.constant(img[np.newaxis, ...]))[keys[0]].numpy()
print('Predicted: {}'.format(decode_predictions(preds, top=1)[0]))
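The snippet loads an already converted TF-TRT saved model but never uses the imported trt_convert. For reference, a conversion along these lines could have produced it; the source model directory name is an assumption:

# Sketch of an FP16 TF-TRT conversion that would produce the saved model above.
params = trt.TrtConversionParams(precision_mode=trt.TrtPrecisionMode.FP16)
converter = trt.TrtGraphConverterV2(
    input_saved_model_dir='mobilenet_saved_model',  # assumed source model dir
    conversion_params=params)
converter.convert()
converter.save('mobilenet_saved_model_TFTRT_FP16')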

Example #15
from tensorflow.keras.applications.mobilenet import MobileNet, decode_predictions

mobile = MobileNet()
# mobile.summary()

import cv2
import time
import matplotlib.pyplot as plt


img = cv2.imread('./img/bird.jpg', -1)
img = cv2.resize(img, (224, 224))

start = time.time()
yhat = mobile.predict(img.reshape(-1, 224, 224, 3))
elapsed = time.time() - start  # avoid shadowing the time module
# label_key = np.argmax(yhat)
label = decode_predictions(yhat)
label = label[0][0]

print("Inference time: {}".format(elapsed))
print('%s (%.2f%%)' % (label[1], label[2] * 100))
img = img[:, :, ::-1]  # BGR -> RGB for matplotlib
plt.figure(figsize=(11, 11))
plt.imshow(img)
plt.axis("off")
plt.show()
Example #16
    def set_model(self, model_name, top_n=5):
        if model_name == 'densenet':
            self.model = densenet.DenseNet121(include_top=True,
                                              weights='imagenet',
                                              input_tensor=None,
                                              input_shape=None,
                                              pooling=None,
                                              classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: densenet.decode_predictions(x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1608.06993' target='_blank'>
                Densely Connected Convolutional Networks</a> (CVPR 2017 Best Paper Award)</li>
                </ul>
                """

        elif model_name == 'inception_resnet_v2':
            self.model = inception_resnet_v2.InceptionResNetV2(
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                input_shape=None,
                pooling=None,
                classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_resnet_v2.decode_predictions(
                x, top=top_n)
            self.ref = """
                <ul>
                <li><a href='https://arxiv.org/abs/1602.07261' target='_blank'>
                Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning</a></li>
                </ul>
                """

        elif model_name == 'inception_v3':
            self.model = inception_v3.InceptionV3(include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  input_shape=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: inception_v3.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1512.00567' target='_blank'>
                Rethinking the Inception Architecture for Computer Vision</a></li>
                </ul>
                """

        elif model_name == 'mobilenet':
            self.model = mobilenet.MobileNet(input_shape=None,
                                             alpha=1.0,
                                             depth_multiplier=1,
                                             dropout=1e-3,
                                             include_top=True,
                                             weights='imagenet',
                                             input_tensor=None,
                                             pooling=None,
                                             classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1704.04861' target='_blank'>
                MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications</a></li>
                </ul>
                """

        elif model_name == 'mobilenet_v2':
            self.model = mobilenet_v2.MobileNetV2(input_shape=None,
                                                  alpha=1.0,
                                                  include_top=True,
                                                  weights='imagenet',
                                                  input_tensor=None,
                                                  pooling=None,
                                                  classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: mobilenet_v2.decode_predictions(x,
                                                                     top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1801.04381' target='_blank'>
                MobileNetV2: Inverted Residuals and Linear Bottlenecks</a></li>
                </ul>
                """

        elif model_name == 'nasnet':
            self.model = nasnet.NASNetLarge(input_shape=None,
                                            include_top=True,
                                            weights='imagenet',
                                            input_tensor=None,
                                            pooling=None,
                                            classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: nasnet.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li><a href='https://arxiv.org/abs/1707.07012' target='_blank'>
                Learning Transferable Architectures for Scalable Image Recognition</a></li>
                </ul>
                """

        elif model_name == 'resnet50':
            self.model = resnet50.ResNet50(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: resnet50.decode_predictions(x, top=top_n)
            self.ref = """<ul>
                <li>ResNet : 
                <a href='https://arxiv.org/abs/1512.03385' target='_blank'>Deep Residual Learning for Image Recognition
                </a></li>
                </ul>
                """

        elif model_name == 'vgg16':
            self.model = vgg16.VGG16(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg16.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>
            Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'vgg19':
            self.model = vgg19.VGG19(include_top=True,
                                     weights='imagenet',
                                     input_tensor=None,
                                     input_shape=None,
                                     pooling=None,
                                     classes=1000)
            self.target_size = (224, 224)
            self.decoder = lambda x: vgg19.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1409.1556' target='_blank'>Very Deep Convolutional Networks for Large-Scale Image Recognition</a></li>
            </ul>"""

        elif model_name == 'xception':
            self.model = xception.Xception(include_top=True,
                                           weights='imagenet',
                                           input_tensor=None,
                                           input_shape=None,
                                           pooling=None,
                                           classes=1000)
            self.target_size = (299, 299)
            self.decoder = lambda x: xception.decode_predictions(x, top=top_n)
            self.ref = """<ul>
            <li><a href='https://arxiv.org/abs/1610.02357' target='_blank'>Xception: Deep Learning with Depthwise Separable Convolutions</a></li>
            </ul>"""

        else:
            logger.error('Unknown model name: %s', model_name)
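A hypothetical usage sketch of set_model; the wrapper class name is illustrative, and note that the preprocessing function must match the chosen model family:

# Hypothetical usage; assumes a wrapper class exposing the set_model method above.
import numpy as np
from tensorflow.keras.applications import mobilenet_v2
from tensorflow.keras.preprocessing import image

clf = ModelWrapper()  # assumed wrapper class
clf.set_model('mobilenet_v2', top_n=3)
img = image.load_img('elephant.jpg', target_size=clf.target_size)
x = mobilenet_v2.preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(clf.decoder(clf.model.predict(x)))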
Example #17
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.applications.mobilenet import preprocess_input
from tensorflow.keras.applications.mobilenet import decode_predictions

# build the MobileNet model
model = MobileNet(weights="imagenet", include_top=True)
# load a test image
img = load_img("koala.png", target_size=(224, 224))
x = img_to_array(img)  # convert to a NumPy array
print("x.shape: ", x.shape)
# Reshape to (1, 224, 224, 3)
img = x.reshape((1, x.shape[0], x.shape[1], x.shape[2]))
# preprocess the input
img = preprocess_input(img)
print("img.shape: ", img.shape)
# run the prediction
Y_pred = model.predict(img)
# decode the prediction results
label = decode_predictions(Y_pred)
result = label[0][0]  # take the most likely result
print("%s (%.2f%%)" % (result[1], result[2] * 100))
Example #18
# alternative test images; the last assignment wins
image_path = IMAGES_DIR / 'lena.jpg'
image_path = IMAGES_DIR / 'military-raptor.jpg'  # warplane
image_path = IMAGES_DIR / 'baboon.png'  # baboon
image_path = IMAGES_DIR / 'fighter_jet.jpg'  # warplane
image_path = IMAGES_DIR / 'watch.png'  # stopwatch
image_path = IMAGES_DIR / 'bear.tif'  # brown_bear
image_path = IMAGES_DIR / 'wild_flowers.tif'  # greenhouse
image_path = IMAGES_DIR / 'elephant.jpg'  # tusker

print(image_path)
# read image in RGB format
image = imageio.imread(str(image_path))
print(image.shape)
image = vision.resize(image, 224, 224)
batch = expand_to_batch(image)
batch = preprocess_input(batch)

b_model = model_mobilenet(weights="imagenet")
b_y = b_model.predict(batch)
b_label = decode_predictions(b_y)
b_label = b_label[0][0]
# print the classification
print('OUR: %s (%.2f%%)' % (b_label[1], b_label[2] * 100))

a_model = MobileNet(weights="imagenet")
a_y = a_model.predict(batch)
a_label = decode_predictions(a_y)
a_label = a_label[0][0]
# print the classification
print('KERAS: %s (%.2f%%)' % (a_label[1], a_label[2] * 100))
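vision.resize, expand_to_batch and model_mobilenet are project-local names; expand_to_batch in particular can be sketched directly from how it is used above:

# Plausible sketch of the assumed expand_to_batch helper.
import numpy as np

def expand_to_batch(img):
    # add a leading batch dimension: (H, W, C) -> (1, H, W, C)
    return np.expand_dims(img, axis=0)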
Example #19
# These models were developed on powerful computers, so we may as well use them for transfer learning.
# For VGG16 the images need to be 224x224.
from keras.preprocessing.image import load_img
image = load_img('images/cab.jpg', target_size=(224, 224))

# Convert pixels to a NumPy array
from keras.preprocessing.image import img_to_array
image = img_to_array(image)

# Reshape data for the model. VGG expects multiple images of size 224x224x3, 
#therefore the input shape needs to be (1, 224, 224, 3)
#image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
import numpy as np
image = np.expand_dims(image, axis=0)

# Data needs to be preprocessed the same way as the training dataset to get the best results.
# preprocess_input from Keras does this job.
# Notice the change in pixel values (preprocessing subtracts the training set's mean RGB value from each pixel).
from keras.applications.vgg16 import preprocess_input
image = preprocess_input(image)


# Load the pretrained model before predicting. The original snippet assumes a
# `model` defined earlier; VGG16 with ImageNet weights matches the comments here.
from keras.applications.vgg16 import VGG16
model = VGG16(weights='imagenet')

# predict the probability across all output categories.
# Probability for each of the 1000 classes will be calculated.
pred = model.predict(image)

# Print the probabilities of the top 5 classes
# (decode_predictions is the same ImageNet decoder across the keras.applications modules)
from tensorflow.keras.applications.mobilenet import decode_predictions
pred_classes = decode_predictions(pred, top=5)
for i in pred_classes[0]:
    print(i)