Example #1
def compute_saliency(model,
                     guided_model,
                     img_path,
                     preprocessed_input,
                     layer_name='block5_conv3',
                     cls=-1,
                     visualize=True,
                     save=True):
    """Compute saliency using all three approaches.
        -layer_name: layer to compute gradients;
        -cls: class number to localize (-1 for most probable class).
    """
    #preprocessed_input = load_image(img_path)

    predictions = model.predict(preprocessed_input)
    top_n = 5
    top = decode_predictions(predictions, top=top_n)[0]
    classes = np.argsort(predictions[0])[-top_n:][::-1]
    print('Model prediction:')
    for c, p in zip(classes, top):
        print('\t{:15s}\t({})\twith probability {:.3f}'.format(p[1], c, p[2]))
    if cls == -1:
        cls = np.argmax(predictions)
    class_name = decode_predictions(np.eye(1, 1000, cls))[0][0][1]
    print("Explanation for '{}'".format(class_name))

    gradcam = grad_cam(model, preprocessed_input, cls, layer_name)
    gb = guided_backprop(guided_model, preprocessed_input, layer_name)
    guided_gradcam = gb * gradcam[..., np.newaxis]

    if save:
        jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)
        jetcam = (np.float32(jetcam) +
                  load_image(img_path, H, W, preprocess=False)) / 2
        cv2.imwrite('gradcam.jpg', np.uint8(jetcam))
        cv2.imwrite('guided_backprop.jpg', deprocess_image(gb[0]))
        cv2.imwrite('guided_gradcam.jpg', deprocess_image(guided_gradcam[0]))

    if visualize:
        plt.figure(figsize=(15, 10))
        plt.subplot(131)
        plt.title('GradCAM')
        plt.axis('off')
        plt.imshow(load_image(img_path, H, W, preprocess=False))
        plt.imshow(gradcam, cmap='jet', alpha=0.5)

        plt.subplot(132)
        plt.title('Guided Backprop')
        plt.axis('off')
        plt.imshow(np.flip(deprocess_image(gb[0]), -1))

        plt.subplot(133)
        plt.title('Guided GradCAM')
        plt.axis('off')
        plt.imshow(np.flip(deprocess_image(guided_gradcam[0]), -1))
        plt.show()

    return gradcam, gb, guided_gradcam
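A note on the np.eye trick above: decode_predictions expects a (batch, 1000) array of class probabilities, so np.eye(1, 1000, cls) builds a one-hot row with a 1.0 at column cls, and decoding it simply looks up that class's ImageNet id and human-readable name. A minimal stand-alone sketch (the class index is an arbitrary illustrative value):

import numpy as np
from keras.applications.inception_v3 import decode_predictions

cls = 281  # arbitrary ImageNet class index, for illustration only
one_hot = np.eye(1, 1000, cls)                      # shape (1, 1000), 1.0 at column cls
class_name = decode_predictions(one_hot)[0][0][1]   # entries are (class_id, class_name, score)
print(class_name)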
Example #2
def confront_prediction(file):
    # Load the image and preprocess it to match the network's input format
    img = image.load_img(file, target_size=(299, 299))  # load the cat photo
    input_image = image.img_to_array(img)  # convert it to an array of shape (299, 299, 3)
    # Normalize the image to [-1, 1]
    input_image /= 255.  # scale to 0-1
    input_image -= 0.5  # shift to -0.5-0.5
    input_image *= 2.  # scale to -1-1
    # Add a fourth (batch) dimension so the input matches what the model expects
    input_image = np.expand_dims(
        input_image,
        axis=0)  # expands the array's shape at axis=0, turning (299, 299, 3) into (1, 299, 299, 3)

    # model = inception_v3.InceptionV3(weights='imagenet')  # load from the network
    # model = load_model("static/trained_models/inception_v3_on_imageNet.h5")  # load from disk
    # Run the prediction and return the result
    global graph
    with graph.as_default():
        predictions = model.predict(input_image)  # run the model
        predicted_classes = inception_v3.decode_predictions(
            predictions, top=1)  # top=1 returns only the single most probable class
    # print(predicted_classes)  # inspect the raw output
    imagenet_id, name, confidence = predicted_classes[0][0]  # info for the most probable class
    # print("This is a {0} with {1:.4}% confidence!".format(name, confidence * 100))
    return "This is a {0}!".format(name)
Example #3
def predict_from_saved_model(model_path, image_path, decode_fn, top=5):
    predict_fn = predictor.from_saved_model(export_dir=model_path)

    with open(image_path, 'rb') as f:
        b64_x = f.read()
    b64_x = base64.urlsafe_b64encode(b64_x)
    input_instance = {'inputs': [b64_x]}

    preds = predict_fn(input_instance)['outputs'][0]
    if decode_fn == 'inception':
        preds = np.expand_dims(preds, 0)
        print('Predicted:')
        for p in decode_predictions(preds, top=top)[0]:
            print("Score {}, Label {}".format(p[2], p[1]))
    elif decode_fn == 'transfert':
        preds = [(fruit_mapping[idx], pred) for idx, pred in enumerate(preds)]
        preds = sorted(preds, key=lambda pred: pred[1], reverse=True)
        if top:
            preds = preds[:top]
        print('Predicted:')
        for pred in preds:
            print("Score {}, Label {}".format(pred[1], pred[0]))
    else:
        print('Predicted scores:')
        print(preds)
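A usage sketch for this helper: it assumes a TF1-style SavedModel whose serving signature takes base64-encoded image bytes under the 'inputs' key, and decode_fn is a string flag despite its name (both paths below are placeholders):

# 'export/1531234567' and 'dog.jpg' are placeholder paths
predict_from_saved_model('export/1531234567', 'dog.jpg', decode_fn='inception', top=5)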
Example #4
def predict(image_file):
    img = image.load_img(image_file, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    global graph
    with graph.as_default():
        preds = model.predict(x)
    top3 = decode_predictions(preds, top=3)[0]
    answer = dict(zip(('label', 'description', 'probability'), top3[0]))
    print(answer)
    predictions = [{
        'label': label,
        'description': description,
        'probability': round(probability, 2)
    } for label, description, probability in top3]
    return {
        'answer': {
            'label': answer['label'],
            'description': answer['description'],
            'probability': round(answer['probability'], 2)
        },
        'top3': predictions
    }
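This handler relies on module-level model and graph globals; a minimal sketch of the TF1/Keras setup that the `with graph.as_default()` pattern implies (the choice of InceptionV3 here is an assumption):

import tensorflow as tf
from keras.applications.inception_v3 import InceptionV3

# Load the model once at import time and remember the graph it was built in,
# so request handlers running in other threads can re-enter it via graph.as_default().
model = InceptionV3(weights='imagenet')
graph = tf.get_default_graph()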
Example #5
def imagelabel2json(tmpfile, model):
    print("画像解析開始")

    img = image.load_img(tmpfile, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)

    recognize = decode_predictions(preds)
    en_label = recognize[0][0][1]
    label = en_label
    score = str(recognize[0][0][2])

    with open(IMAGENET_JSON_PATH, 'r') as f:
        obj = json.load(f)
        for i in obj:
            if i['en'] == en_label:
                label = i['ja']
                break

    result = {'label': label, 'score': score}

    with open(LABEL_JSON_PATH, 'w') as f:
        json.dump(result, f)

    print("Finished image analysis")
Example #6
def classify(img):
    arr = preprocess_input(np.expand_dims(img.astype(np.float32), axis=0))
    preds = model.predict(arr)
    _, lab, p = decode_predictions(preds, top=3)[0][0]
    if p < 0.5:
        lab = ''
    return lab
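This snippet assumes a module-level model plus preprocess_input and decode_predictions imported elsewhere; a minimal sketch of that assumed setup (which exact network is used is not shown, InceptionV3 is just one plausible choice):

from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions

model = InceptionV3(weights='imagenet')  # assumed global used by classify()
# classify() then expects `img` as an HxWx3 uint8 array already sized for the network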
Example #7
    def identifica(self, nomeImagem):
        """ [Identificação de imagem por rede neural]
        
            Arguments:
                nomeImagem {[string]} -- [nome da imagem a ser identificada pela rede neural]
            
            Returns:
                [Lista] -- [Contendo as 5 identificaçoes encontrada da possibilidade 
                            de reconhecimento da imagem recebida.]
        """

        # The neural network identifies the image.
        ar_X = image.img_to_array(image.load_img(nomeImagem, target_size=(299, 299))) 

        # Rescale the matrix values from the 0-255 range to -1 to 1.
        ar_X /= 255
        ar_X -= 0.5
        ar_X *= 2

        # apply reshape to add the batch dimension.
        ar_X = ar_X.reshape([1, ar_X.shape[0], ar_X.shape[1], ar_X.shape[2]])
        ar_Y = self.iv3.predict(ar_X)

        # after processing, return the decoded classification.
        resultado = decode_predictions(ar_Y)
        self.X = np.copy(ar_X)

        return resultado
Example #8
def predict(model, img_names, target_size, top_n=3):
  """Run model prediction on image
  Args:
    model: keras model
    img: PIL format image
    target_size: (w,h) tuple
    top_n: # of top predictions to return
  Returns:
    list of predicted labels and their probabilities
  """
  xs = []
  for img_name in img_names:
      img = Image.open(img_name)
      if img.size != target_size:
        img = img.resize(target_size)
      x = image.img_to_array(img)
      xs.append(x)

  xs = np.asarray(xs)
  xs = preprocess_input(xs)
  preds = model.predict(xs)
  res = decode_predictions(preds, top=top_n)

  for i in range(len(img_names)):
      print(img_names[i])
      print(res[i])
      print()
  return res
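A minimal sketch of how this helper might be driven, assuming a stock ImageNet model and that the module-level preprocess_input/decode_predictions match it (file names are placeholders):

from keras.applications.resnet50 import ResNet50

model = ResNet50(weights='imagenet')
predict(model, ['dog.jpg', 'cat.jpg'], target_size=(224, 224), top_n=3)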
Example #9
def main():
    base_model = load_base_model('ResNet50', None)
    # Use correct image preprocessing for model
    if base_model.name == 'inception_v3':
        preprocess_input = inception_v3_preprocess_input
    else:
        preprocess_input = preprocess_input_wrapper
    # Rewrite this bad boy
    waitForIn = threading.Thread(target=waitForTerminate)
    waitForIn.start()
    while True:
        images, filenames = load_images('image', 224)
        if len(filenames) == 0:
            if kill:
                print("The user terminated the program\n")
                exit(0)
            time.sleep(2)
        else:
            if kill:
                print("The user terminated the program\n")
                exit(0)
            preds = base_model.predict(images)
            print()
            print(filenames[0])
            print()
            print('Predicted:', decode_predictions(preds, top=10))
            os.remove('image/' + filenames[0])
Example #10
    def predict(self, model, image_path):
        '''
            inputs:
                image_path: path to a single image file
        '''

        image = load_img(image_path, target_size=self.image_size)
        image = img_to_array(image)
        image = image.reshape(
            (1, image.shape[0], image.shape[1], image.shape[2]))
        image = preprocess_input(image)
        model_preds = model.predict(image)
        sort_index_preds = numpy.flip(numpy.argsort(model_preds))[0]

        accumulate = 0.
        res = []
        if self.model == 'InceptionV3':
            pr = decode_predictions(model_preds, top=int(self.max_items))[0]
            accumulate = 0.
            for i in pr:
                if i[2] < self.min_threshold:
                    return res, accumulate
                accumulate += i[2]
                res.append(i[1])
                if accumulate > self.threshold:
                    return res, accumulate
            return res, accumulate
        for i in sort_index_preds:
            if model_preds[0][i] < self.min_threshold:
                return res, accumulate
            accumulate += model_preds[0][i]
            res.append(self.categories[i])
            if accumulate > self.threshold:
                return res, accumulate
        return res, accumulate
Example #11
def my_detection(filename):
    # Load pre-trained image recognition model
    model = inception_v3.InceptionV3()

    # Load the image file and convert it to a numpy array
    img = image.load_img(filename, target_size=(299, 299))
    input_image = image.img_to_array(img)
    print(input_image.shape)

    # Scale the image so all pixel intensities are between [-1, 1] as the model expects
    input_image /= 255.
    input_image -= 0.5
    input_image *= 2.

    # Add a 4th dimension for batch size (as Keras expects)
    input_image = np.expand_dims(input_image, axis=0)

    # Run the image through the neural network
    print(input_image.shape)
    predictions = model.predict(input_image)

    # Convert the predictions into text and print them
    predicted_classes = inception_v3.decode_predictions(predictions, top=1)
    imagenet_id, name, confidence = predicted_classes[0][0]
    print("This is a {} with {:.4}% confidence!".format(
        name, confidence * 100))
Example #12
    def open(self):
        for ele in root.winfo_children():
            ele.destroy()
            
        myFormats = [('JPEG / JFIF','*.jpg')]
        global file_path_string
        file_path_string = tkFileDialog.askopenfilename(filetypes=myFormats)
        im = Image.open(file_path_string)
        tkimage = ImageTk.PhotoImage(im)
        myvar = Label(root,image = tkimage)
        myvar.image = tkimage
        myvar.pack()

        img_path = file_path_string
        img = image.load_img(img_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        global model
        preds = model.predict(x)
        
        myvar = Label(root, text=file_path_string + "\n" + str(decode_predictions(preds, top=3)[0]))
        myvar.pack()

        self.open_button = Button(root, text="Open File", command=self.open)
        self.open_button.pack()

        root.close_button = Button(root, text="Close Program", command=root.quit)
        root.close_button.pack()
Example #13
def predict():
	# initialize the data dictionary that will be returned from the
	# view
	data = {"success": False}
	# ensure an image was properly uploaded to our endpoint
	if flask.request.method == "POST":
		if flask.request.files.get("image"):
			# read the image in PIL format
			image = flask.request.files["image"].read()
			image = Image.open(io.BytesIO(image))

			# preprocess the image and prepare it for classification
			image = prepare_image(image, target=(224, 224))

			# classify the input image and then initialize the list
			# of predictions to return to the client
			preds = model.predict(image)
			results = decode_predictions(preds)
			data["predictions"] = []

			# loop over the results and add them to the list of
			# returned predictions
			for (imagenetID, label, prob) in results[0]:
				r = {"label": label, "probability": float(prob)}
				data["predictions"].append(r)

			# indicate that the request was a success
			data["success"] = True

	# return the data dictionary as a JSON response
	return flask.jsonify(data)
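A minimal client-side sketch for exercising this Flask view with the requests library; the URL and route are assumptions, since the @app.route decorator is not shown here:

import requests

url = "http://localhost:5000/predict"   # hypothetical host/port/route
with open("dog.jpg", "rb") as f:         # "dog.jpg" is a placeholder image path
    resp = requests.post(url, files={"image": f})
print(resp.json())   # {"success": true, "predictions": [{"label": ..., "probability": ...}, ...]}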
Example #14
    def get(self, resource_id):
        arr = [
            'taj.jpg', 'state.jpg', 'yos.jpg', 'waipio.jpg', 'owens.jpg',
            'sonoma.jpg'
        ]
        model = tf.keras.applications.inception_v3.InceptionV3(
            include_top=True,
            weights='imagenet',
            input_tensor=None,
            input_shape=None,
            pooling=None,
            classes=1000)
        # model = tf.keras.models.load_model('./inception.h5')
        for xi in arr:
            path = os.path.abspath('files/' + xi)
            img = image.load_img(path, target_size=(299, 299))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)

            preds = model.predict(x)
            # print(preds)
            print('Predicted:', decode_predictions(preds, top=5)[0])

        return {'images': 'data'}
Example #15
    def classify(self, img_path, top=5):
        """Classify image and return top matches.
	    :param img_path: input file path of image.
		:param top: number of top results to return.
		:returns: predictions about detected classes in image.
		:rtype: list[list[tuple(str: class_id, str: class_name, float: score)]]
	    """
        # Open image
        img = Image.open(img_path)

        # Image resizing and preparation for keras.
        if img.size != self.target_size:
            img = img.resize(self.target_size)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = inception_v3.preprocess_input(x)

        # Predictions
        preds = []
        with self.graph.as_default():
            preds = self.inception_model.predict(x)

        # Decode predictions
        return inception_v3.decode_predictions(preds, top=top)
Example #16
def interception(image: InceptionImageModel):

    iv3 = InceptionV3()
    URL = 'http://localhost:8000/images/' + str(image.image)
    IMAGE_PATH = './media/temp/temp.png'


    with urllib.request.urlopen(URL) as url:
        with open(IMAGE_PATH, 'wb') as f:
            f.write(url.read())

    img = Image.open(IMAGE_PATH)
    img = img.resize([299, 299])
    x = keras_image.img_to_array(img)

    # rescale pixel values to [-1, 1]
    x /= 255
    x -= 0.5
    x *= 2

    # reshape to add the batch dimension
    x = x.reshape(1, x.shape[0], x.shape[1], x.shape[2])

    y = iv3.predict(x)

    predict = decode_predictions(y)[0][0]
    name = predict[1]
    percentage = predict[2]

    return name, percentage
Example #17
def predict(model, img):
    img = img.resize((299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    return decode_predictions(preds, top=3)[0]
Example #18
def pretrained():

	keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000)

	model = ResNet50(weights='imagenet')
	'''
	img_path = 'chair1.jpg'
	img = image.load_img(img_path, target_size=(224, 224))
	x = image.img_to_array(img)
	x = np.expand_dims(x, axis=0)
	x = preprocess_input(x)
	'''

	images = glob.glob('p2a-c/*.jpg')
	i = 0
	for fname in images:

		img = image.load_img(fname, target_size=(224, 224))
		x = image.img_to_array(img)
		x = np.expand_dims(x, axis=0)
		x = preprocess_input(x)

		print "-------------------"
		print "FILE NAME: " + str(i)
		print fname
		print "-------------------"
		print ""
		print ""
		i = i + 1
		preds = model.predict(x)
		print('Predicted:', decode_predictions(preds, top=3)[0])

	print ""
	print ""
	print ""
	print "See Confusion matrix in attached .xslx sheet"

	print ""
	print ""
	print ""
	print "Accuracy is 0.7586"

	print ""
	print ""
	print ""
	print "Recall is 0.7545"


	print ""
	print ""
	print ""
	print "Precision is 0.9152"


	print ""
	print ""
	print ""
	print "The f-score is 0.857"
Example #19
def on_message(client, userdata, msg):
    # mqtt_message = json.dumps(
    # {
    # "hash": hash,
    # "image_location": dst,
    # "color": color,
    # "robot": "resource:com.diy3.Robot#1152",
    # "asset": "resource:com.diy3.CapturedImage#"+str(ciid)})
    matches = msg.payload.decode("utf-8")
    matches = ast.literal_eval(matches)
    print(matches['color'])
    image_location = matches['image_location']
    asset = matches['asset']
    print(matches['image_location'])
    print(matches['robot'])
    print(matches['hash'])
    # think I am going to need to call classify_image.py

    img = image.load_img(image_location, target_size=(299, 299))
    input_image = image.img_to_array(img)

    # Scale the image so all pixel intensities are between [-1, 1] as the model expects
    input_image /= 255.
    input_image -= 0.5
    input_image *= 2.

    # Add a 4th dimension for batch size (as Keras expects)
    input_image = np.expand_dims(input_image, axis=0)

    # Run the image through the neural network
    predictions = model.predict(input_image)

    # Convert the predictions into text and print them
    predicted_classes = inception_v3.decode_predictions(predictions, top=1)
    imagenet_id, name, confidence = predicted_classes[0][0]
    print("This is a {} with {:.4}% confidence!".format(
        name, confidence * 100))

    nameLikelihood = confidence * 100
    nameLikelihood = str(round(nameLikelihood, 2)) + "%"
    objectName = name  # keep as a plain str so it serializes in the JSON payload below

    # send to composer-rest-server api

    time.sleep(5)
    url = "http://<URL>:3000/api/com.diy3.ClassifyTransaction"
    payload = "{\n \"$class\": \"com.diy3.ClassifyTransaction\",\n \"asset\": " '"' + asset + '"' ",\n \"ai\": \"resource:com.diy3.AI#9230\",\n \"nameLikelihood\": " '"' + nameLikelihood + '"' ",\n \"objectName\":" '"' + objectName + '"' " \n }"
    #  payload = "{\n \"$class\": \"com.diy3.ClassifyTransaction\",\n \"asset\":\""'"'+asset+'"'",\n \"ai\": \"resource:com.diy3.AI#9230\",\n \"nameLikelihood\": "'"'+nameLikelihood+'"'",\n \"objectName\": \""'"'+str(objectName)+'"'" \n}"

    headers = {
        'Content-Type': "application/json",
        'Cache-Control': "no-cache",
    }
    response = requests.request("POST", url, data=payload, headers=headers)
    print(response.text)
    # completed api call
    print('send transaction to composer-rest-server')
    print('ready for the next image')
Example #20
    def predict(input_path):
        model = inception_v3.InceptionV3()

        formatted_image = image_formatter(input_path)
        predictions = model.predict(formatted_image)

        predicted_classes = inception_v3.decode_predictions(predictions)
        imagenet_id, name, confidence = predicted_classes[0][0]
        return name, confidence
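image_formatter is not shown in this snippet; based on the preprocessing used in the other InceptionV3 examples above, a plausible (hypothetical) implementation would be:

import numpy as np
from keras.preprocessing import image

def image_formatter(input_path):
    # hypothetical helper: load, scale pixels to [-1, 1], and add the batch dimension
    img = image.load_img(input_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x /= 255.
    x -= 0.5
    x *= 2.
    return np.expand_dims(x, axis=0)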
Example #21
    def predict(self, path, top=5):
        img = image.load_img(path, target_size=(self.size, self.size))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        pred = self._model.predict(x)
        pred = decode_predictions(pred, top=top)[0]
        return pred
Example #22
 def predict(self, img_path):
     img = image.load_img(img_path, target_size=(299, 299))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     preds = self.model.predict(x)
     decoded = decode_predictions(preds)[0][0]
     res = {'prediction': decoded[1], 'confidence': decoded[2]}
     return res
Example #23
def predict(image):
    model = InceptionV3()

    pred = model.predict(image)
    decoded_predictions = decode_predictions(pred, top=10)
    response = 'InceptionV3 predictions:   ' + str(decoded_predictions[0][0:5])
    print(response)
    np.argmax(pred[0])
    return response
Example #24
def describe(model, input_image, verbose=False):
    input_image = np.expand_dims(input_image, 0)
    predictions = model.predict(input_image)

    if verbose:
        predicted_classes = decode_predictions(predictions, top=1)
        imagenet_id, name, confidence = predicted_classes[0][0]
        print("This is a {} with {:.4}% confidence!".format(name, confidence * 100))
    return np.argmax(predictions)
Example #25
def classify_inception(image_path):
    try:
        img = image.load_img(image_path, target_size=(224, 224))
    except (OSError, IOError):
        return [0, 0, 0.5]
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = inception_v3.preprocess_input(x)
    preds = inception_model.predict(x)
    return inception_v3.decode_predictions(preds, top=1)[0][0]
Example #26
def classify(img):
    h, w, _ = img.shape
    dw = (w - 299) // 2
    dh = (h - 299) // 2
    win = img[dh:299 + dh, dw:299 + dw]
    arr = preprocess_input(np.expand_dims(win.astype(np.float32), axis=0))
    preds = model.predict(arr)
    _, lab, p = decode_predictions(preds, top=3)[0][0]
    if p < 0.5: lab = ''
    return win, lab
Example #27
 def transformOutputToComparables(self, collected, output_col, get_uri):
     values = {}
     topK = {}
     for row in collected:
         uri = get_uri(row)
         predictions = row[output_col]
         self.assertEqual(len(predictions), ImageNetConstants.NUM_CLASSES)
         values[uri] = np.expand_dims(predictions, axis=0)
         topK[uri] = decode_predictions(values[uri], top=5)[0]
     return values, topK
Example #28
 def transformOutputToComparables(self, collected, output_col, get_uri):
     values = {}
     topK = {}
     for row in collected:
         uri = get_uri(row)
         predictions = row[output_col]
         self.assertEqual(len(predictions), ImageNetConstants.NUM_CLASSES)
         values[uri] = np.expand_dims(predictions, axis=0)
         topK[uri] = decode_predictions(values[uri], top=5)[0]
     return values, topK
Example #29
def translateObject():
    print(request.headers)
    print(request.files['image1'])
    image = request.files['image1']
    image.save("object.png")
    language = request.form['language']
    print(language)
    basewidth = 300
    try:
        image = Image.open("object.png")
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        exif = dict(image._getexif().items())

        if exif[orientation] == 3:
            image = image.rotate(180, expand=True)
        elif exif[orientation] == 6:
            image = image.rotate(270, expand=True)
        elif exif[orientation] == 8:
            image = image.rotate(90, expand=True)
        wpercent = (basewidth / float(image.size[0]))
        hsize = int((float(image.size[1]) * float(wpercent)))
        image = image.resize((basewidth, hsize), Image.ANTIALIAS)
        image.save("object.png")
        image.close()

    except (AttributeError, KeyError, IndexError):
        # cases: image doesn't have EXIF data (no _getexif)
        pass

    target_size = (299, 299)
    image = processImage("object.png", target_size)
    with graph.as_default():
        results = model.predict(image)
        labels = decode_predictions(results)
        print(labels)
        translations = get_translation(labels[0], language)
        print(translations)
        print(labels)

    response = []
    for i in range(len(labels[0])):
        translation = translations[i]
        english = labels[0][i][1]
        english = english.split("_")
        english = " ".join(english)
        print(english)
        confidence = round(labels[0][i][2] * 100, 2)
        response.append((translation, english, confidence))

    result = {'predictions': response}
    print(result)
    return jsonify(result)
Example #30
def _infer_classes(files_classes_proba: dict):
    files_classes = {}
    for fname, classes_proba in files_classes_proba.items():
        proba_arr = np.expand_dims(np.array(classes_proba), 0)
        class_pred = inception_v3.decode_predictions(
            proba_arr, top=1)[0][0]  # 1 for batch, 1 for top-n
        class_name = class_pred[1]  # we drop id and probability
        class_name = _shorten_class_name(class_name)
        files_classes[fname] = class_name

    return files_classes
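_shorten_class_name is not defined in this snippet; a hypothetical stand-in that tidies the ImageNet label (the underscore replacement and length cap are assumptions):

def _shorten_class_name(class_name, max_len=20):
    # hypothetical helper: make the ImageNet label more readable and cap its length
    return class_name.replace('_', ' ')[:max_len]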
Example #31
def classify_inception(image_path):
    """Classify image and return top match."""
    img = Image.open(image_path)
    target_size = (224, 224)
    if img.size != target_size:
        img = img.resize(target_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = inception_v3.preprocess_input(x)
    preds = inception_model.predict(x)
    return inception_v3.decode_predictions(preds, top=1)[0][0]
Example #32
def predict(image_file):
    img = image.load_img(image_file, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    global graph
    with graph.as_default():
        preds = model.predict(x)

    top3 = decode_predictions(preds, top=3)[0]

    predictions = [{'label': label, 'description': description, 'probability': probability * 100.0}
                   for label, description, probability in top3]
    return predictions
Example #33
 def _executeTensorflow(self, graph, input_tensor_name, output_tensor_name,
                        df, id_col="filePath", input_col="image"):
     with tf.Session(graph=graph) as sess:
         output_tensor = graph.get_tensor_by_name(output_tensor_name)
         image_collected = df.collect()
         values = {}
         topK = {}
         for img_row in image_collected:
             image = np.expand_dims(imageStructToArray(img_row[input_col]), axis=0)
             uri = img_row[id_col]
             output = sess.run([output_tensor],
                               feed_dict={
                                   graph.get_tensor_by_name(input_tensor_name): image
                               })
             values[uri] = np.array(output[0])
             topK[uri] = decode_predictions(values[uri], top=5)[0]
     return values, topK
Example #34
def executeKerasInceptionV3(image_df, uri_col="filePath"):
    """
    Apply Keras InceptionV3 Model on input DataFrame.
    :param image_df: Dataset. contains a column (uri_col) for where the image file lives.
    :param uri_col: str. name of the column indicating where each row's image file lives.
    :return: ({str => np.array[float]}, {str => (str, str, float)}).
      image file uri to prediction probability array,
      image file uri to top K predictions (class id, class description, probability).
    """
    K.set_learning_phase(0)
    model = InceptionV3(weights="imagenet")

    values = {}
    topK = {}
    for row in image_df.select(uri_col).collect():
        raw_uri = row[uri_col]
        image = loadAndPreprocessKerasInceptionV3(raw_uri)
        values[raw_uri] = model.predict(image)
        topK[raw_uri] = decode_predictions(values[raw_uri], top=5)[0]
    return values, topK
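loadAndPreprocessKerasInceptionV3 is an external helper not shown here; a minimal sketch of what it presumably does, mirroring the InceptionV3 preprocessing used throughout these examples (the exact implementation is an assumption):

import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input

def loadAndPreprocessKerasInceptionV3(uri):
    # hypothetical helper: load the image at `uri`, resize for InceptionV3,
    # scale to [-1, 1], and add a batch dimension
    img = image.load_img(uri, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    return preprocess_input(x)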